repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaHLSLDiagnoseTU.cpp

///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// SemaHLSLDiagnoseTU.cpp                                                    //
//                                                                           //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source     //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// This file implements the Translation Unit Diagnose for HLSL.              //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#include "dxc/DXIL/DxilShaderModel.h"
#include "dxc/HlslIntrinsicOp.h"
#include "dxc/Support/Global.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Sema/SemaHLSL.h"
#include "llvm/Support/Debug.h"
#include <optional>

using namespace clang;
using namespace llvm;
using namespace hlsl;

//
// This is similar to clang/Analysis/CallGraph, but the following differences
// motivate this:
//
// - track traversed vs. observed nodes explicitly
// - fully visit all reachable functions
// - merge graph visiting with checking for recursion
// - track global variables and types used (NYI)
//
namespace {
struct CallNode {
  FunctionDecl *CallerFn;
  ::llvm::SmallPtrSet<FunctionDecl *, 4> CalleeFns;
};
typedef ::llvm::DenseMap<FunctionDecl *, CallNode> CallNodes;
typedef ::llvm::SmallPtrSet<Decl *, 8> FnCallStack;
typedef ::llvm::SmallPtrSet<FunctionDecl *, 128> FunctionSet;
typedef ::llvm::SmallVector<FunctionDecl *, 32> PendingFunctions;
typedef ::llvm::DenseMap<FunctionDecl *, FunctionDecl *> FunctionMap;

// Returns the definition of a function.
// This serves two purposes - ignore built-in functions, and pick
// a single Decl * to be used in maps and sets.
FunctionDecl *getFunctionWithBody(FunctionDecl *F) {
  if (!F)
    return nullptr;
  if (F->doesThisDeclarationHaveABody())
    return F;
  F = F->getFirstDecl();
  for (auto &&Candidate : F->redecls()) {
    if (Candidate->doesThisDeclarationHaveABody()) {
      return Candidate;
    }
  }
  return nullptr;
}

// AST visitor that maintains visited and pending collections, as well
// as recording nodes of caller/callees.
class FnReferenceVisitor : public RecursiveASTVisitor<FnReferenceVisitor> {
private:
  CallNodes &m_callNodes;
  FunctionSet &m_visitedFunctions;
  PendingFunctions &m_pendingFunctions;
  FunctionDecl *m_source;
  CallNodes::iterator m_sourceIt;

public:
  FnReferenceVisitor(FunctionSet &visitedFunctions,
                     PendingFunctions &pendingFunctions, CallNodes &callNodes)
      : m_callNodes(callNodes), m_visitedFunctions(visitedFunctions),
        m_pendingFunctions(pendingFunctions) {}

  void setSourceFn(FunctionDecl *F) {
    F = getFunctionWithBody(F);
    m_source = F;
    m_sourceIt = m_callNodes.find(F);
  }

  bool VisitDeclRefExpr(DeclRefExpr *ref) {
    ValueDecl *valueDecl = ref->getDecl();
    RecordFunctionDecl(dyn_cast_or_null<FunctionDecl>(valueDecl));
    return true;
  }

  bool VisitCXXMemberCallExpr(CXXMemberCallExpr *callExpr) {
    RecordFunctionDecl(callExpr->getMethodDecl());
    return true;
  }

  void RecordFunctionDecl(FunctionDecl *funcDecl) {
    funcDecl = getFunctionWithBody(funcDecl);
    if (funcDecl) {
      if (m_sourceIt == m_callNodes.end()) {
        auto result = m_callNodes.insert(
            std::make_pair(m_source, CallNode{m_source, {}}));
        DXASSERT(result.second == true,
                 "else setSourceFn didn't assign m_sourceIt");
        m_sourceIt = result.first;
      }
      m_sourceIt->second.CalleeFns.insert(funcDecl);
      if (!m_visitedFunctions.count(funcDecl)) {
        m_pendingFunctions.push_back(funcDecl);
      }
    }
  }
};

// A call graph that can check for reachability and recursion efficiently.
class CallGraphWithRecurseGuard {
private:
  CallNodes m_callNodes;
  FunctionSet m_visitedFunctions;
  FunctionMap m_functionsCheckedForRecursion;

  FunctionDecl *CheckRecursion(FnCallStack &CallStack, FunctionDecl *D) {
    auto it = m_functionsCheckedForRecursion.find(D);
    if (it != m_functionsCheckedForRecursion.end())
      return it->second;
    if (CallStack.insert(D).second == false)
      return D;
    auto node = m_callNodes.find(D);
    if (node != m_callNodes.end()) {
      for (FunctionDecl *Callee : node->second.CalleeFns) {
        FunctionDecl *pResult = CheckRecursion(CallStack, Callee);
        if (pResult) {
          m_functionsCheckedForRecursion[D] = pResult;
          return pResult;
        }
      }
    }
    CallStack.erase(D);
    m_functionsCheckedForRecursion[D] = nullptr;
    return nullptr;
  }

public:
  void BuildForEntry(FunctionDecl *EntryFnDecl) {
    DXASSERT_NOMSG(EntryFnDecl);
    EntryFnDecl = getFunctionWithBody(EntryFnDecl);
    PendingFunctions pendingFunctions;
    FnReferenceVisitor visitor(m_visitedFunctions, pendingFunctions,
                               m_callNodes);
    pendingFunctions.push_back(EntryFnDecl);
    while (!pendingFunctions.empty()) {
      FunctionDecl *pendingDecl = pendingFunctions.pop_back_val();
      if (m_visitedFunctions.insert(pendingDecl).second == true) {
        visitor.setSourceFn(pendingDecl);
        visitor.TraverseDecl(pendingDecl);
      }
    }
  }

  // return true if FD2 is reachable from FD1
  bool CheckReachability(FunctionDecl *FD1, FunctionDecl *FD2) {
    if (FD1 == FD2)
      return true;
    auto node = m_callNodes.find(FD1);
    if (node != m_callNodes.end()) {
      for (FunctionDecl *Callee : node->second.CalleeFns) {
        if (CheckReachability(Callee, FD2))
          return true;
      }
    }
    return false;
  }

  FunctionDecl *CheckRecursion(FunctionDecl *EntryFnDecl) {
    FnCallStack CallStack;
    EntryFnDecl = getFunctionWithBody(EntryFnDecl);
    return CheckRecursion(CallStack, EntryFnDecl);
  }

  const CallNodes &GetCallGraph() { return m_callNodes; }

  const FunctionSet GetVisitedFunctions() { return m_visitedFunctions; }

  void dump() const {
    llvm::dbgs() << "Call Nodes:\n";
    for (auto &node : m_callNodes) {
      llvm::dbgs() << node.first->getName().str().c_str() << " ["
                   << (void *)node.first << "]:\n";
      for (auto callee : node.second.CalleeFns) {
        llvm::dbgs() << " " << callee->getName().str().c_str() << " ["
                     << (void *)callee << "]\n";
      }
    }
  }
};

struct NameLookup {
  FunctionDecl *Found;
  FunctionDecl *Other;
};

NameLookup GetSingleFunctionDeclByName(clang::Sema *self, StringRef Name,
                                       bool checkPatch) {
  auto DN = DeclarationName(&self->getASTContext().Idents.get(Name));
  FunctionDecl *pFoundDecl = nullptr;
  for (auto idIter = self->IdResolver.begin(DN), idEnd = self->IdResolver.end();
       idIter != idEnd; ++idIter) {
    FunctionDecl *pFnDecl = dyn_cast<FunctionDecl>(*idIter);
    if (!pFnDecl)
      continue;
    if (checkPatch &&
        !self->getASTContext().IsPatchConstantFunctionDecl(pFnDecl))
      continue;
    if (pFoundDecl) {
      return NameLookup{pFoundDecl, pFnDecl};
    }
    pFoundDecl = pFnDecl;
  }
  return NameLookup{pFoundDecl, nullptr};
}

bool IsTargetProfileLib6x(Sema &S) {
  // Remaining functions are exported only if target is 'lib_6_x'.
  const hlsl::ShaderModel *SM =
      hlsl::ShaderModel::GetByName(S.getLangOpts().HLSLProfile.c_str());
  bool isLib6x =
      SM->IsLib() && SM->GetMinor() == hlsl::ShaderModel::kOfflineMinor;
  return isLib6x;
}

bool IsExported(Sema *self, clang::FunctionDecl *FD,
                bool isDefaultLinkageExternal) {
  // Entry points are exported.
  if (FD->hasAttr<HLSLShaderAttr>())
    return true;

  // Internal linkage functions include functions marked 'static'.
  if (FD->getLinkageAndVisibility().getLinkage() == InternalLinkage)
    return false;

  // Explicit 'export' functions are exported.
  if (FD->hasAttr<HLSLExportAttr>())
    return true;

  return isDefaultLinkageExternal;
}

bool getDefaultLinkageExternal(clang::Sema *self) {
  const LangOptions &opts = self->getLangOpts();
  bool isDefaultLinkageExternal =
      opts.DefaultLinkage == DXIL::DefaultLinkage::External;
  if (opts.DefaultLinkage == DXIL::DefaultLinkage::Default &&
      !opts.ExportShadersOnly && IsTargetProfileLib6x(*self))
    isDefaultLinkageExternal = true;
  return isDefaultLinkageExternal;
}

std::vector<FunctionDecl *> GetAllExportedFDecls(clang::Sema *self) {
  // Add to the end, process from the beginning, to ensure AllExportedFDecls
  // will contain functions in decl order.
  std::vector<FunctionDecl *> AllExportedFDecls;

  std::deque<DeclContext *> Worklist;
  Worklist.push_back(self->getASTContext().getTranslationUnitDecl());
  while (Worklist.size()) {
    DeclContext *DC = Worklist.front();
    Worklist.pop_front();
    if (auto *FD = dyn_cast<FunctionDecl>(DC)) {
      AllExportedFDecls.push_back(FD);
    } else {
      for (auto *D : DC->decls()) {
        if (auto *FD = dyn_cast<FunctionDecl>(D)) {
          if (FD->hasBody() &&
              IsExported(self, FD, getDefaultLinkageExternal(self)))
            Worklist.push_back(FD);
        } else if (auto *DC2 = dyn_cast<DeclContext>(D)) {
          Worklist.push_back(DC2);
        }
      }
    }
  }
  return AllExportedFDecls;
}

// in the non-library case, this function will be run only once,
// but in the library case, this function will be run for each
// viable top-level function declaration by
// ValidateNoRecursionInTranslationUnit.
// (viable as in, is exported)
clang::FunctionDecl *ValidateNoRecursion(CallGraphWithRecurseGuard &callGraph,
                                         clang::FunctionDecl *FD) {
  // Validate that there is no recursion reachable by this function declaration
  // NOTE: the information gathered here could be used to bypass code generation
  // on functions that are unreachable (as an early form of dead code
  // elimination).
  if (FD) {
    callGraph.BuildForEntry(FD);
    return callGraph.CheckRecursion(FD);
  }
  return nullptr;
}

class HLSLCallDiagnoseVisitor
    : public RecursiveASTVisitor<HLSLCallDiagnoseVisitor> {
public:
  explicit HLSLCallDiagnoseVisitor(
      Sema *S, const hlsl::ShaderModel *SM, DXIL::ShaderKind EntrySK,
      DXIL::NodeLaunchType NodeLaunchTy, const FunctionDecl *EntryDecl,
      llvm::SmallPtrSetImpl<CallExpr *> &DiagnosedCalls)
      : sema(S), SM(SM), EntrySK(EntrySK), NodeLaunchTy(NodeLaunchTy),
        EntryDecl(EntryDecl), DiagnosedCalls(DiagnosedCalls) {}

  bool VisitCallExpr(CallExpr *CE) {
    // Set flag if already diagnosed from another entry, allowing some
    // diagnostics to be skipped when they are not dependent on entry
    // properties.
    bool locallyVisited = DiagnosedCalls.count(CE) != 0;
    if (!locallyVisited)
      DiagnosedCalls.insert(CE);
    sema->DiagnoseReachableHLSLCall(CE, SM, EntrySK, NodeLaunchTy, EntryDecl,
                                    locallyVisited);
    return true;
  }

  clang::Sema *getSema() { return sema; }

private:
  clang::Sema *sema;
  const hlsl::ShaderModel *SM;
  DXIL::ShaderKind EntrySK;
  DXIL::NodeLaunchType NodeLaunchTy;
  const FunctionDecl *EntryDecl;
  llvm::SmallPtrSetImpl<CallExpr *> &DiagnosedCalls;
};

std::optional<uint32_t>
getFunctionInputPatchCount(const FunctionDecl *function) {
  for (const auto *param : function->params()) {
    if (!hlsl::IsHLSLInputPatchType(param->getType()))
      continue;
    return hlsl::GetHLSLInputPatchCount(param->getType());
  }
  return std::nullopt;
}

std::optional<uint32_t>
getFunctionOutputPatchCount(const FunctionDecl *function) {
  for (const auto *param : function->params()) {
    if (!hlsl::IsHLSLOutputPatchType(param->getType()))
      continue;
    return hlsl::GetHLSLOutputPatchCount(param->getType());
  }
  return std::nullopt;
}

std::optional<uint32_t>
getFunctionOutputControlPointsCount(const FunctionDecl *function) {
  if (const auto *Attr = function->getAttr<HLSLOutputControlPointsAttr>()) {
    return Attr->getCount();
  }
  return std::nullopt;
}
} // namespace

void hlsl::DiagnoseTranslationUnit(clang::Sema *self) {
  DXASSERT_NOMSG(self != nullptr);

  // Don't bother with global validation if compilation has already failed.
  if (self->getDiagnostics().hasErrorOccurred()) {
    return;
  }

  // Check RT shaders, if available, for their payload use and match payload
  // access against available payload modifiers.
  // We have to do it late because we could have payload access in a called
  // function and have to check the callgraph if the root shader has the right
  // access rights to the payload structure.
  if (self->getLangOpts().IsHLSLLibrary) {
    if (self->getLangOpts().EnablePayloadAccessQualifiers) {
      ASTContext &ctx = self->getASTContext();
      TranslationUnitDecl *TU = ctx.getTranslationUnitDecl();
      DiagnoseRaytracingPayloadAccess(*self, TU);
    }
  }

  // TODO: make these error 'real' errors rather than on-the-fly things
  // Validate that the entry point is available.
  DiagnosticsEngine &Diags = self->getDiagnostics();
  FunctionDecl *pEntryPointDecl = nullptr;
  std::vector<FunctionDecl *> FDeclsToCheck;
  if (self->getLangOpts().IsHLSLLibrary) {
    FDeclsToCheck = GetAllExportedFDecls(self);
  } else {
    const std::string &EntryPointName = self->getLangOpts().HLSLEntryFunction;
    if (!EntryPointName.empty()) {
      NameLookup NL = GetSingleFunctionDeclByName(self, EntryPointName,
                                                  /*checkPatch*/ false);
      if (NL.Found && NL.Other) {
        // NOTE: currently we cannot hit this codepath when CodeGen is enabled,
        // because CodeGenModule::getMangledName will mangle the entry point
        // name into the bare string, and so ambiguous points will produce an
        // error earlier on.
        unsigned id =
            Diags.getCustomDiagID(clang::DiagnosticsEngine::Level::Error,
                                  "ambiguous entry point function");
        Diags.Report(NL.Found->getSourceRange().getBegin(), id);
        Diags.Report(NL.Other->getLocation(), diag::note_previous_definition);
        return;
      }
      pEntryPointDecl = NL.Found;
      if (!pEntryPointDecl || !pEntryPointDecl->hasBody()) {
        unsigned id =
            Diags.getCustomDiagID(clang::DiagnosticsEngine::Level::Error,
                                  "missing entry point definition");
        Diags.Report(id);
        return;
      }
      FDeclsToCheck.push_back(NL.Found);
    }
  }

  const auto *shaderModel =
      hlsl::ShaderModel::GetByName(self->getLangOpts().HLSLProfile.c_str());

  std::set<FunctionDecl *> DiagnosedDecls;
  llvm::SmallPtrSet<CallExpr *, 16> DiagnosedCalls;
  // for each FDecl, check for recursion
  for (FunctionDecl *FDecl : FDeclsToCheck) {
    CallGraphWithRecurseGuard callGraph;
    FunctionDecl *result = ValidateNoRecursion(callGraph, FDecl);
    if (result) {
      // don't emit duplicate diagnostics for the same recursive function
      // if A and B call recursive function C, only emit 1 diagnostic for C.
      if (DiagnosedDecls.find(result) == DiagnosedDecls.end()) {
        DiagnosedDecls.insert(result);
        self->Diag(result->getSourceRange().getBegin(),
                   diag::err_hlsl_no_recursion)
            << FDecl->getQualifiedNameAsString()
            << result->getQualifiedNameAsString();
        self->Diag(result->getSourceRange().getBegin(),
                   diag::note_hlsl_no_recursion);
      }
    }

    FunctionDecl *pPatchFnDecl = nullptr;
    if (const HLSLPatchConstantFuncAttr *attr =
            FDecl->getAttr<HLSLPatchConstantFuncAttr>()) {
      NameLookup NL = GetSingleFunctionDeclByName(self, attr->getFunctionName(),
                                                  /*checkPatch*/ true);
      if (!NL.Found || !NL.Found->hasBody()) {
        self->Diag(attr->getLocation(),
                   diag::err_hlsl_missing_patch_constant_function)
            << attr->getFunctionName();
      }
      pPatchFnDecl = NL.Found;
    }

    if (pPatchFnDecl) {
      FunctionDecl *patchResult = ValidateNoRecursion(callGraph, pPatchFnDecl);

      // In this case, recursion was detected in the patch-constant function
      if (patchResult) {
        if (DiagnosedDecls.find(patchResult) == DiagnosedDecls.end()) {
          DiagnosedDecls.insert(patchResult);
          self->Diag(patchResult->getSourceRange().getBegin(),
                     diag::err_hlsl_no_recursion)
              << pPatchFnDecl->getQualifiedNameAsString()
              << patchResult->getQualifiedNameAsString();
          self->Diag(patchResult->getSourceRange().getBegin(),
                     diag::note_hlsl_no_recursion);
        }
      }

      // The patch function decl and the entry function decl should be
      // disconnected with respect to the call graph.
      // Only check this if neither function decl is recursive
      if (!result && !patchResult) {
        CallGraphWithRecurseGuard CG;
        CG.BuildForEntry(pPatchFnDecl);
        if (CG.CheckReachability(pPatchFnDecl, FDecl)) {
          self->Diag(FDecl->getSourceRange().getBegin(),
                     diag::err_hlsl_patch_reachability_not_allowed)
              << 1 << FDecl->getName() << 0 << pPatchFnDecl->getName();
        }
        CG.BuildForEntry(FDecl);
        if (CG.CheckReachability(FDecl, pPatchFnDecl)) {
          self->Diag(FDecl->getSourceRange().getBegin(),
                     diag::err_hlsl_patch_reachability_not_allowed)
              << 0 << pPatchFnDecl->getName() << 1 << FDecl->getName();
        }
      }

      // Input/Output control point validation.
      {
        auto hullPatchCount = getFunctionInputPatchCount(pPatchFnDecl);
        auto functionPatchCount = getFunctionInputPatchCount(FDecl);
        if (hullPatchCount.has_value() && functionPatchCount.has_value() &&
            hullPatchCount.value() != functionPatchCount.value()) {
          self->Diag(pPatchFnDecl->getSourceRange().getBegin(),
                     diag::err_hlsl_patch_size_mismatch)
              << "input" << functionPatchCount.value()
              << hullPatchCount.value();
        }
      }
      {
        auto hullPatchCount = getFunctionOutputPatchCount(pPatchFnDecl);
        auto functionPatchCount = getFunctionOutputControlPointsCount(FDecl);
        if (hullPatchCount.has_value() && functionPatchCount.has_value() &&
            hullPatchCount.value() != functionPatchCount.value()) {
          self->Diag(pPatchFnDecl->getSourceRange().getBegin(),
                     diag::err_hlsl_patch_size_mismatch)
              << "output" << functionPatchCount.value()
              << hullPatchCount.value();
        }
      }
    }

    DXIL::ShaderKind EntrySK = shaderModel->GetKind();
    DXIL::NodeLaunchType NodeLaunchTy = DXIL::NodeLaunchType::Invalid;
    if (EntrySK == DXIL::ShaderKind::Library) {
      // For library, check if the exported function is entry with shader
      // attribute.
      if (const auto *Attr = FDecl->getAttr<clang::HLSLShaderAttr>())
        EntrySK = ShaderModel::KindFromFullName(Attr->getStage());
      if (EntrySK == DXIL::ShaderKind::Node) {
        if (const auto *pAttr = FDecl->getAttr<HLSLNodeLaunchAttr>())
          NodeLaunchTy =
              ShaderModel::NodeLaunchTypeFromName(pAttr->getLaunchType());
        else
          NodeLaunchTy = DXIL::NodeLaunchType::Broadcasting;
      }
    }

    // Visit all visited functions in call graph to collect illegal intrinsic
    // calls.
    for (FunctionDecl *FD : callGraph.GetVisitedFunctions()) {
      HLSLCallDiagnoseVisitor Visitor(self, shaderModel, EntrySK, NodeLaunchTy,
                                      FDecl, DiagnosedCalls);
      Visitor.TraverseDecl(FD);
    }
  }
}
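The recursion check in CallGraphWithRecurseGuard above is a memoized depth-first search: the FnCallStack detects a back edge (a function already on the active call chain), and m_functionsCheckedForRecursion caches the verdict per function so each node is fully explored at most once. Below is a minimal standalone sketch of the same technique using plain STL containers; the Graph alias, findRecursion, and the sample graph are illustrative stand-ins, not part of the file above.

#include <cassert>
#include <map>
#include <set>
#include <string>
#include <vector>

// Illustrative stand-in for the call graph: function name -> callees.
using Graph = std::map<std::string, std::vector<std::string>>;

// Returns the first function found on a cycle reachable from 'fn', or an
// empty string if none. Mirrors CheckRecursion: 'stack' holds the active
// call chain so a repeat insertion signals a cycle, and 'memo' caches
// per-function results.
std::string findRecursion(const Graph &g, const std::string &fn,
                          std::set<std::string> &stack,
                          std::map<std::string, std::string> &memo) {
  auto cached = memo.find(fn);
  if (cached != memo.end())
    return cached->second;
  if (!stack.insert(fn).second)
    return fn; // 'fn' is already on the active call chain: recursion.
  auto node = g.find(fn);
  if (node != g.end()) {
    for (const std::string &callee : node->second) {
      std::string hit = findRecursion(g, callee, stack, memo);
      if (!hit.empty()) {
        memo[fn] = hit;
        return hit;
      }
    }
  }
  stack.erase(fn);
  memo[fn] = "";
  return "";
}

int main() {
  // main -> a -> b -> a is a cycle; 'a' is the function reported.
  Graph g{{"main", {"a"}}, {"a", {"b"}}, {"b", {"a"}}};
  std::set<std::string> stack;
  std::map<std::string, std::string> memo;
  assert(findRecursion(g, "main", stack, memo) == "a");
  return 0;
}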
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaDeclCXX.cpp

//===------ SemaDeclCXX.cpp - Semantic Analysis for C++ Declarations ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for C++ declarations. // //===----------------------------------------------------------------------===// #include "clang/Basic/OperatorKinds.h" #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/CharUnits.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/LiteralSupport.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/CXXFieldCollector.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/Template.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include <map> #include <set> #include "clang/Basic/Specifiers.h" // HLSL Change using namespace clang; //===----------------------------------------------------------------------===// // CheckDefaultArgumentVisitor //===----------------------------------------------------------------------===// namespace { /// CheckDefaultArgumentVisitor - C++ [dcl.fct.default] Traverses /// the default argument of a parameter to determine whether it /// contains any ill-formed subexpressions. For example, this will /// diagnose the use of local variables or parameters within the /// default argument expression. class CheckDefaultArgumentVisitor : public StmtVisitor<CheckDefaultArgumentVisitor, bool> { Expr *DefaultArg; Sema *S; public: CheckDefaultArgumentVisitor(Expr *defarg, Sema *s) : DefaultArg(defarg), S(s) {} bool VisitExpr(Expr *Node); bool VisitDeclRefExpr(DeclRefExpr *DRE); bool VisitCXXThisExpr(CXXThisExpr *ThisE); bool VisitLambdaExpr(LambdaExpr *Lambda); bool VisitPseudoObjectExpr(PseudoObjectExpr *POE); }; /// VisitExpr - Visit all of the children of this expression. bool CheckDefaultArgumentVisitor::VisitExpr(Expr *Node) { bool IsInvalid = false; for (Stmt *SubStmt : Node->children()) IsInvalid |= Visit(SubStmt); return IsInvalid; } /// VisitDeclRefExpr - Visit a reference to a declaration, to /// determine whether this declaration can be used in the default /// argument expression. bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(DeclRefExpr *DRE) { NamedDecl *Decl = DRE->getDecl(); if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(Decl)) { // C++ [dcl.fct.default]p9 // Default arguments are evaluated each time the function is // called. The order of evaluation of function arguments is // unspecified. Consequently, parameters of a function shall not // be used in default argument expressions, even if they are not // evaluated. Parameters of a function declared before a default // argument expression are in scope and can hide namespace and // class member names. 
return S->Diag(DRE->getLocStart(), diag::err_param_default_argument_references_param) << Param->getDeclName() << DefaultArg->getSourceRange(); } else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) { // C++ [dcl.fct.default]p7 // Local variables shall not be used in default argument // expressions. if (VDecl->isLocalVarDecl()) return S->Diag(DRE->getLocStart(), diag::err_param_default_argument_references_local) << VDecl->getDeclName() << DefaultArg->getSourceRange(); } return false; } /// VisitCXXThisExpr - Visit a C++ "this" expression. bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(CXXThisExpr *ThisE) { // C++ [dcl.fct.default]p8: // The keyword this shall not be used in a default argument of a // member function. return S->Diag(ThisE->getLocStart(), diag::err_param_default_argument_references_this) << ThisE->getSourceRange(); } bool CheckDefaultArgumentVisitor::VisitPseudoObjectExpr(PseudoObjectExpr *POE) { bool Invalid = false; for (PseudoObjectExpr::semantics_iterator i = POE->semantics_begin(), e = POE->semantics_end(); i != e; ++i) { Expr *E = *i; // Look through bindings. if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) { E = OVE->getSourceExpr(); assert(E && "pseudo-object binding without source expression?"); } Invalid |= Visit(E); } return Invalid; } bool CheckDefaultArgumentVisitor::VisitLambdaExpr(LambdaExpr *Lambda) { // C++11 [expr.lambda.prim]p13: // A lambda-expression appearing in a default argument shall not // implicitly or explicitly capture any entity. if (Lambda->capture_begin() == Lambda->capture_end()) return false; return S->Diag(Lambda->getLocStart(), diag::err_lambda_capture_default_arg); } } void Sema::ImplicitExceptionSpecification::CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method) { // If we have an MSAny spec already, don't bother. if (!Method || ComputedEST == EST_MSAny) return; const FunctionProtoType *Proto = Method->getType()->getAs<FunctionProtoType>(); Proto = Self->ResolveExceptionSpec(CallLoc, Proto); if (!Proto) return; ExceptionSpecificationType EST = Proto->getExceptionSpecType(); // If this function can throw any exceptions, make a note of that. if (EST == EST_MSAny || EST == EST_None) { ClearExceptions(); ComputedEST = EST; return; } // FIXME: If the call to this decl is using any of its default arguments, we // need to search them for potentially-throwing calls. // If this function has a basic noexcept, it doesn't affect the outcome. if (EST == EST_BasicNoexcept) return; // If we have a throw-all spec at this point, ignore the function. if (ComputedEST == EST_None) return; // If we're still at noexcept(true) and there's a nothrow() callee, // change to that specification. if (EST == EST_DynamicNone) { if (ComputedEST == EST_BasicNoexcept) ComputedEST = EST_DynamicNone; return; } // Check out noexcept specs. if (EST == EST_ComputedNoexcept) { FunctionProtoType::NoexceptResult NR = Proto->getNoexceptSpec(Self->Context); assert(NR != FunctionProtoType::NR_NoNoexcept && "Must have noexcept result for EST_ComputedNoexcept."); assert(NR != FunctionProtoType::NR_Dependent && "Should not generate implicit declarations for dependent cases, " "and don't know how to handle them anyway."); // noexcept(false) -> no spec on the new function if (NR == FunctionProtoType::NR_Throw) { ClearExceptions(); ComputedEST = EST_None; } // noexcept(true) won't change anything either. 
return; } assert(EST == EST_Dynamic && "EST case not considered earlier."); assert(ComputedEST != EST_None && "Shouldn't collect exceptions when throw-all is guaranteed."); ComputedEST = EST_Dynamic; // Record the exceptions in this function's exception specification. for (const auto &E : Proto->exceptions()) if (ExceptionsSeen.insert(Self->Context.getCanonicalType(E)).second) Exceptions.push_back(E); } void Sema::ImplicitExceptionSpecification::CalledExpr(Expr *E) { if (!E || ComputedEST == EST_MSAny) return; // FIXME: // // C++0x [except.spec]p14: // [An] implicit exception-specification specifies the type-id T if and // only if T is allowed by the exception-specification of a function directly // invoked by f's implicit definition; f shall allow all exceptions if any // function it directly invokes allows all exceptions, and f shall allow no // exceptions if every function it directly invokes allows no exceptions. // // Note in particular that if an implicit exception-specification is generated // for a function containing a throw-expression, that specification can still // be noexcept(true). // // Note also that 'directly invoked' is not defined in the standard, and there // is no indication that we should only consider potentially-evaluated calls. // // Ultimately we should implement the intent of the standard: the exception // specification should be the set of exceptions which can be thrown by the // implicit definition. For now, we assume that any non-nothrow expression can // throw any exception. if (Self->canThrow(E)) ComputedEST = EST_None; } bool Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg, SourceLocation EqualLoc) { if (RequireCompleteType(Param->getLocation(), Param->getType(), diag::err_typecheck_decl_incomplete_type)) { Param->setInvalidDecl(); return true; } // C++ [dcl.fct.default]p5 // A default argument expression is implicitly converted (clause // 4) to the parameter type. The default argument expression has // the same semantic constraints as the initializer expression in // a declaration of a variable of the parameter type, using the // copy-initialization semantics (8.5). InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, Param); InitializationKind Kind = InitializationKind::CreateCopy(Param->getLocation(), EqualLoc); InitializationSequence InitSeq(*this, Entity, Kind, Arg); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Arg); if (Result.isInvalid()) return true; Arg = Result.getAs<Expr>(); CheckCompletedExpr(Arg, EqualLoc); Arg = MaybeCreateExprWithCleanups(Arg); // Okay: add the default argument to the parameter Param->setDefaultArg(Arg); // We have already instantiated this parameter; provide each of the // instantiations with the uninstantiated default argument. UnparsedDefaultArgInstantiationsMap::iterator InstPos = UnparsedDefaultArgInstantiations.find(Param); if (InstPos != UnparsedDefaultArgInstantiations.end()) { for (unsigned I = 0, N = InstPos->second.size(); I != N; ++I) InstPos->second[I]->setUninstantiatedDefaultArg(Arg); // We're done tracking this parameter's instantiations. UnparsedDefaultArgInstantiations.erase(InstPos); } return false; } /// ActOnParamDefaultArgument - Check whether the default argument /// provided for a function parameter is well-formed. If so, attach it /// to the parameter declaration. 
void Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *DefaultArg) { if (!param || !DefaultArg) return; ParmVarDecl *Param = cast<ParmVarDecl>(param); UnparsedDefaultArgLocs.erase(Param); // Default arguments are only permitted in C++ if (!getLangOpts().CPlusPlus) { Diag(EqualLoc, diag::err_param_default_argument) << DefaultArg->getSourceRange(); Param->setInvalidDecl(); return; } // Check for unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) { Param->setInvalidDecl(); return; } // C++11 [dcl.fct.default]p3 // A default argument expression [...] shall not be specified for a // parameter pack. if (Param->isParameterPack()) { Diag(EqualLoc, diag::err_param_default_argument_on_parameter_pack) << DefaultArg->getSourceRange(); return; } // Check that the default argument is well-formed CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg, this); if (DefaultArgChecker.Visit(DefaultArg)) { Param->setInvalidDecl(); return; } SetParamDefaultArgument(Param, DefaultArg, EqualLoc); } /// ActOnParamUnparsedDefaultArgument - We've seen a default /// argument for a function parameter, but we can't parse it yet /// because we're inside a class definition. Note that this default /// argument will be parsed later. void Sema::ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc) { if (!param) return; ParmVarDecl *Param = cast<ParmVarDecl>(param); Param->setUnparsedDefaultArg(); UnparsedDefaultArgLocs[Param] = ArgLoc; } /// ActOnParamDefaultArgumentError - Parsing or semantic analysis of /// the default argument for the parameter param failed. void Sema::ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc) { if (!param) return; ParmVarDecl *Param = cast<ParmVarDecl>(param); Param->setInvalidDecl(); UnparsedDefaultArgLocs.erase(Param); Param->setDefaultArg(new(Context) OpaqueValueExpr(EqualLoc, Param->getType().getNonReferenceType(), VK_RValue)); } /// CheckExtraCXXDefaultArguments - Check for any extra default /// arguments in the declarator, which is not a function declaration /// or definition and therefore is not permitted to have default /// arguments. This routine should be invoked for every declarator /// that is not a function declaration or definition. void Sema::CheckExtraCXXDefaultArguments(Declarator &D) { // C++ [dcl.fct.default]p3 // A default argument expression shall be specified only in the // parameter-declaration-clause of a function declaration or in a // template-parameter (14.1). It shall not be specified for a // parameter pack. If it is specified in a // parameter-declaration-clause, it shall not occur within a // declarator or abstract-declarator of a parameter-declaration. bool MightBeFunction = D.isFunctionDeclarationContext(); for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { DeclaratorChunk &chunk = D.getTypeObject(i); if (chunk.Kind == DeclaratorChunk::Function) { if (MightBeFunction) { // This is a function declaration. It can have default arguments, but // keep looking in case its return type is a function type with default // arguments. 
MightBeFunction = false; continue; } for (unsigned argIdx = 0, e = chunk.Fun.NumParams; argIdx != e; ++argIdx) { ParmVarDecl *Param = cast<ParmVarDecl>(chunk.Fun.Params[argIdx].Param); if (Param->hasUnparsedDefaultArg()) { CachedTokens *Toks = chunk.Fun.Params[argIdx].DefaultArgTokens; SourceRange SR; if (Toks->size() > 1) SR = SourceRange((*Toks)[1].getLocation(), Toks->back().getLocation()); else SR = UnparsedDefaultArgLocs[Param]; Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc) << SR; delete Toks; chunk.Fun.Params[argIdx].DefaultArgTokens = nullptr; } else if (Param->getDefaultArg()) { Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc) << Param->getDefaultArg()->getSourceRange(); Param->setDefaultArg(nullptr); } } } else if (chunk.Kind != DeclaratorChunk::Paren) { MightBeFunction = false; } } } static bool functionDeclHasDefaultArgument(const FunctionDecl *FD) { for (unsigned NumParams = FD->getNumParams(); NumParams > 0; --NumParams) { const ParmVarDecl *PVD = FD->getParamDecl(NumParams-1); if (!PVD->hasDefaultArg()) return false; if (!PVD->hasInheritedDefaultArg()) return true; } return false; } /// MergeCXXFunctionDecl - Merge two declarations of the same C++ /// function, once we already know that they have the same /// type. Subroutine of MergeFunctionDecl. Returns true if there was an /// error, false otherwise. bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S) { bool Invalid = false; // The declaration context corresponding to the scope is the semantic // parent, unless this is a local function declaration, in which case // it is that surrounding function. DeclContext *ScopeDC = New->isLocalExternDecl() ? New->getLexicalDeclContext() : New->getDeclContext(); // Find the previous declaration for the purpose of default arguments. FunctionDecl *PrevForDefaultArgs = Old; for (/**/; PrevForDefaultArgs; // Don't bother looking back past the latest decl if this is a local // extern declaration; nothing else could work. PrevForDefaultArgs = New->isLocalExternDecl() ? nullptr : PrevForDefaultArgs->getPreviousDecl()) { // Ignore hidden declarations. if (!LookupResult::isVisible(*this, PrevForDefaultArgs)) continue; if (S && !isDeclInScope(PrevForDefaultArgs, ScopeDC, S) && !New->isCXXClassMember()) { // Ignore default arguments of old decl if they are not in // the same scope and this is not an out-of-line definition of // a member function. continue; } if (PrevForDefaultArgs->isLocalExternDecl() != New->isLocalExternDecl()) { // If only one of these is a local function declaration, then they are // declared in different scopes, even though isDeclInScope may think // they're in the same scope. (If both are local, the scope check is // sufficent, and if neither is local, then they are in the same scope.) continue; } // We found our guy. break; } // C++ [dcl.fct.default]p4: // For non-template functions, default arguments can be added in // later declarations of a function in the same // scope. Declarations in different scopes have completely // distinct sets of default arguments. That is, declarations in // inner scopes do not acquire default arguments from // declarations in outer scopes, and vice versa. In a given // function declaration, all parameters subsequent to a // parameter with a default argument shall have default // arguments supplied in this or previous declarations. A // default argument shall not be redefined by a later // declaration (not even to the same value). 
// // C++ [dcl.fct.default]p6: // Except for member functions of class templates, the default arguments // in a member function definition that appears outside of the class // definition are added to the set of default arguments provided by the // member function declaration in the class definition. for (unsigned p = 0, NumParams = PrevForDefaultArgs ? PrevForDefaultArgs->getNumParams() : 0; p < NumParams; ++p) { ParmVarDecl *OldParam = PrevForDefaultArgs->getParamDecl(p); ParmVarDecl *NewParam = New->getParamDecl(p); bool OldParamHasDfl = OldParam ? OldParam->hasDefaultArg() : false; bool NewParamHasDfl = NewParam->hasDefaultArg(); if (OldParamHasDfl && NewParamHasDfl) { unsigned DiagDefaultParamID = diag::err_param_default_argument_redefinition; // MSVC accepts that default parameters be redefined for member functions // of template class. The new default parameter's value is ignored. Invalid = true; if (getLangOpts().MicrosoftExt) { CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(New); if (MD && MD->getParent()->getDescribedClassTemplate()) { // Merge the old default argument into the new parameter. NewParam->setHasInheritedDefaultArg(); if (OldParam->hasUninstantiatedDefaultArg()) NewParam->setUninstantiatedDefaultArg( OldParam->getUninstantiatedDefaultArg()); else NewParam->setDefaultArg(OldParam->getInit()); DiagDefaultParamID = diag::ext_param_default_argument_redefinition; Invalid = false; } } // FIXME: If we knew where the '=' was, we could easily provide a fix-it // hint here. Alternatively, we could walk the type-source information // for NewParam to find the last source location in the type... but it // isn't worth the effort right now. This is the kind of test case that // is hard to get right: // int f(int); // void g(int (*fp)(int) = f); // void g(int (*fp)(int) = &f); Diag(NewParam->getLocation(), DiagDefaultParamID) << NewParam->getDefaultArgRange(); // Look for the function declaration where the default argument was // actually written, which may be a declaration prior to Old. for (auto Older = PrevForDefaultArgs; OldParam->hasInheritedDefaultArg(); /**/) { Older = Older->getPreviousDecl(); OldParam = Older->getParamDecl(p); } Diag(OldParam->getLocation(), diag::note_previous_definition) << OldParam->getDefaultArgRange(); } else if (OldParamHasDfl) { // Merge the old default argument into the new parameter. // It's important to use getInit() here; getDefaultArg() // strips off any top-level ExprWithCleanups. NewParam->setHasInheritedDefaultArg(); if (OldParam->hasUnparsedDefaultArg()) NewParam->setUnparsedDefaultArg(); else if (OldParam->hasUninstantiatedDefaultArg()) NewParam->setUninstantiatedDefaultArg( OldParam->getUninstantiatedDefaultArg()); else NewParam->setDefaultArg(OldParam->getInit()); } else if (NewParamHasDfl) { if (New->getDescribedFunctionTemplate()) { // Paragraph 4, quoted above, only applies to non-template functions. 
Diag(NewParam->getLocation(), diag::err_param_default_argument_template_redecl) << NewParam->getDefaultArgRange(); Diag(PrevForDefaultArgs->getLocation(), diag::note_template_prev_declaration) << false; } else if (New->getTemplateSpecializationKind() != TSK_ImplicitInstantiation && New->getTemplateSpecializationKind() != TSK_Undeclared) { // C++ [temp.expr.spec]p21: // Default function arguments shall not be specified in a declaration // or a definition for one of the following explicit specializations: // - the explicit specialization of a function template; // - the explicit specialization of a member function template; // - the explicit specialization of a member function of a class // template where the class template specialization to which the // member function specialization belongs is implicitly // instantiated. Diag(NewParam->getLocation(), diag::err_template_spec_default_arg) << (New->getTemplateSpecializationKind() ==TSK_ExplicitSpecialization) << New->getDeclName() << NewParam->getDefaultArgRange(); } else if (New->getDeclContext()->isDependentContext()) { // C++ [dcl.fct.default]p6 (DR217): // Default arguments for a member function of a class template shall // be specified on the initial declaration of the member function // within the class template. // // Reading the tea leaves a bit in DR217 and its reference to DR205 // leads me to the conclusion that one cannot add default function // arguments for an out-of-line definition of a member function of a // dependent type. int WhichKind = 2; if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(New->getDeclContext())) { if (Record->getDescribedClassTemplate()) WhichKind = 0; else if (isa<ClassTemplatePartialSpecializationDecl>(Record)) WhichKind = 1; else WhichKind = 2; } Diag(NewParam->getLocation(), diag::err_param_default_argument_member_template_redecl) << WhichKind << NewParam->getDefaultArgRange(); } } } // DR1344: If a default argument is added outside a class definition and that // default argument makes the function a special member function, the program // is ill-formed. This can only happen for constructors. if (isa<CXXConstructorDecl>(New) && New->getMinRequiredArguments() < Old->getMinRequiredArguments()) { CXXSpecialMember NewSM = getSpecialMember(cast<CXXMethodDecl>(New)), OldSM = getSpecialMember(cast<CXXMethodDecl>(Old)); if (NewSM != OldSM) { ParmVarDecl *NewParam = New->getParamDecl(New->getMinRequiredArguments()); assert(NewParam->hasDefaultArg()); Diag(NewParam->getLocation(), diag::err_default_arg_makes_ctor_special) << NewParam->getDefaultArgRange() << NewSM; Diag(Old->getLocation(), diag::note_previous_declaration); } } const FunctionDecl *Def; // C++11 [dcl.constexpr]p1: If any declaration of a function or function // template has a constexpr specifier then all its declarations shall // contain the constexpr specifier. if (New->isConstexpr() != Old->isConstexpr()) { Diag(New->getLocation(), diag::err_constexpr_redecl_mismatch) << New << New->isConstexpr(); Diag(Old->getLocation(), diag::note_previous_declaration); Invalid = true; } else if (!Old->getMostRecentDecl()->isInlined() && New->isInlined() && Old->isDefined(Def)) { // C++11 [dcl.fcn.spec]p4: // If the definition of a function appears in a translation unit before its // first declaration as inline, the program is ill-formed. 
Diag(New->getLocation(), diag::err_inline_decl_follows_def) << New; Diag(Def->getLocation(), diag::note_previous_definition); Invalid = true; } // C++11 [dcl.fct.default]p4: If a friend declaration specifies a default // argument expression, that declaration shall be a definition and shall be // the only declaration of the function or function template in the // translation unit. if (Old->getFriendObjectKind() == Decl::FOK_Undeclared && functionDeclHasDefaultArgument(Old)) { Diag(New->getLocation(), diag::err_friend_decl_with_def_arg_redeclared); Diag(Old->getLocation(), diag::note_previous_declaration); Invalid = true; } if (CheckEquivalentExceptionSpec(Old, New)) Invalid = true; return Invalid; } /// \brief Merge the exception specifications of two variable declarations. /// /// This is called when there's a redeclaration of a VarDecl. The function /// checks if the redeclaration might have an exception specification and /// validates compatibility and merges the specs if necessary. void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) { // Shortcut if exceptions are disabled. if (!getLangOpts().CXXExceptions) return; assert(Context.hasSameType(New->getType(), Old->getType()) && "Should only be called if types are otherwise the same."); QualType NewType = New->getType(); QualType OldType = Old->getType(); // We're only interested in pointers and references to functions, as well // as pointers to member functions. if (const ReferenceType *R = NewType->getAs<ReferenceType>()) { NewType = R->getPointeeType(); OldType = OldType->getAs<ReferenceType>()->getPointeeType(); } else if (const PointerType *P = NewType->getAs<PointerType>()) { NewType = P->getPointeeType(); OldType = OldType->getAs<PointerType>()->getPointeeType(); } else if (const MemberPointerType *M = NewType->getAs<MemberPointerType>()) { NewType = M->getPointeeType(); OldType = OldType->getAs<MemberPointerType>()->getPointeeType(); } if (!NewType->isFunctionProtoType()) return; // There's lots of special cases for functions. For function pointers, system // libraries are hopefully not as broken so that we don't need these // workarounds. if (CheckEquivalentExceptionSpec( OldType->getAs<FunctionProtoType>(), Old->getLocation(), NewType->getAs<FunctionProtoType>(), New->getLocation())) { New->setInvalidDecl(); } } /// CheckCXXDefaultArguments - Verify that the default arguments for a /// function declaration are well-formed according to C++ /// [dcl.fct.default]. void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) { unsigned NumParams = FD->getNumParams(); unsigned p; // Find first parameter with a default argument for (p = 0; p < NumParams; ++p) { ParmVarDecl *Param = FD->getParamDecl(p); if (Param->hasDefaultArg()) break; } // C++11 [dcl.fct.default]p4: // In a given function declaration, each parameter subsequent to a parameter // with a default argument shall have a default argument supplied in this or // a previous declaration or shall be a function parameter pack. A default // argument shall not be redefined by a later declaration (not even to the // same value). unsigned LastMissingDefaultArg = 0; for (; p < NumParams; ++p) { ParmVarDecl *Param = FD->getParamDecl(p); if (!Param->hasDefaultArg() && !Param->isParameterPack()) { if (Param->isInvalidDecl()) /* We already complained about this parameter. 
*/; else if (Param->getIdentifier()) Diag(Param->getLocation(), diag::err_param_default_argument_missing_name) << Param->getIdentifier(); else Diag(Param->getLocation(), diag::err_param_default_argument_missing); LastMissingDefaultArg = p; } } if (LastMissingDefaultArg > 0) { // Some default arguments were missing. Clear out all of the // default arguments up to (and including) the last missing // default argument, so that we leave the function parameters // in a semantically valid state. for (p = 0; p <= LastMissingDefaultArg; ++p) { ParmVarDecl *Param = FD->getParamDecl(p); if (Param->hasDefaultArg()) { Param->setDefaultArg(nullptr); } } } } // CheckConstexprParameterTypes - Check whether a function's parameter types // are all literal types. If so, return true. If not, produce a suitable // diagnostic and return false. static bool CheckConstexprParameterTypes(Sema &SemaRef, const FunctionDecl *FD) { unsigned ArgIndex = 0; const FunctionProtoType *FT = FD->getType()->getAs<FunctionProtoType>(); for (FunctionProtoType::param_type_iterator i = FT->param_type_begin(), e = FT->param_type_end(); i != e; ++i, ++ArgIndex) { const ParmVarDecl *PD = FD->getParamDecl(ArgIndex); SourceLocation ParamLoc = PD->getLocation(); if (!(*i)->isDependentType() && SemaRef.RequireLiteralType(ParamLoc, *i, diag::err_constexpr_non_literal_param, ArgIndex+1, PD->getSourceRange(), isa<CXXConstructorDecl>(FD))) return false; } return true; } /// \brief Get diagnostic %select index for tag kind for /// record diagnostic message. /// WARNING: Indexes apply to particular diagnostics only! /// /// \returns diagnostic %select index. static unsigned getRecordDiagFromTagKind(TagTypeKind Tag) { switch (Tag) { case TTK_Struct: return 0; case TTK_Interface: return 1; case TTK_Class: return 2; default: llvm_unreachable("Invalid tag kind for record diagnostic!"); } } // CheckConstexprFunctionDecl - Check whether a function declaration satisfies // the requirements of a constexpr function definition or a constexpr // constructor definition. If so, return true. If not, produce appropriate // diagnostics and return false. // // This implements C++11 [dcl.constexpr]p3,4, as amended by DR1360. bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) { const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD); if (MD && MD->isInstance()) { // C++11 [dcl.constexpr]p4: // The definition of a constexpr constructor shall satisfy the following // constraints: // - the class shall not have any virtual base classes; const CXXRecordDecl *RD = MD->getParent(); if (RD->getNumVBases()) { Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base) << isa<CXXConstructorDecl>(NewFD) << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases(); for (const auto &I : RD->vbases()) Diag(I.getLocStart(), diag::note_constexpr_virtual_base_here) << I.getSourceRange(); return false; } } if (!isa<CXXConstructorDecl>(NewFD)) { // C++11 [dcl.constexpr]p3: // The definition of a constexpr function shall satisfy the following // constraints: // - it shall not be virtual; const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD); if (Method && Method->isVirtual()) { Method = Method->getCanonicalDecl(); Diag(Method->getLocation(), diag::err_constexpr_virtual); // If it's not obvious why this function is virtual, find an overridden // function which uses the 'virtual' keyword. 
const CXXMethodDecl *WrittenVirtual = Method; while (!WrittenVirtual->isVirtualAsWritten()) WrittenVirtual = *WrittenVirtual->begin_overridden_methods(); if (WrittenVirtual != Method) Diag(WrittenVirtual->getLocation(), diag::note_overridden_virtual_function); return false; } // - its return type shall be a literal type; QualType RT = NewFD->getReturnType(); if (!RT->isDependentType() && RequireLiteralType(NewFD->getLocation(), RT, diag::err_constexpr_non_literal_return)) return false; } // - each of its parameter types shall be a literal type; if (!CheckConstexprParameterTypes(*this, NewFD)) return false; return true; } /// Check the given declaration statement is legal within a constexpr function /// body. C++11 [dcl.constexpr]p3,p4, and C++1y [dcl.constexpr]p3. /// /// \return true if the body is OK (maybe only as an extension), false if we /// have diagnosed a problem. static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl, DeclStmt *DS, SourceLocation &Cxx1yLoc) { // C++11 [dcl.constexpr]p3 and p4: // The definition of a constexpr function(p3) or constructor(p4) [...] shall // contain only for (const auto *DclIt : DS->decls()) { switch (DclIt->getKind()) { case Decl::StaticAssert: case Decl::Using: case Decl::UsingShadow: case Decl::UsingDirective: case Decl::UnresolvedUsingTypename: case Decl::UnresolvedUsingValue: // - static_assert-declarations // - using-declarations, // - using-directives, continue; case Decl::Typedef: case Decl::TypeAlias: { // - typedef declarations and alias-declarations that do not define // classes or enumerations, const auto *TN = cast<TypedefNameDecl>(DclIt); if (TN->getUnderlyingType()->isVariablyModifiedType()) { // Don't allow variably-modified types in constexpr functions. TypeLoc TL = TN->getTypeSourceInfo()->getTypeLoc(); SemaRef.Diag(TL.getBeginLoc(), diag::err_constexpr_vla) << TL.getSourceRange() << TL.getType() << isa<CXXConstructorDecl>(Dcl); return false; } continue; } case Decl::Enum: case Decl::CXXRecord: // C++1y allows types to be defined, not just declared. if (cast<TagDecl>(DclIt)->isThisDeclarationADefinition()) SemaRef.Diag(DS->getLocStart(), SemaRef.getLangOpts().CPlusPlus14 ? diag::warn_cxx11_compat_constexpr_type_definition : diag::ext_constexpr_type_definition) << isa<CXXConstructorDecl>(Dcl); continue; case Decl::EnumConstant: case Decl::IndirectField: case Decl::ParmVar: // These can only appear with other declarations which are banned in // C++11 and permitted in C++1y, so ignore them. continue; case Decl::Var: { // C++1y [dcl.constexpr]p3 allows anything except: // a definition of a variable of non-literal type or of static or // thread storage duration or for which no initialization is performed. const auto *VD = cast<VarDecl>(DclIt); if (VD->isThisDeclarationADefinition()) { if (VD->isStaticLocal()) { SemaRef.Diag(VD->getLocation(), diag::err_constexpr_local_var_static) << isa<CXXConstructorDecl>(Dcl) << (VD->getTLSKind() == VarDecl::TLS_Dynamic); return false; } if (!VD->getType()->isDependentType() && SemaRef.RequireLiteralType( VD->getLocation(), VD->getType(), diag::err_constexpr_local_var_non_literal_type, isa<CXXConstructorDecl>(Dcl))) return false; if (!VD->getType()->isDependentType() && !VD->hasInit() && !VD->isCXXForRangeDecl()) { SemaRef.Diag(VD->getLocation(), diag::err_constexpr_local_var_no_init) << isa<CXXConstructorDecl>(Dcl); return false; } } SemaRef.Diag(VD->getLocation(), SemaRef.getLangOpts().CPlusPlus14 ? 
diag::warn_cxx11_compat_constexpr_local_var : diag::ext_constexpr_local_var) << isa<CXXConstructorDecl>(Dcl); continue; } case Decl::NamespaceAlias: case Decl::Function: // These are disallowed in C++11 and permitted in C++1y. Allow them // everywhere as an extension. if (!Cxx1yLoc.isValid()) Cxx1yLoc = DS->getLocStart(); continue; default: SemaRef.Diag(DS->getLocStart(), diag::err_constexpr_body_invalid_stmt) << isa<CXXConstructorDecl>(Dcl); return false; } } return true; } /// Check that the given field is initialized within a constexpr constructor. /// /// \param Dcl The constexpr constructor being checked. /// \param Field The field being checked. This may be a member of an anonymous /// struct or union nested within the class being checked. /// \param Inits All declarations, including anonymous struct/union members and /// indirect members, for which any initialization was provided. /// \param Diagnosed Set to true if an error is produced. static void CheckConstexprCtorInitializer(Sema &SemaRef, const FunctionDecl *Dcl, FieldDecl *Field, llvm::SmallSet<Decl*, 16> &Inits, bool &Diagnosed) { if (Field->isInvalidDecl()) return; if (Field->isUnnamedBitfield()) return; // Anonymous unions with no variant members and empty anonymous structs do not // need to be explicitly initialized. FIXME: Anonymous structs that contain no // indirect fields don't need initializing. if (Field->isAnonymousStructOrUnion() && (Field->getType()->isUnionType() ? !Field->getType()->getAsCXXRecordDecl()->hasVariantMembers() : Field->getType()->getAsCXXRecordDecl()->isEmpty())) return; if (!Inits.count(Field)) { if (!Diagnosed) { SemaRef.Diag(Dcl->getLocation(), diag::err_constexpr_ctor_missing_init); Diagnosed = true; } SemaRef.Diag(Field->getLocation(), diag::note_constexpr_ctor_missing_init); } else if (Field->isAnonymousStructOrUnion()) { const RecordDecl *RD = Field->getType()->castAs<RecordType>()->getDecl(); for (auto *I : RD->fields()) // If an anonymous union contains an anonymous struct of which any member // is initialized, all members must be initialized. if (!RD->isUnion() || Inits.count(I)) CheckConstexprCtorInitializer(SemaRef, Dcl, I, Inits, Diagnosed); } } /// Check the provided statement is allowed in a constexpr function /// definition. static bool CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S, SmallVectorImpl<SourceLocation> &ReturnStmts, SourceLocation &Cxx1yLoc) { // - its function-body shall be [...] a compound-statement that contains only switch (S->getStmtClass()) { case Stmt::NullStmtClass: // - null statements, return true; case Stmt::DeclStmtClass: // - static_assert-declarations // - using-declarations, // - using-directives, // - typedef declarations and alias-declarations that do not define // classes or enumerations, if (!CheckConstexprDeclStmt(SemaRef, Dcl, cast<DeclStmt>(S), Cxx1yLoc)) return false; return true; case Stmt::ReturnStmtClass: // - and exactly one return statement; if (isa<CXXConstructorDecl>(Dcl)) { // C++1y allows return statements in constexpr constructors. if (!Cxx1yLoc.isValid()) Cxx1yLoc = S->getLocStart(); return true; } ReturnStmts.push_back(S->getLocStart()); return true; case Stmt::CompoundStmtClass: { // C++1y allows compound-statements. 
if (!Cxx1yLoc.isValid()) Cxx1yLoc = S->getLocStart(); CompoundStmt *CompStmt = cast<CompoundStmt>(S); for (auto *BodyIt : CompStmt->body()) { if (!CheckConstexprFunctionStmt(SemaRef, Dcl, BodyIt, ReturnStmts, Cxx1yLoc)) return false; } return true; } case Stmt::AttributedStmtClass: if (!Cxx1yLoc.isValid()) Cxx1yLoc = S->getLocStart(); return true; case Stmt::IfStmtClass: { // C++1y allows if-statements. if (!Cxx1yLoc.isValid()) Cxx1yLoc = S->getLocStart(); IfStmt *If = cast<IfStmt>(S); if (!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getThen(), ReturnStmts, Cxx1yLoc)) return false; if (If->getElse() && !CheckConstexprFunctionStmt(SemaRef, Dcl, If->getElse(), ReturnStmts, Cxx1yLoc)) return false; return true; } case Stmt::WhileStmtClass: case Stmt::DoStmtClass: case Stmt::ForStmtClass: case Stmt::CXXForRangeStmtClass: case Stmt::ContinueStmtClass: // C++1y allows all of these. We don't allow them as extensions in C++11, // because they don't make sense without variable mutation. if (!SemaRef.getLangOpts().CPlusPlus14) break; if (!Cxx1yLoc.isValid()) Cxx1yLoc = S->getLocStart(); for (Stmt *SubStmt : S->children()) if (SubStmt && !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts, Cxx1yLoc)) return false; return true; case Stmt::SwitchStmtClass: case Stmt::CaseStmtClass: case Stmt::DefaultStmtClass: case Stmt::BreakStmtClass: // C++1y allows switch-statements, and since they don't need variable // mutation, we can reasonably allow them in C++11 as an extension. if (!Cxx1yLoc.isValid()) Cxx1yLoc = S->getLocStart(); for (Stmt *SubStmt : S->children()) if (SubStmt && !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts, Cxx1yLoc)) return false; return true; default: if (!isa<Expr>(S)) break; // C++1y allows expression-statements. if (!Cxx1yLoc.isValid()) Cxx1yLoc = S->getLocStart(); return true; } SemaRef.Diag(S->getLocStart(), diag::err_constexpr_body_invalid_stmt) << isa<CXXConstructorDecl>(Dcl); return false; } /// Check the body for the given constexpr function declaration only contains /// the permitted types of statement. C++11 [dcl.constexpr]p3,p4. /// /// \return true if the body is OK, false if we have diagnosed a problem. bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) { if (isa<CXXTryStmt>(Body)) { // C++11 [dcl.constexpr]p3: // The definition of a constexpr function shall satisfy the following // constraints: [...] // - its function-body shall be = delete, = default, or a // compound-statement // // C++11 [dcl.constexpr]p4: // In the definition of a constexpr constructor, [...] // - its function-body shall not be a function-try-block; Diag(Body->getLocStart(), diag::err_constexpr_function_try_block) << isa<CXXConstructorDecl>(Dcl); return false; } SmallVector<SourceLocation, 4> ReturnStmts; // - its function-body shall be [...] a compound-statement that contains only // [... list of cases ...] CompoundStmt *CompBody = cast<CompoundStmt>(Body); SourceLocation Cxx1yLoc; for (auto *BodyIt : CompBody->body()) { if (!CheckConstexprFunctionStmt(*this, Dcl, BodyIt, ReturnStmts, Cxx1yLoc)) return false; } if (Cxx1yLoc.isValid()) Diag(Cxx1yLoc, getLangOpts().CPlusPlus14 ? 
diag::warn_cxx11_compat_constexpr_body_invalid_stmt : diag::ext_constexpr_body_invalid_stmt) << isa<CXXConstructorDecl>(Dcl); if (const CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Dcl)) { const CXXRecordDecl *RD = Constructor->getParent(); // DR1359: // - every non-variant non-static data member and base class sub-object // shall be initialized; // DR1460: // - if the class is a union having variant members, exactly one of them // shall be initialized; if (RD->isUnion()) { if (Constructor->getNumCtorInitializers() == 0 && RD->hasVariantMembers()) { Diag(Dcl->getLocation(), diag::err_constexpr_union_ctor_no_init); return false; } } else if (!Constructor->isDependentContext() && !Constructor->isDelegatingConstructor()) { assert(RD->getNumVBases() == 0 && "constexpr ctor with virtual bases"); // Skip detailed checking if we have enough initializers, and we would // allow at most one initializer per member. bool AnyAnonStructUnionMembers = false; unsigned Fields = 0; for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Fields) { if (I->isAnonymousStructOrUnion()) { AnyAnonStructUnionMembers = true; break; } } // DR1460: // - if the class is a union-like class, but is not a union, for each of // its anonymous union members having variant members, exactly one of // them shall be initialized; if (AnyAnonStructUnionMembers || Constructor->getNumCtorInitializers() != RD->getNumBases() + Fields) { // Check initialization of non-static data members. Base classes are // always initialized so do not need to be checked. Dependent bases // might not have initializers in the member initializer list. llvm::SmallSet<Decl*, 16> Inits; for (const auto *I: Constructor->inits()) { if (FieldDecl *FD = I->getMember()) Inits.insert(FD); else if (IndirectFieldDecl *ID = I->getIndirectMember()) Inits.insert(ID->chain_begin(), ID->chain_end()); } bool Diagnosed = false; for (auto *I : RD->fields()) CheckConstexprCtorInitializer(*this, Dcl, I, Inits, Diagnosed); if (Diagnosed) return false; } } } else { if (ReturnStmts.empty()) { // C++1y doesn't require constexpr functions to contain a 'return' // statement. We still do, unless the return type might be void, because // otherwise if there's no return statement, the function cannot // be used in a core constant expression. bool OK = getLangOpts().CPlusPlus14 && (Dcl->getReturnType()->isVoidType() || Dcl->getReturnType()->isDependentType()); Diag(Dcl->getLocation(), OK ? diag::warn_cxx11_compat_constexpr_body_no_return : diag::err_constexpr_body_no_return); return OK; } if (ReturnStmts.size() > 1) { Diag(ReturnStmts.back(), getLangOpts().CPlusPlus14 ? diag::warn_cxx11_compat_constexpr_body_multiple_return : diag::ext_constexpr_body_multiple_return); for (unsigned I = 0; I < ReturnStmts.size() - 1; ++I) Diag(ReturnStmts[I], diag::note_constexpr_body_previous_return); } } // C++11 [dcl.constexpr]p5: // if no function argument values exist such that the function invocation // substitution would produce a constant expression, the program is // ill-formed; no diagnostic required. // C++11 [dcl.constexpr]p3: // - every constructor call and implicit conversion used in initializing the // return value shall be one of those allowed in a constant expression. // C++11 [dcl.constexpr]p4: // - every constructor involved in initializing non-static data members and // base class sub-objects shall be a constexpr constructor. 
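  // For example (illustrative, not from the original source):
  //   int g();                           // not constexpr
  //   constexpr int f() { return g(); }  // never a constant expression
  // The check below diagnoses 'f', but only as an extension warning, for
  // compatibility with system headers.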
SmallVector<PartialDiagnosticAt, 8> Diags; if (!Expr::isPotentialConstantExpr(Dcl, Diags)) { Diag(Dcl->getLocation(), diag::ext_constexpr_function_never_constant_expr) << isa<CXXConstructorDecl>(Dcl); for (size_t I = 0, N = Diags.size(); I != N; ++I) Diag(Diags[I].first, Diags[I].second); // Don't return false here: we allow this for compatibility in // system headers. } return true; } /// isCurrentClassName - Determine whether the identifier II is the /// name of the class type currently being defined. In the case of /// nested classes, this will only return true if II is the name of /// the innermost class. bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *, const CXXScopeSpec *SS) { assert(getLangOpts().CPlusPlus && "No class names in C!"); CXXRecordDecl *CurDecl; if (SS && SS->isSet() && !SS->isInvalid()) { DeclContext *DC = computeDeclContext(*SS, true); CurDecl = dyn_cast_or_null<CXXRecordDecl>(DC); } else CurDecl = dyn_cast_or_null<CXXRecordDecl>(CurContext); if (CurDecl && CurDecl->getIdentifier()) return &II == CurDecl->getIdentifier(); return false; } /// \brief Determine whether the identifier II is a typo for the name of /// the class type currently being defined. If so, update it to the identifier /// that should have been used. bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) { assert(getLangOpts().CPlusPlus && "No class names in C!"); if (!getLangOpts().SpellChecking) return false; CXXRecordDecl *CurDecl; if (SS && SS->isSet() && !SS->isInvalid()) { DeclContext *DC = computeDeclContext(*SS, true); CurDecl = dyn_cast_or_null<CXXRecordDecl>(DC); } else CurDecl = dyn_cast_or_null<CXXRecordDecl>(CurContext); if (CurDecl && CurDecl->getIdentifier() && II != CurDecl->getIdentifier() && 3 * II->getName().edit_distance(CurDecl->getIdentifier()->getName()) < II->getLength()) { II = CurDecl->getIdentifier(); return true; } return false; } /// \brief Determine whether the given class is a base class of the given /// class, including looking at dependent bases. static bool findCircularInheritance(const CXXRecordDecl *Class, const CXXRecordDecl *Current) { SmallVector<const CXXRecordDecl*, 8> Queue; Class = Class->getCanonicalDecl(); while (true) { for (const auto &I : Current->bases()) { CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl(); if (!Base) continue; Base = Base->getDefinition(); if (!Base) continue; if (Base->getCanonicalDecl() == Class) return true; Queue.push_back(Base); } if (Queue.empty()) return false; Current = Queue.pop_back_val(); } return false; } /// \brief Check the validity of a C++ base class specifier. /// /// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics /// and returns NULL otherwise. CXXBaseSpecifier * Sema::CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc) { QualType BaseType = TInfo->getType(); // C++ [class.union]p1: // A union shall not have base classes. if (Class->isUnion()) { Diag(Class->getLocation(), diag::err_base_clause_on_union) << SpecifierRange; return nullptr; } if (EllipsisLoc.isValid() && !TInfo->getType()->containsUnexpandedParameterPack()) { Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << TInfo->getTypeLoc().getSourceRange(); EllipsisLoc = SourceLocation(); } SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc(); if (BaseType->isDependentType()) { // Make sure that we don't have circular inheritance among our dependent // bases. 
    // For non-dependent bases, the check for completeness below handles
    // this.
    if (CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl()) {
      if (BaseDecl->getCanonicalDecl() == Class->getCanonicalDecl() ||
          ((BaseDecl = BaseDecl->getDefinition()) &&
           findCircularInheritance(Class, BaseDecl))) {
        Diag(BaseLoc, diag::err_circular_inheritance)
          << BaseType << Context.getTypeDeclType(Class);
        if (BaseDecl->getCanonicalDecl() != Class->getCanonicalDecl())
          Diag(BaseDecl->getLocation(), diag::note_previous_decl)
            << BaseType;
        return nullptr;
      }
    }

    return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
                                          Class->getTagKind() == TTK_Class,
                                          Access, TInfo, EllipsisLoc);
  }

  // Base specifiers must be record types.
  if (!BaseType->isRecordType()) {
    Diag(BaseLoc, diag::err_base_must_be_class) << SpecifierRange;
    return nullptr;
  }

  // C++ [class.union]p1:
  //   A union shall not be used as a base class.
  if (BaseType->isUnionType()) {
    Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
    return nullptr;
  }

  // For the MS ABI, propagate DLL attributes to base class templates.
  if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
    if (Attr *ClassAttr = getDLLAttr(Class)) {
      if (auto *BaseTemplate =
              dyn_cast_or_null<ClassTemplateSpecializationDecl>(
                  BaseType->getAsCXXRecordDecl())) {
        propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseTemplate,
                                            BaseLoc);
      }
    }
  }

  // C++ [class.derived]p2:
  //   The class-name in a base-specifier shall not be an incompletely
  //   defined class.
  if (RequireCompleteType(BaseLoc, BaseType, diag::err_incomplete_base_class,
                          SpecifierRange)) {
    Class->setInvalidDecl();
    return nullptr;
  }

  // If the base class is polymorphic or isn't empty, the new one is/isn't,
  // too.
  RecordDecl *BaseDecl = BaseType->getAs<RecordType>()->getDecl();
  assert(BaseDecl && "Record type has no declaration");
  BaseDecl = BaseDecl->getDefinition();
  assert(BaseDecl && "Base type is not incomplete, but has no definition");
  CXXRecordDecl *CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
  assert(CXXBaseDecl && "Base type is not a C++ type");

  // A class which contains a flexible array member is not suitable for use
  // as a base class:
  //   - If the layout determines that a base comes before another base,
  //     the flexible array member would index into the subsequent base.
  //   - If the layout determines that a base comes before the derived class,
  //     the flexible array member would index into the derived class.
  if (CXXBaseDecl->hasFlexibleArrayMember()) {
    Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
      << CXXBaseDecl->getDeclName();
    return nullptr;
  }

  // C++ [class]p3:
  //   If a class is marked final and it appears as a base-type-specifier in
  //   a base-clause, the program is ill-formed.
  if (FinalAttr *FA = CXXBaseDecl->getAttr<FinalAttr>()) {
    Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
      << CXXBaseDecl->getDeclName()
      << FA->isSpelledAsSealed();
    Diag(CXXBaseDecl->getLocation(), diag::note_entity_declared_at)
      << CXXBaseDecl->getDeclName() << FA->getRange();
    return nullptr;
  }

  if (BaseDecl->isInvalidDecl())
    Class->setInvalidDecl();

  // Create the base specifier.
  return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
                                        Class->getTagKind() == TTK_Class,
                                        Access, TInfo, EllipsisLoc);
}

/// ActOnBaseSpecifier - Parsed a base specifier. A base specifier is
/// one entry in the base class list of a class specifier, for
/// example:
///    class foo : public bar, virtual private baz {
/// 'public bar' and 'virtual private baz' are each base-specifiers.
BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attributes, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc) { if (!classdecl) return true; AdjustDeclIfTemplate(classdecl); CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(classdecl); if (!Class) return true; // We haven't yet attached the base specifiers. Class->setIsParsingBaseSpecifiers(); // We do not support any C++11 attributes on base-specifiers yet. // Diagnose any attributes we see. if (!Attributes.empty()) { for (AttributeList *Attr = Attributes.getList(); Attr; Attr = Attr->getNext()) { if (Attr->isInvalid() || Attr->getKind() == AttributeList::IgnoredAttribute) continue; Diag(Attr->getLoc(), Attr->getKind() == AttributeList::UnknownAttribute ? diag::warn_unknown_attribute_ignored : diag::err_base_specifier_attribute) << Attr->getName(); } } TypeSourceInfo *TInfo = nullptr; GetTypeFromParser(basetype, &TInfo); if (EllipsisLoc.isInvalid() && DiagnoseUnexpandedParameterPack(SpecifierRange.getBegin(), TInfo, UPPC_BaseType)) return true; if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange, Virtual, Access, TInfo, EllipsisLoc)) return BaseSpec; else Class->setInvalidDecl(); return true; } /// Use small set to collect indirect bases. As this is only used /// locally, there's no need to abstract the small size parameter. typedef llvm::SmallPtrSet<QualType, 4> IndirectBaseSet; /// \brief Recursively add the bases of Type. Don't add Type itself. static void NoteIndirectBases(ASTContext &Context, IndirectBaseSet &Set, const QualType &Type) { // Even though the incoming type is a base, it might not be // a class -- it could be a template parm, for instance. if (auto Rec = Type->getAs<RecordType>()) { auto Decl = Rec->getAsCXXRecordDecl(); // Iterate over its bases. for (const auto &BaseSpec : Decl->bases()) { QualType Base = Context.getCanonicalType(BaseSpec.getType()) .getUnqualifiedType(); if (Set.insert(Base).second) // If we've not already seen it, recurse. NoteIndirectBases(Context, Set, Base); } } } // HLSL Change Starts static bool isConcreteBaseType(const QualType& t) { // Other than interfaces, all base types are concrete. const Type* type = t.getTypePtr(); if (!type->isRecordType()) return true; // Likely should have been rejected before. return !(type->getAsCXXRecordDecl()->getTagKind() == TagTypeKind::TTK_Interface); } // HLSL Change Ends /// \brief Performs the actual work of attaching the given base class /// specifiers to a C++ class. bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases) { if (NumBases == 0) return false; // HLSL Change Starts if (Class->getTagKind() == TagTypeKind::TTK_Interface) { Diag(Bases[0]->getLocStart(), diag::err_hlsl_interfaces_cannot_inherit); return true; } // HLSL Change Ends // Used to keep track of which base types we have already seen, so // that we can properly diagnose redundant direct base types. Note // that the key is always the unqualified canonical type of the base // class. std::map<QualType, CXXBaseSpecifier*, QualTypeOrdering> KnownBaseTypes; // Used to track indirect bases so we can see if a direct base is // ambiguous. IndirectBaseSet IndirectBaseTypes; // Copy non-redundant base specifiers into permanent storage. 
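  // For illustration (hypothetical user code), the loop below rejects both a
  // repeated direct base:
  //   struct B {}; struct D : B, B {};     // err_duplicate_base_class
  // and, for HLSL, more than one non-interface base:
  //   class A {}; class B {};
  //   class C : A, B {};                   // err_hlsl_multiple_concrete_bases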
bool ConcreteBaseTypeFound = false; // HLSL Change bool ConcreteBaseTypeReported = false; // HLSL Change unsigned NumGoodBases = 0; bool Invalid = false; for (unsigned idx = 0; idx < NumBases; ++idx) { QualType NewBaseType = Context.getCanonicalType(Bases[idx]->getType()); NewBaseType = NewBaseType.getLocalUnqualifiedType(); // HLSL Change Starts if (!ConcreteBaseTypeReported) { if (isConcreteBaseType(NewBaseType)) { if (ConcreteBaseTypeFound) { Diag(Bases[idx]->getLocStart(), diag::err_hlsl_multiple_concrete_bases); Invalid = true; ConcreteBaseTypeReported = true; } else { ConcreteBaseTypeFound = true; } } } // HLSL Change Ends CXXBaseSpecifier *&KnownBase = KnownBaseTypes[NewBaseType]; if (KnownBase) { // C++ [class.mi]p3: // A class shall not be specified as a direct base class of a // derived class more than once. Diag(Bases[idx]->getLocStart(), diag::err_duplicate_base_class) << KnownBase->getType() << Bases[idx]->getSourceRange(); // Delete the duplicate base class specifier; we're going to // overwrite its pointer later. Context.Deallocate(Bases[idx]); Invalid = true; } else { // Okay, add this new base class. KnownBase = Bases[idx]; Bases[NumGoodBases++] = Bases[idx]; // Note this base's direct & indirect bases, if there could be ambiguity. if (NumBases > 1) NoteIndirectBases(Context, IndirectBaseTypes, NewBaseType); if (const RecordType *Record = NewBaseType->getAs<RecordType>()) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()); if (Class->isInterface() && (!RD->isInterface() || KnownBase->getAccessSpecifier() != AS_public)) { // The Microsoft extension __interface does not permit bases that // are not themselves public interfaces. Diag(KnownBase->getLocStart(), diag::err_invalid_base_in_interface) << getRecordDiagFromTagKind(RD->getTagKind()) << RD->getName() << RD->getSourceRange(); Invalid = true; } if (RD->hasAttr<WeakAttr>()) Class->addAttr(WeakAttr::CreateImplicit(Context)); } } } // Attach the remaining base class specifiers to the derived class. Class->setBases(Bases, NumGoodBases); for (unsigned idx = 0; idx < NumGoodBases; ++idx) { // Check whether this direct base is inaccessible due to ambiguity. QualType BaseType = Bases[idx]->getType(); CanQualType CanonicalBase = Context.getCanonicalType(BaseType) .getUnqualifiedType(); if (IndirectBaseTypes.count(CanonicalBase)) { CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, /*DetectVirtual=*/true); bool found = Class->isDerivedFrom(CanonicalBase->getAsCXXRecordDecl(), Paths); assert(found); (void)found; if (Paths.isAmbiguous(CanonicalBase)) Diag(Bases[idx]->getLocStart (), diag::warn_inaccessible_base_class) << BaseType << getAmbiguousPathsDisplayString(Paths) << Bases[idx]->getSourceRange(); else assert(Bases[idx]->isVirtual()); } // Delete the base class specifier, since its data has been copied // into the CXXRecordDecl. Context.Deallocate(Bases[idx]); } return Invalid; } /// ActOnBaseSpecifiers - Attach the given base specifiers to the /// class, after checking whether there are any duplicate base /// classes. void Sema::ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases) { if (!ClassDecl || !Bases || !NumBases) return; AdjustDeclIfTemplate(ClassDecl); AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl), Bases, NumBases); } /// \brief Determine whether the type \p Derived is a C++ class that is /// derived from the type \p Base. 
bool Sema::IsDerivedFrom(QualType Derived, QualType Base) { if (!getLangOpts().CPlusPlus) return false; CXXRecordDecl *DerivedRD = Derived->getAsCXXRecordDecl(); if (!DerivedRD) return false; CXXRecordDecl *BaseRD = Base->getAsCXXRecordDecl(); if (!BaseRD) return false; // If either the base or the derived type is invalid, don't try to // check whether one is derived from the other. if (BaseRD->isInvalidDecl() || DerivedRD->isInvalidDecl()) return false; // FIXME: instantiate DerivedRD if necessary. We need a PoI for this. return DerivedRD->hasDefinition() && DerivedRD->isDerivedFrom(BaseRD); } /// \brief Determine whether the type \p Derived is a C++ class that is /// derived from the type \p Base. bool Sema::IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths) { if (!getLangOpts().CPlusPlus) return false; CXXRecordDecl *DerivedRD = Derived->getAsCXXRecordDecl(); if (!DerivedRD) return false; CXXRecordDecl *BaseRD = Base->getAsCXXRecordDecl(); if (!BaseRD) return false; return DerivedRD->isDerivedFrom(BaseRD, Paths); } void Sema::BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePathArray) { assert(BasePathArray.empty() && "Base path array must be empty!"); assert(Paths.isRecordingPaths() && "Must record paths!"); const CXXBasePath &Path = Paths.front(); // We first go backward and check if we have a virtual base. // FIXME: It would be better if CXXBasePath had the base specifier for // the nearest virtual base. unsigned Start = 0; for (unsigned I = Path.size(); I != 0; --I) { if (Path[I - 1].Base->isVirtual()) { Start = I - 1; break; } } // Now add all bases. for (unsigned I = Start, E = Path.size(); I != E; ++I) BasePathArray.push_back(const_cast<CXXBaseSpecifier*>(Path[I].Base)); } /// CheckDerivedToBaseConversion - Check whether the Derived-to-Base /// conversion (where Derived and Base are class types) is /// well-formed, meaning that the conversion is unambiguous (and /// that all of the base classes are accessible). Returns true /// and emits a diagnostic if the code is ill-formed, returns false /// otherwise. Loc is the location where this routine should point to /// if there is an error, and Range is the source range to highlight /// if there is an error. bool Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath) { // First, determine whether the path from Derived to Base is // ambiguous. This is slightly more expensive than checking whether // the Derived to Base conversion exists, because here we need to // explore multiple paths to determine if there is an ambiguity. CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, /*DetectVirtual=*/false); bool DerivationOkay = IsDerivedFrom(Derived, Base, Paths); assert(DerivationOkay && "Can only be used with a derived-to-base conversion"); (void)DerivationOkay; if (!Paths.isAmbiguous(Context.getCanonicalType(Base).getUnqualifiedType())) { if (InaccessibleBaseID) { // Check that the base class can be accessed. switch (CheckBaseClassAccess(Loc, Base, Derived, Paths.front(), InaccessibleBaseID)) { case AR_inaccessible: return true; case AR_accessible: case AR_dependent: case AR_delayed: break; } } // Build a base path if necessary. if (BasePath) BuildBasePathArray(Paths, *BasePath); return false; } if (AmbigiousBaseConvID) { // We know that the derived-to-base conversion is ambiguous, and // we're going to produce a diagnostic. 
    // Perform the derived-to-base search just one more time to compute all of
    // the possible paths so that we can print them out. This is more
    // expensive than any of the previous derived-to-base checks we've done,
    // but at this point performance isn't as much of an issue.
    Paths.clear();
    Paths.setRecordingPaths(true);
    bool StillOkay = IsDerivedFrom(Derived, Base, Paths);
    assert(StillOkay && "Can only be used with a derived-to-base conversion");
    (void)StillOkay;

    // Build up a textual representation of the ambiguous paths, e.g.,
    // D -> B -> A, that will be used to illustrate the ambiguous
    // conversions in the diagnostic. We only print one of the paths
    // to each base class subobject.
    std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);

    Diag(Loc, AmbigiousBaseConvID)
      << Derived << Base << PathDisplayStr << Range << Name;
  }
  return true;
}

bool Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
                                        SourceLocation Loc, SourceRange Range,
                                        CXXCastPath *BasePath,
                                        bool IgnoreAccess) {
  return CheckDerivedToBaseConversion(
      Derived, Base, IgnoreAccess ? 0 : diag::err_upcast_to_inaccessible_base,
      diag::err_ambiguous_derived_to_base_conv, Loc, Range, DeclarationName(),
      BasePath);
}

/// @brief Builds a string representing ambiguous paths from a
/// specific derived class to different subobjects of the same base
/// class.
///
/// This function builds a string that can be used in error messages
/// to show the different paths that one can take through the
/// inheritance hierarchy to go from the derived class to different
/// subobjects of a base class. The result looks something like this:
/// @code
/// struct D -> struct B -> struct A
/// struct D -> struct C -> struct A
/// @endcode
std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
  std::string PathDisplayStr;
  std::set<unsigned> DisplayedPaths;
  for (CXXBasePaths::paths_iterator Path = Paths.begin();
       Path != Paths.end(); ++Path) {
    if (DisplayedPaths.insert(Path->back().SubobjectNumber).second) {
      // We haven't displayed a path to this particular base
      // class subobject yet.
      PathDisplayStr += "\n ";
      PathDisplayStr +=
          Context.getTypeDeclType(Paths.getOrigin()).getAsString();
      for (CXXBasePath::const_iterator Element = Path->begin();
           Element != Path->end(); ++Element)
        PathDisplayStr += " -> " + Element->Base->getType().getAsString();
    }
  }
  return PathDisplayStr;
}

//===----------------------------------------------------------------------===//
// C++ class member Handling
//===----------------------------------------------------------------------===//

/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon.
bool Sema::ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
                                SourceLocation ColonLoc,
                                AttributeList *Attrs) {
  assert(Access != AS_none && "Invalid kind for syntactic access specifier!");
  AccessSpecDecl *ASDecl = AccessSpecDecl::Create(Context, Access, CurContext,
                                                  ASLoc, ColonLoc);
  CurContext->addHiddenDecl(ASDecl);
  return ProcessAccessDeclAttributeList(ASDecl, Attrs);
}

/// CheckOverrideControl - Check C++11 override control semantics.
void Sema::CheckOverrideControl(NamedDecl *D) {
  if (D->isInvalidDecl())
    return;

  // We only care about "override" and "final" declarations.
  if (!D->hasAttr<OverrideAttr>() && !D->hasAttr<FinalAttr>())
    return;

  CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
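  // For illustration (hypothetical user code, not part of this file), the
  // checks below diagnose cases such as:
  //   struct B { virtual void f(int); };
  //   struct D : B {
  //     void f(long) override;  // hides B::f(int); not actually overriding
  //     void g() override;      // 'override' on a non-virtual function
  //   };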
  // We can't check dependent instance methods.
  if (MD && MD->isInstance() &&
      (MD->getParent()->hasAnyDependentBases() ||
       MD->getType()->isDependentType()))
    return;

  if (MD && !MD->isVirtual()) {
    // If we have a non-virtual method, check if it hides a virtual method.
    // (In that case, it's most likely the method has the wrong type.)
    SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
    FindHiddenVirtualMethods(MD, OverloadedMethods);

    if (!OverloadedMethods.empty()) {
      if (OverrideAttr *OA = D->getAttr<OverrideAttr>()) {
        Diag(OA->getLocation(),
             diag::override_keyword_hides_virtual_member_function)
          << "override" << (OverloadedMethods.size() > 1);
      } else if (FinalAttr *FA = D->getAttr<FinalAttr>()) {
        Diag(FA->getLocation(),
             diag::override_keyword_hides_virtual_member_function)
          << (FA->isSpelledAsSealed() ? "sealed" : "final")
          << (OverloadedMethods.size() > 1);
      }
      NoteHiddenVirtualMethods(MD, OverloadedMethods);
      MD->setInvalidDecl();
      return;
    }
    // Fall through into the general case diagnostic.
    // FIXME: We might want to attempt typo correction here.
  }

  if (!MD || !MD->isVirtual()) {
    if (OverrideAttr *OA = D->getAttr<OverrideAttr>()) {
      Diag(OA->getLocation(),
           diag::override_keyword_only_allowed_on_virtual_member_functions)
        << "override" << FixItHint::CreateRemoval(OA->getLocation());
      D->dropAttr<OverrideAttr>();
    }
    if (FinalAttr *FA = D->getAttr<FinalAttr>()) {
      Diag(FA->getLocation(),
           diag::override_keyword_only_allowed_on_virtual_member_functions)
        << (FA->isSpelledAsSealed() ? "sealed" : "final")
        << FixItHint::CreateRemoval(FA->getLocation());
      D->dropAttr<FinalAttr>();
    }
    return;
  }

  // C++11 [class.virtual]p5:
  //   If a function is marked with the virt-specifier override and
  //   does not override a member function of a base class, the program is
  //   ill-formed.
  bool HasOverriddenMethods =
      MD->begin_overridden_methods() != MD->end_overridden_methods();
  if (MD->hasAttr<OverrideAttr>() && !HasOverriddenMethods)
    Diag(MD->getLocation(), diag::err_function_marked_override_not_overriding)
      << MD->getDeclName();
}

void Sema::DiagnoseAbsenceOfOverrideControl(NamedDecl *D) {
  if (D->isInvalidDecl() || D->hasAttr<OverrideAttr>())
    return;

  CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
  if (!MD || MD->isImplicit() || MD->hasAttr<FinalAttr>() ||
      isa<CXXDestructorDecl>(MD))
    return;

  SourceLocation Loc = MD->getLocation();
  SourceLocation SpellingLoc = Loc;
  if (getSourceManager().isMacroArgExpansion(Loc))
    SpellingLoc = getSourceManager().getImmediateExpansionRange(Loc).first;
  SpellingLoc = getSourceManager().getSpellingLoc(SpellingLoc);
  if (SpellingLoc.isValid() &&
      getSourceManager().isInSystemHeader(SpellingLoc))
    return;

  if (MD->size_overridden_methods() > 0) {
    Diag(MD->getLocation(), diag::warn_function_marked_not_override_overriding)
      << MD->getDeclName();
    const CXXMethodDecl *OMD = *MD->begin_overridden_methods();
    Diag(OMD->getLocation(), diag::note_overridden_virtual_function);
  }
}

/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
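/// For example (illustrative):
/// @code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); };  // err_final_function_overridden
/// @endcode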
bool Sema::CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old) { FinalAttr *FA = Old->getAttr<FinalAttr>(); if (!FA) return false; Diag(New->getLocation(), diag::err_final_function_overridden) << New->getDeclName() << FA->isSpelledAsSealed(); Diag(Old->getLocation(), diag::note_overridden_virtual_function); return true; } static bool InitializationHasSideEffects(const FieldDecl &FD) { const Type *T = FD.getType()->getBaseElementTypeUnsafe(); // FIXME: Destruction of ObjC lifetime types has side-effects. if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) return !RD->isCompleteDefinition() || !RD->hasTrivialDefaultConstructor() || !RD->hasTrivialDestructor(); return false; } static AttributeList *getMSPropertyAttr(AttributeList *list) { for (AttributeList *it = list; it != nullptr; it = it->getNext()) if (it->isDeclspecPropertyAttribute()) return it; return nullptr; } /// ActOnCXXMemberDeclarator - This is invoked when a C++ class member /// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the /// bitfield width if there is one, 'InitExpr' specifies the initializer if /// one has been parsed, and 'InitStyle' is set if an in-class initializer is /// present (but parsing it has been deferred). NamedDecl * Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BW, const VirtSpecifiers &VS, InClassInitStyle InitStyle) { const DeclSpec &DS = D.getDeclSpec(); DeclarationNameInfo NameInfo = GetNameForDeclarator(D); DeclarationName Name = NameInfo.getName(); SourceLocation Loc = NameInfo.getLoc(); // For anonymous bitfields, the location should point to the type. if (Loc.isInvalid()) Loc = D.getLocStart(); Expr *BitWidth = static_cast<Expr*>(BW); assert(isa<CXXRecordDecl>(CurContext)); assert(!DS.isFriendSpecified()); bool isFunc = D.isDeclarationOfFunction(); if (cast<CXXRecordDecl>(CurContext)->isInterface()) { // The Microsoft extension __interface only permits public member functions // and prohibits constructors, destructors, operators, non-public member // functions, static methods and data members. unsigned InvalidDecl; bool ShowDeclName = true; if (!isFunc) InvalidDecl = (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) ? 0 : 1; else if (AS != AS_public) InvalidDecl = 2; else if (DS.getStorageClassSpec() == DeclSpec::SCS_static) InvalidDecl = 3; else switch (Name.getNameKind()) { case DeclarationName::CXXConstructorName: InvalidDecl = 4; ShowDeclName = false; break; case DeclarationName::CXXDestructorName: InvalidDecl = 5; ShowDeclName = false; break; case DeclarationName::CXXOperatorName: case DeclarationName::CXXConversionFunctionName: InvalidDecl = 6; break; default: InvalidDecl = 0; break; } if (InvalidDecl) { if (ShowDeclName) Diag(Loc, diag::err_invalid_member_in_interface) << (InvalidDecl-1) << Name; else Diag(Loc, diag::err_invalid_member_in_interface) << (InvalidDecl-1) << ""; return nullptr; } } // C++ 9.2p6: A member shall not be declared to have automatic storage // duration (auto, register) or with the extern storage-class-specifier. // C++ 7.1.1p8: The mutable specifier can be applied only to names of class // data members and cannot be applied to names declared const or static, // and cannot be applied to reference members. 
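  // For instance (illustrative user code), the switch below rejects:
  //   class C {
  //     extern int a;     // err_storageclass_invalid_for_member
  //     mutable int f();  // err_mutable_function
  //   };
  // and clears the offending specifier so follow-up errors don't cascade.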
switch (DS.getStorageClassSpec()) { case DeclSpec::SCS_unspecified: case DeclSpec::SCS_typedef: case DeclSpec::SCS_static: break; case DeclSpec::SCS_mutable: if (isFunc) { Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_function); // FIXME: It would be nicer if the keyword was ignored only for this // declarator. Otherwise we could get follow-up errors. D.getMutableDeclSpec().ClearStorageClassSpecs(); } break; default: Diag(DS.getStorageClassSpecLoc(), diag::err_storageclass_invalid_for_member); D.getMutableDeclSpec().ClearStorageClassSpecs(); break; } bool isInstField = ((DS.getStorageClassSpec() == DeclSpec::SCS_unspecified || DS.getStorageClassSpec() == DeclSpec::SCS_mutable) && !isFunc); if (DS.isConstexprSpecified() && isInstField) { SemaDiagnosticBuilder B = Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr_member); SourceLocation ConstexprLoc = DS.getConstexprSpecLoc(); if (InitStyle == ICIS_NoInit) { B << 0 << 0; if (D.getDeclSpec().getTypeQualifiers() & DeclSpec::TQ_const) B << FixItHint::CreateRemoval(ConstexprLoc); else { B << FixItHint::CreateReplacement(ConstexprLoc, "const"); D.getMutableDeclSpec().ClearConstexprSpec(); const char *PrevSpec; unsigned DiagID; bool Failed = D.getMutableDeclSpec().SetTypeQual( DeclSpec::TQ_const, ConstexprLoc, PrevSpec, DiagID, getLangOpts()); (void)Failed; assert(!Failed && "Making a constexpr member const shouldn't fail"); } } else { B << 1; const char *PrevSpec; unsigned DiagID; if (D.getMutableDeclSpec().SetStorageClassSpec( *this, DeclSpec::SCS_static, ConstexprLoc, PrevSpec, DiagID, Context.getPrintingPolicy())) { assert(DS.getStorageClassSpec() == DeclSpec::SCS_mutable && "This is the only DeclSpec that should fail to be applied"); B << 1; } else { B << 0 << FixItHint::CreateInsertion(ConstexprLoc, "static "); isInstField = false; } } } NamedDecl *Member; if (isInstField) { CXXScopeSpec &SS = D.getCXXScopeSpec(); // Data members must have identifiers for names. if (!Name.isIdentifier()) { Diag(Loc, diag::err_bad_variable_name) << Name; return nullptr; } IdentifierInfo *II = Name.getAsIdentifierInfo(); // Member field could not be with "template" keyword. // So TemplateParameterLists should be empty in this case. if (TemplateParameterLists.size()) { TemplateParameterList* TemplateParams = TemplateParameterLists[0]; if (TemplateParams->size()) { // There is no such thing as a member field template. Diag(D.getIdentifierLoc(), diag::err_template_member) << II << SourceRange(TemplateParams->getTemplateLoc(), TemplateParams->getRAngleLoc()); } else { // There is an extraneous 'template<>' for this member. 
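      // e.g. (illustrative): struct S { template<> int x; };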
Diag(TemplateParams->getTemplateLoc(), diag::err_template_member_noparams) << II << SourceRange(TemplateParams->getTemplateLoc(), TemplateParams->getRAngleLoc()); } return nullptr; } if (SS.isSet() && !SS.isInvalid()) { // The user provided a superfluous scope specifier inside a class // definition: // // class X { // int X::member; // }; if (DeclContext *DC = computeDeclContext(SS, false)) diagnoseQualifiedDeclaration(SS, DC, Name, D.getIdentifierLoc()); else Diag(D.getIdentifierLoc(), diag::err_member_qualification) << Name << SS.getRange(); SS.clear(); } AttributeList *MSPropertyAttr = getMSPropertyAttr(D.getDeclSpec().getAttributes().getList()); if (MSPropertyAttr) { Member = HandleMSProperty(S, cast<CXXRecordDecl>(CurContext), Loc, D, BitWidth, InitStyle, AS, MSPropertyAttr); if (!Member) return nullptr; isInstField = false; } else { Member = HandleField(S, cast<CXXRecordDecl>(CurContext), Loc, D, BitWidth, InitStyle, AS); assert(Member && "HandleField never returns null"); } } else { Member = HandleDeclarator(S, D, TemplateParameterLists); if (!Member) return nullptr; // Non-instance-fields can't have a bitfield. if (BitWidth) { if (Member->isInvalidDecl()) { // don't emit another diagnostic. } else if (isa<VarDecl>(Member) || isa<VarTemplateDecl>(Member)) { // C++ 9.6p3: A bit-field shall not be a static member. // "static member 'A' cannot be a bit-field" Diag(Loc, diag::err_static_not_bitfield) << Name << BitWidth->getSourceRange(); } else if (isa<TypedefDecl>(Member)) { // "typedef member 'x' cannot be a bit-field" Diag(Loc, diag::err_typedef_not_bitfield) << Name << BitWidth->getSourceRange(); } else { // A function typedef ("typedef int f(); f a;"). // C++ 9.6p3: A bit-field shall have integral or enumeration type. Diag(Loc, diag::err_not_integral_type_bitfield) << Name << cast<ValueDecl>(Member)->getType() << BitWidth->getSourceRange(); } BitWidth = nullptr; Member->setInvalidDecl(); } Member->setAccess(AS); // If we have declared a member function template or static data member // template, set the access of the templated declaration as well. if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Member)) FunTmpl->getTemplatedDecl()->setAccess(AS); else if (VarTemplateDecl *VarTmpl = dyn_cast<VarTemplateDecl>(Member)) VarTmpl->getTemplatedDecl()->setAccess(AS); } if (VS.isOverrideSpecified()) Member->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context, 0)); if (VS.isFinalSpecified()) Member->addAttr(new (Context) FinalAttr(VS.getFinalLoc(), Context, VS.isFinalSpelledSealed())); if (VS.getLastLocation().isValid()) { // Update the end location of a method that has a virt-specifiers. if (CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Member)) MD->setRangeEnd(VS.getLastLocation()); } CheckOverrideControl(Member); assert((Name || isInstField) && "No identifier for non-field ?"); if (isInstField) { FieldDecl *FD = cast<FieldDecl>(Member); FieldCollector->Add(FD); if (!Diags.isIgnored(diag::warn_unused_private_field, FD->getLocation())) { // Remember all explicit private FieldDecls that have a name, no side // effects and are not part of a dependent type declaration. if (!FD->isImplicit() && FD->getDeclName() && FD->getAccess() == AS_private && !FD->hasAttr<UnusedAttr>() && !FD->getParent()->isDependentContext() && !InitializationHasSideEffects(*FD)) UnusedPrivateFields.insert(FD); } } return Member; } namespace { class UninitializedFieldVisitor : public EvaluatedExprVisitor<UninitializedFieldVisitor> { Sema &S; // List of Decls to generate a warning on. 
  // Also remove Decls that become initialized.
  llvm::SmallPtrSetImpl<ValueDecl*> &Decls;

  // List of base classes of the record. Classes are removed after their
  // initializers.
  llvm::SmallPtrSetImpl<QualType> &BaseClasses;

  // Vector of decls to be removed from the Decl set prior to visiting the
  // nodes. These Decls may have been initialized in the prior initializer.
  llvm::SmallVector<ValueDecl*, 4> DeclsToRemove;

  // If non-null, add a note to the warning pointing back to the constructor.
  const CXXConstructorDecl *Constructor;

  // Variables to hold state when processing an initializer list. When
  // InitList is true, special case initialization of FieldDecls matching
  // InitListFieldDecl.
  bool InitList;
  FieldDecl *InitListFieldDecl;
  llvm::SmallVector<unsigned, 4> InitFieldIndex;

public:
  typedef EvaluatedExprVisitor<UninitializedFieldVisitor> Inherited;

  UninitializedFieldVisitor(Sema &S,
                            llvm::SmallPtrSetImpl<ValueDecl*> &Decls,
                            llvm::SmallPtrSetImpl<QualType> &BaseClasses)
      : Inherited(S.Context), S(S), Decls(Decls), BaseClasses(BaseClasses),
        Constructor(nullptr), InitList(false), InitListFieldDecl(nullptr) {}

  // Returns true if the use of ME is not an uninitialized use.
  bool IsInitListMemberExprInitialized(MemberExpr *ME,
                                       bool CheckReferenceOnly) {
    llvm::SmallVector<FieldDecl*, 4> Fields;
    bool ReferenceField = false;
    while (ME) {
      FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
      if (!FD)
        return false;
      Fields.push_back(FD);
      if (FD->getType()->isReferenceType())
        ReferenceField = true;
      ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParenImpCasts());
    }

    // Binding a reference to an uninitialized field is not an
    // uninitialized use.
    if (CheckReferenceOnly && !ReferenceField)
      return true;

    llvm::SmallVector<unsigned, 4> UsedFieldIndex;
    // Discard the first field since it is the field decl that is being
    // initialized.
    for (auto I = Fields.rbegin() + 1, E = Fields.rend(); I != E; ++I) {
      UsedFieldIndex.push_back((*I)->getFieldIndex());
    }

    for (auto UsedIter = UsedFieldIndex.begin(),
              UsedEnd = UsedFieldIndex.end(),
              OrigIter = InitFieldIndex.begin(),
              OrigEnd = InitFieldIndex.end();
         UsedIter != UsedEnd && OrigIter != OrigEnd;
         ++UsedIter, ++OrigIter) {
      if (*UsedIter < *OrigIter)
        return true;
      if (*UsedIter > *OrigIter)
        break;
    }

    return false;
  }

  void HandleMemberExpr(MemberExpr *ME, bool CheckReferenceOnly,
                        bool AddressOf) {
    if (isa<EnumConstantDecl>(ME->getMemberDecl()))
      return;

    // FieldME is the inner-most MemberExpr that is not an anonymous struct
    // or union.
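    // For a use such as 'this->a.b.c' (illustrative), the loop below walks
    // the MemberExpr chain from 'c' back to the CXXThisExpr, leaving FieldME
    // at the member nearest the base object ('a' here, skipping anonymous
    // struct/union members) and noting whether every field on the way is POD.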
MemberExpr *FieldME = ME; bool AllPODFields = FieldME->getType().isPODType(S.Context); Expr *Base = ME; while (MemberExpr *SubME = dyn_cast<MemberExpr>(Base->IgnoreParenImpCasts())) { if (isa<VarDecl>(SubME->getMemberDecl())) return; if (FieldDecl *FD = dyn_cast<FieldDecl>(SubME->getMemberDecl())) if (!FD->isAnonymousStructOrUnion()) FieldME = SubME; if (!FieldME->getType().isPODType(S.Context)) AllPODFields = false; Base = SubME->getBase(); } if (!isa<CXXThisExpr>(Base->IgnoreParenImpCasts())) return; if (AddressOf && AllPODFields) return; ValueDecl* FoundVD = FieldME->getMemberDecl(); if (ImplicitCastExpr *BaseCast = dyn_cast<ImplicitCastExpr>(Base)) { while (isa<ImplicitCastExpr>(BaseCast->getSubExpr())) { BaseCast = cast<ImplicitCastExpr>(BaseCast->getSubExpr()); } if (BaseCast->getCastKind() == CK_UncheckedDerivedToBase) { QualType T = BaseCast->getType(); if (T->isPointerType() && BaseClasses.count(T->getPointeeType())) { S.Diag(FieldME->getExprLoc(), diag::warn_base_class_is_uninit) << T->getPointeeType() << FoundVD; } } } if (!Decls.count(FoundVD)) return; const bool IsReference = FoundVD->getType()->isReferenceType(); if (InitList && !AddressOf && FoundVD == InitListFieldDecl) { // Special checking for initializer lists. if (IsInitListMemberExprInitialized(ME, CheckReferenceOnly)) { return; } } else { // Prevent double warnings on use of unbounded references. if (CheckReferenceOnly && !IsReference) return; } unsigned diag = IsReference ? diag::warn_reference_field_is_uninit : diag::warn_field_is_uninit; S.Diag(FieldME->getExprLoc(), diag) << FoundVD; if (Constructor) S.Diag(Constructor->getLocation(), diag::note_uninit_in_this_constructor) << (Constructor->isDefaultConstructor() && Constructor->isImplicit()); } void HandleValue(Expr *E, bool AddressOf) { E = E->IgnoreParens(); if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) { HandleMemberExpr(ME, false /*CheckReferenceOnly*/, AddressOf /*AddressOf*/); return; } if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { Visit(CO->getCond()); HandleValue(CO->getTrueExpr(), AddressOf); HandleValue(CO->getFalseExpr(), AddressOf); return; } if (BinaryConditionalOperator *BCO = dyn_cast<BinaryConditionalOperator>(E)) { Visit(BCO->getCond()); HandleValue(BCO->getFalseExpr(), AddressOf); return; } if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) { HandleValue(OVE->getSourceExpr(), AddressOf); return; } if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { switch (BO->getOpcode()) { default: break; case(BO_PtrMemD): case(BO_PtrMemI): HandleValue(BO->getLHS(), AddressOf); Visit(BO->getRHS()); return; case(BO_Comma): Visit(BO->getLHS()); HandleValue(BO->getRHS(), AddressOf); return; } } Visit(E); } void CheckInitListExpr(InitListExpr *ILE) { InitFieldIndex.push_back(0); for (auto Child : ILE->children()) { if (InitListExpr *SubList = dyn_cast<InitListExpr>(Child)) { CheckInitListExpr(SubList); } else { Visit(Child); } ++InitFieldIndex.back(); } InitFieldIndex.pop_back(); } void CheckInitializer(Expr *E, const CXXConstructorDecl *FieldConstructor, FieldDecl *Field, const Type *BaseClass) { // Remove Decls that may have been initialized in the previous // initializer. 
    for (ValueDecl* VD : DeclsToRemove)
      Decls.erase(VD);
    DeclsToRemove.clear();

    Constructor = FieldConstructor;
    InitListExpr *ILE = dyn_cast<InitListExpr>(E);

    if (ILE && Field) {
      InitList = true;
      InitListFieldDecl = Field;
      InitFieldIndex.clear();
      CheckInitListExpr(ILE);
    } else {
      InitList = false;
      Visit(E);
    }

    if (Field)
      Decls.erase(Field);
    if (BaseClass)
      BaseClasses.erase(BaseClass->getCanonicalTypeInternal());
  }

  void VisitMemberExpr(MemberExpr *ME) {
    // All uses of unbounded reference fields will warn.
    HandleMemberExpr(ME, true /*CheckReferenceOnly*/, false /*AddressOf*/);
  }

  void VisitImplicitCastExpr(ImplicitCastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue) {
      HandleValue(E->getSubExpr(), false /*AddressOf*/);
      return;
    }

    Inherited::VisitImplicitCastExpr(E);
  }

  void VisitCXXConstructExpr(CXXConstructExpr *E) {
    if (E->getConstructor()->isCopyConstructor()) {
      Expr *ArgExpr = E->getArg(0);
      if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr))
        if (ILE->getNumInits() == 1)
          ArgExpr = ILE->getInit(0);
      if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
        if (ICE->getCastKind() == CK_NoOp)
          ArgExpr = ICE->getSubExpr();
      HandleValue(ArgExpr, false /*AddressOf*/);
      return;
    }

    Inherited::VisitCXXConstructExpr(E);
  }

  void VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
    Expr *Callee = E->getCallee();
    if (isa<MemberExpr>(Callee)) {
      HandleValue(Callee, false /*AddressOf*/);
      for (auto Arg : E->arguments())
        Visit(Arg);
      return;
    }

    Inherited::VisitCXXMemberCallExpr(E);
  }

  void VisitCallExpr(CallExpr *E) {
    // Treat std::move as a use.
    if (E->getNumArgs() == 1) {
      if (FunctionDecl *FD = E->getDirectCallee()) {
        if (FD->isInStdNamespace() && FD->getIdentifier() &&
            FD->getIdentifier()->isStr("move")) {
          HandleValue(E->getArg(0), false /*AddressOf*/);
          return;
        }
      }
    }

    Inherited::VisitCallExpr(E);
  }

  void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
    Expr *Callee = E->getCallee();
    if (isa<UnresolvedLookupExpr>(Callee))
      return Inherited::VisitCXXOperatorCallExpr(E);

    Visit(Callee);
    for (auto Arg : E->arguments())
      HandleValue(Arg->IgnoreParenImpCasts(), false /*AddressOf*/);
  }

  void VisitBinaryOperator(BinaryOperator *E) {
    // If a field assignment is detected, remove the field from the
    // uninitialized field set.
    if (E->getOpcode() == BO_Assign)
      if (MemberExpr *ME = dyn_cast<MemberExpr>(E->getLHS()))
        if (FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
          if (!FD->getType()->isReferenceType())
            DeclsToRemove.push_back(FD);

    if (E->isCompoundAssignmentOp()) {
      HandleValue(E->getLHS(), false /*AddressOf*/);
      Visit(E->getRHS());
      return;
    }

    Inherited::VisitBinaryOperator(E);
  }

  void VisitUnaryOperator(UnaryOperator *E) {
    if (E->isIncrementDecrementOp()) {
      HandleValue(E->getSubExpr(), false /*AddressOf*/);
      return;
    }
    if (E->getOpcode() == UO_AddrOf) {
      if (MemberExpr *ME = dyn_cast<MemberExpr>(E->getSubExpr())) {
        HandleValue(ME->getBase(), true /*AddressOf*/);
        return;
      }
    }

    Inherited::VisitUnaryOperator(E);
  }
};

// Diagnose value-uses of fields to initialize themselves, e.g.
//   foo(foo)
// where foo is not also a parameter to the constructor.
// Also diagnose cross-field uninitialized uses such as
//   x(y), y(x)
// TODO: implement -Wuninitialized and fold this into that framework.
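// A typical case this catches (illustrative):
//   struct S {
//     int x, y;
//     S() : x(y), y(0) {}  // warns: 'y' is used before it is initialized
//   };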
static void DiagnoseUninitializedFields( Sema &SemaRef, const CXXConstructorDecl *Constructor) { if (SemaRef.getDiagnostics().isIgnored(diag::warn_field_is_uninit, Constructor->getLocation())) { return; } if (Constructor->isInvalidDecl()) return; const CXXRecordDecl *RD = Constructor->getParent(); if (RD->getDescribedClassTemplate()) return; // Holds fields that are uninitialized. llvm::SmallPtrSet<ValueDecl*, 4> UninitializedFields; // At the beginning, all fields are uninitialized. for (auto *I : RD->decls()) { if (auto *FD = dyn_cast<FieldDecl>(I)) { UninitializedFields.insert(FD); } else if (auto *IFD = dyn_cast<IndirectFieldDecl>(I)) { UninitializedFields.insert(IFD->getAnonField()); } } llvm::SmallPtrSet<QualType, 4> UninitializedBaseClasses; for (auto I : RD->bases()) UninitializedBaseClasses.insert(I.getType().getCanonicalType()); if (UninitializedFields.empty() && UninitializedBaseClasses.empty()) return; UninitializedFieldVisitor UninitializedChecker(SemaRef, UninitializedFields, UninitializedBaseClasses); for (const auto *FieldInit : Constructor->inits()) { if (UninitializedFields.empty() && UninitializedBaseClasses.empty()) break; Expr *InitExpr = FieldInit->getInit(); if (!InitExpr) continue; if (CXXDefaultInitExpr *Default = dyn_cast<CXXDefaultInitExpr>(InitExpr)) { InitExpr = Default->getExpr(); if (!InitExpr) continue; // In class initializers will point to the constructor. UninitializedChecker.CheckInitializer(InitExpr, Constructor, FieldInit->getAnyMember(), FieldInit->getBaseClass()); } else { UninitializedChecker.CheckInitializer(InitExpr, nullptr, FieldInit->getAnyMember(), FieldInit->getBaseClass()); } } } } // namespace /// \brief Enter a new C++ default initializer scope. After calling this, the /// caller must call \ref ActOnFinishCXXInClassMemberInitializer, even if /// parsing or instantiating the initializer failed. void Sema::ActOnStartCXXInClassMemberInitializer() { // Create a synthetic function scope to represent the call to the constructor // that notionally surrounds a use of this initializer. PushFunctionScope(); } /// \brief This is invoked after parsing an in-class initializer for a /// non-static C++ class member, and after instantiating an in-class initializer /// in a class template. Such actions are deferred until the class is complete. void Sema::ActOnFinishCXXInClassMemberInitializer(Decl *D, SourceLocation InitLoc, Expr *InitExpr) { // Pop the notional constructor scope we created earlier. PopFunctionScopeInfo(nullptr, D); FieldDecl *FD = dyn_cast<FieldDecl>(D); assert((isa<MSPropertyDecl>(D) || FD->getInClassInitStyle() != ICIS_NoInit) && "must set init style when field is created"); if (!InitExpr) { D->setInvalidDecl(); if (FD) FD->removeInClassInitializer(); return; } if (DiagnoseUnexpandedParameterPack(InitExpr, UPPC_Initializer)) { FD->setInvalidDecl(); FD->removeInClassInitializer(); return; } ExprResult Init = InitExpr; if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) { InitializedEntity Entity = InitializedEntity::InitializeMember(FD); InitializationKind Kind = FD->getInClassInitStyle() == ICIS_ListInit ? InitializationKind::CreateDirectList(InitExpr->getLocStart()) : InitializationKind::CreateCopy(InitExpr->getLocStart(), InitLoc); InitializationSequence Seq(*this, Entity, Kind, InitExpr); Init = Seq.Perform(*this, Entity, Kind, InitExpr); if (Init.isInvalid()) { FD->setInvalidDecl(); return; } } // C++11 [class.base.init]p7: // The initialization of each base and member constitutes a // full-expression. 
  Init = ActOnFinishFullExpr(Init.get(), InitLoc);
  if (Init.isInvalid()) {
    FD->setInvalidDecl();
    return;
  }

  InitExpr = Init.get();

  FD->setInClassInitializer(InitExpr);
}

/// \brief Find the direct and/or virtual base specifiers that
/// correspond to the given base type, for use in base initialization
/// within a constructor.
static bool FindBaseInitializer(Sema &SemaRef,
                                CXXRecordDecl *ClassDecl,
                                QualType BaseType,
                                const CXXBaseSpecifier *&DirectBaseSpec,
                                const CXXBaseSpecifier *&VirtualBaseSpec) {
  // First, check for a direct base class.
  DirectBaseSpec = nullptr;
  for (const auto &Base : ClassDecl->bases()) {
    if (SemaRef.Context.hasSameUnqualifiedType(BaseType, Base.getType())) {
      // We found a direct base of this type. That's what we're
      // initializing.
      DirectBaseSpec = &Base;
      break;
    }
  }

  // Check for a virtual base class.
  // FIXME: We might be able to short-circuit this if we know in advance that
  // there are no virtual bases.
  VirtualBaseSpec = nullptr;
  if (!DirectBaseSpec || !DirectBaseSpec->isVirtual()) {
    // We haven't found a base yet; search the class hierarchy for a
    // virtual base class.
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/false);
    if (SemaRef.IsDerivedFrom(SemaRef.Context.getTypeDeclType(ClassDecl),
                              BaseType, Paths)) {
      for (CXXBasePaths::paths_iterator Path = Paths.begin();
           Path != Paths.end(); ++Path) {
        if (Path->back().Base->isVirtual()) {
          VirtualBaseSpec = Path->back().Base;
          break;
        }
      }
    }
  }

  return DirectBaseSpec || VirtualBaseSpec;
}

/// \brief Handle a C++ member initializer using braced-init-list syntax.
MemInitResult
Sema::ActOnMemInitializer(Decl *ConstructorD,
                          Scope *S,
                          CXXScopeSpec &SS,
                          IdentifierInfo *MemberOrBase,
                          ParsedType TemplateTypeTy,
                          const DeclSpec &DS,
                          SourceLocation IdLoc,
                          Expr *InitList,
                          SourceLocation EllipsisLoc) {
  return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
                             DS, IdLoc, InitList, EllipsisLoc);
}

/// \brief Handle a C++ member initializer using parentheses syntax.
MemInitResult
Sema::ActOnMemInitializer(Decl *ConstructorD,
                          Scope *S,
                          CXXScopeSpec &SS,
                          IdentifierInfo *MemberOrBase,
                          ParsedType TemplateTypeTy,
                          const DeclSpec &DS,
                          SourceLocation IdLoc,
                          SourceLocation LParenLoc,
                          ArrayRef<Expr *> Args,
                          SourceLocation RParenLoc,
                          SourceLocation EllipsisLoc) {
  Expr *List = new (Context) ParenListExpr(Context, LParenLoc, Args,
                                           RParenLoc);
  return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
                             DS, IdLoc, List, EllipsisLoc);
}

namespace {

// Callback to only accept typo corrections that can be a valid C++ member
// initializer: either a non-static data member or a base class.
class MemInitializerValidatorCCC : public CorrectionCandidateCallback {
public:
  explicit MemInitializerValidatorCCC(CXXRecordDecl *ClassDecl)
      : ClassDecl(ClassDecl) {}

  bool ValidateCandidate(const TypoCorrection &candidate) override {
    if (NamedDecl *ND = candidate.getCorrectionDecl()) {
      if (FieldDecl *Member = dyn_cast<FieldDecl>(ND))
        return Member->getDeclContext()->getRedeclContext()->Equals(ClassDecl);
      return isa<TypeDecl>(ND);
    }
    return false;
  }

private:
  CXXRecordDecl *ClassDecl;
};

}

/// \brief Handle a C++ member initializer.
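/// For example (illustrative), given
/// @code
///   struct B {};
///   struct D : B { int m; D() : B(), m(0) {} };
/// @endcode
/// each of 'B()' and 'm(0)' is routed through BuildMemInitializer below.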
MemInitResult Sema::BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc) { ExprResult Res = CorrectDelayedTyposInExpr(Init); if (!Res.isUsable()) return true; Init = Res.get(); if (!ConstructorD) return true; AdjustDeclIfTemplate(ConstructorD); CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(ConstructorD); if (!Constructor) { // The user wrote a constructor initializer on a function that is // not a C++ constructor. Ignore the error for now, because we may // have more member initializers coming; we'll diagnose it just // once in ActOnMemInitializers. return true; } CXXRecordDecl *ClassDecl = Constructor->getParent(); // C++ [class.base.init]p2: // Names in a mem-initializer-id are looked up in the scope of the // constructor's class and, if not found in that scope, are looked // up in the scope containing the constructor's definition. // [Note: if the constructor's class contains a member with the // same name as a direct or virtual base class of the class, a // mem-initializer-id naming the member or base class and composed // of a single identifier refers to the class member. A // mem-initializer-id for the hidden base class may be specified // using a qualified name. ] if (!SS.getScopeRep() && !TemplateTypeTy) { // Look for a member, first. DeclContext::lookup_result Result = ClassDecl->lookup(MemberOrBase); if (!Result.empty()) { ValueDecl *Member; if ((Member = dyn_cast<FieldDecl>(Result.front())) || (Member = dyn_cast<IndirectFieldDecl>(Result.front()))) { if (EllipsisLoc.isValid()) Diag(EllipsisLoc, diag::err_pack_expansion_member_init) << MemberOrBase << SourceRange(IdLoc, Init->getSourceRange().getEnd()); return BuildMemberInitializer(Member, Init, IdLoc); } } } // It didn't name a member, so see if it names a class. QualType BaseType; TypeSourceInfo *TInfo = nullptr; if (TemplateTypeTy) { BaseType = GetTypeFromParser(TemplateTypeTy, &TInfo); } else if (DS.getTypeSpecType() == TST_decltype) { BaseType = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc()); } else { LookupResult R(*this, MemberOrBase, IdLoc, LookupOrdinaryName); LookupParsedName(R, S, &SS); TypeDecl *TyD = R.getAsSingle<TypeDecl>(); if (!TyD) { if (R.isAmbiguous()) return true; // We don't want access-control diagnostics here. R.suppressDiagnostics(); if (SS.isSet() && isDependentScopeSpecifier(SS)) { bool NotUnknownSpecialization = false; DeclContext *DC = computeDeclContext(SS, false); if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(DC)) NotUnknownSpecialization = !Record->hasAnyDependentBases(); if (!NotUnknownSpecialization) { // When the scope specifier can refer to a member of an unknown // specialization, we take it as a type name. BaseType = CheckTypenameType(ETK_None, SourceLocation(), SS.getWithLocInContext(Context), *MemberOrBase, IdLoc); if (BaseType.isNull()) return true; R.clear(); R.setLookupName(MemberOrBase); } } // If no results were found, try to correct typos. TypoCorrection Corr; if (R.empty() && BaseType.isNull() && (Corr = CorrectTypo( R.getLookupNameInfo(), R.getLookupKind(), S, &SS, llvm::make_unique<MemInitializerValidatorCCC>(ClassDecl), CTK_ErrorRecovery, ClassDecl))) { if (FieldDecl *Member = Corr.getCorrectionDeclAs<FieldDecl>()) { // We have found a non-static data member with a similar // name to what was typed; complain and initialize that // member. 
diagnoseTypo(Corr, PDiag(diag::err_mem_init_not_member_or_class_suggest) << MemberOrBase << true); return BuildMemberInitializer(Member, Init, IdLoc); } else if (TypeDecl *Type = Corr.getCorrectionDeclAs<TypeDecl>()) { const CXXBaseSpecifier *DirectBaseSpec; const CXXBaseSpecifier *VirtualBaseSpec; if (FindBaseInitializer(*this, ClassDecl, Context.getTypeDeclType(Type), DirectBaseSpec, VirtualBaseSpec)) { // We have found a direct or virtual base class with a // similar name to what was typed; complain and initialize // that base class. diagnoseTypo(Corr, PDiag(diag::err_mem_init_not_member_or_class_suggest) << MemberOrBase << false, PDiag() /*Suppress note, we provide our own.*/); const CXXBaseSpecifier *BaseSpec = DirectBaseSpec ? DirectBaseSpec : VirtualBaseSpec; Diag(BaseSpec->getLocStart(), diag::note_base_class_specified_here) << BaseSpec->getType() << BaseSpec->getSourceRange(); TyD = Type; } } } if (!TyD && BaseType.isNull()) { Diag(IdLoc, diag::err_mem_init_not_member_or_class) << MemberOrBase << SourceRange(IdLoc,Init->getSourceRange().getEnd()); return true; } } if (BaseType.isNull()) { BaseType = Context.getTypeDeclType(TyD); MarkAnyDeclReferenced(TyD->getLocation(), TyD, /*OdrUse=*/false); if (SS.isSet()) // FIXME: preserve source range information BaseType = Context.getElaboratedType(ETK_None, SS.getScopeRep(), BaseType); } } if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(BaseType, IdLoc); return BuildBaseInitializer(BaseType, TInfo, Init, ClassDecl, EllipsisLoc); } /// Checks a member initializer expression for cases where reference (or /// pointer) members are bound to by-value parameters (or their addresses). static void CheckForDanglingReferenceOrPointer(Sema &S, ValueDecl *Member, Expr *Init, SourceLocation IdLoc) { QualType MemberTy = Member->getType(); // We only handle pointers and references currently. // FIXME: Would this be relevant for ObjC object pointers? Or block pointers? if (!MemberTy->isReferenceType() && !MemberTy->isPointerType()) return; const bool IsPointer = MemberTy->isPointerType(); if (IsPointer) { if (const UnaryOperator *Op = dyn_cast<UnaryOperator>(Init->IgnoreParenImpCasts())) { // The only case we're worried about with pointers requires taking the // address. if (Op->getOpcode() != UO_AddrOf) return; Init = Op->getSubExpr(); } else { // We only handle address-of expression initializers for pointers. return; } } if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Init->IgnoreParens())) { // We only warn when referring to a non-reference parameter declaration. const ParmVarDecl *Parameter = dyn_cast<ParmVarDecl>(DRE->getDecl()); if (!Parameter || Parameter->getType()->isReferenceType()) return; S.Diag(Init->getExprLoc(), IsPointer ? diag::warn_init_ptr_member_to_parameter_addr : diag::warn_bind_ref_member_to_parameter) << Member << Parameter << Init->getSourceRange(); } else { // Other initializers are fine. 
MemInitResult
Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
                             SourceLocation IdLoc) {
  FieldDecl *DirectMember = dyn_cast<FieldDecl>(Member);
  IndirectFieldDecl *IndirectMember = dyn_cast<IndirectFieldDecl>(Member);
  assert((DirectMember || IndirectMember) &&
         "Member must be a FieldDecl or IndirectFieldDecl");

  if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
    return true;

  if (Member->isInvalidDecl())
    return true;

  MultiExprArg Args;
  if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
    Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
  } else if (InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) {
    Args = MultiExprArg(InitList->getInits(), InitList->getNumInits());
  } else {
    // Template instantiation doesn't reconstruct ParenListExprs for us.
    Args = Init;
  }

  SourceRange InitRange = Init->getSourceRange();

  if (Member->getType()->isDependentType() || Init->isTypeDependent()) {
    // Can't check initialization for a member of dependent type or when
    // any of the arguments are type-dependent expressions.
    DiscardCleanupsInEvaluationContext();
  } else {
    bool InitList = false;
    if (isa<InitListExpr>(Init)) {
      InitList = true;
      Args = Init;
    }

    // Initialize the member.
    InitializedEntity MemberEntity = DirectMember
      ? InitializedEntity::InitializeMember(DirectMember, nullptr)
      : InitializedEntity::InitializeMember(IndirectMember, nullptr);
    InitializationKind Kind =
      InitList ? InitializationKind::CreateDirectList(IdLoc)
               : InitializationKind::CreateDirect(IdLoc, InitRange.getBegin(),
                                                  InitRange.getEnd());

    InitializationSequence InitSeq(*this, MemberEntity, Kind, Args);
    ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind, Args,
                                            nullptr);
    if (MemberInit.isInvalid())
      return true;

    CheckForDanglingReferenceOrPointer(*this, Member, MemberInit.get(), IdLoc);

    // C++11 [class.base.init]p7:
    //   The initialization of each base and member constitutes a
    //   full-expression.
    MemberInit = ActOnFinishFullExpr(MemberInit.get(), InitRange.getBegin());
    if (MemberInit.isInvalid())
      return true;

    Init = MemberInit.get();
  }

  if (DirectMember) {
    return new (Context) CXXCtorInitializer(Context, DirectMember, IdLoc,
                                            InitRange.getBegin(), Init,
                                            InitRange.getEnd());
  } else {
    return new (Context) CXXCtorInitializer(Context, IndirectMember, IdLoc,
                                            InitRange.getBegin(), Init,
                                            InitRange.getEnd());
  }
}

MemInitResult
Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
                                 CXXRecordDecl *ClassDecl) {
  SourceLocation NameLoc = TInfo->getTypeLoc().getLocalSourceRange().getBegin();
  if (!LangOpts.CPlusPlus11)
    return Diag(NameLoc, diag::err_delegating_ctor)
      << TInfo->getTypeLoc().getLocalSourceRange();
  Diag(NameLoc, diag::warn_cxx98_compat_delegating_ctor);

  bool InitList = true;
  MultiExprArg Args = Init;
  if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
    InitList = false;
    Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
  }

  SourceRange InitRange = Init->getSourceRange();
  // Initialize the object.
  InitializedEntity DelegationEntity = InitializedEntity::InitializeDelegation(
                                     QualType(ClassDecl->getTypeForDecl(), 0));
  InitializationKind Kind =
    InitList ? InitializationKind::CreateDirectList(NameLoc)
             : InitializationKind::CreateDirect(NameLoc, InitRange.getBegin(),
                                                InitRange.getEnd());
  InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args);
  ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind,
                                              Args, nullptr);
  if (DelegationInit.isInvalid())
    return true;

  assert(cast<CXXConstructExpr>(DelegationInit.get())->getConstructor() &&
         "Delegating constructor with no target?");

  // C++11 [class.base.init]p7:
  //   The initialization of each base and member constitutes a
  //   full-expression.
  DelegationInit = ActOnFinishFullExpr(DelegationInit.get(),
                                       InitRange.getBegin());
  if (DelegationInit.isInvalid())
    return true;

  // If we are in a dependent context, template instantiation will
  // perform this type-checking again. Just save the arguments that we
  // received in a ParenListExpr.
  // FIXME: This isn't quite ideal, since our ASTs don't capture all
  // of the information that we have about the base
  // initializer. However, deconstructing the ASTs is a dicey process,
  // and this approach is far more likely to get the corner cases right.
  if (CurContext->isDependentContext())
    DelegationInit = Init;

  return new (Context) CXXCtorInitializer(Context, TInfo, InitRange.getBegin(),
                                          DelegationInit.getAs<Expr>(),
                                          InitRange.getEnd());
}

MemInitResult
Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
                           Expr *Init, CXXRecordDecl *ClassDecl,
                           SourceLocation EllipsisLoc) {
  SourceLocation BaseLoc
    = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin();

  if (!BaseType->isDependentType() && !BaseType->isRecordType())
    return Diag(BaseLoc, diag::err_base_init_does_not_name_class)
             << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();

  // C++ [class.base.init]p2:
  //   [...] Unless the mem-initializer-id names a nonstatic data
  //   member of the constructor's class or a direct or virtual base
  //   of that class, the mem-initializer is ill-formed. A
  //   mem-initializer-list can initialize a base class using any
  //   name that denotes that base class type.
  bool Dependent = BaseType->isDependentType() || Init->isTypeDependent();

  SourceRange InitRange = Init->getSourceRange();
  if (EllipsisLoc.isValid()) {
    // This is a pack expansion.
    if (!BaseType->containsUnexpandedParameterPack()) {
      Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
        << SourceRange(BaseLoc, InitRange.getEnd());

      EllipsisLoc = SourceLocation();
    }
  } else {
    // Check for any unexpanded parameter packs.
    if (DiagnoseUnexpandedParameterPack(BaseLoc, BaseTInfo, UPPC_Initializer))
      return true;

    if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
      return true;
  }

  // Check for direct and virtual base classes.
  const CXXBaseSpecifier *DirectBaseSpec = nullptr;
  const CXXBaseSpecifier *VirtualBaseSpec = nullptr;
  if (!Dependent) {
    if (Context.hasSameUnqualifiedType(QualType(ClassDecl->getTypeForDecl(),0),
                                       BaseType))
      return BuildDelegatingInitializer(BaseTInfo, Init, ClassDecl);

    FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
                        VirtualBaseSpec);

    // C++ [base.class.init]p2:
    // Unless the mem-initializer-id names a nonstatic data member of the
    // constructor's class or a direct or virtual base of that class, the
    // mem-initializer is ill-formed.
    if (!DirectBaseSpec && !VirtualBaseSpec) {
      // If the class has any dependent bases, then it's possible that
      // one of those types will resolve to the same type as
      // BaseType. Therefore, just treat this as a dependent base
      // class initialization. FIXME: Should we try to check the
      // initialization anyway? It seems odd.
      if (ClassDecl->hasAnyDependentBases())
        Dependent = true;
      else
        return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
          << BaseType << Context.getTypeDeclType(ClassDecl)
          << BaseTInfo->getTypeLoc().getLocalSourceRange();
    }
  }

  if (Dependent) {
    DiscardCleanupsInEvaluationContext();

    return new (Context) CXXCtorInitializer(Context, BaseTInfo,
                                            /*IsVirtual=*/false,
                                            InitRange.getBegin(), Init,
                                            InitRange.getEnd(), EllipsisLoc);
  }

  // C++ [base.class.init]p2:
  //   If a mem-initializer-id is ambiguous because it designates both
  //   a direct non-virtual base class and an inherited virtual base
  //   class, the mem-initializer is ill-formed.
  if (DirectBaseSpec && VirtualBaseSpec)
    return Diag(BaseLoc, diag::err_base_init_direct_and_virtual)
      << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();

  const CXXBaseSpecifier *BaseSpec = DirectBaseSpec;
  if (!BaseSpec)
    BaseSpec = VirtualBaseSpec;

  // Initialize the base.
  bool InitList = true;
  MultiExprArg Args = Init;
  if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
    InitList = false;
    Args = MultiExprArg(ParenList->getExprs(), ParenList->getNumExprs());
  }

  InitializedEntity BaseEntity =
    InitializedEntity::InitializeBase(Context, BaseSpec, VirtualBaseSpec);
  InitializationKind Kind =
    InitList ? InitializationKind::CreateDirectList(BaseLoc)
             : InitializationKind::CreateDirect(BaseLoc, InitRange.getBegin(),
                                                InitRange.getEnd());
  InitializationSequence InitSeq(*this, BaseEntity, Kind, Args);
  ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind, Args, nullptr);
  if (BaseInit.isInvalid())
    return true;

  // C++11 [class.base.init]p7:
  //   The initialization of each base and member constitutes a
  //   full-expression.
  BaseInit = ActOnFinishFullExpr(BaseInit.get(), InitRange.getBegin());
  if (BaseInit.isInvalid())
    return true;

  // If we are in a dependent context, template instantiation will
  // perform this type-checking again. Just save the arguments that we
  // received in a ParenListExpr.
  // FIXME: This isn't quite ideal, since our ASTs don't capture all
  // of the information that we have about the base
  // initializer. However, deconstructing the ASTs is a dicey process,
  // and this approach is far more likely to get the corner cases right.
  if (CurContext->isDependentContext())
    BaseInit = Init;

  return new (Context) CXXCtorInitializer(Context, BaseTInfo,
                                          BaseSpec->isVirtual(),
                                          InitRange.getBegin(),
                                          BaseInit.getAs<Expr>(),
                                          InitRange.getEnd(), EllipsisLoc);
}

// Create a static_cast\<T&&>(expr).
static Expr *CastForMoving(Sema &SemaRef, Expr *E, QualType T = QualType()) {
  if (T.isNull()) T = E->getType();
  QualType TargetType = SemaRef.BuildReferenceType(
      T, /*SpelledAsLValue*/false, SourceLocation(), DeclarationName());
  SourceLocation ExprLoc = E->getLocStart();
  TypeSourceInfo *TargetLoc = SemaRef.Context.getTrivialTypeSourceInfo(
      TargetType, ExprLoc);

  return SemaRef.BuildCXXNamedCast(ExprLoc, tok::kw_static_cast, TargetLoc, E,
                                   SourceRange(ExprLoc, ExprLoc),
                                   E->getSourceRange()).get();
}

/// ImplicitInitializerKind - How an implicit base or member initializer should
/// initialize its base or member.
enum ImplicitInitializerKind {
  IIK_Default,
  IIK_Copy,
  IIK_Move,
  IIK_Inherit
};

static bool
BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
                             ImplicitInitializerKind ImplicitInitKind,
                             CXXBaseSpecifier *BaseSpec,
                             bool IsInheritedVirtualBase,
                             CXXCtorInitializer *&CXXBaseInit) {
  InitializedEntity InitEntity
    = InitializedEntity::InitializeBase(SemaRef.Context, BaseSpec,
                                        IsInheritedVirtualBase);

  ExprResult BaseInit;

  switch (ImplicitInitKind) {
  case IIK_Inherit: {
    const CXXRecordDecl *Inherited =
        Constructor->getInheritedConstructor()->getParent();
    const CXXRecordDecl *Base = BaseSpec->getType()->getAsCXXRecordDecl();
    if (Base && Inherited->getCanonicalDecl() == Base->getCanonicalDecl()) {
      // C++11 [class.inhctor]p8:
      //   Each expression in the expression-list is of the form
      //   static_cast<T&&>(p), where p is the name of the corresponding
      //   constructor parameter and T is the declared type of p.
      SmallVector<Expr*, 16> Args;
      for (unsigned I = 0, E = Constructor->getNumParams(); I != E; ++I) {
        ParmVarDecl *PD = Constructor->getParamDecl(I);
        ExprResult ArgExpr =
            SemaRef.BuildDeclRefExpr(PD, PD->getType().getNonReferenceType(),
                                     VK_LValue, SourceLocation());
        if (ArgExpr.isInvalid())
          return true;
        Args.push_back(CastForMoving(SemaRef, ArgExpr.get(), PD->getType()));
      }

      InitializationKind InitKind = InitializationKind::CreateDirect(
          Constructor->getLocation(), SourceLocation(), SourceLocation());
      InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, Args);
      BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, Args);
      break;
    }
  }
  LLVM_FALLTHROUGH; // HLSL Change
  case IIK_Default: {
    InitializationKind InitKind
      = InitializationKind::CreateDefault(Constructor->getLocation());
    InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, None);
    BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, None);
    break;
  }

  case IIK_Move:
  case IIK_Copy: {
    bool Moving = ImplicitInitKind == IIK_Move;
    ParmVarDecl *Param = Constructor->getParamDecl(0);
    QualType ParamType = Param->getType().getNonReferenceType();

    Expr *CopyCtorArg =
      DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
                          SourceLocation(), Param, false,
                          Constructor->getLocation(), ParamType,
                          VK_LValue, nullptr);

    SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(CopyCtorArg));

    // Cast to the base class to avoid ambiguities.
    QualType ArgTy =
      SemaRef.Context.getQualifiedType(BaseSpec->getType().getUnqualifiedType(),
                                       ParamType.getQualifiers());

    if (Moving) {
      CopyCtorArg = CastForMoving(SemaRef, CopyCtorArg);
    }

    CXXCastPath BasePath;
    BasePath.push_back(BaseSpec);
    CopyCtorArg = SemaRef.ImpCastExprToType(CopyCtorArg, ArgTy,
                                            CK_UncheckedDerivedToBase,
                                            Moving ? VK_XValue : VK_LValue,
                                            &BasePath).get();

    InitializationKind InitKind
      = InitializationKind::CreateDirect(Constructor->getLocation(),
                                         SourceLocation(), SourceLocation());
    InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, CopyCtorArg);
    BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind, CopyCtorArg);
    break;
  }
  }

  BaseInit = SemaRef.MaybeCreateExprWithCleanups(BaseInit);
  if (BaseInit.isInvalid())
    return true;

  CXXBaseInit =
    new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
               SemaRef.Context.getTrivialTypeSourceInfo(BaseSpec->getType(),
                                                        SourceLocation()),
                                             BaseSpec->isVirtual(),
                                             SourceLocation(),
                                             BaseInit.getAs<Expr>(),
                                             SourceLocation(),
                                             SourceLocation());

  return false;
}
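
/// Determine whether the given member expression refers to a member with
/// rvalue reference type.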
static bool RefersToRValueRef(Expr *MemRef) {
  ValueDecl *Referenced = cast<MemberExpr>(MemRef)->getMemberDecl();
  return Referenced->getType()->isRValueReferenceType();
}

static bool
BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
                               ImplicitInitializerKind ImplicitInitKind,
                               FieldDecl *Field, IndirectFieldDecl *Indirect,
                               CXXCtorInitializer *&CXXMemberInit) {
  if (Field->isInvalidDecl())
    return true;

  SourceLocation Loc = Constructor->getLocation();

  if (ImplicitInitKind == IIK_Copy || ImplicitInitKind == IIK_Move) {
    bool Moving = ImplicitInitKind == IIK_Move;
    ParmVarDecl *Param = Constructor->getParamDecl(0);
    QualType ParamType = Param->getType().getNonReferenceType();

    // Suppress copying zero-width bitfields.
    if (Field->isBitField() && Field->getBitWidthValue(SemaRef.Context) == 0)
      return false;

    Expr *MemberExprBase =
      DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
                          SourceLocation(), Param, false,
                          Loc, ParamType, VK_LValue, nullptr);

    SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(MemberExprBase));

    if (Moving) {
      MemberExprBase = CastForMoving(SemaRef, MemberExprBase);
    }

    // Build a reference to this field within the parameter.
    CXXScopeSpec SS;
    LookupResult MemberLookup(SemaRef, Field->getDeclName(), Loc,
                              Sema::LookupMemberName);
    MemberLookup.addDecl(Indirect ? cast<ValueDecl>(Indirect)
                                  : cast<ValueDecl>(Field), AS_public);
    MemberLookup.resolveKind();
    ExprResult CtorArg
      = SemaRef.BuildMemberReferenceExpr(MemberExprBase,
                                         ParamType, Loc,
                                         /*IsArrow=*/false,
                                         SS,
                                         /*TemplateKWLoc=*/SourceLocation(),
                                         /*FirstQualifierInScope=*/nullptr,
                                         MemberLookup,
                                         /*TemplateArgs=*/nullptr);
    if (CtorArg.isInvalid())
      return true;

    // C++11 [class.copy]p15:
    //   - if a member m has rvalue reference type T&&, it is direct-initialized
    //     with static_cast<T&&>(x.m);
    if (RefersToRValueRef(CtorArg.get())) {
      CtorArg = CastForMoving(SemaRef, CtorArg.get());
    }

    // When the field we are copying is an array, create index variables for
    // each dimension of the array. We use these index variables to subscript
    // the source array, and other clients (e.g., CodeGen) will perform the
    // necessary iteration with these index variables.
    SmallVector<VarDecl *, 4> IndexVariables;
    QualType BaseType = Field->getType();
    QualType SizeType = SemaRef.Context.getSizeType();
    bool InitializingArray = false;
    while (const ConstantArrayType *Array
                          = SemaRef.Context.getAsConstantArrayType(BaseType)) {
      InitializingArray = true;

      // Create the iteration variable for this array index.
      IdentifierInfo *IterationVarName = nullptr;
      {
        SmallString<8> Str;
        llvm::raw_svector_ostream OS(Str);
        OS << "__i" << IndexVariables.size();
        IterationVarName = &SemaRef.Context.Idents.get(OS.str());
      }
      VarDecl *IterationVar
        = VarDecl::Create(SemaRef.Context, SemaRef.CurContext, Loc, Loc,
                          IterationVarName, SizeType,
                        SemaRef.Context.getTrivialTypeSourceInfo(SizeType, Loc),
                          SC_None);
      IndexVariables.push_back(IterationVar);

      // Create a reference to the iteration variable.
      ExprResult IterationVarRef
        = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
      assert(!IterationVarRef.isInvalid() &&
             "Reference to invented variable cannot fail!");
      IterationVarRef = SemaRef.DefaultLvalueConversion(IterationVarRef.get());
      assert(!IterationVarRef.isInvalid() &&
             "Conversion of invented variable cannot fail!");

      // Subscript the array with this iteration variable.
      CtorArg = SemaRef.CreateBuiltinArraySubscriptExpr(CtorArg.get(), Loc,
                                                        IterationVarRef.get(),
                                                        Loc);
      if (CtorArg.isInvalid())
        return true;

      BaseType = Array->getElementType();
    }

    // The array subscript expression is an lvalue, which is wrong for moving.
    if (Moving && InitializingArray)
      CtorArg = CastForMoving(SemaRef, CtorArg.get());

    // Construct the entity that we will be initializing. For an array, this
    // will be first element in the array, which may require several levels
    // of array-subscript entities.
    SmallVector<InitializedEntity, 4> Entities;
    Entities.reserve(1 + IndexVariables.size());
    if (Indirect)
      Entities.push_back(InitializedEntity::InitializeMember(Indirect));
    else
      Entities.push_back(InitializedEntity::InitializeMember(Field));
    for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
      Entities.push_back(InitializedEntity::InitializeElement(SemaRef.Context,
                                                              0,
                                                              Entities.back()));

    // Direct-initialize to use the copy constructor.
    InitializationKind InitKind =
      InitializationKind::CreateDirect(Loc, SourceLocation(), SourceLocation());

    Expr *CtorArgE = CtorArg.getAs<Expr>();
    InitializationSequence InitSeq(SemaRef, Entities.back(), InitKind, CtorArgE);

    ExprResult MemberInit
      = InitSeq.Perform(SemaRef, Entities.back(), InitKind,
                        MultiExprArg(&CtorArgE, 1));
    MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
    if (MemberInit.isInvalid())
      return true;

    if (Indirect) {
      assert(IndexVariables.size() == 0 &&
             "Indirect field improperly initialized");
      CXXMemberInit
        = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Indirect,
                                                   Loc, Loc,
                                                   MemberInit.getAs<Expr>(),
                                                   Loc);
    } else
      CXXMemberInit = CXXCtorInitializer::Create(SemaRef.Context, Field, Loc,
                                                 Loc, MemberInit.getAs<Expr>(),
                                                 Loc, IndexVariables.data(),
                                                 IndexVariables.size());
    return false;
  }

  assert((ImplicitInitKind == IIK_Default || ImplicitInitKind == IIK_Inherit) &&
         "Unhandled implicit init kind!");

  QualType FieldBaseElementType =
    SemaRef.Context.getBaseElementType(Field->getType());

  if (FieldBaseElementType->isRecordType()) {
    InitializedEntity InitEntity = Indirect
      ? InitializedEntity::InitializeMember(Indirect)
      : InitializedEntity::InitializeMember(Field);
    InitializationKind InitKind =
      InitializationKind::CreateDefault(Loc);

    InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, None);
    ExprResult MemberInit =
      InitSeq.Perform(SemaRef, InitEntity, InitKind, None);

    MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
    if (MemberInit.isInvalid())
      return true;

    if (Indirect)
      CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
                                                               Indirect, Loc,
                                                               Loc,
                                                               MemberInit.get(),
                                                               Loc);
    else
      CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
                                                               Field, Loc, Loc,
                                                               MemberInit.get(),
                                                               Loc);
    return false;
  }

  if (!Field->getParent()->isUnion()) {
    if (FieldBaseElementType->isReferenceType()) {
      SemaRef.Diag(Constructor->getLocation(),
                   diag::err_uninitialized_member_in_ctor)
        << (int)Constructor->isImplicit()
        << SemaRef.Context.getTagDeclType(Constructor->getParent())
        << 0 << Field->getDeclName();
      SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
      return true;
    }

    if (FieldBaseElementType.isConstQualified()) {
      SemaRef.Diag(Constructor->getLocation(),
                   diag::err_uninitialized_member_in_ctor)
        << (int)Constructor->isImplicit()
        << SemaRef.Context.getTagDeclType(Constructor->getParent())
        << 1 << Field->getDeclName();
      SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
      return true;
    }
  }

  if (SemaRef.getLangOpts().ObjCAutoRefCount &&
      FieldBaseElementType->isObjCRetainableType() &&
      FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_None &&
      FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
    // ARC:
    //   Default-initialize Objective-C pointers to NULL.
    CXXMemberInit
      = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Field,
                                                 Loc, Loc,
                 new (SemaRef.Context) ImplicitValueInitExpr(Field->getType()),
                                                 Loc);
    return false;
  }

  // Nothing to initialize.
  CXXMemberInit = nullptr;
  return false;
}

namespace {
struct BaseAndFieldInfo {
  Sema &S;
  CXXConstructorDecl *Ctor;
  bool AnyErrorsInInits;
  ImplicitInitializerKind IIK;
  llvm::DenseMap<const void *, CXXCtorInitializer*> AllBaseFields;
  SmallVector<CXXCtorInitializer*, 8> AllToInit;
  llvm::DenseMap<TagDecl*, FieldDecl*> ActiveUnionMember;

  BaseAndFieldInfo(Sema &S, CXXConstructorDecl *Ctor, bool ErrorsInInits)
    : S(S), Ctor(Ctor), AnyErrorsInInits(ErrorsInInits) {
    bool Generated = Ctor->isImplicit() || Ctor->isDefaulted();
    if (Generated && Ctor->isCopyConstructor())
      IIK = IIK_Copy;
    else if (Generated && Ctor->isMoveConstructor())
      IIK = IIK_Move;
    else if (Ctor->getInheritedConstructor())
      IIK = IIK_Inherit;
    else
      IIK = IIK_Default;
  }

  bool isImplicitCopyOrMove() const {
    switch (IIK) {
    case IIK_Copy:
    case IIK_Move:
      return true;

    case IIK_Default:
    case IIK_Inherit:
      return false;
    }

    llvm_unreachable("Invalid ImplicitInitializerKind!");
  }

  bool addFieldInitializer(CXXCtorInitializer *Init) {
    AllToInit.push_back(Init);

    // Check whether this initializer makes the field "used".
    if (Init->getInit()->HasSideEffects(S.Context))
      S.UnusedPrivateFields.remove(Init->getAnyMember());

    return false;
  }

  bool isInactiveUnionMember(FieldDecl *Field) {
    RecordDecl *Record = Field->getParent();
    if (!Record->isUnion())
      return false;

    if (FieldDecl *Active =
            ActiveUnionMember.lookup(Record->getCanonicalDecl()))
      return Active != Field->getCanonicalDecl();

    // In an implicit copy or move constructor, ignore any in-class initializer.
    if (isImplicitCopyOrMove())
      return true;

    // If there's no explicit initialization, the field is active only if it
    // has an in-class initializer...
    if (Field->hasInClassInitializer())
      return false;
    // ... or it's an anonymous struct or union whose class has an in-class
    // initializer.
    if (!Field->isAnonymousStructOrUnion())
      return true;
    CXXRecordDecl *FieldRD = Field->getType()->getAsCXXRecordDecl();
    return !FieldRD->hasInClassInitializer();
  }

  /// \brief Determine whether the given field is, or is within, a union member
  /// that is inactive (because there was an initializer given for a different
  /// member of the union, or because the union was not initialized at all).
  bool isWithinInactiveUnionMember(FieldDecl *Field,
                                   IndirectFieldDecl *Indirect) {
    if (!Indirect)
      return isInactiveUnionMember(Field);

    for (auto *C : Indirect->chain()) {
      FieldDecl *Field = dyn_cast<FieldDecl>(C);
      if (Field && isInactiveUnionMember(Field))
        return true;
    }
    return false;
  }
};
}

/// \brief Determine whether the given type is an incomplete or zero-length
/// array type.
static bool isIncompleteOrZeroLengthArrayType(ASTContext &Context, QualType T) {
  if (T->isIncompleteArrayType())
    return true;

  while (const ConstantArrayType *ArrayT = Context.getAsConstantArrayType(T)) {
    if (!ArrayT->getSize())
      return true;

    T = ArrayT->getElementType();
  }

  return false;
}
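
/// Collect the initializer for \p Field: the explicitly written
/// mem-initializer if one exists, otherwise the in-class initializer or an
/// implicitly built one, subject to the active-union-member rules above.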
static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
                                    FieldDecl *Field,
                                    IndirectFieldDecl *Indirect = nullptr) {
  if (Field->isInvalidDecl())
    return false;

  // Overwhelmingly common case: we have a direct initializer for this field.
  if (CXXCtorInitializer *Init =
          Info.AllBaseFields.lookup(Field->getCanonicalDecl()))
    return Info.addFieldInitializer(Init);

  // C++11 [class.base.init]p8:
  //   if the entity is a non-static data member that has a
  //   brace-or-equal-initializer and either
  //   -- the constructor's class is a union and no other variant member of
  //      that union is designated by a mem-initializer-id or
  //   -- the constructor's class is not a union, and, if the entity is a
  //      member of an anonymous union, no other member of that union is
  //      designated by a mem-initializer-id,
  //   the entity is initialized as specified in [dcl.init].
  //
  // We also apply the same rules to handle anonymous structs within anonymous
  // unions.
  if (Info.isWithinInactiveUnionMember(Field, Indirect))
    return false;

  if (Field->hasInClassInitializer() && !Info.isImplicitCopyOrMove()) {
    ExprResult DIE =
        SemaRef.BuildCXXDefaultInitExpr(Info.Ctor->getLocation(), Field);
    if (DIE.isInvalid())
      return true;
    CXXCtorInitializer *Init;
    if (Indirect)
      Init = new (SemaRef.Context)
          CXXCtorInitializer(SemaRef.Context, Indirect, SourceLocation(),
                             SourceLocation(), DIE.get(), SourceLocation());
    else
      Init = new (SemaRef.Context)
          CXXCtorInitializer(SemaRef.Context, Field, SourceLocation(),
                             SourceLocation(), DIE.get(), SourceLocation());
    return Info.addFieldInitializer(Init);
  }

  // Don't initialize incomplete or zero-length arrays.
  if (isIncompleteOrZeroLengthArrayType(SemaRef.Context, Field->getType()))
    return false;

  // Don't try to build an implicit initializer if there were semantic
  // errors in any of the initializers (and therefore we might be
  // missing some that the user actually wrote).
  if (Info.AnyErrorsInInits)
    return false;

  CXXCtorInitializer *Init = nullptr;
  if (BuildImplicitMemberInitializer(Info.S, Info.Ctor, Info.IIK, Field,
                                     Indirect, Init))
    return true;

  if (!Init)
    return false;

  return Info.addFieldInitializer(Init);
}

bool
Sema::SetDelegatingInitializer(CXXConstructorDecl *Constructor,
                               CXXCtorInitializer *Initializer) {
  assert(Initializer->isDelegatingInitializer());
  Constructor->setNumCtorInitializers(1);
  CXXCtorInitializer **initializer =
    new (Context) CXXCtorInitializer*[1];
  memcpy(initializer, &Initializer, sizeof (CXXCtorInitializer*));
  Constructor->setCtorInitializers(initializer);

  if (CXXDestructorDecl *Dtor = LookupDestructor(Constructor->getParent())) {
    MarkFunctionReferenced(Initializer->getSourceLocation(), Dtor);
    DiagnoseUseOfDecl(Dtor, Initializer->getSourceLocation());
  }

  DelegatingCtorDecls.push_back(Constructor);

  DiagnoseUninitializedFields(*this, Constructor);

  return false;
}

bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
                               ArrayRef<CXXCtorInitializer *> Initializers) {
  if (Constructor->isDependentContext()) {
    // Just store the initializers as written, they will be checked during
    // instantiation.
    if (!Initializers.empty()) {
      Constructor->setNumCtorInitializers(Initializers.size());
      CXXCtorInitializer **baseOrMemberInitializers =
        new (Context) CXXCtorInitializer*[Initializers.size()];
      memcpy(baseOrMemberInitializers, Initializers.data(),
             Initializers.size() * sizeof(CXXCtorInitializer*));
      Constructor->setCtorInitializers(baseOrMemberInitializers);
    }

    // Let template instantiation know whether we had errors.
    if (AnyErrors)
      Constructor->setInvalidDecl();

    return false;
  }

  BaseAndFieldInfo Info(*this, Constructor, AnyErrors);

  // We need to build the initializer AST according to order of construction
  // and not what user specified in the Initializers list.
  CXXRecordDecl *ClassDecl = Constructor->getParent()->getDefinition();
  if (!ClassDecl)
    return true;

  bool HadError = false;

  for (unsigned i = 0; i < Initializers.size(); i++) {
    CXXCtorInitializer *Member = Initializers[i];

    if (Member->isBaseInitializer())
      Info.AllBaseFields[Member->getBaseClass()->getAs<RecordType>()] = Member;
    else {
      Info.AllBaseFields[Member->getAnyMember()->getCanonicalDecl()] = Member;

      if (IndirectFieldDecl *F = Member->getIndirectMember()) {
        for (auto *C : F->chain()) {
          FieldDecl *FD = dyn_cast<FieldDecl>(C);
          if (FD && FD->getParent()->isUnion())
            Info.ActiveUnionMember.insert(std::make_pair(
                FD->getParent()->getCanonicalDecl(), FD->getCanonicalDecl()));
        }
      } else if (FieldDecl *FD = Member->getMember()) {
        if (FD->getParent()->isUnion())
          Info.ActiveUnionMember.insert(std::make_pair(
              FD->getParent()->getCanonicalDecl(), FD->getCanonicalDecl()));
      }
    }
  }

  // Keep track of the direct virtual bases.
  llvm::SmallPtrSet<CXXBaseSpecifier *, 16> DirectVBases;
  for (auto &I : ClassDecl->bases()) {
    if (I.isVirtual())
      DirectVBases.insert(&I);
  }

  // Push virtual bases before others.
  for (auto &VBase : ClassDecl->vbases()) {
    if (CXXCtorInitializer *Value
        = Info.AllBaseFields.lookup(VBase.getType()->getAs<RecordType>())) {
      // [class.base.init]p7, per DR257:
      //   A mem-initializer where the mem-initializer-id names a virtual base
      //   class is ignored during execution of a constructor of any class that
      //   is not the most derived class.
      if (ClassDecl->isAbstract()) {
        // FIXME: Provide a fixit to remove the base specifier. This requires
        // tracking the location of the associated comma for a base specifier.
        Diag(Value->getSourceLocation(), diag::warn_abstract_vbase_init_ignored)
          << VBase.getType() << ClassDecl;
        DiagnoseAbstractType(ClassDecl);
      }

      Info.AllToInit.push_back(Value);
    } else if (!AnyErrors && !ClassDecl->isAbstract()) {
      // [class.base.init]p8, per DR257:
      //   If a given [...] base class is not named by a mem-initializer-id
      //   [...] and the entity is not a virtual base class of an abstract
      //   class, then [...] the entity is default-initialized.
      bool IsInheritedVirtualBase = !DirectVBases.count(&VBase);
      CXXCtorInitializer *CXXBaseInit;
      if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
                                       &VBase, IsInheritedVirtualBase,
                                       CXXBaseInit)) {
        HadError = true;
        continue;
      }

      Info.AllToInit.push_back(CXXBaseInit);
    }
  }

  // Non-virtual bases.
  for (auto &Base : ClassDecl->bases()) {
    // Virtuals are in the virtual base list and already constructed.
    if (Base.isVirtual())
      continue;

    if (CXXCtorInitializer *Value
          = Info.AllBaseFields.lookup(Base.getType()->getAs<RecordType>())) {
      Info.AllToInit.push_back(Value);
    } else if (!AnyErrors) {
      CXXCtorInitializer *CXXBaseInit;
      if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
                                       &Base, /*IsInheritedVirtualBase=*/false,
                                       CXXBaseInit)) {
        HadError = true;
        continue;
      }

      Info.AllToInit.push_back(CXXBaseInit);
    }
  }

  // Fields.
  for (auto *Mem : ClassDecl->decls()) {
    if (auto *F = dyn_cast<FieldDecl>(Mem)) {
      // C++ [class.bit]p2:
      //   A declaration for a bit-field that omits the identifier declares an
      //   unnamed bit-field. Unnamed bit-fields are not members and cannot be
      //   initialized.
      if (F->isUnnamedBitfield())
        continue;

      // If we're not generating the implicit copy/move constructor, then we'll
      // handle anonymous struct/union fields based on their individual
      // indirect fields.
      if (F->isAnonymousStructOrUnion() && !Info.isImplicitCopyOrMove())
        continue;

      if (CollectFieldInitializer(*this, Info, F))
        HadError = true;
      continue;
    }

    // Beyond this point, we only consider default initialization.
    if (Info.isImplicitCopyOrMove())
      continue;

    if (auto *F = dyn_cast<IndirectFieldDecl>(Mem)) {
      if (F->getType()->isIncompleteArrayType()) {
        assert(ClassDecl->hasFlexibleArrayMember() &&
               "Incomplete array type is not valid");
        continue;
      }

      // Initialize each field of an anonymous struct individually.
      if (CollectFieldInitializer(*this, Info, F->getAnonField(), F))
        HadError = true;

      continue;
    }
  }

  unsigned NumInitializers = Info.AllToInit.size();
  if (NumInitializers > 0) {
    Constructor->setNumCtorInitializers(NumInitializers);
    CXXCtorInitializer **baseOrMemberInitializers =
      new (Context) CXXCtorInitializer*[NumInitializers];
    memcpy(baseOrMemberInitializers, Info.AllToInit.data(),
           NumInitializers * sizeof(CXXCtorInitializer*));
    Constructor->setCtorInitializers(baseOrMemberInitializers);

    // Constructors implicitly reference the base and member
    // destructors.
    MarkBaseAndMemberDestructorsReferenced(Constructor->getLocation(),
                                           Constructor->getParent());
  }

  return HadError;
}

static void PopulateKeysForFields(FieldDecl *Field,
                                  SmallVectorImpl<const void*> &IdealInits) {
  if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->isAnonymousStructOrUnion()) {
      for (auto *Field : RD->fields())
        PopulateKeysForFields(Field, IdealInits);
      return;
    }
  }
  IdealInits.push_back(Field->getCanonicalDecl());
}

static const void *GetKeyForBase(ASTContext &Context, QualType BaseType) {
  return Context.getCanonicalType(BaseType).getTypePtr();
}

static const void *GetKeyForMember(ASTContext &Context,
                                   CXXCtorInitializer *Member) {
  if (!Member->isAnyMemberInitializer())
    return GetKeyForBase(Context, QualType(Member->getBaseClass(), 0));

  return Member->getAnyMember()->getCanonicalDecl();
}
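
/// Diagnose explicit mem-initializers that are written in a different order
/// than the one in which the bases and members will actually be initialized.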
static void DiagnoseBaseOrMemInitializerOrder(
    Sema &SemaRef, const CXXConstructorDecl *Constructor,
    ArrayRef<CXXCtorInitializer *> Inits) {
  if (Constructor->getDeclContext()->isDependentContext())
    return;

  // Don't check initializers order unless the warning is enabled at the
  // location of at least one initializer.
  bool ShouldCheckOrder = false;
  for (unsigned InitIndex = 0; InitIndex != Inits.size(); ++InitIndex) {
    CXXCtorInitializer *Init = Inits[InitIndex];
    if (!SemaRef.Diags.isIgnored(diag::warn_initializer_out_of_order,
                                 Init->getSourceLocation())) {
      ShouldCheckOrder = true;
      break;
    }
  }
  if (!ShouldCheckOrder)
    return;

  // Build the list of bases and members in the order that they'll
  // actually be initialized.  The explicit initializers should be in
  // this same order but may be missing things.
  SmallVector<const void*, 32> IdealInitKeys;

  const CXXRecordDecl *ClassDecl = Constructor->getParent();

  // 1. Virtual bases.
  for (const auto &VBase : ClassDecl->vbases())
    IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, VBase.getType()));

  // 2. Non-virtual bases.
  for (const auto &Base : ClassDecl->bases()) {
    if (Base.isVirtual())
      continue;
    IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, Base.getType()));
  }

  // 3. Direct fields.
  for (auto *Field : ClassDecl->fields()) {
    if (Field->isUnnamedBitfield())
      continue;

    PopulateKeysForFields(Field, IdealInitKeys);
  }

  unsigned NumIdealInits = IdealInitKeys.size();
  unsigned IdealIndex = 0;

  CXXCtorInitializer *PrevInit = nullptr;
  for (unsigned InitIndex = 0; InitIndex != Inits.size(); ++InitIndex) {
    CXXCtorInitializer *Init = Inits[InitIndex];
    const void *InitKey = GetKeyForMember(SemaRef.Context, Init);

    // Scan forward to try to find this initializer in the idealized
    // initializers list.
    for (; IdealIndex != NumIdealInits; ++IdealIndex)
      if (InitKey == IdealInitKeys[IdealIndex])
        break;

    // If we didn't find this initializer, it must be because we
    // scanned past it on a previous iteration.  That can only
    // happen if we're out of order; emit a warning.
    if (IdealIndex == NumIdealInits && PrevInit) {
      Sema::SemaDiagnosticBuilder D =
        SemaRef.Diag(PrevInit->getSourceLocation(),
                     diag::warn_initializer_out_of_order);

      if (PrevInit->isAnyMemberInitializer())
        D << 0 << PrevInit->getAnyMember()->getDeclName();
      else
        D << 1 << PrevInit->getTypeSourceInfo()->getType();

      if (Init->isAnyMemberInitializer())
        D << 0 << Init->getAnyMember()->getDeclName();
      else
        D << 1 << Init->getTypeSourceInfo()->getType();

      // Move back to the initializer's location in the ideal list.
      for (IdealIndex = 0; IdealIndex != NumIdealInits; ++IdealIndex)
        if (InitKey == IdealInitKeys[IdealIndex])
          break;

      assert(IdealIndex != NumIdealInits &&
             "initializer not found in initializer list");
    }

    PrevInit = Init;
  }
}

namespace {
bool CheckRedundantInit(Sema &S,
                        CXXCtorInitializer *Init,
                        CXXCtorInitializer *&PrevInit) {
  if (!PrevInit) {
    PrevInit = Init;
    return false;
  }

  if (FieldDecl *Field = Init->getAnyMember())
    S.Diag(Init->getSourceLocation(),
           diag::err_multiple_mem_initialization)
      << Field->getDeclName()
      << Init->getSourceRange();
  else {
    const Type *BaseClass = Init->getBaseClass();
    assert(BaseClass && "neither field nor base");
    S.Diag(Init->getSourceLocation(),
           diag::err_multiple_base_initialization)
      << QualType(BaseClass, 0)
      << Init->getSourceRange();
  }
  S.Diag(PrevInit->getSourceLocation(), diag::note_previous_initializer)
    << 0 << PrevInit->getSourceRange();

  return true;
}

typedef std::pair<NamedDecl *, CXXCtorInitializer *> UnionEntry;
typedef llvm::DenseMap<RecordDecl*, UnionEntry> RedundantUnionMap;

bool CheckRedundantUnionInit(Sema &S,
                             CXXCtorInitializer *Init,
                             RedundantUnionMap &Unions) {
  FieldDecl *Field = Init->getAnyMember();
  RecordDecl *Parent = Field->getParent();
  NamedDecl *Child = Field;

  while (Parent->isAnonymousStructOrUnion() || Parent->isUnion()) {
    if (Parent->isUnion()) {
      UnionEntry &En = Unions[Parent];
      if (En.first && En.first != Child) {
        S.Diag(Init->getSourceLocation(),
               diag::err_multiple_mem_union_initialization)
          << Field->getDeclName()
          << Init->getSourceRange();
        S.Diag(En.second->getSourceLocation(),
               diag::note_previous_initializer)
          << 0 << En.second->getSourceRange();
        return true;
      }

      if (!En.first) {
        En.first = Child;
        En.second = Init;
      }
      if (!Parent->isAnonymousStructOrUnion())
        return false;
    }

    Child = Parent;
    Parent = cast<RecordDecl>(Parent->getDeclContext());
  }

  return false;
}
}

/// ActOnMemInitializers - Handle the member initializers for a constructor.
void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
                                SourceLocation ColonLoc,
                                ArrayRef<CXXCtorInitializer*> MemInits,
                                bool AnyErrors) {
  if (!ConstructorDecl)
    return;

  AdjustDeclIfTemplate(ConstructorDecl);

  CXXConstructorDecl *Constructor
    = dyn_cast<CXXConstructorDecl>(ConstructorDecl);

  if (!Constructor) {
    Diag(ColonLoc, diag::err_only_constructors_take_base_inits);
    return;
  }

  // Mapping for the duplicate initializers check.
  // For member initializers, this is keyed with a FieldDecl*.
  // For base initializers, this is keyed with a Type*.
  llvm::DenseMap<const void *, CXXCtorInitializer *> Members;

  // Mapping for the inconsistent anonymous-union initializers check.
  RedundantUnionMap MemberUnions;

  bool HadError = false;
  for (unsigned i = 0; i < MemInits.size(); i++) {
    CXXCtorInitializer *Init = MemInits[i];

    // Set the source order index.
    Init->setSourceOrder(i);

    if (Init->isAnyMemberInitializer()) {
      const void *Key = GetKeyForMember(Context, Init);
      if (CheckRedundantInit(*this, Init, Members[Key]) ||
          CheckRedundantUnionInit(*this, Init, MemberUnions))
        HadError = true;
    } else if (Init->isBaseInitializer()) {
      const void *Key = GetKeyForMember(Context, Init);
      if (CheckRedundantInit(*this, Init, Members[Key]))
        HadError = true;
    } else {
      assert(Init->isDelegatingInitializer());
      // This must be the only initializer
      if (MemInits.size() != 1) {
        Diag(Init->getSourceLocation(),
             diag::err_delegating_initializer_alone)
          << Init->getSourceRange() << MemInits[i ? 0 : 1]->getSourceRange();
        // We will treat this as being the only initializer.
      }
      SetDelegatingInitializer(Constructor, MemInits[i]);
      // Return immediately as the initializer is set.
      return;
    }
  }

  if (HadError)
    return;

  DiagnoseBaseOrMemInitializerOrder(*this, Constructor, MemInits);

  SetCtorInitializers(Constructor, AnyErrors, MemInits);

  DiagnoseUninitializedFields(*this, Constructor);
}
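
/// Mark the destructors of the class's non-static data members, direct
/// bases, and virtual bases as referenced, checking destructor access along
/// the way.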
void
Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
                                             CXXRecordDecl *ClassDecl) {
  // Ignore dependent contexts. Also ignore unions, since their members never
  // have destructors implicitly called.
  if (ClassDecl->isDependentContext() || ClassDecl->isUnion())
    return;

  // FIXME: all the access-control diagnostics are positioned on the
  // field/base declaration.  That's probably good; that said, the
  // user might reasonably want to know why the destructor is being
  // emitted, and we currently don't say.

  // Non-static data members.
  for (auto *Field : ClassDecl->fields()) {
    if (Field->isInvalidDecl())
      continue;

    // Don't destroy incomplete or zero-length arrays.
    if (isIncompleteOrZeroLengthArrayType(Context, Field->getType()))
      continue;

    QualType FieldType = Context.getBaseElementType(Field->getType());

    const RecordType* RT = FieldType->getAs<RecordType>();
    if (!RT)
      continue;

    CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (FieldClassDecl->isInvalidDecl())
      continue;
    if (FieldClassDecl->hasIrrelevantDestructor())
      continue;
    // The destructor for an implicit anonymous union member is never invoked.
    if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
      continue;

    CXXDestructorDecl *Dtor = LookupDestructor(FieldClassDecl);
    assert(Dtor && "No dtor found for FieldClassDecl!");
    CheckDestructorAccess(Field->getLocation(), Dtor,
                          PDiag(diag::err_access_dtor_field)
                            << Field->getDeclName()
                            << FieldType);

    MarkFunctionReferenced(Location, Dtor);
    DiagnoseUseOfDecl(Dtor, Location);
  }

  llvm::SmallPtrSet<const RecordType *, 8> DirectVirtualBases;

  // Bases.
  for (const auto &Base : ClassDecl->bases()) {
    // Bases are always records in a well-formed non-dependent class.
    const RecordType *RT = Base.getType()->getAs<RecordType>();

    // Remember direct virtual bases.
    if (Base.isVirtual())
      DirectVirtualBases.insert(RT);

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    // If our base class is invalid, we probably can't get its dtor anyway.
    if (BaseClassDecl->isInvalidDecl())
      continue;
    if (BaseClassDecl->hasIrrelevantDestructor())
      continue;

    CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
    assert(Dtor && "No dtor found for BaseClassDecl!");

    // FIXME: caret should be on the start of the class name
    CheckDestructorAccess(Base.getLocStart(), Dtor,
                          PDiag(diag::err_access_dtor_base)
                            << Base.getType()
                            << Base.getSourceRange(),
                          Context.getTypeDeclType(ClassDecl));

    MarkFunctionReferenced(Location, Dtor);
    DiagnoseUseOfDecl(Dtor, Location);
  }

  // Virtual bases.
  for (const auto &VBase : ClassDecl->vbases()) {
    // Bases are always records in a well-formed non-dependent class.
    const RecordType *RT = VBase.getType()->castAs<RecordType>();

    // Ignore direct virtual bases.
    if (DirectVirtualBases.count(RT))
      continue;

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    // If our base class is invalid, we probably can't get its dtor anyway.
    if (BaseClassDecl->isInvalidDecl())
      continue;
    if (BaseClassDecl->hasIrrelevantDestructor())
      continue;

    CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
    assert(Dtor && "No dtor found for BaseClassDecl!");
    if (CheckDestructorAccess(
            ClassDecl->getLocation(), Dtor,
            PDiag(diag::err_access_dtor_vbase)
                << Context.getTypeDeclType(ClassDecl) << VBase.getType(),
            Context.getTypeDeclType(ClassDecl)) ==
        AR_accessible) {
      CheckDerivedToBaseConversion(
          Context.getTypeDeclType(ClassDecl), VBase.getType(),
          diag::err_access_dtor_vbase, 0, ClassDecl->getLocation(),
          SourceRange(), DeclarationName(), nullptr);
    }

    MarkFunctionReferenced(Location, Dtor);
    DiagnoseUseOfDecl(Dtor, Location);
  }
}

void Sema::ActOnDefaultCtorInitializers(Decl *CDtorDecl) {
  if (!CDtorDecl)
    return;

  if (CXXConstructorDecl *Constructor
      = dyn_cast<CXXConstructorDecl>(CDtorDecl)) {
    SetCtorInitializers(Constructor, /*AnyErrors=*/false);
    DiagnoseUninitializedFields(*this, Constructor);
  }
}

bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
                                  unsigned DiagID, AbstractDiagSelID SelID) {
  class NonAbstractTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    AbstractDiagSelID SelID;

  public:
    NonAbstractTypeDiagnoser(unsigned DiagID, AbstractDiagSelID SelID)
      : TypeDiagnoser(DiagID == 0), DiagID(DiagID), SelID(SelID) { }

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      if (Suppressed) return;
      if (SelID == -1)
        S.Diag(Loc, DiagID) << T;
      else
        S.Diag(Loc, DiagID) << SelID << T;
    }
  } Diagnoser(DiagID, SelID);

  return RequireNonAbstractType(Loc, T, Diagnoser);
}

bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
                                  TypeDiagnoser &Diagnoser) {
  if (!getLangOpts().CPlusPlus)
    return false;

  if (const ArrayType *AT = Context.getAsArrayType(T))
    return RequireNonAbstractType(Loc, AT->getElementType(), Diagnoser);

  if (const PointerType *PT = T->getAs<PointerType>()) {
    // Find the innermost pointer type.
    while (const PointerType *T = PT->getPointeeType()->getAs<PointerType>())
      PT = T;

    if (const ArrayType *AT = Context.getAsArrayType(PT->getPointeeType()))
      return RequireNonAbstractType(Loc, AT->getElementType(), Diagnoser);
  }

  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());

  // We can't answer whether something is abstract until it has a
  // definition.  If it's currently being defined, we'll walk back
  // over all the declarations when we have a full definition.
  const CXXRecordDecl *Def = RD->getDefinition();
  if (!Def || Def->isBeingDefined())
    return false;

  if (!RD->isAbstract())
    return false;

  Diagnoser.diagnose(*this, Loc, T);
  DiagnoseAbstractType(RD);

  return true;
}

void Sema::DiagnoseAbstractType(const CXXRecordDecl *RD) {
  // Check if we've already emitted the list of pure virtual functions
  // for this class.
  if (PureVirtualClassDiagSet && PureVirtualClassDiagSet->count(RD))
    return;

  // If the diagnostic is suppressed, don't emit the notes. We're only
  // going to emit them once, so try to attach them to a diagnostic we're
  // actually going to show.
  if (Diags.isLastDiagnosticIgnored())
    return;

  CXXFinalOverriderMap FinalOverriders;
  RD->getFinalOverriders(FinalOverriders);

  // Keep a set of seen pure methods so we won't diagnose the same method
  // more than once.
  llvm::SmallPtrSet<const CXXMethodDecl *, 8> SeenPureMethods;

  for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
                                   MEnd = FinalOverriders.end();
       M != MEnd;
       ++M) {
    for (OverridingMethods::iterator SO = M->second.begin(),
                                  SOEnd = M->second.end();
         SO != SOEnd; ++SO) {
      // C++ [class.abstract]p4:
      //   A class is abstract if it contains or inherits at least one
      //   pure virtual function for which the final overrider is pure
      //   virtual.
      //
      if (SO->second.size() != 1)
        continue;

      if (!SO->second.front().Method->isPure())
        continue;

      if (!SeenPureMethods.insert(SO->second.front().Method).second)
        continue;

      Diag(SO->second.front().Method->getLocation(),
           diag::note_pure_virtual_function)
        << SO->second.front().Method->getDeclName() << RD->getDeclName();
    }
  }

  if (!PureVirtualClassDiagSet)
    PureVirtualClassDiagSet.reset(new RecordDeclSetTy);
  PureVirtualClassDiagSet->insert(RD);
}

namespace {
struct AbstractUsageInfo {
  Sema &S;
  CXXRecordDecl *Record;
  CanQualType AbstractType;
  bool Invalid;

  AbstractUsageInfo(Sema &S, CXXRecordDecl *Record)
    : S(S), Record(Record),
      AbstractType(S.Context.getCanonicalType(
                   S.Context.getTypeDeclType(Record))),
      Invalid(false) {}

  void DiagnoseAbstractType() {
    if (Invalid) return;
    S.DiagnoseAbstractType(Record);
    Invalid = true;
  }

  void CheckType(const NamedDecl *D, TypeLoc TL, Sema::AbstractDiagSelID Sel);
};

struct CheckAbstractUsage {
  AbstractUsageInfo &Info;
  const NamedDecl *Ctx;

  CheckAbstractUsage(AbstractUsageInfo &Info, const NamedDecl *Ctx)
    : Info(Info), Ctx(Ctx) {}

  void Visit(TypeLoc TL, Sema::AbstractDiagSelID Sel) {
    switch (TL.getTypeLocClass()) {
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
    case TypeLoc::CLASS: Check(TL.castAs<CLASS##TypeLoc>(), Sel); break;
#include "clang/AST/TypeLocNodes.def"
    }
  }

  void Check(FunctionProtoTypeLoc TL, Sema::AbstractDiagSelID Sel) {
    Visit(TL.getReturnLoc(), Sema::AbstractReturnType);
    for (unsigned I = 0, E = TL.getNumParams(); I != E; ++I) {
      if (!TL.getParam(I))
        continue;

      TypeSourceInfo *TSI = TL.getParam(I)->getTypeSourceInfo();
      if (TSI) Visit(TSI->getTypeLoc(), Sema::AbstractParamType);
    }
  }

  void Check(ArrayTypeLoc TL, Sema::AbstractDiagSelID Sel) {
    Visit(TL.getElementLoc(), Sema::AbstractArrayType);
  }

  void Check(TemplateSpecializationTypeLoc TL, Sema::AbstractDiagSelID Sel) {
    // Visit the type parameters from a permissive context.
    for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
      TemplateArgumentLoc TAL = TL.getArgLoc(I);
      if (TAL.getArgument().getKind() == TemplateArgument::Type)
        if (TypeSourceInfo *TSI = TAL.getTypeSourceInfo())
          Visit(TSI->getTypeLoc(), Sema::AbstractNone);
      // TODO: other template argument types?
    }
  }

  // Visit pointee types from a permissive context.
#define CheckPolymorphic(Type) \
  void Check(Type TL, Sema::AbstractDiagSelID Sel) { \
    Visit(TL.getNextTypeLoc(), Sema::AbstractNone); \
  }
  CheckPolymorphic(PointerTypeLoc)
  CheckPolymorphic(ReferenceTypeLoc)
  CheckPolymorphic(MemberPointerTypeLoc)
  CheckPolymorphic(BlockPointerTypeLoc)
  CheckPolymorphic(AtomicTypeLoc)

  /// Handle all the types we haven't given a more specific
  /// implementation for above.
  void Check(TypeLoc TL, Sema::AbstractDiagSelID Sel) {
    // Every other kind of type that we haven't called out already
    // that has an inner type is either (1) sugar or (2) contains that
    // inner type in some way as a subobject.
    if (TypeLoc Next = TL.getNextTypeLoc())
      return Visit(Next, Sel);

    // If there's no inner type and we're in a permissive context,
    // don't diagnose.
    if (Sel == Sema::AbstractNone)
      return;

    // Check whether the type matches the abstract type.
    QualType T = TL.getType();
    if (T->isArrayType()) {
      Sel = Sema::AbstractArrayType;
      T = Info.S.Context.getBaseElementType(T);
    }
    CanQualType CT = T->getCanonicalTypeUnqualified().getUnqualifiedType();
    if (CT != Info.AbstractType) return;

    // It matched; do some magic.
    if (Sel == Sema::AbstractArrayType) {
      Info.S.Diag(Ctx->getLocation(), diag::err_array_of_abstract_type)
        << T << TL.getSourceRange();
    } else {
      Info.S.Diag(Ctx->getLocation(), diag::err_abstract_type_in_decl)
        << Sel << T << TL.getSourceRange();
    }
    Info.DiagnoseAbstractType();
  }
};

void AbstractUsageInfo::CheckType(const NamedDecl *D, TypeLoc TL,
                                  Sema::AbstractDiagSelID Sel) {
  CheckAbstractUsage(*this, D).Visit(TL, Sel);
}
}

/// Check for invalid uses of an abstract type in a method declaration.
static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
                                    CXXMethodDecl *MD) {
  // No need to do the check on definitions, which require that
  // the return/param types be complete.
  if (MD->doesThisDeclarationHaveABody())
    return;

  // For safety's sake, just ignore it if we don't have type source
  // information.  This should never happen for non-implicit methods,
  // but...
  if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
    Info.CheckType(MD, TSI->getTypeLoc(), Sema::AbstractNone);
}

/// Check for invalid uses of an abstract type within a class definition.
static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
                                    CXXRecordDecl *RD) {
  for (auto *D : RD->decls()) {
    if (D->isImplicit()) continue;

    // Methods and method templates.
    if (isa<CXXMethodDecl>(D)) {
      CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(D));
    } else if (isa<FunctionTemplateDecl>(D)) {
      FunctionDecl *FD = cast<FunctionTemplateDecl>(D)->getTemplatedDecl();
      CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(FD));

    // Fields and static variables.
    } else if (isa<FieldDecl>(D)) {
      FieldDecl *FD = cast<FieldDecl>(D);
      if (TypeSourceInfo *TSI = FD->getTypeSourceInfo())
        Info.CheckType(FD, TSI->getTypeLoc(), Sema::AbstractFieldType);
    } else if (isa<VarDecl>(D)) {
      VarDecl *VD = cast<VarDecl>(D);
      if (TypeSourceInfo *TSI = VD->getTypeSourceInfo())
        Info.CheckType(VD, TSI->getTypeLoc(), Sema::AbstractVariableType);

    // Nested classes and class templates.
    } else if (isa<CXXRecordDecl>(D)) {
      CheckAbstractClassUsage(Info, cast<CXXRecordDecl>(D));
    } else if (isa<ClassTemplateDecl>(D)) {
      CheckAbstractClassUsage(Info,
                             cast<ClassTemplateDecl>(D)->getTemplatedDecl());
    }
  }
}

/// \brief Check class-level dllimport/dllexport attribute.
void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
  Attr *ClassAttr = getDLLAttr(Class);

  // MSVC inherits DLL attributes to partial class template specializations.
  if (Context.getTargetInfo().getCXXABI().isMicrosoft() && !ClassAttr) {
    if (auto *Spec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Class)) {
      if (Attr *TemplateAttr =
              getDLLAttr(Spec->getSpecializedTemplate()->getTemplatedDecl())) {
        auto *A = cast<InheritableAttr>(TemplateAttr->clone(getASTContext()));
        A->setInherited(true);
        ClassAttr = A;
      }
    }
  }

  if (!ClassAttr)
    return;

  if (!Class->isExternallyVisible()) {
    Diag(Class->getLocation(), diag::err_attribute_dll_not_extern)
        << Class << ClassAttr;
    return;
  }

  if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
      !ClassAttr->isInherited()) {
    // Diagnose dll attributes on members of class with dll attribute.
    for (Decl *Member : Class->decls()) {
      if (!isa<VarDecl>(Member) && !isa<CXXMethodDecl>(Member))
        continue;
      InheritableAttr *MemberAttr = getDLLAttr(Member);
      if (!MemberAttr || MemberAttr->isInherited() || Member->isInvalidDecl())
        continue;

      Diag(MemberAttr->getLocation(),
             diag::err_attribute_dll_member_of_dll_class)
          << MemberAttr << ClassAttr;
      Diag(ClassAttr->getLocation(), diag::note_previous_attribute);
      Member->setInvalidDecl();
    }
  }

  if (Class->getDescribedClassTemplate())
    // Don't inherit dll attribute until the template is instantiated.
    return;

  // The class is either imported or exported.
  const bool ClassExported = ClassAttr->getKind() == attr::DLLExport;
  const bool ClassImported = !ClassExported;

  TemplateSpecializationKind TSK = Class->getTemplateSpecializationKind();

  // Ignore explicit dllexport on explicit class template instantiation
  // declarations.
  if (ClassExported && !ClassAttr->isInherited() &&
      TSK == TSK_ExplicitInstantiationDeclaration) {
    Class->dropAttr<DLLExportAttr>();
    return;
  }

  // Force declaration of implicit members so they can inherit the attribute.
  ForceDeclarationOfImplicitMembers(Class);

  // FIXME: MSVC's docs say all bases must be exportable, but this doesn't
  // seem to be true in practice?

  for (Decl *Member : Class->decls()) {
    VarDecl *VD = dyn_cast<VarDecl>(Member);
    CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);

    // Only methods and static fields inherit the attributes.
    if (!VD && !MD)
      continue;

    if (MD) {
      // Don't process deleted methods.
      if (MD->isDeleted())
        continue;

      if (MD->isInlined()) {
        // MinGW does not import or export inline methods.
        if (!Context.getTargetInfo().getCXXABI().isMicrosoft())
          continue;

        // MSVC versions before 2015 don't export the move assignment
        // operators, so don't attempt to import them if we have a definition.
        if (ClassImported && MD->isMoveAssignmentOperator() &&
            !getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015))
          continue;
      }
    }

    if (!cast<NamedDecl>(Member)->isExternallyVisible())
      continue;

    if (!getDLLAttr(Member)) {
      auto *NewAttr =
          cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
      NewAttr->setInherited(true);
      Member->addAttr(NewAttr);
    }

    if (MD && ClassExported) {
      if (TSK == TSK_ExplicitInstantiationDeclaration)
        // Don't go any further if this is just an explicit instantiation
        // declaration.
        continue;

      if (MD->isUserProvided()) {
        // Instantiate non-default class member functions ...

        // .. except for certain kinds of template specializations.
        if (TSK == TSK_ImplicitInstantiation && !ClassAttr->isInherited())
          continue;

        MarkFunctionReferenced(Class->getLocation(), MD);

        // The function will be passed to the consumer when its definition is
        // encountered.
      } else if (!MD->isTrivial() || MD->isExplicitlyDefaulted() ||
                 MD->isCopyAssignmentOperator() ||
                 MD->isMoveAssignmentOperator()) {
        // Synthesize and instantiate non-trivial implicit methods, explicitly
        // defaulted methods, and the copy and move assignment operators. The
        // latter are exported even if they are trivial, because the address of
        // an operator can be taken and should compare equal across libraries.
        DiagnosticErrorTrap Trap(Diags);
        MarkFunctionReferenced(Class->getLocation(), MD);
        if (Trap.hasErrorOccurred()) {
          Diag(ClassAttr->getLocation(), diag::note_due_to_dllexported_class)
              << Class->getName() << !getLangOpts().CPlusPlus11;
          break;
        }

        // There is no later point when we will see the definition of this
        // function, so pass it to the consumer now.
        Consumer.HandleTopLevelDecl(DeclGroupRef(MD));
      }
    }
  }
}

/// \brief Perform propagation of DLL attributes from a derived class to a
/// templated base class for MS compatibility.
void Sema::propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc) {
  if (getDLLAttr(
          BaseTemplateSpec->getSpecializedTemplate()->getTemplatedDecl())) {
    // If the base class template has a DLL attribute, don't try to change it.
    return;
  }

  auto TSK = BaseTemplateSpec->getSpecializationKind();
  if (!getDLLAttr(BaseTemplateSpec) &&
      (TSK == TSK_Undeclared || TSK == TSK_ExplicitInstantiationDeclaration ||
       TSK == TSK_ImplicitInstantiation)) {
    // The template hasn't been instantiated yet (or it has, but only as an
    // explicit instantiation declaration or implicit instantiation, which
    // means we haven't codegenned any members yet), so propagate the
    // attribute.
    auto *NewAttr = cast<InheritableAttr>(ClassAttr->clone(getASTContext()));
    NewAttr->setInherited(true);
    BaseTemplateSpec->addAttr(NewAttr);

    // If the template is already instantiated, checkDLLAttributeRedeclaration()
    // needs to be run again to see the new attribute. Otherwise this will
    // get run whenever the template is instantiated.
    if (TSK != TSK_Undeclared)
      checkClassLevelDLLAttribute(BaseTemplateSpec);

    return;
  }

  if (getDLLAttr(BaseTemplateSpec)) {
    // The template has already been specialized or instantiated with an
    // attribute, explicitly or through propagation. We should not try to
    // change it.
    return;
  }

  // The template was previously instantiated or explicitly specialized without
  // a dll attribute. It's too late for us to add an attribute, so warn that
  // this is unsupported.
  Diag(BaseLoc, diag::warn_attribute_dll_instantiated_base_class)
      << BaseTemplateSpec->isExplicitSpecialization();
  Diag(ClassAttr->getLocation(), diag::note_attribute);
  if (BaseTemplateSpec->isExplicitSpecialization()) {
    Diag(BaseTemplateSpec->getLocation(),
         diag::note_template_class_explicit_specialization_was_here)
        << BaseTemplateSpec;
  } else {
    Diag(BaseTemplateSpec->getPointOfInstantiation(),
         diag::note_template_class_instantiation_was_here)
        << BaseTemplateSpec;
  }
}

/// \brief Perform semantic checks on a class definition that has been
/// completed, introducing implicitly-declared members, checking for
/// abstract types, etc.
void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
  if (!Record)
    return;

  if (Record->isAbstract() && !Record->isInvalidDecl()) {
    AbstractUsageInfo Info(*this, Record);
    CheckAbstractClassUsage(Info, Record);
  }

  // If this is not an aggregate type and has no user-declared constructor,
  // complain about any non-static data members of reference or const scalar
  // type, since they will never get initializers.
  if (!Record->isInvalidDecl() && !Record->isDependentType() &&
      !Record->isAggregate() && !Record->hasUserDeclaredConstructor() &&
      !Record->isLambda()) {
    bool Complained = false;
    for (const auto *F : Record->fields()) {
      if (F->hasInClassInitializer() || F->isUnnamedBitfield())
        continue;

      if (F->getType()->isReferenceType() ||
          (F->getType().isConstQualified() && F->getType()->isScalarType())) {
        if (!Complained) {
          Diag(Record->getLocation(), diag::warn_no_constructor_for_refconst)
            << Record->getTagKind() << Record;
          Complained = true;
        }

        Diag(F->getLocation(), diag::note_refconst_member_not_initialized)
          << F->getType()->isReferenceType()
          << F->getDeclName();
      }
    }
  }

  if (Record->getIdentifier()) {
    // C++ [class.mem]p13:
    //   If T is the name of a class, then each of the following shall have a
    //   name different from T:
    //     - every member of every anonymous union that is a member of class T.
    //
    // C++ [class.mem]p14:
    //   In addition, if class T has a user-declared constructor (12.1), every
    //   non-static data member of class T shall have a name different from T.
    DeclContext::lookup_result R = Record->lookup(Record->getDeclName());
    for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E;
         ++I) {
      NamedDecl *D = *I;
      if ((isa<FieldDecl>(D) && Record->hasUserDeclaredConstructor()) ||
          isa<IndirectFieldDecl>(D)) {
        Diag(D->getLocation(), diag::err_member_name_of_class)
          << D->getDeclName();
        break;
      }
    }
  }

  // Warn if the class has virtual methods but non-virtual public destructor.
  if (Record->isPolymorphic() && !Record->isDependentType()) {
    CXXDestructorDecl *dtor = Record->getDestructor();
    if ((!dtor || (!dtor->isVirtual() && dtor->getAccess() == AS_public)) &&
        !Record->hasAttr<FinalAttr>())
      Diag(dtor ? dtor->getLocation() : Record->getLocation(),
           diag::warn_non_virtual_dtor) << Context.getRecordType(Record);
  }

  if (Record->isAbstract()) {
    if (FinalAttr *FA = Record->getAttr<FinalAttr>()) {
      Diag(Record->getLocation(), diag::warn_abstract_final_class)
        << FA->isSpelledAsSealed();
      DiagnoseAbstractType(Record);
    }
  }

  bool HasMethodWithOverrideControl = false,
       HasOverridingMethodWithoutOverrideControl = false;
  if (!Record->isDependentType()) {
    for (auto *M : Record->methods()) {
      // See if a method overloads virtual methods in a base
      // class without overriding any.
      if (!M->isStatic())
        DiagnoseHiddenVirtualMethods(M);
      if (M->hasAttr<OverrideAttr>())
        HasMethodWithOverrideControl = true;
      else if (M->size_overridden_methods() > 0)
        HasOverridingMethodWithoutOverrideControl = true;
      // Check whether the explicitly-defaulted special members are valid.
      if (!M->isInvalidDecl() && M->isExplicitlyDefaulted())
        CheckExplicitlyDefaultedSpecialMember(M);

      // For an explicitly defaulted or deleted special member, we defer
      // determining triviality until the class is complete. That time is now!
      if (!M->isImplicit() && !M->isUserProvided()) {
        CXXSpecialMember CSM = getSpecialMember(M);
        if (CSM != CXXInvalid) {
          M->setTrivial(SpecialMemberIsTrivial(M, CSM));

          // Inform the class that we've finished declaring this member.
          Record->finishedDefaultedOrDeletedMember(M);
        }
      }
    }
  }

  if (HasMethodWithOverrideControl &&
      HasOverridingMethodWithoutOverrideControl) {
    // At least one method has the 'override' control declared.
    // Diagnose all other overridden methods which do not have 'override'
    // specified on them.
    for (auto *M : Record->methods())
      DiagnoseAbsenceOfOverrideControl(M);
  }

  // ms_struct is a request to use the same ABI rules as MSVC.  Check
  // whether this class uses any C++ features that are implemented
  // completely differently in MSVC, and if so, emit a diagnostic.
  // That diagnostic defaults to an error, but we allow projects to
  // map it down to a warning (or ignore it).  It's a fairly common
  // practice among users of the ms_struct pragma to mass-annotate
  // headers, sweeping up a bunch of types that the project doesn't
  // really rely on MSVC-compatible layout for.  We must therefore
  // support "ms_struct except for C++ stuff" as a secondary ABI.
  if (Record->isMsStruct(Context) &&
      (Record->isPolymorphic() || Record->getNumBases())) {
    Diag(Record->getLocation(), diag::warn_cxx_ms_struct);
  }

  // Declare inheriting constructors. We do this eagerly here because:
  // - The standard requires an eager diagnostic for conflicting inheriting
  //   constructors from different classes.
  // - The lazy declaration of the other implicit constructors is so as to not
  //   waste space and performance on classes that are not meant to be
  //   instantiated (e.g. meta-functions). This doesn't apply to classes that
  //   have inheriting constructors.
  DeclareInheritingConstructors(Record);

  checkClassLevelDLLAttribute(Record);
}

/// Look up the special member function that would be called by a special
/// member function for a subobject of class type.
///
/// \param Class The class type of the subobject.
/// \param CSM The kind of special member function.
/// \param FieldQuals If the subobject is a field, its cv-qualifiers.
/// \param ConstRHS True if this is a copy operation with a const object
///        on its RHS, that is, if the argument to the outer special member
///        function is 'const' and this is not a field marked 'mutable'.
static Sema::SpecialMemberOverloadResult *lookupCallFromSpecialMember(
    Sema &S, CXXRecordDecl *Class, Sema::CXXSpecialMember CSM,
    unsigned FieldQuals, bool ConstRHS) {
  unsigned LHSQuals = 0;
  if (CSM == Sema::CXXCopyAssignment || CSM == Sema::CXXMoveAssignment)
    LHSQuals = FieldQuals;

  unsigned RHSQuals = FieldQuals;
  if (CSM == Sema::CXXDefaultConstructor || CSM == Sema::CXXDestructor)
    RHSQuals = 0;
  else if (ConstRHS)
    RHSQuals |= Qualifiers::Const;

  return S.LookupSpecialMember(Class, CSM,
                               RHSQuals & Qualifiers::Const,
                               RHSQuals & Qualifiers::Volatile,
                               false,
                               LHSQuals & Qualifiers::Const,
                               LHSQuals & Qualifiers::Volatile);
}

/// Is the special member function which would be selected to perform the
/// specified operation on the specified class type a constexpr constructor?
static bool specialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
                                     Sema::CXXSpecialMember CSM,
                                     unsigned Quals, bool ConstRHS) {
  Sema::SpecialMemberOverloadResult *SMOR =
      lookupCallFromSpecialMember(S, ClassDecl, CSM, Quals, ConstRHS);
  if (!SMOR || !SMOR->getMethod())
    // A constructor we wouldn't select can't be "involved in initializing"
    // anything.
    return true;
  return SMOR->getMethod()->isConstexpr();
}

/// Determine whether the specified special member function would be constexpr
/// if it were implicitly defined.
static bool defaultedSpecialMemberIsConstexpr(Sema &S, CXXRecordDecl *ClassDecl,
                                              Sema::CXXSpecialMember CSM,
                                              bool ConstArg) {
  if (!S.getLangOpts().CPlusPlus11)
    return false;

  // C++11 [dcl.constexpr]p4:
  // In the definition of a constexpr constructor [...]
  bool Ctor = true;
  switch (CSM) {
  case Sema::CXXDefaultConstructor:
    // Since default constructor lookup is essentially trivial (and cannot
    // involve, for instance, template instantiation), we compute whether a
    // defaulted default constructor is constexpr directly within
    // CXXRecordDecl.
    //
    // This is important for performance; we need to know whether the default
    // constructor is constexpr to determine whether the type is a literal
    // type.
    return ClassDecl->defaultedDefaultConstructorIsConstexpr();

  case Sema::CXXCopyConstructor:
  case Sema::CXXMoveConstructor:
    // For copy or move constructors, we need to perform overload resolution.
    break;

  case Sema::CXXCopyAssignment:
  case Sema::CXXMoveAssignment:
    if (!S.getLangOpts().CPlusPlus14)
      return false;
    // In C++1y, we need to perform overload resolution.
    Ctor = false;
    break;

  case Sema::CXXDestructor:
  case Sema::CXXInvalid:
    return false;
  }

  //   -- if the class is a non-empty union, or for each non-empty anonymous
  //      union member of a non-union class, exactly one non-static data member
  //      shall be initialized; [DR1359]
  //
  // If we squint, this is guaranteed, since exactly one non-static data member
  // will be initialized (if the constructor isn't deleted), we just don't know
  // which one.
  if (Ctor && ClassDecl->isUnion())
    return true;

  //   -- the class shall not have any virtual base classes;
  if (Ctor && ClassDecl->getNumVBases())
    return false;

  // C++1y [class.copy]p26:
  //   -- [the class] is a literal type, and
  if (!Ctor && !ClassDecl->isLiteral())
    return false;

  //   -- every constructor involved in initializing [...] base class
  //      sub-objects shall be a constexpr constructor;
  //   -- the assignment operator selected to copy/move each direct base
  //      class is a constexpr function, and
  for (const auto &B : ClassDecl->bases()) {
    const RecordType *BaseType = B.getType()->getAs<RecordType>();
    if (!BaseType) continue;

    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
    if (!specialMemberIsConstexpr(S, BaseClassDecl, CSM, 0, ConstArg))
      return false;
  }

  //   -- every constructor involved in initializing non-static data members
  //      [...] shall be a constexpr constructor;
  //   -- every non-static data member and base class sub-object shall be
  //      initialized
  //   -- for each non-static data member of X that is of class type (or array
  //      thereof), the assignment operator selected to copy/move that member
  //      is a constexpr function
  for (const auto *F : ClassDecl->fields()) {
    if (F->isInvalidDecl())
      continue;
    QualType BaseType = S.Context.getBaseElementType(F->getType());
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (!specialMemberIsConstexpr(S, FieldRecDecl, CSM,
                                    BaseType.getCVRQualifiers(),
                                    ConstArg && !F->isMutable()))
        return false;
    }
  }

  // All OK, it's constexpr!
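  // e.g. (illustrative): for
  //   struct Lit { int n; constexpr Lit(const Lit &) = default; };
  // every base and field passes the checks above, so the defaulted copy
  // constructor may be declared constexpr.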
  return true;
}

static Sema::ImplicitExceptionSpecification
computeImplicitExceptionSpec(Sema &S, SourceLocation Loc, CXXMethodDecl *MD) {
  switch (S.getSpecialMember(MD)) {
  case Sema::CXXDefaultConstructor:
    return S.ComputeDefaultedDefaultCtorExceptionSpec(Loc, MD);
  case Sema::CXXCopyConstructor:
    return S.ComputeDefaultedCopyCtorExceptionSpec(MD);
  case Sema::CXXCopyAssignment:
    return S.ComputeDefaultedCopyAssignmentExceptionSpec(MD);
  case Sema::CXXMoveConstructor:
    return S.ComputeDefaultedMoveCtorExceptionSpec(MD);
  case Sema::CXXMoveAssignment:
    return S.ComputeDefaultedMoveAssignmentExceptionSpec(MD);
  case Sema::CXXDestructor:
    return S.ComputeDefaultedDtorExceptionSpec(MD);
  case Sema::CXXInvalid:
    break;
  }
  assert(cast<CXXConstructorDecl>(MD)->getInheritedConstructor() &&
         "only special members have implicit exception specs");
  return S.ComputeInheritingCtorExceptionSpec(cast<CXXConstructorDecl>(MD));
}

static FunctionProtoType::ExtProtoInfo getImplicitMethodEPI(Sema &S,
                                                            CXXMethodDecl *MD) {
  FunctionProtoType::ExtProtoInfo EPI;

  // Build an exception specification pointing back at this member.
  EPI.ExceptionSpec.Type = EST_BasicNoexcept; // HLSL Change - EST_Unevaluated to EST_BasicNoexcept
  EPI.ExceptionSpec.SourceDecl = MD;

  // Set the calling convention to the default for C++ instance methods.
  EPI.ExtInfo = EPI.ExtInfo.withCallingConv(
      S.Context.getDefaultCallingConvention(/*IsVariadic=*/false,
                                            /*IsCXXMethod=*/true));
  return EPI;
}

void Sema::EvaluateImplicitExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD) {
  const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
  if (FPT->getExceptionSpecType() != EST_Unevaluated)
    return;

  // Evaluate the exception specification.
  auto ESI = computeImplicitExceptionSpec(*this, Loc, MD).getExceptionSpec();

  // Update the type of the special member to use it.
  UpdateExceptionSpec(MD, ESI);

  // A user-provided destructor can be defined outside the class. When that
  // happens, be sure to update the exception specification on both
  // declarations.
  const FunctionProtoType *CanonicalFPT =
    MD->getCanonicalDecl()->getType()->castAs<FunctionProtoType>();
  if (CanonicalFPT->getExceptionSpecType() == EST_Unevaluated)
    UpdateExceptionSpec(MD->getCanonicalDecl(), ESI);
}

void Sema::CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD) {
  CXXRecordDecl *RD = MD->getParent();
  CXXSpecialMember CSM = getSpecialMember(MD);

  assert(MD->isExplicitlyDefaulted() && CSM != CXXInvalid &&
         "not an explicitly-defaulted special member");

  // Whether this was the first-declared instance of the constructor.
  // This affects whether we implicitly add an exception spec and constexpr.
  bool First = MD == MD->getCanonicalDecl();

  bool HadError = false;

  // C++11 [dcl.fct.def.default]p1:
  //   A function that is explicitly defaulted shall
  //     -- be a special member function (checked elsewhere),
  //     -- have the same type (except for ref-qualifiers, and except that a
  //        copy operation can take a non-const reference) as an implicit
  //        declaration, and
  //     -- not have default arguments.
  unsigned ExpectedParams = 1;
  if (CSM == CXXDefaultConstructor || CSM == CXXDestructor)
    ExpectedParams = 0;
  if (MD->getNumParams() != ExpectedParams) {
    // This also checks for default arguments: a copy or move constructor with
    // a default argument is classified as a default constructor, and
    // assignment operations and destructors can't have default arguments.
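    // e.g. (illustrative):
    //   struct A { A(const A &, void *p = nullptr) = default; };
    // declares a copy constructor with two parameters, which is rejected
    // here.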
    Diag(MD->getLocation(), diag::err_defaulted_special_member_params)
      << CSM << MD->getSourceRange();
    HadError = true;
  } else if (MD->isVariadic()) {
    Diag(MD->getLocation(), diag::err_defaulted_special_member_variadic)
      << CSM << MD->getSourceRange();
    HadError = true;
  }

  const FunctionProtoType *Type = MD->getType()->getAs<FunctionProtoType>();

  bool CanHaveConstParam = false;
  if (CSM == CXXCopyConstructor)
    CanHaveConstParam = RD->implicitCopyConstructorHasConstParam();
  else if (CSM == CXXCopyAssignment)
    CanHaveConstParam = RD->implicitCopyAssignmentHasConstParam();

  QualType ReturnType = Context.VoidTy;
  if (CSM == CXXCopyAssignment || CSM == CXXMoveAssignment) {
    // Check for return type matching.
    ReturnType = Type->getReturnType();
    QualType ExpectedReturnType =
        Context.getLValueReferenceType(Context.getTypeDeclType(RD));
    if (!Context.hasSameType(ReturnType, ExpectedReturnType)) {
      Diag(MD->getLocation(), diag::err_defaulted_special_member_return_type)
        << (CSM == CXXMoveAssignment) << ExpectedReturnType;
      HadError = true;
    }

    // A defaulted special member cannot have cv-qualifiers.
    if (Type->getTypeQuals()) {
      Diag(MD->getLocation(), diag::err_defaulted_special_member_quals)
        << (CSM == CXXMoveAssignment) << getLangOpts().CPlusPlus14;
      HadError = true;
    }
  }

  // Check for parameter type matching.
  QualType ArgType = ExpectedParams ? Type->getParamType(0) : QualType();
  bool HasConstParam = false;
  if (ExpectedParams && ArgType->isReferenceType()) {
    // Argument must be reference to possibly-const T.
    QualType ReferentType = ArgType->getPointeeType();
    HasConstParam = ReferentType.isConstQualified();

    if (ReferentType.isVolatileQualified()) {
      Diag(MD->getLocation(),
           diag::err_defaulted_special_member_volatile_param) << CSM;
      HadError = true;
    }

    if (HasConstParam && !CanHaveConstParam) {
      if (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment) {
        Diag(MD->getLocation(),
             diag::err_defaulted_special_member_copy_const_param)
          << (CSM == CXXCopyAssignment);
        // FIXME: Explain why this special member can't be const.
      } else {
        Diag(MD->getLocation(),
             diag::err_defaulted_special_member_move_const_param)
          << (CSM == CXXMoveAssignment);
      }
      HadError = true;
    }
  } else if (ExpectedParams) {
    // A copy assignment operator can take its argument by value, but a
    // defaulted one cannot.
    assert(CSM == CXXCopyAssignment && "unexpected non-ref argument");
    Diag(MD->getLocation(), diag::err_defaulted_copy_assign_not_ref);
    HadError = true;
  }

  // C++11 [dcl.fct.def.default]p2:
  //   An explicitly-defaulted function may be declared constexpr only if it
  //   would have been implicitly declared as constexpr,
  // Do not apply this rule to members of class templates, since core issue
  // 1358 makes such functions always instantiate to constexpr functions. For
  // functions which cannot be constexpr (for non-constructors in C++11 and
  // for destructors in C++1y), this is checked elsewhere.
  bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, RD, CSM,
                                                     HasConstParam);
  if ((getLangOpts().CPlusPlus14 ? !isa<CXXDestructorDecl>(MD)
                                 : isa<CXXConstructorDecl>(MD)) &&
      MD->isConstexpr() && !Constexpr &&
      MD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
    Diag(MD->getLocStart(), diag::err_incorrect_defaulted_constexpr) << CSM;
    // FIXME: Explain why the special member can't be constexpr.
    HadError = true;
  }

  // and may have an explicit exception-specification only if it is compatible
  // with the exception-specification on the implicit declaration.
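  // e.g. (illustrative): 'S() noexcept = default;' is only well-formed if the
  // implicitly declared default constructor would be noexcept as well.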
  if (Type->hasExceptionSpec()) {
    // Delay the check if this is the first declaration of the special member,
    // since we may not have parsed some necessary in-class initializers yet.
    if (First) {
      // If the exception specification needs to be instantiated, do so now,
      // before we clobber it with an EST_Unevaluated specification below.
      if (Type->getExceptionSpecType() == EST_Uninstantiated) {
        InstantiateExceptionSpec(MD->getLocStart(), MD);
        Type = MD->getType()->getAs<FunctionProtoType>();
      }
      DelayedDefaultedMemberExceptionSpecs.push_back(std::make_pair(MD, Type));
    } else
      CheckExplicitlyDefaultedMemberExceptionSpec(MD, Type);
  }

  // If a function is explicitly defaulted on its first declaration,
  if (First) {
    //  -- it is implicitly considered to be constexpr if the implicit
    //     definition would be,
    MD->setConstexpr(Constexpr);

    //  -- it is implicitly considered to have the same exception-specification
    //     as if it had been implicitly declared,
    FunctionProtoType::ExtProtoInfo EPI = Type->getExtProtoInfo();
    EPI.ExceptionSpec.Type = EST_Unevaluated;
    EPI.ExceptionSpec.SourceDecl = MD;
    MD->setType(Context.getFunctionType(ReturnType,
                                        llvm::makeArrayRef(&ArgType,
                                                           ExpectedParams),
                                        EPI, None)); // HLSL Change - special members are all-in params
  }

  if (ShouldDeleteSpecialMember(MD, CSM)) {
    if (First) {
      SetDeclDeleted(MD, MD->getLocation());
    } else {
      // C++11 [dcl.fct.def.default]p4:
      //   [For a] user-provided explicitly-defaulted function [...] if such a
      //   function is implicitly defined as deleted, the program is
      //   ill-formed.
      Diag(MD->getLocation(), diag::err_out_of_line_default_deletes) << CSM;
      ShouldDeleteSpecialMember(MD, CSM, /*Diagnose*/true);
      HadError = true;
    }
  }

  if (HadError)
    MD->setInvalidDecl();
}

/// Check whether the exception specification provided for an
/// explicitly-defaulted special member matches the exception specification
/// that would have been generated for an implicit special member, per
/// C++11 [dcl.fct.def.default]p2.
void Sema::CheckExplicitlyDefaultedMemberExceptionSpec(
    CXXMethodDecl *MD, const FunctionProtoType *SpecifiedType) {
  // If the exception specification was explicitly specified but hadn't been
  // parsed when the method was defaulted, grab it now.
  if (SpecifiedType->getExceptionSpecType() == EST_Unparsed)
    SpecifiedType =
        MD->getTypeSourceInfo()->getType()->castAs<FunctionProtoType>();

  // Compute the implicit exception specification.
  CallingConv CC = Context.getDefaultCallingConvention(/*IsVariadic=*/false,
                                                       /*IsCXXMethod=*/true);
  FunctionProtoType::ExtProtoInfo EPI(CC);
  EPI.ExceptionSpec =
      computeImplicitExceptionSpec(*this, MD->getLocation(), MD)
          .getExceptionSpec();
  const FunctionProtoType *ImplicitType = cast<FunctionProtoType>(
    Context.getFunctionType(Context.VoidTy, None, EPI, None)); // HLSL Change - special members are all-in params

  // Ensure that it matches.
  CheckEquivalentExceptionSpec(
    PDiag(diag::err_incorrect_defaulted_exception_spec)
      << getSpecialMember(MD), PDiag(),
    ImplicitType, SourceLocation(),
    SpecifiedType, MD->getLocation());
}

void Sema::CheckDelayedMemberExceptionSpecs() {
  decltype(DelayedExceptionSpecChecks) Checks;
  decltype(DelayedDefaultedMemberExceptionSpecs) Specs;

  std::swap(Checks, DelayedExceptionSpecChecks);
  std::swap(Specs, DelayedDefaultedMemberExceptionSpecs);

  // Perform any deferred checking of exception specifications for virtual
  // destructors.
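  // e.g. (illustrative): given 'struct B { virtual ~B(); };' and
  // 'struct D : B { ~D() noexcept(false); };', the override check below
  // rejects D's destructor for having a laxer exception specification.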
  for (auto &Check : Checks)
    CheckOverridingFunctionExceptionSpec(Check.first, Check.second);

  // Check that any explicitly-defaulted methods have exception specifications
  // compatible with their implicit exception specifications.
  for (auto &Spec : Specs)
    CheckExplicitlyDefaultedMemberExceptionSpec(Spec.first, Spec.second);
}

namespace {
struct SpecialMemberDeletionInfo {
  Sema &S;
  CXXMethodDecl *MD;
  Sema::CXXSpecialMember CSM;
  bool Diagnose;

  // Properties of the special member, computed for convenience.
  bool IsConstructor, IsAssignment, IsMove, ConstArg;
  SourceLocation Loc;

  bool AllFieldsAreConst;

  SpecialMemberDeletionInfo(Sema &S, CXXMethodDecl *MD,
                            Sema::CXXSpecialMember CSM, bool Diagnose)
    : S(S), MD(MD), CSM(CSM), Diagnose(Diagnose),
      IsConstructor(false), IsAssignment(false), IsMove(false),
      ConstArg(false), Loc(MD->getLocation()),
      AllFieldsAreConst(true) {
    switch (CSM) {
      case Sema::CXXDefaultConstructor:
      case Sema::CXXCopyConstructor:
        IsConstructor = true;
        break;
      case Sema::CXXMoveConstructor:
        IsConstructor = true;
        IsMove = true;
        break;
      case Sema::CXXCopyAssignment:
        IsAssignment = true;
        break;
      case Sema::CXXMoveAssignment:
        IsAssignment = true;
        IsMove = true;
        break;
      case Sema::CXXDestructor:
        break;
      case Sema::CXXInvalid:
        llvm_unreachable("invalid special member kind");
    }

    if (MD->getNumParams()) {
      if (const ReferenceType *RT =
              MD->getParamDecl(0)->getType()->getAs<ReferenceType>())
        ConstArg = RT->getPointeeType().isConstQualified();
    }
  }

  bool inUnion() const { return MD->getParent()->isUnion(); }

  /// Look up the corresponding special member in the given class.
  Sema::SpecialMemberOverloadResult *lookupIn(CXXRecordDecl *Class,
                                              unsigned Quals, bool IsMutable) {
    return lookupCallFromSpecialMember(S, Class, CSM, Quals,
                                       ConstArg && !IsMutable);
  }

  typedef llvm::PointerUnion<CXXBaseSpecifier*, FieldDecl*> Subobject;

  bool shouldDeleteForBase(CXXBaseSpecifier *Base);
  bool shouldDeleteForField(FieldDecl *FD);
  bool shouldDeleteForAllConstMembers();

  bool shouldDeleteForClassSubobject(CXXRecordDecl *Class, Subobject Subobj,
                                     unsigned Quals);
  bool shouldDeleteForSubobjectCall(Subobject Subobj,
                                    Sema::SpecialMemberOverloadResult *SMOR,
                                    bool IsDtorCallInCtor);

  bool isAccessible(Subobject Subobj, CXXMethodDecl *D);
};
}

/// Is the given special member inaccessible when used on the given
/// sub-object.
bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj,
                                             CXXMethodDecl *target) {
  /// If we're operating on a base class, the object type is the
  /// type of this special member.
  QualType objectTy;
  AccessSpecifier access = target->getAccess();
  if (CXXBaseSpecifier *base = Subobj.dyn_cast<CXXBaseSpecifier*>()) {
    objectTy = S.Context.getTypeDeclType(MD->getParent());
    access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access);

  // If we're operating on a field, the object type is the type of the field.
  } else {
    objectTy = S.Context.getTypeDeclType(target->getParent());
  }

  return S.isSpecialMemberAccessibleForDeletion(target, access, objectTy);
}

/// Check whether we should delete a special member due to the implicit
/// definition containing a call to a special member of a subobject.
bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
    Subobject Subobj, Sema::SpecialMemberOverloadResult *SMOR,
    bool IsDtorCallInCtor) {
  CXXMethodDecl *Decl = SMOR->getMethod();
  FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();

  int DiagKind = -1;

  if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::NoMemberOrDeleted)
    DiagKind = !Decl ? 0 : 1;
  else if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::Ambiguous)
    DiagKind = 2;
  else if (!isAccessible(Subobj, Decl))
    DiagKind = 3;
  else if (!IsDtorCallInCtor && Field && Field->getParent()->isUnion() &&
           !Decl->isTrivial()) {
    // A member of a union must have a trivial corresponding special member.
    // As a weird special case, a destructor call from a union's constructor
    // must be accessible and non-deleted, but need not be trivial. Such a
    // destructor is never actually called, but is semantically checked as
    // if it were.
    DiagKind = 4;
  }

  if (DiagKind == -1)
    return false;

  if (Diagnose) {
    if (Field) {
      S.Diag(Field->getLocation(),
             diag::note_deleted_special_member_class_subobject)
        << CSM << MD->getParent() << /*IsField*/true
        << Field << DiagKind << IsDtorCallInCtor;
    } else {
      CXXBaseSpecifier *Base = Subobj.get<CXXBaseSpecifier*>();
      S.Diag(Base->getLocStart(),
             diag::note_deleted_special_member_class_subobject)
        << CSM << MD->getParent() << /*IsField*/false
        << Base->getType() << DiagKind << IsDtorCallInCtor;
    }

    if (DiagKind == 1)
      S.NoteDeletedFunction(Decl);
    // FIXME: Explain inaccessibility if DiagKind == 3.
  }

  return true;
}

/// Check whether we should delete a special member function due to having a
/// direct or virtual base class or non-static data member of class type M.
bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
    CXXRecordDecl *Class, Subobject Subobj, unsigned Quals) {
  FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
  bool IsMutable = Field && Field->isMutable();

  // C++11 [class.ctor]p5:
  // -- any direct or virtual base class, or non-static data member with no
  //    brace-or-equal-initializer, has class type M (or array thereof) and
  //    either M has no default constructor or overload resolution as applied
  //    to M's default constructor results in an ambiguity or in a function
  //    that is deleted or inaccessible
  // C++11 [class.copy]p11, C++11 [class.copy]p23:
  // -- a direct or virtual base class B that cannot be copied/moved because
  //    overload resolution, as applied to B's corresponding special member,
  //    results in an ambiguity or a function that is deleted or inaccessible
  //    from the defaulted special member
  // C++11 [class.dtor]p5:
  // -- any direct or virtual base class [...] has a type with a destructor
  //    that is deleted or inaccessible
  if (!(CSM == Sema::CXXDefaultConstructor &&
        Field && Field->hasInClassInitializer()) &&
      shouldDeleteForSubobjectCall(Subobj, lookupIn(Class, Quals, IsMutable),
                                   false))
    return true;

  // C++11 [class.ctor]p5, C++11 [class.copy]p11:
  // -- any direct or virtual base class or non-static data member has a
  //    type with a destructor that is deleted or inaccessible
  if (IsConstructor) {
    Sema::SpecialMemberOverloadResult *SMOR =
        S.LookupSpecialMember(Class, Sema::CXXDestructor,
                              false, false, false, false, false);
    if (shouldDeleteForSubobjectCall(Subobj, SMOR, true))
      return true;
  }

  return false;
}

/// Check whether we should delete a special member function due to the class
/// having a particular direct or virtual base class.
bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
  CXXRecordDecl *BaseClass = Base->getType()->getAsCXXRecordDecl();
  return shouldDeleteForClassSubobject(BaseClass, Base, 0);
}

/// Check whether we should delete a special member function due to the class
/// having a particular non-static data member.
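/// For instance (illustrative): in 'struct S { int &r; };' the defaulted
/// default constructor is deleted because the reference member 'r' has no
/// brace-or-equal-initializer.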
bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
  QualType FieldType = S.Context.getBaseElementType(FD->getType());
  CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();

  if (CSM == Sema::CXXDefaultConstructor) {
    // For a default constructor, all references must be initialized in-class
    // and, if a union, it must have a non-const member.
    if (FieldType->isReferenceType() && !FD->hasInClassInitializer()) {
      if (Diagnose)
        S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
          << MD->getParent() << FD << FieldType << /*Reference*/0;
      return true;
    }
    // C++11 [class.ctor]p5: any non-variant non-static data member of
    // const-qualified type (or array thereof) with no
    // brace-or-equal-initializer does not have a user-provided default
    // constructor.
    if (!inUnion() && FieldType.isConstQualified() &&
        !FD->hasInClassInitializer() &&
        (!FieldRecord || !FieldRecord->hasUserProvidedDefaultConstructor())) {
      if (Diagnose)
        S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
          << MD->getParent() << FD << FD->getType() << /*Const*/1;
      return true;
    }

    if (inUnion() && !FieldType.isConstQualified())
      AllFieldsAreConst = false;
  } else if (CSM == Sema::CXXCopyConstructor) {
    // For a copy constructor, data members must not be of rvalue reference
    // type.
    if (FieldType->isRValueReferenceType()) {
      if (Diagnose)
        S.Diag(FD->getLocation(),
               diag::note_deleted_copy_ctor_rvalue_reference)
          << MD->getParent() << FD << FieldType;
      return true;
    }
  } else if (IsAssignment) {
    // For an assignment operator, data members must not be of reference type.
    if (FieldType->isReferenceType()) {
      if (Diagnose)
        S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
          << IsMove << MD->getParent() << FD << FieldType << /*Reference*/0;
      return true;
    }
    if (!FieldRecord && FieldType.isConstQualified()) {
      // C++11 [class.copy]p23:
      // -- a non-static data member of const non-class type (or array thereof)
      if (Diagnose)
        S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
          << IsMove << MD->getParent() << FD << FD->getType() << /*Const*/1;
      return true;
    }
  }

  if (FieldRecord) {
    // Some additional restrictions exist on the variant members.
    if (!inUnion() && FieldRecord->isUnion() &&
        FieldRecord->isAnonymousStructOrUnion()) {
      bool AllVariantFieldsAreConst = true;

      // FIXME: Handle anonymous unions declared within anonymous unions.
      for (auto *UI : FieldRecord->fields()) {
        QualType UnionFieldType = S.Context.getBaseElementType(UI->getType());

        if (!UnionFieldType.isConstQualified())
          AllVariantFieldsAreConst = false;

        CXXRecordDecl *UnionFieldRecord = UnionFieldType->getAsCXXRecordDecl();
        if (UnionFieldRecord &&
            shouldDeleteForClassSubobject(UnionFieldRecord, UI,
                                          UnionFieldType.getCVRQualifiers()))
          return true;
      }

      // At least one member in each anonymous union must be non-const
      if (CSM == Sema::CXXDefaultConstructor && AllVariantFieldsAreConst &&
          !FieldRecord->field_empty()) {
        if (Diagnose)
          S.Diag(FieldRecord->getLocation(),
                 diag::note_deleted_default_ctor_all_const)
            << MD->getParent() << /*anonymous union*/1;
        return true;
      }

      // Don't check the implicit member of the anonymous union type.
      // This is technically non-conformant, but sanity demands it.
      return false;
    }

    if (shouldDeleteForClassSubobject(FieldRecord, FD,
                                      FieldType.getCVRQualifiers()))
      return true;
  }

  return false;
}

/// C++11 [class.ctor]p5:
///   A defaulted default constructor for a class X is defined as deleted if
///   X is a union and all of its variant members are of const-qualified type.
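/// e.g. (illustrative): 'union U { const int a; const long b; };' gets a
/// deleted default constructor under this rule.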
bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
  // This is a silly definition, because it gives an empty union a deleted
  // default constructor. Don't do that.
  if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst &&
      !MD->getParent()->field_empty()) {
    if (Diagnose)
      S.Diag(MD->getParent()->getLocation(),
             diag::note_deleted_default_ctor_all_const)
        << MD->getParent() << /*not anonymous union*/0;
    return true;
  }
  return false;
}

/// Determine whether a defaulted special member function should be defined as
/// deleted, as specified in C++11 [class.ctor]p5, C++11 [class.copy]p11,
/// C++11 [class.copy]p23, and C++11 [class.dtor]p5.
bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                     bool Diagnose) {
  if (MD->isInvalidDecl())
    return false;
  CXXRecordDecl *RD = MD->getParent();
  assert(!RD->isDependentType() && "do deletion after instantiation");
  if (!LangOpts.CPlusPlus11 || RD->isInvalidDecl())
    return false;

  // C++11 [expr.lambda.prim]p19:
  //   The closure type associated with a lambda-expression has a
  //   deleted (8.4.3) default constructor and a deleted copy
  //   assignment operator.
  if (RD->isLambda() &&
      (CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) {
    if (Diagnose)
      Diag(RD->getLocation(), diag::note_lambda_decl);
    return true;
  }

  // For an anonymous struct or union, the copy and assignment special members
  // will never be used, so skip the check. For an anonymous union declared at
  // namespace scope, the constructor and destructor are used.
  if (CSM != CXXDefaultConstructor && CSM != CXXDestructor &&
      RD->isAnonymousStructOrUnion())
    return false;

  // C++11 [class.copy]p7, p18:
  //   If the class definition declares a move constructor or move assignment
  //   operator, an implicitly declared copy constructor or copy assignment
  //   operator is defined as deleted.
  if (MD->isImplicit() &&
      (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)) {
    CXXMethodDecl *UserDeclaredMove = nullptr;

    // In Microsoft mode, a user-declared move only causes the deletion of the
    // corresponding copy operation, not both copy operations.
    if (RD->hasUserDeclaredMoveConstructor() &&
        (!getLangOpts().MSVCCompat || CSM == CXXCopyConstructor)) {
      if (!Diagnose) return true;

      // Find any user-declared move constructor.
      for (auto *I : RD->ctors()) {
        if (I->isMoveConstructor()) {
          UserDeclaredMove = I;
          break;
        }
      }
      assert(UserDeclaredMove);
    } else if (RD->hasUserDeclaredMoveAssignment() &&
               (!getLangOpts().MSVCCompat || CSM == CXXCopyAssignment)) {
      if (!Diagnose) return true;

      // Find any user-declared move assignment operator.
      for (auto *I : RD->methods()) {
        if (I->isMoveAssignmentOperator()) {
          UserDeclaredMove = I;
          break;
        }
      }
      assert(UserDeclaredMove);
    }

    if (UserDeclaredMove) {
      Diag(UserDeclaredMove->getLocation(),
           diag::note_deleted_copy_user_declared_move)
        << (CSM == CXXCopyAssignment) << RD
        << UserDeclaredMove->isMoveAssignmentOperator();
      return true;
    }
  }

  // Do access control from the special member function
  ContextRAII MethodContext(*this, MD);

  // C++11 [class.dtor]p5:
  // -- for a virtual destructor, lookup of the non-array deallocation function
  //    results in an ambiguity or in a function that is deleted or inaccessible
  if (CSM == CXXDestructor && MD->isVirtual()) {
    FunctionDecl *OperatorDelete = nullptr;
    DeclarationName Name =
      Context.DeclarationNames.getCXXOperatorName(OO_Delete);
    if (FindDeallocationFunction(MD->getLocation(), MD->getParent(), Name,
                                 OperatorDelete, false)) {
      if (Diagnose)
        Diag(RD->getLocation(), diag::note_deleted_dtor_no_operator_delete);
      return true;
    }
  }

  SpecialMemberDeletionInfo SMI(*this, MD, CSM, Diagnose);

  for (auto &BI : RD->bases())
    if (!BI.isVirtual() &&
        SMI.shouldDeleteForBase(&BI))
      return true;

  // Per DR1611, do not consider virtual bases of constructors of abstract
  // classes, since we are not going to construct them.
  if (!RD->isAbstract() || !SMI.IsConstructor) {
    for (auto &BI : RD->vbases())
      if (SMI.shouldDeleteForBase(&BI))
        return true;
  }

  for (auto *FI : RD->fields())
    if (!FI->isInvalidDecl() && !FI->isUnnamedBitfield() &&
        SMI.shouldDeleteForField(FI))
      return true;

  if (SMI.shouldDeleteForAllConstMembers())
    return true;

  if (getLangOpts().CUDA) {
    // We should delete the special member in CUDA mode if target inference
    // failed.
    return inferCUDATargetForImplicitSpecialMember(RD, CSM, MD, SMI.ConstArg,
                                                   Diagnose);
  }

  return false;
}

/// Perform lookup for a special member of the specified kind, and determine
/// whether it is trivial. If the triviality can be determined without the
/// lookup, skip it. This is intended for use when determining whether a
/// special member of a containing object is trivial, and thus does not ever
/// perform overload resolution for default constructors.
///
/// If \p Selected is not \c NULL, \c *Selected will be filled in with the
/// member that was most likely to be intended to be trivial, if any.
static bool findTrivialSpecialMember(Sema &S, CXXRecordDecl *RD,
                                     Sema::CXXSpecialMember CSM, unsigned Quals,
                                     bool ConstRHS,
                                     CXXMethodDecl **Selected) {
  if (Selected)
    *Selected = nullptr;

  switch (CSM) {
  case Sema::CXXInvalid:
    llvm_unreachable("not a special member");

  case Sema::CXXDefaultConstructor:
    // C++11 [class.ctor]p5:
    //   A default constructor is trivial if:
    //    - all the [direct subobjects] have trivial default constructors
    //
    // Note, no overload resolution is performed in this case.
    if (RD->hasTrivialDefaultConstructor())
      return true;

    if (Selected) {
      // If there's a default constructor which could have been trivial, dig
      // it out. Otherwise, if there's any user-provided default constructor,
      // point to that as an example of why there's not a trivial one.
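      // e.g. (illustrative): for 'struct S { S(); };', S::S() is the
      // user-provided default constructor we would report here.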
      CXXConstructorDecl *DefCtor = nullptr;
      if (RD->needsImplicitDefaultConstructor())
        S.DeclareImplicitDefaultConstructor(RD);
      for (auto *CI : RD->ctors()) {
        if (!CI->isDefaultConstructor())
          continue;
        DefCtor = CI;
        if (!DefCtor->isUserProvided())
          break;
      }

      *Selected = DefCtor;
    }

    return false;

  case Sema::CXXDestructor:
    // C++11 [class.dtor]p5:
    //   A destructor is trivial if:
    //    - all the direct [subobjects] have trivial destructors
    if (RD->hasTrivialDestructor())
      return true;

    if (Selected) {
      if (RD->needsImplicitDestructor())
        S.DeclareImplicitDestructor(RD);
      *Selected = RD->getDestructor();
    }

    return false;

  case Sema::CXXCopyConstructor:
    // C++11 [class.copy]p12:
    //   A copy constructor is trivial if:
    //    - the constructor selected to copy each direct [subobject] is trivial
    if (RD->hasTrivialCopyConstructor()) {
      if (Quals == Qualifiers::Const)
        // We must either select the trivial copy constructor or reach an
        // ambiguity; no need to actually perform overload resolution.
        return true;
    } else if (!Selected) {
      return false;
    }
    // In C++98, we are not supposed to perform overload resolution here, but
    // we treat that as a language defect, as suggested on cxx-abi-dev, to
    // treat cases like B as having a non-trivial copy constructor:
    //   struct A { template<typename T> A(T&); };
    //   struct B { mutable A a; };
    goto NeedOverloadResolution;

  case Sema::CXXCopyAssignment:
    // C++11 [class.copy]p25:
    //   A copy assignment operator is trivial if:
    //    - the assignment operator selected to copy each direct [subobject]
    //      is trivial
    if (RD->hasTrivialCopyAssignment()) {
      if (Quals == Qualifiers::Const)
        return true;
    } else if (!Selected) {
      return false;
    }
    // In C++98, we are not supposed to perform overload resolution here, but
    // we treat that as a language defect.
    goto NeedOverloadResolution;

  case Sema::CXXMoveConstructor:
  case Sema::CXXMoveAssignment:
  NeedOverloadResolution:
    Sema::SpecialMemberOverloadResult *SMOR =
        lookupCallFromSpecialMember(S, RD, CSM, Quals, ConstRHS);

    // The standard doesn't describe how to behave if the lookup is ambiguous.
    // We treat it as not making the member non-trivial, just like the standard
    // mandates for the default constructor. This should rarely matter, because
    // the member will also be deleted.
    if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::Ambiguous)
      return true;

    if (!SMOR->getMethod()) {
      assert(SMOR->getKind() ==
             Sema::SpecialMemberOverloadResult::NoMemberOrDeleted);
      return false;
    }

    // We deliberately don't check if we found a deleted special member. We're
    // not supposed to!
    if (Selected)
      *Selected = SMOR->getMethod();
    return SMOR->getMethod()->isTrivial();
  }

  llvm_unreachable("unknown special method kind");
}

static CXXConstructorDecl *findUserDeclaredCtor(CXXRecordDecl *RD) {
  for (auto *CI : RD->ctors())
    if (!CI->isImplicit())
      return CI;

  // Look for constructor templates.
  typedef CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl>
      tmpl_iter;
  for (tmpl_iter TI(RD->decls_begin()), TE(RD->decls_end()); TI != TE; ++TI) {
    if (CXXConstructorDecl *CD =
          dyn_cast<CXXConstructorDecl>(TI->getTemplatedDecl()))
      return CD;
  }

  return nullptr;
}

/// The kind of subobject we are checking for triviality. The values of this
/// enumeration are used in diagnostics.
enum TrivialSubobjectKind {
  /// The subobject is a base class.
  TSK_BaseClass,
  /// The subobject is a non-static data member.
  TSK_Field,
  /// The object is actually the complete object.
  TSK_CompleteObject
};

/// Check whether the special member selected for a given type would be
/// trivial.
static bool checkTrivialSubobjectCall(Sema &S, SourceLocation SubobjLoc,
                                      QualType SubType, bool ConstRHS,
                                      Sema::CXXSpecialMember CSM,
                                      TrivialSubobjectKind Kind,
                                      bool Diagnose) {
  CXXRecordDecl *SubRD = SubType->getAsCXXRecordDecl();
  if (!SubRD)
    return true;

  CXXMethodDecl *Selected;
  if (findTrivialSpecialMember(S, SubRD, CSM, SubType.getCVRQualifiers(),
                               ConstRHS, Diagnose ? &Selected : nullptr))
    return true;

  if (Diagnose) {
    if (ConstRHS)
      SubType.addConst();

    if (!Selected && CSM == Sema::CXXDefaultConstructor) {
      S.Diag(SubobjLoc, diag::note_nontrivial_no_def_ctor)
        << Kind << SubType.getUnqualifiedType();
      if (CXXConstructorDecl *CD = findUserDeclaredCtor(SubRD))
        S.Diag(CD->getLocation(), diag::note_user_declared_ctor);
    } else if (!Selected)
      S.Diag(SubobjLoc, diag::note_nontrivial_no_copy)
        << Kind << SubType.getUnqualifiedType() << CSM << SubType;
    else if (Selected->isUserProvided()) {
      if (Kind == TSK_CompleteObject)
        S.Diag(Selected->getLocation(), diag::note_nontrivial_user_provided)
          << Kind << SubType.getUnqualifiedType() << CSM;
      else {
        S.Diag(SubobjLoc, diag::note_nontrivial_user_provided)
          << Kind << SubType.getUnqualifiedType() << CSM;
        S.Diag(Selected->getLocation(), diag::note_declared_at);
      }
    } else {
      if (Kind != TSK_CompleteObject)
        S.Diag(SubobjLoc, diag::note_nontrivial_subobject)
          << Kind << SubType.getUnqualifiedType() << CSM;

      // Explain why the defaulted or deleted special member isn't trivial.
      S.SpecialMemberIsTrivial(Selected, CSM, Diagnose);
    }
  }

  return false;
}

/// Check whether the members of a class type allow a special member to be
/// trivial.
static bool checkTrivialClassMembers(Sema &S, CXXRecordDecl *RD,
                                     Sema::CXXSpecialMember CSM,
                                     bool ConstArg, bool Diagnose) {
  for (const auto *FI : RD->fields()) {
    if (FI->isInvalidDecl() || FI->isUnnamedBitfield())
      continue;

    QualType FieldType = S.Context.getBaseElementType(FI->getType());

    // Pretend anonymous struct or union members are members of this class.
    if (FI->isAnonymousStructOrUnion()) {
      if (!checkTrivialClassMembers(S, FieldType->getAsCXXRecordDecl(),
                                    CSM, ConstArg, Diagnose))
        return false;
      continue;
    }

    // C++11 [class.ctor]p5:
    //   A default constructor is trivial if [...]
    //    -- no non-static data member of its class has a
    //       brace-or-equal-initializer
    if (CSM == Sema::CXXDefaultConstructor && FI->hasInClassInitializer()) {
      if (Diagnose)
        S.Diag(FI->getLocation(), diag::note_nontrivial_in_class_init) << FI;
      return false;
    }

    // Objective C ARC 4.3.5:
    //   [...] nontrivially ownership-qualified types are [...] not trivially
    //   default constructible, copy constructible, move constructible, copy
    //   assignable, move assignable, or destructible [...]
    if (S.getLangOpts().ObjCAutoRefCount &&
        FieldType.hasNonTrivialObjCLifetime()) {
      if (Diagnose)
        S.Diag(FI->getLocation(), diag::note_nontrivial_objc_ownership)
          << RD << FieldType.getObjCLifetime();
      return false;
    }

    bool ConstRHS = ConstArg && !FI->isMutable();
    if (!checkTrivialSubobjectCall(S, FI->getLocation(), FieldType, ConstRHS,
                                   CSM, TSK_Field, Diagnose))
      return false;
  }

  return true;
}

/// Diagnose why the specified class does not have a trivial special member of
/// the given kind.
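/// For example (illustrative), 'struct S { S(const S &); };' has a
/// user-provided, and therefore non-trivial, copy constructor.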
void Sema::DiagnoseNontrivial(const CXXRecordDecl *RD, CXXSpecialMember CSM) {
  QualType Ty = Context.getRecordType(RD);

  bool ConstArg = (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment);
  checkTrivialSubobjectCall(*this, RD->getLocation(), Ty, ConstArg, CSM,
                            TSK_CompleteObject, /*Diagnose*/true);
}

/// Determine whether a defaulted or deleted special member function is
/// trivial, as specified in C++11 [class.ctor]p5, C++11 [class.copy]p12,
/// C++11 [class.copy]p25, and C++11 [class.dtor]p5.
bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                  bool Diagnose) {
  assert(!MD->isUserProvided() && CSM != CXXInvalid && "not special enough");

  CXXRecordDecl *RD = MD->getParent();

  bool ConstArg = false;

  // C++11 [class.copy]p12, p25: [DR1593]
  //   A [special member] is trivial if [...] its parameter-type-list is
  //   equivalent to the parameter-type-list of an implicit declaration [...]
  switch (CSM) {
  case CXXDefaultConstructor:
  case CXXDestructor:
    // Trivial default constructors and destructors cannot have parameters.
    break;

  case CXXCopyConstructor:
  case CXXCopyAssignment: {
    // Trivial copy operations always have const, non-volatile parameter types.
    ConstArg = true;
    const ParmVarDecl *Param0 = MD->getParamDecl(0);
    const ReferenceType *RT = Param0->getType()->getAs<ReferenceType>();
    if (!RT || RT->getPointeeType().getCVRQualifiers() != Qualifiers::Const) {
      if (Diagnose)
        Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
          << Param0->getSourceRange() << Param0->getType()
          << Context.getLValueReferenceType(
               Context.getRecordType(RD).withConst());
      return false;
    }
    break;
  }

  case CXXMoveConstructor:
  case CXXMoveAssignment: {
    // Trivial move operations always have non-cv-qualified parameters.
    const ParmVarDecl *Param0 = MD->getParamDecl(0);
    const RValueReferenceType *RT =
      Param0->getType()->getAs<RValueReferenceType>();
    if (!RT || RT->getPointeeType().getCVRQualifiers()) {
      if (Diagnose)
        Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
          << Param0->getSourceRange() << Param0->getType()
          << Context.getRValueReferenceType(Context.getRecordType(RD));
      return false;
    }
    break;
  }

  case CXXInvalid:
    llvm_unreachable("not a special member");
  }

  if (MD->getMinRequiredArguments() < MD->getNumParams()) {
    if (Diagnose)
      Diag(MD->getParamDecl(MD->getMinRequiredArguments())->getLocation(),
           diag::note_nontrivial_default_arg)
        << MD->getParamDecl(MD->getMinRequiredArguments())->getSourceRange();
    return false;
  }
  if (MD->isVariadic()) {
    if (Diagnose)
      Diag(MD->getLocation(), diag::note_nontrivial_variadic);
    return false;
  }

  // C++11 [class.ctor]p5, C++11 [class.dtor]p5:
  //   A copy/move [constructor or assignment operator] is trivial if
  //    -- the [member] selected to copy/move each direct base class subobject
  //       is trivial
  //
  // C++11 [class.copy]p12, C++11 [class.copy]p25:
  //   A [default constructor or destructor] is trivial if
  //    -- all the direct base classes have trivial [default constructors or
  //       destructors]
  for (const auto &BI : RD->bases())
    if (!checkTrivialSubobjectCall(*this, BI.getLocStart(), BI.getType(),
                                   ConstArg, CSM, TSK_BaseClass, Diagnose))
      return false;

  // C++11 [class.ctor]p5, C++11 [class.dtor]p5:
  //   A copy/move [constructor or assignment operator] for a class X is
  //   trivial if
  //    -- for each non-static data member of X that is of class type (or
  //       array thereof), the constructor selected to copy/move that member
  //       is trivial
  //
  // C++11 [class.copy]p12, C++11 [class.copy]p25:
  //   A [default constructor or destructor] is trivial if
  //    -- for all of the non-static data
  //       members of its class that are of class type (or array thereof),
  //       each such class has a trivial [default constructor or destructor]
  if (!checkTrivialClassMembers(*this, RD, CSM, ConstArg, Diagnose))
    return false;

  // C++11 [class.dtor]p5:
  //   A destructor is trivial if [...]
  //    -- the destructor is not virtual
  if (CSM == CXXDestructor && MD->isVirtual()) {
    if (Diagnose)
      Diag(MD->getLocation(), diag::note_nontrivial_virtual_dtor) << RD;
    return false;
  }

  // C++11 [class.ctor]p5, C++11 [class.copy]p12, C++11 [class.copy]p25:
  //   A [special member] for class X is trivial if [...]
  //    -- class X has no virtual functions and no virtual base classes
  if (CSM != CXXDestructor && MD->getParent()->isDynamicClass()) {
    if (!Diagnose)
      return false;

    if (RD->getNumVBases()) {
      // Check for virtual bases. We already know that the corresponding
      // member in all bases is trivial, so vbases must all be direct.
      CXXBaseSpecifier &BS = *RD->vbases_begin();
      assert(BS.isVirtual());
      Diag(BS.getLocStart(), diag::note_nontrivial_has_virtual) << RD << 1;
      return false;
    }

    // Must have a virtual method.
    for (const auto *MI : RD->methods()) {
      if (MI->isVirtual()) {
        SourceLocation MLoc = MI->getLocStart();
        Diag(MLoc, diag::note_nontrivial_has_virtual) << RD << 0;
        return false;
      }
    }

    llvm_unreachable("dynamic class with no vbases and no virtual functions");
  }

  // Looks like it's trivial!
  return true;
}

/// \brief Data used with FindHiddenVirtualMethod
namespace {
  struct FindHiddenVirtualMethodData {
    Sema *S;
    CXXMethodDecl *Method;
    llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverridenAndUsingBaseMethods;
    SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
  };
}

/// \brief Check whether any most-overridden method from MD is in Methods.
static bool CheckMostOverridenMethods(const CXXMethodDecl *MD,
                  const llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
  if (MD->size_overridden_methods() == 0)
    return Methods.count(MD->getCanonicalDecl());
  for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
                                      E = MD->end_overridden_methods();
       I != E; ++I)
    if (CheckMostOverridenMethods(*I, Methods))
      return true;
  return false;
}

/// \brief Member lookup function that determines whether a given C++
/// method overloads virtual methods in a base class without overriding any,
/// to be used with CXXRecordDecl::lookupInBases().
static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
                                    CXXBasePath &Path,
                                    void *UserData) {
  RecordDecl *BaseRecord =
      Specifier->getType()->getAs<RecordType>()->getDecl();

  FindHiddenVirtualMethodData &Data
    = *static_cast<FindHiddenVirtualMethodData*>(UserData);

  DeclarationName Name = Data.Method->getDeclName();
  assert(Name.getNameKind() == DeclarationName::Identifier);

  bool foundSameNameMethod = false;
  SmallVector<CXXMethodDecl *, 8> overloadedMethods;
  for (Path.Decls = BaseRecord->lookup(Name);
       !Path.Decls.empty();
       Path.Decls = Path.Decls.slice(1)) {
    NamedDecl *D = Path.Decls.front();
    if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
      MD = MD->getCanonicalDecl();
      foundSameNameMethod = true;
      // Interested only in hidden virtual methods.
      if (!MD->isVirtual())
        continue;
      // If the method we are checking overrides a method from its base
      // don't warn about the other overloaded methods. Clang deviates from
      // GCC by only diagnosing overloads of inherited virtual functions that
      // do not override any other virtual functions in the base. GCC's
      // -Woverloaded-virtual diagnoses any derived function hiding a virtual
      // function from a base class.
      // These cases may be better served by a warning (not specific to
      // virtual functions) on call sites when the call would select a
      // different function from the base class, were it visible.
      // See FIXME in test/SemaCXX/warn-overload-virtual.cpp for an example.
      if (!Data.S->IsOverload(Data.Method, MD, false))
        return true;
      // Collect the overload only if it's hidden.
      if (!CheckMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods))
        overloadedMethods.push_back(MD);
    }
  }

  if (foundSameNameMethod)
    Data.OverloadedMethods.append(overloadedMethods.begin(),
                                  overloadedMethods.end());
  return foundSameNameMethod;
}

/// \brief Add the most-overridden methods from MD to Methods.
static void AddMostOverridenMethods(const CXXMethodDecl *MD,
                        llvm::SmallPtrSetImpl<const CXXMethodDecl *>& Methods) {
  if (MD->size_overridden_methods() == 0)
    Methods.insert(MD->getCanonicalDecl());
  for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
                                      E = MD->end_overridden_methods();
       I != E; ++I)
    AddMostOverridenMethods(*I, Methods);
}

/// \brief Check if a method overloads virtual methods in a base class without
/// overriding any.
void Sema::FindHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods) {
  if (!MD->getDeclName().isIdentifier())
    return;

  CXXBasePaths Paths(/*FindAmbiguities=*/true, // true to look in all bases.
                     /*bool RecordPaths=*/false,
                     /*bool DetectVirtual=*/false);
  FindHiddenVirtualMethodData Data;
  Data.Method = MD;
  Data.S = this;

  // Keep the base methods that were overridden or introduced in the subclass
  // by 'using' in a set. A base method not in this set is hidden.
  CXXRecordDecl *DC = MD->getParent();
  DeclContext::lookup_result R = DC->lookup(MD->getDeclName());
  for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) {
    NamedDecl *ND = *I;
    if (UsingShadowDecl *shad = dyn_cast<UsingShadowDecl>(*I))
      ND = shad->getTargetDecl();
    if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
      AddMostOverridenMethods(MD, Data.OverridenAndUsingBaseMethods);
  }

  if (DC->lookupInBases(&FindHiddenVirtualMethod, &Data, Paths))
    OverloadedMethods = Data.OverloadedMethods;
}

void Sema::NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods) {
  for (unsigned i = 0, e = OverloadedMethods.size(); i != e; ++i) {
    CXXMethodDecl *overloadedMD = OverloadedMethods[i];
    PartialDiagnostic PD = PDiag(
         diag::note_hidden_overloaded_virtual_declared_here) << overloadedMD;
    HandleFunctionTypeMismatch(PD, MD->getType(), overloadedMD->getType());
    Diag(overloadedMD->getLocation(), PD);
  }
}

/// \brief Diagnose methods which overload virtual methods in a base class
/// without overriding any.
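/// For example (illustrative):
///   struct Base { virtual void f(int); };
///   struct Derived : Base { void f(double); };  // hides Base::f(int)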
void Sema::DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD) {
  if (MD->isInvalidDecl())
    return;

  if (Diags.isIgnored(diag::warn_overloaded_virtual, MD->getLocation()))
    return;

  SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
  FindHiddenVirtualMethods(MD, OverloadedMethods);
  if (!OverloadedMethods.empty()) {
    Diag(MD->getLocation(), diag::warn_overloaded_virtual)
      << MD << (OverloadedMethods.size() > 1);

    NoteHiddenVirtualMethods(MD, OverloadedMethods);
  }
}

void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
                                             Decl *TagDecl,
                                             SourceLocation LBrac,
                                             SourceLocation RBrac,
                                             AttributeList *AttrList) {
  if (!TagDecl)
    return;

  AdjustDeclIfTemplate(TagDecl);

  for (const AttributeList* l = AttrList; l; l = l->getNext()) {
    if (l->getKind() != AttributeList::AT_Visibility)
      continue;
    l->setInvalid();
    Diag(l->getLoc(), diag::warn_attribute_after_definition_ignored)
      << l->getName();
  }

  ActOnFields(S, RLoc, TagDecl, llvm::makeArrayRef(
              // strict aliasing violation!
              reinterpret_cast<Decl**>(FieldCollector->getCurFields()),
              FieldCollector->getCurNumFields()),
              LBrac, RBrac, AttrList);

  CheckCompletedCXXClass(dyn_cast_or_null<CXXRecordDecl>(TagDecl));
}

/// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared
/// special functions, such as the default constructor, copy
/// constructor, or destructor, to the given C++ class (C++
/// [special]p1).  This routine can only be executed just before the
/// definition of the class is complete.
void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
  if (!ClassDecl->hasUserDeclaredConstructor())
    ++ASTContext::NumImplicitDefaultConstructors;

  if (!ClassDecl->hasUserDeclaredCopyConstructor()) {
    ++ASTContext::NumImplicitCopyConstructors;

    // If the properties or semantics of the copy constructor couldn't be
    // determined while the class was being declared, force a declaration
    // of it now.
    if (ClassDecl->needsOverloadResolutionForCopyConstructor())
      DeclareImplicitCopyConstructor(ClassDecl);
  }

  if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveConstructor()) {
    ++ASTContext::NumImplicitMoveConstructors;

    if (ClassDecl->needsOverloadResolutionForMoveConstructor())
      DeclareImplicitMoveConstructor(ClassDecl);
  }

  if (!ClassDecl->hasUserDeclaredCopyAssignment()) {
    ++ASTContext::NumImplicitCopyAssignmentOperators;

    // If we have a dynamic class, then the copy assignment operator may be
    // virtual, so we have to declare it immediately. This ensures that, e.g.,
    // it shows up in the right place in the vtable and that we diagnose
    // problems with the implicit exception specification.
    if (ClassDecl->isDynamicClass() ||
        ClassDecl->needsOverloadResolutionForCopyAssignment())
      DeclareImplicitCopyAssignment(ClassDecl);
  }

  if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveAssignment()) {
    ++ASTContext::NumImplicitMoveAssignmentOperators;

    // Likewise for the move assignment operator.
    if (ClassDecl->isDynamicClass() ||
        ClassDecl->needsOverloadResolutionForMoveAssignment())
      DeclareImplicitMoveAssignment(ClassDecl);
  }

  if (!ClassDecl->hasUserDeclaredDestructor()) {
    ++ASTContext::NumImplicitDestructors;

    // If we have a dynamic class, then the destructor may be virtual, so we
    // have to declare the destructor immediately. This ensures that, e.g., it
    // shows up in the right place in the vtable and that we diagnose problems
    // with the implicit exception specification.
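    // e.g. (illustrative): given 'struct B { virtual ~B(); };' and
    // 'struct D : B {};', D's implicit destructor is virtual and must be
    // declared now so its vtable slot and exception specification are
    // handled correctly.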
    if (ClassDecl->isDynamicClass() ||
        ClassDecl->needsOverloadResolutionForDestructor())
      DeclareImplicitDestructor(ClassDecl);
  }
}

unsigned Sema::ActOnReenterTemplateScope(Scope *S, Decl *D) {
  if (!D)
    return 0;

  // The order of template parameters is not important here. All names
  // get added to the same scope.
  SmallVector<TemplateParameterList *, 4> ParameterLists;

  if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
    D = TD->getTemplatedDecl();

  if (auto *PSD = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
    ParameterLists.push_back(PSD->getTemplateParameters());

  if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
    for (unsigned i = 0; i < DD->getNumTemplateParameterLists(); ++i)
      ParameterLists.push_back(DD->getTemplateParameterList(i));

    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      if (FunctionTemplateDecl *FTD = FD->getDescribedFunctionTemplate())
        ParameterLists.push_back(FTD->getTemplateParameters());
    }
  }

  if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
    for (unsigned i = 0; i < TD->getNumTemplateParameterLists(); ++i)
      ParameterLists.push_back(TD->getTemplateParameterList(i));

    if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {
      if (ClassTemplateDecl *CTD = RD->getDescribedClassTemplate())
        ParameterLists.push_back(CTD->getTemplateParameters());
    }
  }

  unsigned Count = 0;
  for (TemplateParameterList *Params : ParameterLists) {
    if (Params->size() > 0)
      // Ignore explicit specializations; they don't contribute to the template
      // depth.
      ++Count;
    for (NamedDecl *Param : *Params) {
      if (Param->getDeclName()) {
        S->AddDecl(Param);
        IdResolver.AddDecl(Param);
      }
    }
  }

  return Count;
}

void Sema::ActOnStartDelayedMemberDeclarations(Scope *S, Decl *RecordD) {
  if (!RecordD)
    return;
  AdjustDeclIfTemplate(RecordD);
  CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordD);
  PushDeclContext(S, Record);
}

void Sema::ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *RecordD) {
  if (!RecordD)
    return;
  PopDeclContext();
}

/// This is used to implement the constant expression evaluation part of the
/// attribute enable_if extension. There is nothing in standard C++ which would
/// require reentering parameters.
void Sema::ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param) {
  if (!Param)
    return;

  S->AddDecl(Param);
  if (Param->getDeclName())
    IdResolver.AddDecl(Param);
}

/// ActOnStartDelayedCXXMethodDeclaration - We have completed
/// parsing a top-level (non-nested) C++ class, and we are now
/// parsing those parts of the given Method declaration that could
/// not be parsed earlier (C++ [class.mem]p2), such as default
/// arguments. This action should enter the scope of the given
/// Method declaration as if we had just parsed the qualified method
/// name. However, it should not bring the parameters into scope;
/// that will be performed by ActOnDelayedCXXMethodParameter.
void Sema::ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
}

/// ActOnDelayedCXXMethodParameter - We've already started a delayed
/// C++ method declaration. We're (re-)introducing the given
/// function parameter into scope for use in parsing later parts of
/// the method declaration. For example, we could see an
/// ActOnParamDefaultArgument event for this parameter.
void Sema::ActOnDelayedCXXMethodParameter(Scope *S, Decl *ParamD) {
  if (!ParamD)
    return;

  ParmVarDecl *Param = cast<ParmVarDecl>(ParamD);

  // If this parameter has an unparsed default argument, clear it out
  // to make way for the parsed default argument.
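  // e.g. (illustrative): in
  //   struct S { void f(int x = g()); static int g(); };
  // 'g()' can only be parsed once the class is complete, so the unparsed
  // placeholder is cleared here before the real default argument is attached.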
if (Param->hasUnparsedDefaultArg()) Param->setDefaultArg(nullptr); S->AddDecl(Param); if (Param->getDeclName()) IdResolver.AddDecl(Param); } /// ActOnFinishDelayedCXXMethodDeclaration - We have finished /// processing the delayed method declaration for Method. The method /// declaration is now considered finished. There may be a separate /// ActOnStartOfFunctionDef action later (not necessarily /// immediately!) for this method, if it was also defined inside the /// class body. void Sema::ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) { if (!MethodD) return; AdjustDeclIfTemplate(MethodD); FunctionDecl *Method = cast<FunctionDecl>(MethodD); // Now that we have our default arguments, check the constructor // again. It could produce additional diagnostics or affect whether // the class has implicitly-declared destructors, among other // things. if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Method)) CheckConstructor(Constructor); // Check the default arguments, which we may have added. if (!Method->isInvalidDecl()) CheckCXXDefaultArguments(Method); } /// CheckConstructorDeclarator - Called by ActOnDeclarator to check /// the well-formedness of the constructor declarator @p D with type @p /// R. If there are any errors in the declarator, this routine will /// emit diagnostics and set the invalid bit to true. In any case, the type /// will be updated to reflect a well-formed type for the constructor and /// returned. QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass &SC) { bool isVirtual = D.getDeclSpec().isVirtualSpecified(); // C++ [class.ctor]p3: // A constructor shall not be virtual (10.3) or static (9.4). A // constructor can be invoked for a const, volatile or const // volatile object. A constructor shall not be declared const, // volatile, or const volatile (9.3.2). if (isVirtual) { if (!D.isInvalidType()) Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be) << "virtual" << SourceRange(D.getDeclSpec().getVirtualSpecLoc()) << SourceRange(D.getIdentifierLoc()); D.setInvalidType(); } if (SC == SC_Static) { if (!D.isInvalidType()) Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be) << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc()) << SourceRange(D.getIdentifierLoc()); D.setInvalidType(); SC = SC_None; } if (unsigned TypeQuals = D.getDeclSpec().getTypeQualifiers()) { diagnoseIgnoredQualifiers( diag::err_constructor_return_type, TypeQuals, SourceLocation(), D.getDeclSpec().getConstSpecLoc(), D.getDeclSpec().getVolatileSpecLoc(), D.getDeclSpec().getRestrictSpecLoc(), D.getDeclSpec().getAtomicSpecLoc()); D.setInvalidType(); } DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo(); if (FTI.TypeQuals != 0) { if (FTI.TypeQuals & Qualifiers::Const) Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor) << "const" << SourceRange(D.getIdentifierLoc()); if (FTI.TypeQuals & Qualifiers::Volatile) Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor) << "volatile" << SourceRange(D.getIdentifierLoc()); if (FTI.TypeQuals & Qualifiers::Restrict) Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor) << "restrict" << SourceRange(D.getIdentifierLoc()); D.setInvalidType(); } // C++0x [class.ctor]p4: // A constructor shall not be declared with a ref-qualifier. 
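// Ill-formed input this rejects (illustrative): struct S { S() && ; }; // -- a ref-qualifier on a constructor.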
if (FTI.hasRefQualifier()) { Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_constructor) << FTI.RefQualifierIsLValueRef << FixItHint::CreateRemoval(FTI.getRefQualifierLoc()); D.setInvalidType(); } // Rebuild the function type "R" without any type qualifiers (in // case any of the errors above fired) and with "void" as the // return type, since constructors don't have return types. const FunctionProtoType *Proto = R->getAs<FunctionProtoType>(); if (Proto->getReturnType() == Context.VoidTy && !D.isInvalidType()) return R; FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo(); EPI.TypeQuals = 0; EPI.RefQualifier = RQ_None; return Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(), EPI, None); // HLSL Change - constructors members are all-in params } /// CheckConstructor - Checks a fully-formed constructor for /// well-formedness, issuing any diagnostics required. Returns true if /// the constructor declarator is invalid. void Sema::CheckConstructor(CXXConstructorDecl *Constructor) { CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Constructor->getDeclContext()); if (!ClassDecl) return Constructor->setInvalidDecl(); // C++ [class.copy]p3: // A declaration of a constructor for a class X is ill-formed if // its first parameter is of type (optionally cv-qualified) X and // either there are no other parameters or else all other // parameters have default arguments. if (!Constructor->isInvalidDecl() && ((Constructor->getNumParams() == 1) || (Constructor->getNumParams() > 1 && Constructor->getParamDecl(1)->hasDefaultArg())) && Constructor->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) { QualType ParamType = Constructor->getParamDecl(0)->getType(); QualType ClassTy = Context.getTagDeclType(ClassDecl); if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) { SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation(); const char *ConstRef = Constructor->getParamDecl(0)->getIdentifier() ? "const &" : " const &"; Diag(ParamLoc, diag::err_constructor_byvalue_arg) << FixItHint::CreateInsertion(ParamLoc, ConstRef); // FIXME: Rather than making the constructor invalid, we should endeavor // to fix the type. Constructor->setInvalidDecl(); } } } /// CheckDestructor - Checks a fully-formed destructor definition for /// well-formedness, issuing any diagnostics required. Returns true /// on error. bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) { CXXRecordDecl *RD = Destructor->getParent(); if (!Destructor->getOperatorDelete() && Destructor->isVirtual()) { SourceLocation Loc; if (!Destructor->isImplicit()) Loc = Destructor->getLocation(); else Loc = RD->getLocation(); // If we have a virtual destructor, look up the deallocation function FunctionDecl *OperatorDelete = nullptr; DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Delete); if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete)) return true; // If there's no class-specific operator delete, look up the global // non-array delete. if (!OperatorDelete) OperatorDelete = FindUsualDeallocationFunction(Loc, true, Name); MarkFunctionReferenced(Loc, OperatorDelete); Destructor->setOperatorDelete(OperatorDelete); } return false; } /// CheckDestructorDeclarator - Called by ActOnDeclarator to check /// the well-formedness of the destructor declarator @p D with type @p /// R. If there are any errors in the declarator, this routine will /// emit diagnostics and set the declarator to invalid.
Even if this happens, the type /// will be updated to reflect a well-formed type for the destructor and /// returned. QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC) { // C++ [class.dtor]p1: // [...] A typedef-name that names a class is a class-name // (7.1.3); however, a typedef-name that names a class shall not // be used as the identifier in the declarator for a destructor // declaration. QualType DeclaratorType = GetTypeFromParser(D.getName().DestructorName); if (const TypedefType *TT = DeclaratorType->getAs<TypedefType>()) Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name) << DeclaratorType << isa<TypeAliasDecl>(TT->getDecl()); else if (const TemplateSpecializationType *TST = DeclaratorType->getAs<TemplateSpecializationType>()) if (TST->isTypeAlias()) Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name) << DeclaratorType << 1; // C++ [class.dtor]p2: // A destructor is used to destroy objects of its class type. A // destructor takes no parameters, and no return type can be // specified for it (not even void). The address of a destructor // shall not be taken. A destructor shall not be static. A // destructor can be invoked for a const, volatile or const // volatile object. A destructor shall not be declared const, // volatile or const volatile (9.3.2). if (SC == SC_Static) { if (!D.isInvalidType()) Diag(D.getIdentifierLoc(), diag::err_destructor_cannot_be) << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc()) << SourceRange(D.getIdentifierLoc()) << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); SC = SC_None; } if (!D.isInvalidType()) { // Destructors don't have return types, but the parser will // happily parse something like: // // class X { // float ~X(); // }; // // The return type will be eliminated later. if (D.getDeclSpec().hasTypeSpecifier()) Diag(D.getIdentifierLoc(), diag::err_destructor_return_type) << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc()) << SourceRange(D.getIdentifierLoc()); else if (unsigned TypeQuals = D.getDeclSpec().getTypeQualifiers()) { diagnoseIgnoredQualifiers(diag::err_destructor_return_type, TypeQuals, SourceLocation(), D.getDeclSpec().getConstSpecLoc(), D.getDeclSpec().getVolatileSpecLoc(), D.getDeclSpec().getRestrictSpecLoc(), D.getDeclSpec().getAtomicSpecLoc()); D.setInvalidType(); } } DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo(); if (FTI.TypeQuals != 0 && !D.isInvalidType()) { if (FTI.TypeQuals & Qualifiers::Const) Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor) << "const" << SourceRange(D.getIdentifierLoc()); if (FTI.TypeQuals & Qualifiers::Volatile) Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor) << "volatile" << SourceRange(D.getIdentifierLoc()); if (FTI.TypeQuals & Qualifiers::Restrict) Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor) << "restrict" << SourceRange(D.getIdentifierLoc()); D.setInvalidType(); } // C++0x [class.dtor]p2: // A destructor shall not be declared with a ref-qualifier. if (FTI.hasRefQualifier()) { Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_destructor) << FTI.RefQualifierIsLValueRef << FixItHint::CreateRemoval(FTI.getRefQualifierLoc()); D.setInvalidType(); } // Make sure we don't have any parameters. if (FTIHasNonVoidParameters(FTI)) { Diag(D.getIdentifierLoc(), diag::err_destructor_with_params); // Delete the parameters. FTI.freeParams(); D.setInvalidType(); } // Make sure the destructor isn't variadic.
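// Ill-formed input this rejects (illustrative): struct S { ~S(...); }; // -- destructors take no parameters, variadic or otherwise.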
if (FTI.isVariadic) { Diag(D.getIdentifierLoc(), diag::err_destructor_variadic); D.setInvalidType(); } // Rebuild the function type "R" without any type qualifiers or // parameters (in case any of the errors above fired) and with // "void" as the return type, since destructors don't have return // types. if (!D.isInvalidType()) return R; const FunctionProtoType *Proto = R->getAs<FunctionProtoType>(); FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo(); EPI.Variadic = false; EPI.TypeQuals = 0; EPI.RefQualifier = RQ_None; return Context.getFunctionType(Context.VoidTy, None, EPI, None); // HLSL Change - no destructor args } static void extendLeft(SourceRange &R, const SourceRange &Before) { if (Before.isInvalid()) return; R.setBegin(Before.getBegin()); if (R.getEnd().isInvalid()) R.setEnd(Before.getEnd()); } static void extendRight(SourceRange &R, const SourceRange &After) { if (After.isInvalid()) return; if (R.getBegin().isInvalid()) R.setBegin(After.getBegin()); R.setEnd(After.getEnd()); } /// CheckConversionDeclarator - Called by ActOnDeclarator to check the /// well-formedness of the conversion function declarator @p D with /// type @p R. If there are any errors in the declarator, this routine /// will emit diagnostics and return true. Otherwise, it will return /// false. Either way, the type @p R will be updated to reflect a /// well-formed type for the conversion operator. void Sema::CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC) { // C++ [class.conv.fct]p1: // Neither parameter types nor return type can be specified. The // type of a conversion function (8.3.5) is "function taking no // parameter returning conversion-type-id." if (SC == SC_Static) { if (!D.isInvalidType()) Diag(D.getIdentifierLoc(), diag::err_conv_function_not_member) << SourceRange(D.getDeclSpec().getStorageClassSpecLoc()) << D.getName().getSourceRange(); D.setInvalidType(); SC = SC_None; } TypeSourceInfo *ConvTSI = nullptr; QualType ConvType = GetTypeFromParser(D.getName().ConversionFunctionId, &ConvTSI); if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) { // Conversion functions don't have return types, but the parser will // happily parse something like: // // class X { // float operator bool(); // }; // // The return type will be changed later anyway. Diag(D.getIdentifierLoc(), diag::err_conv_function_return_type) << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc()) << SourceRange(D.getIdentifierLoc()); D.setInvalidType(); } const FunctionProtoType *Proto = R->getAs<FunctionProtoType>(); // Make sure we don't have any parameters. if (Proto->getNumParams() > 0) { Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params); // Delete the parameters. D.getFunctionTypeInfo().freeParams(); D.setInvalidType(); } else if (Proto->isVariadic()) { Diag(D.getIdentifierLoc(), diag::err_conv_function_variadic); D.setInvalidType(); } // Diagnose "&operator bool()" and other such nonsense. This // is actually a gcc extension which we don't support. if (Proto->getReturnType() != ConvType) { bool NeedsTypedef = false; SourceRange Before, After; // Walk the chunks and extract information on them for our diagnostic.
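// e.g. for the GCC-style declaration struct S { &operator int(); }; the // reference chunk is collected into 'Before', while array/function chunks go // into 'After' and force the typedef suggestion (sketch of the walk below).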
bool PastFunctionChunk = false; for (auto &Chunk : D.type_objects()) { switch (Chunk.Kind) { case DeclaratorChunk::Function: if (!PastFunctionChunk) { if (Chunk.Fun.HasTrailingReturnType) { TypeSourceInfo *TRT = nullptr; GetTypeFromParser(Chunk.Fun.getTrailingReturnType(), &TRT); if (TRT) extendRight(After, TRT->getTypeLoc().getSourceRange()); } PastFunctionChunk = true; break; } LLVM_FALLTHROUGH; // HLSL Change case DeclaratorChunk::Array: NeedsTypedef = true; extendRight(After, Chunk.getSourceRange()); break; case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: extendLeft(Before, Chunk.getSourceRange()); break; case DeclaratorChunk::Paren: extendLeft(Before, Chunk.Loc); extendRight(After, Chunk.EndLoc); break; } } SourceLocation Loc = Before.isValid() ? Before.getBegin() : After.isValid() ? After.getBegin() : D.getIdentifierLoc(); auto &&DB = Diag(Loc, diag::err_conv_function_with_complex_decl); DB << Before << After; if (!NeedsTypedef) { DB << /*don't need a typedef*/0; // If we can provide a correct fix-it hint, do so. if (After.isInvalid() && ConvTSI) { SourceLocation InsertLoc = PP.getLocForEndOfToken(ConvTSI->getTypeLoc().getLocEnd()); DB << FixItHint::CreateInsertion(InsertLoc, " ") << FixItHint::CreateInsertionFromRange( InsertLoc, CharSourceRange::getTokenRange(Before)) << FixItHint::CreateRemoval(Before); } } else if (!Proto->getReturnType()->isDependentType()) { DB << /*typedef*/1 << Proto->getReturnType(); } else if (getLangOpts().CPlusPlus11) { DB << /*alias template*/2 << Proto->getReturnType(); } else { DB << /*might not be fixable*/3; } // Recover by incorporating the other type chunks into the result type. // Note, this does *not* change the name of the function. This is compatible // with the GCC extension: // struct S { &operator int(); } s; // int &r = s.operator int(); // ok in GCC // S::operator int&() {} // error in GCC, function name is 'operator int'. ConvType = Proto->getReturnType(); } // C++ [class.conv.fct]p4: // The conversion-type-id shall not represent a function type nor // an array type. if (ConvType->isArrayType()) { Diag(D.getIdentifierLoc(), diag::err_conv_function_to_array); ConvType = Context.getPointerType(ConvType); D.setInvalidType(); } else if (ConvType->isFunctionType()) { Diag(D.getIdentifierLoc(), diag::err_conv_function_to_function); ConvType = Context.getPointerType(ConvType); D.setInvalidType(); } // Rebuild the function type "R" without any parameters (in case any // of the errors above fired) and with the conversion type as the // return type. if (D.isInvalidType()) R = Context.getFunctionType(ConvType, None, Proto->getExtProtoInfo(), None); // HLSL Change // C++0x explicit conversion operators. if (D.getDeclSpec().isExplicitSpecified()) Diag(D.getDeclSpec().getExplicitSpecLoc(), getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_explicit_conversion_functions : diag::ext_explicit_conversion_functions) << SourceRange(D.getDeclSpec().getExplicitSpecLoc()); } /// ActOnConversionDeclarator - Called by ActOnDeclarator to complete /// the declaration of the given C++ conversion function. This routine /// is responsible for recording the conversion function in the C++ /// class, if possible. 
Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) { assert(Conversion && "Expected to receive a conversion function declaration"); CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Conversion->getDeclContext()); // Make sure we aren't redeclaring the conversion function. QualType ConvType = Context.getCanonicalType(Conversion->getConversionType()); // C++ [class.conv.fct]p1: // [...] A conversion function is never used to convert a // (possibly cv-qualified) object to the (possibly cv-qualified) // same object type (or a reference to it), to a (possibly // cv-qualified) base class of that type (or a reference to it), // or to (possibly cv-qualified) void. // FIXME: Suppress this warning if the conversion function ends up being a // virtual function that overrides a virtual function in a base class. QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl)); if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>()) ConvType = ConvTypeRef->getPointeeType(); if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared && Conversion->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) /* Suppress diagnostics for instantiations. */; else if (ConvType->isRecordType()) { ConvType = Context.getCanonicalType(ConvType).getUnqualifiedType(); if (ConvType == ClassType) Diag(Conversion->getLocation(), diag::warn_conv_to_self_not_used) << ClassType; else if (IsDerivedFrom(ClassType, ConvType)) Diag(Conversion->getLocation(), diag::warn_conv_to_base_not_used) << ClassType << ConvType; } else if (ConvType->isVoidType()) { Diag(Conversion->getLocation(), diag::warn_conv_to_void_not_used) << ClassType << ConvType; } if (FunctionTemplateDecl *ConversionTemplate = Conversion->getDescribedFunctionTemplate()) return ConversionTemplate; return Conversion; } //===----------------------------------------------------------------------===// // Namespace Handling //===----------------------------------------------------------------------===// /// \brief Diagnose a mismatch in 'inline' qualifiers when a namespace is /// reopened. static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc, SourceLocation Loc, IdentifierInfo *II, bool *IsInline, NamespaceDecl *PrevNS) { assert(*IsInline != PrevNS->isInline()); // HACK: Work around a bug in libstdc++4.6's <atomic>, where // std::__atomic[0,1,2] are defined as non-inline namespaces, then reopened as // inline namespaces, with the intention of bringing names into namespace std. // // We support this just well enough to get that case working; this is not // sufficient to support reopening namespaces as inline in general. if (*IsInline && II && II->getName().startswith("__atomic") && S.getSourceManager().isInSystemHeader(Loc)) { // Mark all prior declarations of the namespace as inline. for (NamespaceDecl *NS = PrevNS->getMostRecentDecl(); NS; NS = NS->getPreviousDecl()) NS->setInline(*IsInline); // Patch up the lookup table for the containing namespace. This isn't really // correct, but it's good enough for this particular case. for (auto *I : PrevNS->decls()) if (auto *ND = dyn_cast<NamedDecl>(I)) PrevNS->getParent()->makeDeclVisibleInContext(ND); return; } if (PrevNS->isInline()) // The user probably just forgot the 'inline', so suggest that it // be added back. 
S.Diag(Loc, diag::warn_inline_namespace_reopened_noninline) << FixItHint::CreateInsertion(KeywordLoc, "inline "); else S.Diag(Loc, diag::err_inline_namespace_mismatch) << *IsInline; S.Diag(PrevNS->getLocation(), diag::note_previous_definition); *IsInline = PrevNS->isInline(); } /// ActOnStartNamespaceDef - This is called at the start of a namespace /// definition. Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *II, SourceLocation LBrace, AttributeList *AttrList) { SourceLocation StartLoc = InlineLoc.isValid() ? InlineLoc : NamespaceLoc; // For anonymous namespace, take the location of the left brace. SourceLocation Loc = II ? IdentLoc : LBrace; bool IsInline = InlineLoc.isValid(); bool IsInvalid = false; bool IsStd = false; bool AddToKnown = false; Scope *DeclRegionScope = NamespcScope->getParent(); NamespaceDecl *PrevNS = nullptr; if (II) { // C++ [namespace.def]p2: // The identifier in an original-namespace-definition shall not // have been previously defined in the declarative region in // which the original-namespace-definition appears. The // identifier in an original-namespace-definition is the name of // the namespace. Subsequently in that declarative region, it is // treated as an original-namespace-name. // // Since namespace names are unique in their scope, and we don't // look through using directives, just look for any ordinary names. const unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Member | Decl::IDNS_Type | Decl::IDNS_Using | Decl::IDNS_Tag | Decl::IDNS_Namespace; NamedDecl *PrevDecl = nullptr; DeclContext::lookup_result R = CurContext->getRedeclContext()->lookup(II); for (DeclContext::lookup_iterator I = R.begin(), E = R.end(); I != E; ++I) { if ((*I)->getIdentifierNamespace() & IDNS) { PrevDecl = *I; break; } } PrevNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl); if (PrevNS) { // This is an extended namespace definition. if (IsInline != PrevNS->isInline()) DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, Loc, II, &IsInline, PrevNS); } else if (PrevDecl) { // This is an invalid name redefinition. Diag(Loc, diag::err_redefinition_different_kind) << II; Diag(PrevDecl->getLocation(), diag::note_previous_definition); IsInvalid = true; // Continue on to push Namespc as current DeclContext and return it. } else if (II->isStr("std") && CurContext->getRedeclContext()->isTranslationUnit()) { // This is the first "real" definition of the namespace "std", so update // our cache of the "std" namespace to point at this definition. PrevNS = getStdNamespace(); IsStd = true; AddToKnown = !IsInline; } else { // We've seen this namespace for the first time. AddToKnown = !IsInline; } } else { // Anonymous namespaces. // Determine whether the parent already has an anonymous namespace. DeclContext *Parent = CurContext->getRedeclContext(); if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) { PrevNS = TU->getAnonymousNamespace(); } else { NamespaceDecl *ND = cast<NamespaceDecl>(Parent); PrevNS = ND->getAnonymousNamespace(); } if (PrevNS && IsInline != PrevNS->isInline()) DiagnoseNamespaceInlineMismatch(*this, NamespaceLoc, NamespaceLoc, II, &IsInline, PrevNS); } NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext, IsInline, StartLoc, Loc, II, PrevNS); if (IsInvalid) Namespc->setInvalidDecl(); ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList); // FIXME: Should we be merging attributes? 
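// Example of the inline-mismatch recovery handled above (illustrative): // inline namespace N {} followed by namespace N {} reopens N without // 'inline' and is diagnosed by DiagnoseNamespaceInlineMismatch.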
if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>()) PushNamespaceVisibilityAttr(Attr, Loc); if (IsStd) StdNamespace = Namespc; if (AddToKnown) KnownNamespaces[Namespc] = false; if (II) { PushOnScopeChains(Namespc, DeclRegionScope); } else { // Link the anonymous namespace into its parent. DeclContext *Parent = CurContext->getRedeclContext(); if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) { TU->setAnonymousNamespace(Namespc); } else { cast<NamespaceDecl>(Parent)->setAnonymousNamespace(Namespc); } CurContext->addDecl(Namespc); // C++ [namespace.unnamed]p1. An unnamed-namespace-definition // behaves as if it were replaced by // namespace unique { /* empty body */ } // using namespace unique; // namespace unique { namespace-body } // where all occurrences of 'unique' in a translation unit are // replaced by the same identifier and this identifier differs // from all other identifiers in the entire program. // We just create the namespace with an empty name and then add an // implicit using declaration, just like the standard suggests. // // CodeGen enforces the "universally unique" aspect by giving all // declarations semantically contained within an anonymous // namespace internal linkage. if (!PrevNS) { UsingDirectiveDecl* UD = UsingDirectiveDecl::Create(Context, Parent, /* 'using' */ LBrace, /* 'namespace' */ SourceLocation(), /* qualifier */ NestedNameSpecifierLoc(), /* identifier */ SourceLocation(), Namespc, /* Ancestor */ Parent); UD->setImplicit(); Parent->addDecl(UD); } } ActOnDocumentableDecl(Namespc); // Although we could have an invalid decl (i.e. the namespace name is a // redefinition), push it as current DeclContext and try to continue parsing. // FIXME: We should be able to push Namespc here, so that each DeclContext // for the namespace has the declarations that showed up in that particular // namespace definition. PushDeclContext(NamespcScope, Namespc); return Namespc; } /// getNamespaceDecl - Returns the namespace a decl represents. If the decl /// is a namespace alias, returns the namespace it points to. static inline NamespaceDecl *getNamespaceDecl(NamedDecl *D) { if (NamespaceAliasDecl *AD = dyn_cast_or_null<NamespaceAliasDecl>(D)) return AD->getNamespace(); return dyn_cast_or_null<NamespaceDecl>(D); } /// ActOnFinishNamespaceDef - This callback is called after a namespace is /// exited. Decl is the DeclTy returned by ActOnStartNamespaceDef. void Sema::ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace) { NamespaceDecl *Namespc = dyn_cast_or_null<NamespaceDecl>(Dcl); assert(Namespc && "Invalid parameter, expected NamespaceDecl"); Namespc->setRBraceLoc(RBrace); PopDeclContext(); if (Namespc->hasAttr<VisibilityAttr>()) PopPragmaVisibility(true, RBrace); } CXXRecordDecl *Sema::getStdBadAlloc() const { return cast_or_null<CXXRecordDecl>( StdBadAlloc.get(Context.getExternalSource())); } NamespaceDecl *Sema::getStdNamespace() const { return cast_or_null<NamespaceDecl>( StdNamespace.get(Context.getExternalSource())); } /// \brief Retrieve the special "std" namespace, which may require us to /// implicitly define the namespace. NamespaceDecl *Sema::getOrCreateStdNamespace() { if (!StdNamespace) { // The "std" namespace has not yet been defined, so build one implicitly.
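// This path is reached, for example, by the GCC-compatibility recovery for // 'using namespace std;' below, which calls getOrCreateStdNamespace() before // any real declaration of 'std' has been seen.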
StdNamespace = NamespaceDecl::Create(Context, Context.getTranslationUnitDecl(), /*Inline=*/false, SourceLocation(), SourceLocation(), &PP.getIdentifierTable().get("std"), /*PrevDecl=*/nullptr); getStdNamespace()->setImplicit(true); } return getStdNamespace(); } bool Sema::isStdInitializerList(QualType Ty, QualType *Element) { assert(getLangOpts().CPlusPlus && "Looking for std::initializer_list outside of C++."); // We're looking for implicit instantiations of // template <typename E> class std::initializer_list. if (!StdNamespace) // If we haven't seen namespace std yet, this can't be it. return false; ClassTemplateDecl *Template = nullptr; const TemplateArgument *Arguments = nullptr; if (const RecordType *RT = Ty->getAs<RecordType>()) { ClassTemplateSpecializationDecl *Specialization = dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl()); if (!Specialization) return false; Template = Specialization->getSpecializedTemplate(); Arguments = Specialization->getTemplateArgs().data(); } else if (const TemplateSpecializationType *TST = Ty->getAs<TemplateSpecializationType>()) { Template = dyn_cast_or_null<ClassTemplateDecl>( TST->getTemplateName().getAsTemplateDecl()); Arguments = TST->getArgs(); } if (!Template) return false; if (!StdInitializerList) { // Haven't recognized std::initializer_list yet, maybe this is it. CXXRecordDecl *TemplateClass = Template->getTemplatedDecl(); if (TemplateClass->getIdentifier() != &PP.getIdentifierTable().get("initializer_list") || !getStdNamespace()->InEnclosingNamespaceSetOf( TemplateClass->getDeclContext())) return false; // This is a template called std::initializer_list, but is it the right // template? TemplateParameterList *Params = Template->getTemplateParameters(); if (Params->getMinRequiredArguments() != 1) return false; if (!isa<TemplateTypeParmDecl>(Params->getParam(0))) return false; // It's the right template. StdInitializerList = Template; } if (Template->getCanonicalDecl() != StdInitializerList->getCanonicalDecl()) return false; // This is an instance of std::initializer_list. Find the argument type. if (Element) *Element = Arguments[0].getAsType(); return true; } static ClassTemplateDecl *LookupStdInitializerList(Sema &S, SourceLocation Loc){ NamespaceDecl *Std = S.getStdNamespace(); if (!Std) { S.Diag(Loc, diag::err_implied_std_initializer_list_not_found); return nullptr; } LookupResult Result(S, &S.PP.getIdentifierTable().get("initializer_list"), Loc, Sema::LookupOrdinaryName); if (!S.LookupQualifiedName(Result, Std)) { S.Diag(Loc, diag::err_implied_std_initializer_list_not_found); return nullptr; } ClassTemplateDecl *Template = Result.getAsSingle<ClassTemplateDecl>(); if (!Template) { Result.suppressDiagnostics(); // We found something weird. Complain about the first thing we found. NamedDecl *Found = *Result.begin(); S.Diag(Found->getLocation(), diag::err_malformed_std_initializer_list); return nullptr; } // We found some template called std::initializer_list. Now verify that it's // correct. 
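// The expected shape (illustrative) is: // namespace std { template <typename E> class initializer_list; } // i.e. exactly one template type parameter, which is what is verified next.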
TemplateParameterList *Params = Template->getTemplateParameters(); if (Params->getMinRequiredArguments() != 1 || !isa<TemplateTypeParmDecl>(Params->getParam(0))) { S.Diag(Template->getLocation(), diag::err_malformed_std_initializer_list); return nullptr; } return Template; } QualType Sema::BuildStdInitializerList(QualType Element, SourceLocation Loc) { if (!StdInitializerList) { StdInitializerList = LookupStdInitializerList(*this, Loc); if (!StdInitializerList) return QualType(); } TemplateArgumentListInfo Args(Loc, Loc); Args.addArgument(TemplateArgumentLoc(TemplateArgument(Element), Context.getTrivialTypeSourceInfo(Element, Loc))); return Context.getCanonicalType( CheckTemplateIdType(TemplateName(StdInitializerList), Loc, Args)); } bool Sema::isInitListConstructor(const CXXConstructorDecl* Ctor) { // C++ [dcl.init.list]p2: // A constructor is an initializer-list constructor if its first parameter // is of type std::initializer_list<E> or reference to possibly cv-qualified // std::initializer_list<E> for some type E, and either there are no other // parameters or else all other parameters have default arguments. if (Ctor->getNumParams() < 1 || (Ctor->getNumParams() > 1 && !Ctor->getParamDecl(1)->hasDefaultArg())) return false; QualType ArgType = Ctor->getParamDecl(0)->getType(); if (const ReferenceType *RT = ArgType->getAs<ReferenceType>()) ArgType = RT->getPointeeType().getUnqualifiedType(); return isStdInitializerList(ArgType, nullptr); } /// \brief Determine whether a using directive is in a context where it will /// apply in all contexts. static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) { switch (CurContext->getDeclKind()) { case Decl::TranslationUnit: return true; case Decl::LinkageSpec: return IsUsingDirectiveInToplevelContext(CurContext->getParent()); default: return false; } } namespace { // Callback to only accept typo corrections that are namespaces. class NamespaceValidatorCCC : public CorrectionCandidateCallback { public: bool ValidateCandidate(const TypoCorrection &candidate) override { if (NamedDecl *ND = candidate.getCorrectionDecl()) return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND); return false; } }; } static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident) { R.clear(); if (TypoCorrection Corrected = S.CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), Sc, &SS, llvm::make_unique<NamespaceValidatorCCC>(), Sema::CTK_ErrorRecovery)) { if (DeclContext *DC = S.computeDeclContext(SS, false)) { std::string CorrectedStr(Corrected.getAsString(S.getLangOpts())); bool DroppedSpecifier = Corrected.WillReplaceSpecifier() && Ident->getName().equals(CorrectedStr); S.diagnoseTypo(Corrected, S.PDiag(diag::err_using_directive_member_suggest) << Ident << DC << DroppedSpecifier << SS.getRange(), S.PDiag(diag::note_namespace_defined_here)); } else { S.diagnoseTypo(Corrected, S.PDiag(diag::err_using_directive_suggest) << Ident, S.PDiag(diag::note_namespace_defined_here)); } R.addDecl(Corrected.getCorrectionDecl()); return true; } return false; } Decl *Sema::ActOnUsingDirective(Scope *S, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList) { assert(!SS.isInvalid() && "Invalid CXXScopeSpec."); assert(NamespcName && "Invalid NamespcName."); assert(IdentLoc.isValid() && "Invalid NamespcName location."); // This can only happen along a recovery path.
while (S->getFlags() & Scope::TemplateParamScope) S = S->getParent(); assert(S->getFlags() & Scope::DeclScope && "Invalid Scope."); UsingDirectiveDecl *UDir = nullptr; NestedNameSpecifier *Qualifier = nullptr; if (SS.isSet()) Qualifier = SS.getScopeRep(); // Lookup namespace name. LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName); LookupParsedName(R, S, &SS); if (R.isAmbiguous()) return nullptr; if (R.empty()) { R.clear(); // Allow "using namespace std;" or "using namespace ::std;" even if // "std" hasn't been defined yet, for GCC compatibility. if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) && NamespcName->isStr("std")) { Diag(IdentLoc, diag::ext_using_undefined_std); R.addDecl(getOrCreateStdNamespace()); R.resolveKind(); } // Otherwise, attempt typo correction. else TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName); } if (!R.empty()) { NamedDecl *Named = R.getFoundDecl(); assert((isa<NamespaceDecl>(Named) || isa<NamespaceAliasDecl>(Named)) && "expected namespace decl"); // The use of a nested name specifier may trigger deprecation warnings. DiagnoseUseOfDecl(Named, IdentLoc); // C++ [namespace.udir]p1: // A using-directive specifies that the names in the nominated // namespace can be used in the scope in which the // using-directive appears after the using-directive. During // unqualified name lookup (3.4.1), the names appear as if they // were declared in the nearest enclosing namespace which // contains both the using-directive and the nominated // namespace. [Note: in this context, "contains" means "contains // directly or indirectly". ] // Find enclosing context containing both using-directive and // nominated namespace. NamespaceDecl *NS = getNamespaceDecl(Named); DeclContext *CommonAncestor = cast<DeclContext>(NS); while (CommonAncestor && !CommonAncestor->Encloses(CurContext)) CommonAncestor = CommonAncestor->getParent(); UDir = UsingDirectiveDecl::Create(Context, CurContext, UsingLoc, NamespcLoc, SS.getWithLocInContext(Context), IdentLoc, Named, CommonAncestor); if (IsUsingDirectiveInToplevelContext(CurContext) && !SourceMgr.isInMainFile(SourceMgr.getExpansionLoc(IdentLoc))) { Diag(IdentLoc, diag::warn_using_directive_in_header); } PushUsingDirective(S, UDir); } else { Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange(); } if (UDir) ProcessDeclAttributeList(S, UDir, AttrList); return UDir; } void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) { // If the scope has an associated entity and the using directive is at // namespace or translation unit scope, add the UsingDirectiveDecl into // its lookup structure so qualified name lookup can find it. DeclContext *Ctx = S->getEntity(); if (Ctx && !Ctx->isFunctionOrMethod()) Ctx->addDecl(UDir); else // Otherwise, it is at block scope. The using-directives will affect lookup // only to the end of the scope. 
S->PushUsingDirective(UDir); } Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc) { assert(S->getFlags() & Scope::DeclScope && "Invalid Scope."); switch (Name.getKind()) { case UnqualifiedId::IK_ImplicitSelfParam: case UnqualifiedId::IK_Identifier: case UnqualifiedId::IK_OperatorFunctionId: case UnqualifiedId::IK_LiteralOperatorId: case UnqualifiedId::IK_ConversionFunctionId: break; case UnqualifiedId::IK_ConstructorName: case UnqualifiedId::IK_ConstructorTemplateId: // C++11 inheriting constructors. Diag(Name.getLocStart(), getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_using_decl_constructor : diag::err_using_decl_constructor) << SS.getRange(); if (getLangOpts().CPlusPlus11) break; return nullptr; case UnqualifiedId::IK_DestructorName: Diag(Name.getLocStart(), diag::err_using_decl_destructor) << SS.getRange(); return nullptr; case UnqualifiedId::IK_TemplateId: Diag(Name.getLocStart(), diag::err_using_decl_template_id) << SourceRange(Name.TemplateId->LAngleLoc, Name.TemplateId->RAngleLoc); return nullptr; } DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name); DeclarationName TargetName = TargetNameInfo.getName(); if (!TargetName) return nullptr; // Warn about access declarations. if (!HasUsingKeyword) { Diag(Name.getLocStart(), getLangOpts().CPlusPlus11 ? diag::err_access_decl : diag::warn_access_decl_deprecated) << FixItHint::CreateInsertion(SS.getRange().getBegin(), "using "); } if (DiagnoseUnexpandedParameterPack(SS, UPPC_UsingDeclaration) || DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration)) return nullptr; NamedDecl *UD = BuildUsingDeclaration(S, AS, UsingLoc, SS, TargetNameInfo, AttrList, /* IsInstantiation */ false, HasTypenameKeyword, TypenameLoc); if (UD) PushOnScopeChains(UD, S, /*AddToContext*/ false); return UD; } /// \brief Determine whether a using declaration considers the given /// declarations as "equivalent", e.g., if they are redeclarations of /// the same entity or are both typedefs of the same type. static bool IsEquivalentForUsingDecl(ASTContext &Context, NamedDecl *D1, NamedDecl *D2) { if (D1->getCanonicalDecl() == D2->getCanonicalDecl()) return true; if (TypedefNameDecl *TD1 = dyn_cast<TypedefNameDecl>(D1)) if (TypedefNameDecl *TD2 = dyn_cast<TypedefNameDecl>(D2)) return Context.hasSameType(TD1->getUnderlyingType(), TD2->getUnderlyingType()); return false; } /// Determines whether to create a using shadow decl for a particular /// decl, given the set of decls existing prior to this using lookup. bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig, const LookupResult &Previous, UsingShadowDecl *&PrevShadow) { // Diagnose finding a decl which is not from a base class of the // current class. We do this now because there are cases where this // function will silently decide not to build a shadow decl, which // will pre-empt further diagnostics. // // We don't need to do this in C++0x because we do the check once on // the qualifier. // // FIXME: diagnose the following if we care enough: // struct A { int foo; }; // struct B : A { using A::foo; }; // template <class T> struct C : A {}; // template <class T> struct D : C<T> { using B::foo; } // <--- // This is invalid (during instantiation) in C++03 because B::foo // resolves to the using decl in B, which is not a base class of D<T>. 
// We can't diagnose it immediately because C<T> is an unknown // specialization. The UsingShadowDecl in D<T> then points directly // to A::foo, which will look well-formed when we instantiate. // The right solution is to not collapse the shadow-decl chain. if (!getLangOpts().CPlusPlus11 && CurContext->isRecord()) { DeclContext *OrigDC = Orig->getDeclContext(); // Handle enums and anonymous structs. if (isa<EnumDecl>(OrigDC)) OrigDC = OrigDC->getParent(); CXXRecordDecl *OrigRec = cast<CXXRecordDecl>(OrigDC); while (OrigRec->isAnonymousStructOrUnion()) OrigRec = cast<CXXRecordDecl>(OrigRec->getDeclContext()); if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(OrigRec)) { if (OrigDC == CurContext) { Diag(Using->getLocation(), diag::err_using_decl_nested_name_specifier_is_current_class) << Using->getQualifierLoc().getSourceRange(); Diag(Orig->getLocation(), diag::note_using_decl_target); return true; } Diag(Using->getQualifierLoc().getBeginLoc(), diag::err_using_decl_nested_name_specifier_is_not_base_class) << Using->getQualifier() << cast<CXXRecordDecl>(CurContext) << Using->getQualifierLoc().getSourceRange(); Diag(Orig->getLocation(), diag::note_using_decl_target); return true; } } if (Previous.empty()) return false; NamedDecl *Target = Orig; if (isa<UsingShadowDecl>(Target)) Target = cast<UsingShadowDecl>(Target)->getTargetDecl(); // If the target happens to be one of the previous declarations, we // don't have a conflict. // // FIXME: but we might be increasing its access, in which case we // should redeclare it. NamedDecl *NonTag = nullptr, *Tag = nullptr; bool FoundEquivalentDecl = false; for (LookupResult::iterator I = Previous.begin(), E = Previous.end(); I != E; ++I) { NamedDecl *D = (*I)->getUnderlyingDecl(); if (IsEquivalentForUsingDecl(Context, D, Target)) { if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(*I)) PrevShadow = Shadow; FoundEquivalentDecl = true; } (isa<TagDecl>(D) ? Tag : NonTag) = D; } if (FoundEquivalentDecl) return false; if (FunctionDecl *FD = Target->getAsFunction()) { NamedDecl *OldDecl = nullptr; switch (CheckOverload(nullptr, FD, Previous, OldDecl, /*IsForUsingDecl*/ true)) { case Ovl_Overload: return false; case Ovl_NonFunction: Diag(Using->getLocation(), diag::err_using_decl_conflict); break; // We found a decl with the exact signature. case Ovl_Match: // If we're in a record, we want to hide the target, so we // return true (without a diagnostic) to tell the caller not to // build a shadow decl. if (CurContext->isRecord()) return true; // If we're not in a record, this is an error. Diag(Using->getLocation(), diag::err_using_decl_conflict); break; } Diag(Target->getLocation(), diag::note_using_decl_target); Diag(OldDecl->getLocation(), diag::note_using_decl_conflict); return true; } // Target is not a function. if (isa<TagDecl>(Target)) { // No conflict between a tag and a non-tag. if (!Tag) return false; Diag(Using->getLocation(), diag::err_using_decl_conflict); Diag(Target->getLocation(), diag::note_using_decl_target); Diag(Tag->getLocation(), diag::note_using_decl_conflict); return true; } // No conflict between a tag and a non-tag. if (!NonTag) return false; Diag(Using->getLocation(), diag::err_using_decl_conflict); Diag(Target->getLocation(), diag::note_using_decl_target); Diag(NonTag->getLocation(), diag::note_using_decl_conflict); return true; } /// Builds a shadow declaration corresponding to a 'using' declaration. 
UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Orig, UsingShadowDecl *PrevDecl) { // If we resolved to another shadow declaration, just coalesce them. NamedDecl *Target = Orig; if (isa<UsingShadowDecl>(Target)) { Target = cast<UsingShadowDecl>(Target)->getTargetDecl(); assert(!isa<UsingShadowDecl>(Target) && "nested shadow declaration"); } UsingShadowDecl *Shadow = UsingShadowDecl::Create(Context, CurContext, UD->getLocation(), UD, Target); UD->addShadowDecl(Shadow); Shadow->setAccess(UD->getAccess()); if (Orig->isInvalidDecl() || UD->isInvalidDecl()) Shadow->setInvalidDecl(); Shadow->setPreviousDecl(PrevDecl); if (S) PushOnScopeChains(Shadow, S); else CurContext->addDecl(Shadow); return Shadow; } /// Hides a using shadow declaration. This is required by the current /// using-decl implementation when a resolvable using declaration in a /// class is followed by a declaration which would hide or override /// one or more of the using decl's targets; for example: /// /// struct Base { void foo(int); }; /// struct Derived : Base { /// using Base::foo; /// void foo(int); /// }; /// /// The governing language is C++03 [namespace.udecl]p12: /// /// When a using-declaration brings names from a base class into a /// derived class scope, member functions in the derived class /// override and/or hide member functions with the same name and /// parameter types in a base class (rather than conflicting). /// /// There are two ways to implement this: /// (1) optimistically create shadow decls when they're not hidden /// by existing declarations, or /// (2) don't create any shadow decls (or at least don't make them /// visible) until we've fully parsed/instantiated the class. /// The problem with (1) is that we might have to retroactively remove /// a shadow decl, which requires several O(n) operations because the /// decl structures are (very reasonably) not designed for removal. /// (2) avoids this but is very fiddly and phase-dependent. void Sema::HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow) { if (Shadow->getDeclName().getNameKind() == DeclarationName::CXXConversionFunctionName) cast<CXXRecordDecl>(Shadow->getDeclContext())->removeConversion(Shadow); // Remove it from the DeclContext... Shadow->getDeclContext()->removeDecl(Shadow); // ...and the scope, if applicable... if (S) { S->RemoveDecl(Shadow); IdResolver.RemoveDecl(Shadow); } // ...and the using decl. Shadow->getUsingDecl()->removeShadowDecl(Shadow); // TODO: complain somehow if Shadow was used. It shouldn't // be possible for this to happen, because...? } /// Find the base specifier for a base class with the given type. static CXXBaseSpecifier *findDirectBaseWithType(CXXRecordDecl *Derived, QualType DesiredBase, bool &AnyDependentBases) { // Check whether the named type is a direct base class. 
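// e.g. (illustrative) for struct D : A, B {} and DesiredBase == B, the loop // below returns the base-specifier for B; a dependent base such as T in // struct D : T {} only sets AnyDependentBases.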
CanQualType CanonicalDesiredBase = DesiredBase->getCanonicalTypeUnqualified(); for (auto &Base : Derived->bases()) { CanQualType BaseType = Base.getType()->getCanonicalTypeUnqualified(); if (CanonicalDesiredBase == BaseType) return &Base; if (BaseType->isDependentType()) AnyDependentBases = true; } return nullptr; } namespace { class UsingValidatorCCC : public CorrectionCandidateCallback { public: UsingValidatorCCC(bool HasTypenameKeyword, bool IsInstantiation, NestedNameSpecifier *NNS, CXXRecordDecl *RequireMemberOf) : HasTypenameKeyword(HasTypenameKeyword), IsInstantiation(IsInstantiation), OldNNS(NNS), RequireMemberOf(RequireMemberOf) {} bool ValidateCandidate(const TypoCorrection &Candidate) override { NamedDecl *ND = Candidate.getCorrectionDecl(); // Keywords are not valid here. if (!ND || isa<NamespaceDecl>(ND)) return false; // Completely unqualified names are invalid for a 'using' declaration. if (Candidate.WillReplaceSpecifier() && !Candidate.getCorrectionSpecifier()) return false; if (RequireMemberOf) { auto *FoundRecord = dyn_cast<CXXRecordDecl>(ND); if (FoundRecord && FoundRecord->isInjectedClassName()) { // No-one ever wants a using-declaration to name an injected-class-name // of a base class, unless they're declaring an inheriting constructor. ASTContext &Ctx = ND->getASTContext(); if (!Ctx.getLangOpts().CPlusPlus11) return false; QualType FoundType = Ctx.getRecordType(FoundRecord); // Check that the injected-class-name is named as a member of its own // type; we don't want to suggest 'using Derived::Base;', since that // means something else. NestedNameSpecifier *Specifier = Candidate.WillReplaceSpecifier() ? Candidate.getCorrectionSpecifier() : OldNNS; if (!Specifier->getAsType() || !Ctx.hasSameType(QualType(Specifier->getAsType(), 0), FoundType)) return false; // Check that this inheriting constructor declaration actually names a // direct base class of the current class. bool AnyDependentBases = false; if (!findDirectBaseWithType(RequireMemberOf, Ctx.getRecordType(FoundRecord), AnyDependentBases) && !AnyDependentBases) return false; } else { auto *RD = dyn_cast<CXXRecordDecl>(ND->getDeclContext()); if (!RD || RequireMemberOf->isProvablyNotDerivedFrom(RD)) return false; // FIXME: Check that the base class member is accessible? } } if (isa<TypeDecl>(ND)) return HasTypenameKeyword || !IsInstantiation; return !HasTypenameKeyword; } private: bool HasTypenameKeyword; bool IsInstantiation; NestedNameSpecifier *OldNNS; CXXRecordDecl *RequireMemberOf; }; } // end anonymous namespace /// Builds a using declaration. /// /// \param IsInstantiation - Whether this call arises from an /// instantiation of an unresolved using declaration. We treat /// the lookup differently for these declarations. NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc) { assert(!SS.isInvalid() && "Invalid CXXScopeSpec."); SourceLocation IdentLoc = NameInfo.getLoc(); assert(IdentLoc.isValid() && "Invalid TargetName location."); // FIXME: We ignore attributes for now. if (SS.isEmpty()) { Diag(IdentLoc, diag::err_using_requires_qualname); return nullptr; } // Do the redeclaration lookup in the current scope. LookupResult Previous(*this, NameInfo, LookupUsingDeclName, ForRedeclaration); Previous.setHideTags(false); if (S) { LookupName(Previous, S); // It is really dumb that we have to do this. 
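// e.g. (illustrative) a block-scope using N::f; must not be checked against // a local 'extern' declaration of f that is not ordinarily visible; the // filter below erases such lookup results.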
LookupResult::Filter F = Previous.makeFilter(); while (F.hasNext()) { NamedDecl *D = F.next(); if (!isDeclInScope(D, CurContext, S)) F.erase(); // If we found a local extern declaration that's not ordinarily visible, // and this declaration is being added to a non-block scope, ignore it. // We're only checking for scope conflicts here, not also for violations // of the linkage rules. else if (!CurContext->isFunctionOrMethod() && D->isLocalExternDecl() && !(D->getIdentifierNamespace() & Decl::IDNS_Ordinary)) F.erase(); } F.done(); } else { assert(IsInstantiation && "no scope in non-instantiation"); assert(CurContext->isRecord() && "scope not record in instantiation"); LookupQualifiedName(Previous, CurContext); } // Check for invalid redeclarations. if (CheckUsingDeclRedeclaration(UsingLoc, HasTypenameKeyword, SS, IdentLoc, Previous)) return nullptr; // Check for bad qualifiers. if (CheckUsingDeclQualifier(UsingLoc, SS, NameInfo, IdentLoc)) return nullptr; DeclContext *LookupContext = computeDeclContext(SS); NamedDecl *D; NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context); if (!LookupContext) { if (HasTypenameKeyword) { // FIXME: not all declaration name kinds are legal here D = UnresolvedUsingTypenameDecl::Create(Context, CurContext, UsingLoc, TypenameLoc, QualifierLoc, IdentLoc, NameInfo.getName()); } else { D = UnresolvedUsingValueDecl::Create(Context, CurContext, UsingLoc, QualifierLoc, NameInfo); } D->setAccess(AS); CurContext->addDecl(D); return D; } auto Build = [&](bool Invalid) { UsingDecl *UD = UsingDecl::Create(Context, CurContext, UsingLoc, QualifierLoc, NameInfo, HasTypenameKeyword); UD->setAccess(AS); CurContext->addDecl(UD); UD->setInvalidDecl(Invalid); return UD; }; auto BuildInvalid = [&]{ return Build(true); }; auto BuildValid = [&]{ return Build(false); }; if (RequireCompleteDeclContext(SS, LookupContext)) return BuildInvalid(); // Look up the target name. LookupResult R(*this, NameInfo, LookupOrdinaryName); // Unlike most lookups, we don't always want to hide tag // declarations: tag names are visible through the using declaration // even if hidden by ordinary names, *except* in a dependent context // where it's important for the sanity of two-phase lookup. if (!IsInstantiation) R.setHideTags(false); // For the purposes of this lookup, we have a base object type // equal to that of the current context. if (CurContext->isRecord()) { R.setBaseObjectType( Context.getTypeDeclType(cast<CXXRecordDecl>(CurContext))); } LookupQualifiedName(R, LookupContext); // Try to correct typos if possible. If constructor name lookup finds no // results, that means the named class has no explicit constructors, and we // suppressed declaring implicit ones (probably because it's dependent or // invalid). if (R.empty() && NameInfo.getName().getNameKind() != DeclarationName::CXXConstructorName) { if (TypoCorrection Corrected = CorrectTypo( R.getLookupNameInfo(), R.getLookupKind(), S, &SS, llvm::make_unique<UsingValidatorCCC>( HasTypenameKeyword, IsInstantiation, SS.getScopeRep(), dyn_cast<CXXRecordDecl>(CurContext)), CTK_ErrorRecovery)) { // We reject any correction for which ND would be NULL. NamedDecl *ND = Corrected.getCorrectionDecl(); // We reject candidates where DroppedSpecifier == true, hence the // literal '0' below. diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest) << NameInfo.getName() << LookupContext << 0 << SS.getRange()); // If we corrected to an inheriting constructor, handle it as one. 
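// e.g. (illustrative) using Base::Bse; inside struct D : Base, typo- // corrected to the injected-class-name Base, is rebuilt below as the // inheriting-constructor declaration using Base::Base;.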
auto *RD = dyn_cast<CXXRecordDecl>(ND); if (RD && RD->isInjectedClassName()) { // Fix up the information we'll use to build the using declaration. if (Corrected.WillReplaceSpecifier()) { NestedNameSpecifierLocBuilder Builder; Builder.MakeTrivial(Context, Corrected.getCorrectionSpecifier(), QualifierLoc.getSourceRange()); QualifierLoc = Builder.getWithLocInContext(Context); } NameInfo.setName(Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(Context.getRecordType(RD)))); NameInfo.setNamedTypeInfo(nullptr); for (auto *Ctor : LookupConstructors(RD)) R.addDecl(Ctor); } else { // FIXME: Pick up all the declarations if we found an overloaded function. R.addDecl(ND); } } else { Diag(IdentLoc, diag::err_no_member) << NameInfo.getName() << LookupContext << SS.getRange(); return BuildInvalid(); } } if (R.isAmbiguous()) return BuildInvalid(); if (HasTypenameKeyword) { // If we asked for a typename and got a non-type decl, error out. if (!R.getAsSingle<TypeDecl>()) { Diag(IdentLoc, diag::err_using_typename_non_type); for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) Diag((*I)->getUnderlyingDecl()->getLocation(), diag::note_using_decl_target); return BuildInvalid(); } } else { // If we asked for a non-typename and we got a type, error out, // but only if this is an instantiation of an unresolved using // decl. Otherwise just silently find the type name. if (IsInstantiation && R.getAsSingle<TypeDecl>()) { Diag(IdentLoc, diag::err_using_dependent_value_is_type); Diag(R.getFoundDecl()->getLocation(), diag::note_using_decl_target); return BuildInvalid(); } } // C++0x N2914 [namespace.udecl]p6: // A using-declaration shall not name a namespace. if (R.getAsSingle<NamespaceDecl>()) { Diag(IdentLoc, diag::err_using_decl_can_not_refer_to_namespace) << SS.getRange(); return BuildInvalid(); } UsingDecl *UD = BuildValid(); // The normal rules do not apply to inheriting constructor declarations. if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) { // Suppress access diagnostics; the access check is instead performed at the // point of use for an inheriting constructor. R.suppressDiagnostics(); CheckInheritingConstructorUsingDecl(UD); return UD; } // Otherwise, look up the target name. for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { UsingShadowDecl *PrevDecl = nullptr; if (!CheckUsingShadowDecl(UD, *I, Previous, PrevDecl)) BuildUsingShadowDecl(S, UD, *I, PrevDecl); } return UD; } /// Additional checks for a using declaration referring to a constructor name. bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) { assert(!UD->hasTypename() && "expecting a constructor name"); const Type *SourceType = UD->getQualifier()->getAsType(); assert(SourceType && "Using decl naming constructor doesn't have type in scope spec."); CXXRecordDecl *TargetClass = cast<CXXRecordDecl>(CurContext); // Check whether the named type is a direct base class. bool AnyDependentBases = false; auto *Base = findDirectBaseWithType(TargetClass, QualType(SourceType, 0), AnyDependentBases); if (!Base && !AnyDependentBases) { Diag(UD->getUsingLoc(), diag::err_using_decl_constructor_not_in_direct_base) << UD->getNameInfo().getSourceRange() << QualType(SourceType, 0) << TargetClass; UD->setInvalidDecl(); return true; } if (Base) Base->setInheritConstructors(); return false; } /// Checks that the given using declaration is not an invalid /// redeclaration. 
Note that this is checking only for the using decl /// itself, not for any ill-formedness among the UsingShadowDecls. bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Prev) { // C++03 [namespace.udecl]p8: // C++0x [namespace.udecl]p10: // A using-declaration is a declaration and can therefore be used // repeatedly where (and only where) multiple declarations are // allowed. // // That's in non-member contexts. if (!CurContext->getRedeclContext()->isRecord()) return false; NestedNameSpecifier *Qual = SS.getScopeRep(); for (LookupResult::iterator I = Prev.begin(), E = Prev.end(); I != E; ++I) { NamedDecl *D = *I; bool DTypename; NestedNameSpecifier *DQual; if (UsingDecl *UD = dyn_cast<UsingDecl>(D)) { DTypename = UD->hasTypename(); DQual = UD->getQualifier(); } else if (UnresolvedUsingValueDecl *UD = dyn_cast<UnresolvedUsingValueDecl>(D)) { DTypename = false; DQual = UD->getQualifier(); } else if (UnresolvedUsingTypenameDecl *UD = dyn_cast<UnresolvedUsingTypenameDecl>(D)) { DTypename = true; DQual = UD->getQualifier(); } else continue; // using decls differ if one says 'typename' and the other doesn't. // FIXME: non-dependent using decls? if (HasTypenameKeyword != DTypename) continue; // using decls differ if they name different scopes (but note that // template instantiation can cause this check to trigger when it // didn't before instantiation). if (Context.getCanonicalNestedNameSpecifier(Qual) != Context.getCanonicalNestedNameSpecifier(DQual)) continue; Diag(NameLoc, diag::err_using_decl_redeclaration) << SS.getRange(); Diag(D->getLocation(), diag::note_using_decl) << 1; return true; } return false; } /// Checks that the given nested-name qualifier used in a using decl /// in the current context is appropriately related to the current /// scope. If an error is found, diagnoses it and returns true. bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc) { DeclContext *NamedContext = computeDeclContext(SS); if (!CurContext->isRecord()) { // C++03 [namespace.udecl]p3: // C++0x [namespace.udecl]p8: // A using-declaration for a class member shall be a member-declaration. // If we weren't able to compute a valid scope, it must be a // dependent class scope. if (!NamedContext || NamedContext->isRecord()) { auto *RD = dyn_cast_or_null<CXXRecordDecl>(NamedContext); if (RD && RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), RD)) RD = nullptr; Diag(NameLoc, diag::err_using_decl_can_not_refer_to_class_member) << SS.getRange(); // If we have a complete, non-dependent source type, try to suggest a // way to get the same effect. if (!RD) return true; // Find what this using-declaration was referring to. LookupResult R(*this, NameInfo, LookupOrdinaryName); R.setHideTags(false); R.suppressDiagnostics(); LookupQualifiedName(R, RD); if (R.getAsSingle<TypeDecl>()) { if (getLangOpts().CPlusPlus11) { // Convert 'using X::Y;' to 'using Y = X::Y;'. Diag(SS.getBeginLoc(), diag::note_using_decl_class_member_workaround) << 0 // alias declaration << FixItHint::CreateInsertion(SS.getBeginLoc(), NameInfo.getName().getAsString() + " = "); } else { // Convert 'using X::Y;' to 'typedef X::Y Y;'. 
SourceLocation InsertLoc = PP.getLocForEndOfToken(NameInfo.getLocEnd()); Diag(InsertLoc, diag::note_using_decl_class_member_workaround) << 1 // typedef declaration << FixItHint::CreateReplacement(UsingLoc, "typedef") << FixItHint::CreateInsertion( InsertLoc, " " + NameInfo.getName().getAsString()); } } else if (R.getAsSingle<VarDecl>()) { // Don't provide a fixit outside C++11 mode; we don't want to suggest // repeating the type of the static data member here. FixItHint FixIt; if (getLangOpts().CPlusPlus11) { // Convert 'using X::Y;' to 'auto &Y = X::Y;'. FixIt = FixItHint::CreateReplacement( UsingLoc, "auto &" + NameInfo.getName().getAsString() + " = "); } Diag(UsingLoc, diag::note_using_decl_class_member_workaround) << 2 // reference declaration << FixIt; } return true; } // Otherwise, everything is known to be fine. return false; } // The current scope is a record. // If the named context is dependent, we can't decide much. if (!NamedContext) { // FIXME: in C++0x, we can diagnose if we can prove that the // nested-name-specifier does not refer to a base class, which is // still possible in some cases. // Otherwise we have to conservatively report that things might be // okay. return false; } if (!NamedContext->isRecord()) { // Ideally this would point at the last name in the specifier, // but we don't have that level of source info. Diag(SS.getRange().getBegin(), diag::err_using_decl_nested_name_specifier_is_not_class) << SS.getScopeRep() << SS.getRange(); return true; } if (!NamedContext->isDependentContext() && RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), NamedContext)) return true; if (getLangOpts().CPlusPlus11) { // C++0x [namespace.udecl]p3: // In a using-declaration used as a member-declaration, the // nested-name-specifier shall name a base class of the class // being defined. if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom( cast<CXXRecordDecl>(NamedContext))) { if (CurContext == NamedContext) { Diag(NameLoc, diag::err_using_decl_nested_name_specifier_is_current_class) << SS.getRange(); return true; } Diag(SS.getRange().getBegin(), diag::err_using_decl_nested_name_specifier_is_not_base_class) << SS.getScopeRep() << cast<CXXRecordDecl>(CurContext) << SS.getRange(); return true; } return false; } // C++03 [namespace.udecl]p4: // A using-declaration used as a member-declaration shall refer // to a member of a base class of the class being defined [etc.]. // Salient point: SS doesn't have to name a base class as long as // lookup only finds members from base classes. Therefore we can // diagnose here only if we can prove that that can't happen, // i.e. if the class hierarchies provably don't intersect. // TODO: it would be nice if "definitely valid" results were cached // in the UsingDecl and UsingShadowDecl so that these checks didn't // need to be repeated. struct UserData { llvm::SmallPtrSet<const CXXRecordDecl*, 4> Bases; static bool collect(const CXXRecordDecl *Base, void *OpaqueData) { UserData *Data = reinterpret_cast<UserData*>(OpaqueData); Data->Bases.insert(Base); return true; } bool hasDependentBases(const CXXRecordDecl *Class) { return !Class->forallBases(collect, this); } /// Returns true if the base is dependent or is one of the /// accumulated base classes. 
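/// (Equivalently: doesNotContain returns false exactly when \p Base is
/// already in the accumulated set, so the forallBases traversal used by
/// mightShareBases below fails as soon as a shared or dependent base is
/// seen.)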
static bool doesNotContain(const CXXRecordDecl *Base, void *OpaqueData) { UserData *Data = reinterpret_cast<UserData*>(OpaqueData); return !Data->Bases.count(Base); } bool mightShareBases(const CXXRecordDecl *Class) { return Bases.count(Class) || !Class->forallBases(doesNotContain, this); } }; UserData Data; // Returns false if we find a dependent base. if (Data.hasDependentBases(cast<CXXRecordDecl>(CurContext))) return false; // Returns false if the class has a dependent base or if it or one // of its bases is present in the base set of the current context. if (Data.mightShareBases(cast<CXXRecordDecl>(NamedContext))) return false; Diag(SS.getRange().getBegin(), diag::err_using_decl_nested_name_specifier_is_not_base_class) << SS.getScopeRep() << cast<CXXRecordDecl>(CurContext) << SS.getRange(); return true; } Decl *Sema::ActOnAliasDeclaration(Scope *S, AccessSpecifier AS, MultiTemplateParamsArg TemplateParamLists, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec) { // Skip up to the relevant declaration scope. while (S->getFlags() & Scope::TemplateParamScope) S = S->getParent(); assert((S->getFlags() & Scope::DeclScope) && "got alias-declaration outside of declaration scope"); if (Type.isInvalid()) return nullptr; bool Invalid = false; DeclarationNameInfo NameInfo = GetNameFromUnqualifiedId(Name); TypeSourceInfo *TInfo = nullptr; GetTypeFromParser(Type.get(), &TInfo); if (DiagnoseClassNameShadow(CurContext, NameInfo)) return nullptr; if (DiagnoseUnexpandedParameterPack(Name.StartLocation, TInfo, UPPC_DeclarationType)) { Invalid = true; TInfo = Context.getTrivialTypeSourceInfo(Context.IntTy, TInfo->getTypeLoc().getBeginLoc()); } LookupResult Previous(*this, NameInfo, LookupOrdinaryName, ForRedeclaration); LookupName(Previous, S); // Warn about shadowing the name of a template parameter. if (Previous.isSingleResult() && Previous.getFoundDecl()->isTemplateParameter()) { DiagnoseTemplateParameterShadow(Name.StartLocation,Previous.getFoundDecl()); Previous.clear(); } assert(Name.Kind == UnqualifiedId::IK_Identifier && "name in alias declaration must be an identifier"); TypeAliasDecl *NewTD = TypeAliasDecl::Create(Context, CurContext, UsingLoc, Name.StartLocation, Name.Identifier, TInfo); NewTD->setAccess(AS); if (Invalid) NewTD->setInvalidDecl(); ProcessDeclAttributeList(S, NewTD, AttrList); CheckTypedefForVariablyModifiedType(S, NewTD); Invalid |= NewTD->isInvalidDecl(); bool Redeclaration = false; NamedDecl *NewND; if (TemplateParamLists.size()) { TypeAliasTemplateDecl *OldDecl = nullptr; TemplateParameterList *OldTemplateParams = nullptr; if (TemplateParamLists.size() != 1) { Diag(UsingLoc, diag::err_alias_template_extra_headers) << SourceRange(TemplateParamLists[1]->getTemplateLoc(), TemplateParamLists[TemplateParamLists.size()-1]->getRAngleLoc()); } TemplateParameterList *TemplateParams = TemplateParamLists[0]; // Only consider previous declarations in the same scope. 
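// (Declarations found in enclosing scopes are not redeclaration candidates
// for the alias template, so they are filtered out of the lookup results.)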
FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage*/false, /*ExplicitInstantiationOrSpecialization*/false); if (!Previous.empty()) { Redeclaration = true; OldDecl = Previous.getAsSingle<TypeAliasTemplateDecl>(); if (!OldDecl && !Invalid) { Diag(UsingLoc, diag::err_redefinition_different_kind) << Name.Identifier; NamedDecl *OldD = Previous.getRepresentativeDecl(); if (OldD->getLocation().isValid()) Diag(OldD->getLocation(), diag::note_previous_definition); Invalid = true; } if (!Invalid && OldDecl && !OldDecl->isInvalidDecl()) { if (TemplateParameterListsAreEqual(TemplateParams, OldDecl->getTemplateParameters(), /*Complain=*/true, TPL_TemplateMatch)) OldTemplateParams = OldDecl->getTemplateParameters(); else Invalid = true; TypeAliasDecl *OldTD = OldDecl->getTemplatedDecl(); if (!Invalid && !Context.hasSameType(OldTD->getUnderlyingType(), NewTD->getUnderlyingType())) { // FIXME: The C++0x standard does not clearly say this is ill-formed, // but we can't reasonably accept it. Diag(NewTD->getLocation(), diag::err_redefinition_different_typedef) << 2 << NewTD->getUnderlyingType() << OldTD->getUnderlyingType(); if (OldTD->getLocation().isValid()) Diag(OldTD->getLocation(), diag::note_previous_definition); Invalid = true; } } } // Merge any previous default template arguments into our parameters, // and check the parameter list. if (CheckTemplateParameterList(TemplateParams, OldTemplateParams, TPC_TypeAliasTemplate)) return nullptr; TypeAliasTemplateDecl *NewDecl = TypeAliasTemplateDecl::Create(Context, CurContext, UsingLoc, Name.Identifier, TemplateParams, NewTD); NewTD->setDescribedAliasTemplate(NewDecl); NewDecl->setAccess(AS); if (Invalid) NewDecl->setInvalidDecl(); else if (OldDecl) NewDecl->setPreviousDecl(OldDecl); NewND = NewDecl; } else { if (auto *TD = dyn_cast_or_null<TagDecl>(DeclFromDeclSpec)) { setTagNameForLinkagePurposes(TD, NewTD); handleTagNumbering(TD, S); } ActOnTypedefNameDecl(S, CurContext, NewTD, Previous, Redeclaration); NewND = NewTD; } if (!Redeclaration) PushOnScopeChains(NewND, S); ActOnDocumentableDecl(NewND); return NewND; } Decl *Sema::ActOnNamespaceAliasDef(Scope *S, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident) { // Lookup the namespace name. LookupResult R(*this, Ident, IdentLoc, LookupNamespaceName); LookupParsedName(R, S, &SS); if (R.isAmbiguous()) return nullptr; if (R.empty()) { if (!TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, Ident)) { Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange(); return nullptr; } } assert(!R.isAmbiguous() && !R.empty()); // Check if we have a previous declaration with the same name. NamedDecl *PrevDecl = LookupSingleName(S, Alias, AliasLoc, LookupOrdinaryName, ForRedeclaration); if (PrevDecl && !isDeclInScope(PrevDecl, CurContext, S)) PrevDecl = nullptr; NamedDecl *ND = R.getFoundDecl(); if (PrevDecl) { if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(PrevDecl)) { // We already have an alias with the same name that points to the same // namespace; check that it matches. if (!AD->getNamespace()->Equals(getNamespaceDecl(ND))) { Diag(AliasLoc, diag::err_redefinition_different_namespace_alias) << Alias; Diag(PrevDecl->getLocation(), diag::note_previous_namespace_alias) << AD->getNamespace(); return nullptr; } } else { unsigned DiagID = isa<NamespaceDecl>(PrevDecl) ? 
diag::err_redefinition : diag::err_redefinition_different_kind; Diag(AliasLoc, DiagID) << Alias; Diag(PrevDecl->getLocation(), diag::note_previous_definition); return nullptr; } } // The use of a nested name specifier may trigger deprecation warnings. DiagnoseUseOfDecl(ND, IdentLoc); NamespaceAliasDecl *AliasDecl = NamespaceAliasDecl::Create(Context, CurContext, NamespaceLoc, AliasLoc, Alias, SS.getWithLocInContext(Context), IdentLoc, ND); if (PrevDecl) AliasDecl->setPreviousDecl(cast<NamespaceAliasDecl>(PrevDecl)); PushOnScopeChains(AliasDecl, S); return AliasDecl; } Sema::ImplicitExceptionSpecification Sema::ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD) { CXXRecordDecl *ClassDecl = MD->getParent(); // C++ [except.spec]p14: // An implicitly declared special member function (Clause 12) shall have an // exception-specification. [...] ImplicitExceptionSpecification ExceptSpec(*this); if (ClassDecl->isInvalidDecl()) return ExceptSpec; // Direct base-class constructors. for (const auto &B : ClassDecl->bases()) { if (B.isVirtual()) // Handled below. continue; if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl); // A deleted default constructor is added anyway; whether the standard // requires this is unclear, but it is unlikely to matter. if (Constructor) ExceptSpec.CalledDecl(B.getLocStart(), Constructor); } } // Virtual base-class constructors. for (const auto &B : ClassDecl->vbases()) { if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl); // A deleted default constructor is added anyway; whether the standard // requires this is unclear, but it is unlikely to matter. if (Constructor) ExceptSpec.CalledDecl(B.getLocStart(), Constructor); } } // Field constructors. for (const auto *F : ClassDecl->fields()) { if (F->hasInClassInitializer()) { if (Expr *E = F->getInClassInitializer()) ExceptSpec.CalledExpr(E); } else if (const RecordType *RecordTy = Context.getBaseElementType(F->getType())->getAs<RecordType>()) { CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl()); CXXConstructorDecl *Constructor = LookupDefaultConstructor(FieldRecDecl); // A deleted default constructor is added anyway; whether the standard // requires this is unclear, but it is unlikely to matter: this function // is never actually called in that case, and merely referring to the // deleted function here may already make the program ill-formed. if (Constructor) ExceptSpec.CalledDecl(F->getLocation(), Constructor); } } return ExceptSpec; } Sema::ImplicitExceptionSpecification Sema::ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD) { CXXRecordDecl *ClassDecl = CD->getParent(); // C++ [except.spec]p14: // An inheriting constructor [...] shall have an exception-specification. [...] ImplicitExceptionSpecification ExceptSpec(*this); if (ClassDecl->isInvalidDecl()) return ExceptSpec; // Inherited constructor.
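// The inheriting constructor implicitly invokes the constructor it inherits,
// so that constructor's exception specification is merged in first, followed
// by those of the other subobjects' default constructors.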
const CXXConstructorDecl *InheritedCD = CD->getInheritedConstructor(); const CXXRecordDecl *InheritedDecl = InheritedCD->getParent(); // FIXME: Copying or moving the parameters could add extra exceptions to the // set, as could the default arguments for the inherited constructor. This // will be addressed when we implement the resolution of core issue 1351. ExceptSpec.CalledDecl(CD->getLocStart(), InheritedCD); // Direct base-class constructors. for (const auto &B : ClassDecl->bases()) { if (B.isVirtual()) // Handled below. continue; if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); if (BaseClassDecl == InheritedDecl) continue; CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl); if (Constructor) ExceptSpec.CalledDecl(B.getLocStart(), Constructor); } } // Virtual base-class constructors. for (const auto &B : ClassDecl->vbases()) { if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); if (BaseClassDecl == InheritedDecl) continue; CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl); if (Constructor) ExceptSpec.CalledDecl(B.getLocStart(), Constructor); } } // Field constructors. for (const auto *F : ClassDecl->fields()) { if (F->hasInClassInitializer()) { if (Expr *E = F->getInClassInitializer()) ExceptSpec.CalledExpr(E); } else if (const RecordType *RecordTy = Context.getBaseElementType(F->getType())->getAs<RecordType>()) { CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl()); CXXConstructorDecl *Constructor = LookupDefaultConstructor(FieldRecDecl); if (Constructor) ExceptSpec.CalledDecl(F->getLocation(), Constructor); } } return ExceptSpec; } namespace { /// RAII object to register a special member as being currently declared. struct DeclaringSpecialMember { Sema &S; Sema::SpecialMemberDecl D; bool WasAlreadyBeingDeclared; DeclaringSpecialMember(Sema &S, CXXRecordDecl *RD, Sema::CXXSpecialMember CSM) : S(S), D(RD, CSM) { WasAlreadyBeingDeclared = !S.SpecialMembersBeingDeclared.insert(D).second; if (WasAlreadyBeingDeclared) // This almost never happens, but if it does, ensure that our cache // doesn't contain a stale result. S.SpecialMemberCache.clear(); // FIXME: Register a note to be produced if we encounter an error while // declaring the special member. } ~DeclaringSpecialMember() { if (!WasAlreadyBeingDeclared) S.SpecialMembersBeingDeclared.erase(D); } /// \brief Are we already trying to declare this special member? bool isAlreadyBeingDeclared() const { return WasAlreadyBeingDeclared; } }; } CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl) { // C++ [class.ctor]p5: // A default constructor for a class X is a constructor of class X // that can be called without an argument. If there is no // user-declared constructor for class X, a default constructor is // implicitly declared. An implicitly-declared default constructor // is an inline public member of its class. assert(ClassDecl->needsImplicitDefaultConstructor() && "Should not build implicit default constructor!"); DeclaringSpecialMember DSM(*this, ClassDecl, CXXDefaultConstructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, CXXDefaultConstructor, false); // Create the actual constructor declaration. 
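// The declaration is created with an empty function type; the real type is
// filled in below, once the implicit exception specification (which points
// back at this constructor) can be attached.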
CanQualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl)); SourceLocation ClassLoc = ClassDecl->getLocation(); DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(ClassType); DeclarationNameInfo NameInfo(Name, ClassLoc); CXXConstructorDecl *DefaultCon = CXXConstructorDecl::Create( Context, ClassDecl, ClassLoc, NameInfo, /*Type*/QualType(), /*TInfo=*/nullptr, /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true, Constexpr); DefaultCon->setAccess(AS_public); DefaultCon->setDefaulted(); if (getLangOpts().CUDA) { inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDefaultConstructor, DefaultCon, /* ConstRHS */ false, /* Diagnose */ false); } // Build an exception specification pointing back at this constructor. FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, DefaultCon); DefaultCon->setType(Context.getFunctionType(Context.VoidTy, None, EPI, None)); // HLSL Change - add param mods // We don't need to use SpecialMemberIsTrivial here; triviality for default // constructors is easy to compute. DefaultCon->setTrivial(ClassDecl->hasTrivialDefaultConstructor()); if (ShouldDeleteSpecialMember(DefaultCon, CXXDefaultConstructor)) SetDeclDeleted(DefaultCon, ClassLoc); // Note that we have declared this constructor. ++ASTContext::NumImplicitDefaultConstructorsDeclared; if (Scope *S = getScopeForContext(ClassDecl)) PushOnScopeChains(DefaultCon, S, false); ClassDecl->addDecl(DefaultCon); return DefaultCon; } void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor) { assert((Constructor->isDefaulted() && Constructor->isDefaultConstructor() && !Constructor->doesThisDeclarationHaveABody() && !Constructor->isDeleted()) && "DefineImplicitDefaultConstructor - call it for implicit default ctor"); CXXRecordDecl *ClassDecl = Constructor->getParent(); assert(ClassDecl && "DefineImplicitDefaultConstructor - invalid constructor"); SynthesizedFunctionScope Scope(*this, Constructor); DiagnosticErrorTrap Trap(Diags); if (SetCtorInitializers(Constructor, /*AnyErrors=*/false) || Trap.hasErrorOccurred()) { Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXDefaultConstructor << Context.getTagDeclType(ClassDecl); Constructor->setInvalidDecl(); return; } // The exception specification is needed because we are defining the // function. ResolveExceptionSpec(CurrentLocation, Constructor->getType()->castAs<FunctionProtoType>()); SourceLocation Loc = Constructor->getLocEnd().isValid() ? Constructor->getLocEnd() : Constructor->getLocation(); Constructor->setBody(new (Context) CompoundStmt(Loc)); Constructor->markUsed(Context); MarkVTableUsed(CurrentLocation, ClassDecl); if (ASTMutationListener *L = getASTMutationListener()) { L->CompletedImplicitDefinition(Constructor); } DiagnoseUninitializedFields(*this, Constructor); } void Sema::ActOnFinishDelayedMemberInitializers(Decl *D) { // Perform any delayed checks on exception specifications. CheckDelayedMemberExceptionSpecs(); } namespace { /// Information on inheriting constructors to declare. class InheritingConstructorInfo { public: InheritingConstructorInfo(Sema &SemaRef, CXXRecordDecl *Derived) : SemaRef(SemaRef), Derived(Derived) { // Mark the constructors that we already have in the derived class. // // C++11 [class.inhctor]p3: [...] a constructor is implicitly declared [...] // unless there is a user-declared constructor with the same signature in // the class where the using-declaration appears. 
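// For example, given
//   struct B { B(int); };
//   struct D : B { using B::B; D(int); };
// no B(int) is inherited, because D already declares a constructor with
// that signature.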
visitAll(Derived, &InheritingConstructorInfo::noteDeclaredInDerived); } void inheritAll(CXXRecordDecl *RD) { visitAll(RD, &InheritingConstructorInfo::inherit); } private: /// Information about an inheriting constructor. struct InheritingConstructor { InheritingConstructor() : DeclaredInDerived(false), BaseCtor(nullptr), DerivedCtor(nullptr) {} /// If \c true, a constructor with this signature is already declared /// in the derived class. bool DeclaredInDerived; /// The constructor which is inherited. const CXXConstructorDecl *BaseCtor; /// The derived constructor we declared. CXXConstructorDecl *DerivedCtor; }; /// Inheriting constructors with a given canonical type. There can be at /// most one such non-template constructor, and any number of templated /// constructors. struct InheritingConstructorsForType { InheritingConstructor NonTemplate; SmallVector<std::pair<TemplateParameterList *, InheritingConstructor>, 4> Templates; InheritingConstructor &getEntry(Sema &S, const CXXConstructorDecl *Ctor) { if (FunctionTemplateDecl *FTD = Ctor->getDescribedFunctionTemplate()) { TemplateParameterList *ParamList = FTD->getTemplateParameters(); for (unsigned I = 0, N = Templates.size(); I != N; ++I) if (S.TemplateParameterListsAreEqual(ParamList, Templates[I].first, false, S.TPL_TemplateMatch)) return Templates[I].second; Templates.push_back(std::make_pair(ParamList, InheritingConstructor())); return Templates.back().second; } return NonTemplate; } }; /// Get or create the inheriting constructor record for a constructor. InheritingConstructor &getEntry(const CXXConstructorDecl *Ctor, QualType CtorType) { return Map[CtorType.getCanonicalType()->castAs<FunctionProtoType>()] .getEntry(SemaRef, Ctor); } typedef void (InheritingConstructorInfo::*VisitFn)(const CXXConstructorDecl*); /// Process all constructors for a class. void visitAll(const CXXRecordDecl *RD, VisitFn Callback) { for (const auto *Ctor : RD->ctors()) (this->*Callback)(Ctor); for (CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl> I(RD->decls_begin()), E(RD->decls_end()); I != E; ++I) { const FunctionDecl *FD = (*I)->getTemplatedDecl(); if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) (this->*Callback)(CD); } } /// Note that a constructor (or constructor template) was declared in Derived. void noteDeclaredInDerived(const CXXConstructorDecl *Ctor) { getEntry(Ctor, Ctor->getType()).DeclaredInDerived = true; } /// Inherit a single constructor. void inherit(const CXXConstructorDecl *Ctor) { const FunctionProtoType *CtorType = Ctor->getType()->castAs<FunctionProtoType>(); ArrayRef<QualType> ArgTypes = CtorType->getParamTypes(); FunctionProtoType::ExtProtoInfo EPI = CtorType->getExtProtoInfo(); SourceLocation UsingLoc = getUsingLoc(Ctor->getParent()); // Core issue (no number yet): the ellipsis is always discarded. if (EPI.Variadic) { SemaRef.Diag(UsingLoc, diag::warn_using_decl_constructor_ellipsis); SemaRef.Diag(Ctor->getLocation(), diag::note_using_decl_constructor_ellipsis); EPI.Variadic = false; } // Declare a constructor for each number of parameters. // // C++11 [class.inhctor]p1: // The candidate set of inherited constructors from the class X named in // the using-declaration consists of [... modulo defects ...] 
for each // constructor or constructor template of X, the set of constructors or // constructor templates that results from omitting any ellipsis parameter // specification and successively omitting parameters with a default // argument from the end of the parameter-type-list unsigned MinParams = minParamsToInherit(Ctor); unsigned Params = Ctor->getNumParams(); if (Params >= MinParams) { do declareCtor(UsingLoc, Ctor, SemaRef.Context.getFunctionType( Ctor->getReturnType(), ArgTypes.slice(0, Params), EPI, None)); // HLSL Change - add param mods while (Params > MinParams && Ctor->getParamDecl(--Params)->hasDefaultArg()); } } /// Find the using-declaration which specified that we should inherit the /// constructors of \p Base. SourceLocation getUsingLoc(const CXXRecordDecl *Base) { // No fancy lookup required; just look for the base constructor name // directly within the derived class. ASTContext &Context = SemaRef.Context; DeclarationName Name = Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(Context.getRecordType(Base))); DeclContext::lookup_result Decls = Derived->lookup(Name); return Decls.empty() ? Derived->getLocation() : Decls[0]->getLocation(); } unsigned minParamsToInherit(const CXXConstructorDecl *Ctor) { // C++11 [class.inhctor]p3: // [F]or each constructor template in the candidate set of inherited // constructors, a constructor template is implicitly declared if (Ctor->getDescribedFunctionTemplate()) return 0; // For each non-template constructor in the candidate set of inherited // constructors other than a constructor having no parameters or a // copy/move constructor having a single parameter, a constructor is // implicitly declared [...] if (Ctor->getNumParams() == 0) return 1; if (Ctor->isCopyOrMoveConstructor()) return 2; // Per discussion on core reflector, never inherit a constructor which // would become a default, copy, or move constructor of Derived either. const ParmVarDecl *PD = Ctor->getParamDecl(0); const ReferenceType *RT = PD->getType()->getAs<ReferenceType>(); return (RT && RT->getPointeeCXXRecordDecl() == Derived) ? 2 : 1; } /// Declare a single inheriting constructor, inheriting the specified /// constructor, with the given type. void declareCtor(SourceLocation UsingLoc, const CXXConstructorDecl *BaseCtor, QualType DerivedType) { InheritingConstructor &Entry = getEntry(BaseCtor, DerivedType); // C++11 [class.inhctor]p3: // ... a constructor is implicitly declared with the same constructor // characteristics unless there is a user-declared constructor with // the same signature in the class where the using-declaration appears if (Entry.DeclaredInDerived) return; // C++11 [class.inhctor]p7: // If two using-declarations declare inheriting constructors with the // same signature, the program is ill-formed if (Entry.DerivedCtor) { if (BaseCtor->getParent() != Entry.BaseCtor->getParent()) { // Only diagnose this once per constructor. 
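// (This situation arises from something like 'using A::A; using B::B;' where
// A and B both contribute a constructor with the same signature.)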
if (Entry.DerivedCtor->isInvalidDecl()) return; Entry.DerivedCtor->setInvalidDecl(); SemaRef.Diag(UsingLoc, diag::err_using_decl_constructor_conflict); SemaRef.Diag(BaseCtor->getLocation(), diag::note_using_decl_constructor_conflict_current_ctor); SemaRef.Diag(Entry.BaseCtor->getLocation(), diag::note_using_decl_constructor_conflict_previous_ctor); SemaRef.Diag(Entry.DerivedCtor->getLocation(), diag::note_using_decl_constructor_conflict_previous_using); } else { // Core issue (no number): if the same inheriting constructor is // produced by multiple base class constructors from the same base // class, the inheriting constructor is defined as deleted. SemaRef.SetDeclDeleted(Entry.DerivedCtor, UsingLoc); } return; } ASTContext &Context = SemaRef.Context; DeclarationName Name = Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(Context.getRecordType(Derived))); DeclarationNameInfo NameInfo(Name, UsingLoc); TemplateParameterList *TemplateParams = nullptr; if (const FunctionTemplateDecl *FTD = BaseCtor->getDescribedFunctionTemplate()) { TemplateParams = FTD->getTemplateParameters(); // We're reusing template parameters from a different DeclContext. This // is questionable at best, but works out because the template depth in // both places is guaranteed to be 0. // FIXME: Rebuild the template parameters in the new context, and // transform the function type to refer to them. } // Build type source info pointing at the using-declaration. This is // required by template instantiation. TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(DerivedType, UsingLoc); FunctionProtoTypeLoc ProtoLoc = TInfo->getTypeLoc().IgnoreParens().castAs<FunctionProtoTypeLoc>(); CXXConstructorDecl *DerivedCtor = CXXConstructorDecl::Create( Context, Derived, UsingLoc, NameInfo, DerivedType, TInfo, BaseCtor->isExplicit(), /*Inline=*/true, /*ImplicitlyDeclared=*/true, /*Constexpr=*/BaseCtor->isConstexpr()); // Build an unevaluated exception specification for this constructor. const FunctionProtoType *FPT = DerivedType->castAs<FunctionProtoType>(); FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); EPI.ExceptionSpec.Type = EST_Unevaluated; EPI.ExceptionSpec.SourceDecl = DerivedCtor; DerivedCtor->setType(Context.getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI, None)); // HLSL Change - add param mods // Build the parameter declarations. SmallVector<ParmVarDecl *, 16> ParamDecls; for (unsigned I = 0, N = FPT->getNumParams(); I != N; ++I) { TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(FPT->getParamType(I), UsingLoc); ParmVarDecl *PD = ParmVarDecl::Create( Context, DerivedCtor, UsingLoc, UsingLoc, /*IdentifierInfo=*/nullptr, FPT->getParamType(I), TInfo, SC_None, /*DefaultArg=*/nullptr); PD->setScopeInfo(0, I); PD->setImplicit(); ParamDecls.push_back(PD); ProtoLoc.setParam(I, PD); } // Set up the new constructor. DerivedCtor->setAccess(BaseCtor->getAccess()); DerivedCtor->setParams(ParamDecls); DerivedCtor->setInheritedConstructor(BaseCtor); if (BaseCtor->isDeleted()) SemaRef.SetDeclDeleted(DerivedCtor, UsingLoc); // If this is a constructor template, build the template declaration. 
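// The inheriting constructor template reuses the base constructor template's
// parameter list; see the FIXME above about rebuilding the parameters in the
// new context.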
if (TemplateParams) { FunctionTemplateDecl *DerivedTemplate = FunctionTemplateDecl::Create(SemaRef.Context, Derived, UsingLoc, Name, TemplateParams, DerivedCtor); DerivedTemplate->setAccess(BaseCtor->getAccess()); DerivedCtor->setDescribedFunctionTemplate(DerivedTemplate); Derived->addDecl(DerivedTemplate); } else { Derived->addDecl(DerivedCtor); } Entry.BaseCtor = BaseCtor; Entry.DerivedCtor = DerivedCtor; } Sema &SemaRef; CXXRecordDecl *Derived; typedef llvm::DenseMap<const Type *, InheritingConstructorsForType> MapType; MapType Map; }; } void Sema::DeclareInheritingConstructors(CXXRecordDecl *ClassDecl) { // Defer declaring the inheriting constructors until the class is // instantiated. if (ClassDecl->isDependentContext()) return; // Find base classes from which we might inherit constructors. SmallVector<CXXRecordDecl*, 4> InheritedBases; for (const auto &BaseIt : ClassDecl->bases()) if (BaseIt.getInheritConstructors()) InheritedBases.push_back(BaseIt.getType()->getAsCXXRecordDecl()); // Go no further if we're not inheriting any constructors. if (InheritedBases.empty()) return; // Declare the inherited constructors. InheritingConstructorInfo ICI(*this, ClassDecl); for (unsigned I = 0, N = InheritedBases.size(); I != N; ++I) ICI.inheritAll(InheritedBases[I]); } void Sema::DefineInheritingConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor) { CXXRecordDecl *ClassDecl = Constructor->getParent(); assert(Constructor->getInheritedConstructor() && !Constructor->doesThisDeclarationHaveABody() && !Constructor->isDeleted()); SynthesizedFunctionScope Scope(*this, Constructor); DiagnosticErrorTrap Trap(Diags); if (SetCtorInitializers(Constructor, /*AnyErrors=*/false) || Trap.hasErrorOccurred()) { Diag(CurrentLocation, diag::note_inhctor_synthesized_at) << Context.getTagDeclType(ClassDecl); Constructor->setInvalidDecl(); return; } SourceLocation Loc = Constructor->getLocation(); Constructor->setBody(new (Context) CompoundStmt(Loc)); Constructor->markUsed(Context); MarkVTableUsed(CurrentLocation, ClassDecl); if (ASTMutationListener *L = getASTMutationListener()) { L->CompletedImplicitDefinition(Constructor); } } Sema::ImplicitExceptionSpecification Sema::ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD) { CXXRecordDecl *ClassDecl = MD->getParent(); // C++ [except.spec]p14: // An implicitly declared special member function (Clause 12) shall have // an exception-specification. ImplicitExceptionSpecification ExceptSpec(*this); if (ClassDecl->isInvalidDecl()) return ExceptSpec; // Direct base-class destructors. for (const auto &B : ClassDecl->bases()) { if (B.isVirtual()) // Handled below. continue; if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) ExceptSpec.CalledDecl(B.getLocStart(), LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl()))); } // Virtual base-class destructors. for (const auto &B : ClassDecl->vbases()) { if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) ExceptSpec.CalledDecl(B.getLocStart(), LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl()))); } // Field destructors. for (const auto *F : ClassDecl->fields()) { if (const RecordType *RecordTy = Context.getBaseElementType(F->getType())->getAs<RecordType>()) ExceptSpec.CalledDecl(F->getLocation(), LookupDestructor(cast<CXXRecordDecl>(RecordTy->getDecl()))); } return ExceptSpec; } CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) { // C++ [class.dtor]p2: // If a class has no user-declared destructor, a destructor is // declared implicitly. 
An implicitly-declared destructor is an // inline public member of its class. assert(ClassDecl->needsImplicitDestructor()); DeclaringSpecialMember DSM(*this, ClassDecl, CXXDestructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; // Create the actual destructor declaration. CanQualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl)); SourceLocation ClassLoc = ClassDecl->getLocation(); DeclarationName Name = Context.DeclarationNames.getCXXDestructorName(ClassType); DeclarationNameInfo NameInfo(Name, ClassLoc); CXXDestructorDecl *Destructor = CXXDestructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(), nullptr, /*isInline=*/true, /*isImplicitlyDeclared=*/true); Destructor->setAccess(AS_public); Destructor->setDefaulted(); if (getLangOpts().CUDA) { inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDestructor, Destructor, /* ConstRHS */ false, /* Diagnose */ false); } // Build an exception specification pointing back at this destructor. FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, Destructor); Destructor->setType(Context.getFunctionType(Context.VoidTy, None, EPI, None)); // HLSL Change AddOverriddenMethods(ClassDecl, Destructor); // We don't need to use SpecialMemberIsTrivial here; triviality for // destructors is easy to compute. Destructor->setTrivial(ClassDecl->hasTrivialDestructor()); if (ShouldDeleteSpecialMember(Destructor, CXXDestructor)) SetDeclDeleted(Destructor, ClassLoc); // Note that we have declared this destructor. ++ASTContext::NumImplicitDestructorsDeclared; // Introduce this destructor into its scope. if (Scope *S = getScopeForContext(ClassDecl)) PushOnScopeChains(Destructor, S, false); ClassDecl->addDecl(Destructor); return Destructor; } void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor) { assert((Destructor->isDefaulted() && !Destructor->doesThisDeclarationHaveABody() && !Destructor->isDeleted()) && "DefineImplicitDestructor - call it for implicit default dtor"); CXXRecordDecl *ClassDecl = Destructor->getParent(); assert(ClassDecl && "DefineImplicitDestructor - invalid destructor"); if (Destructor->isInvalidDecl()) return; SynthesizedFunctionScope Scope(*this, Destructor); DiagnosticErrorTrap Trap(Diags); MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(), Destructor->getParent()); if (CheckDestructor(Destructor) || Trap.hasErrorOccurred()) { Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXDestructor << Context.getTagDeclType(ClassDecl); Destructor->setInvalidDecl(); return; } // The exception specification is needed because we are defining the // function. ResolveExceptionSpec(CurrentLocation, Destructor->getType()->castAs<FunctionProtoType>()); SourceLocation Loc = Destructor->getLocEnd().isValid() ? Destructor->getLocEnd() : Destructor->getLocation(); Destructor->setBody(new (Context) CompoundStmt(Loc)); Destructor->markUsed(Context); MarkVTableUsed(CurrentLocation, ClassDecl); if (ASTMutationListener *L = getASTMutationListener()) { L->CompletedImplicitDefinition(Destructor); } } /// \brief Perform any semantic analysis which needs to be delayed until all /// pending class member declarations have been parsed. void Sema::ActOnFinishCXXMemberDecls() { // If the context is an invalid C++ class, just suppress these checks. 
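// (Clearing the delayed lists below keeps us from emitting cascading
// exception-specification diagnostics for members of an already-invalid
// class.)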
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(CurContext)) { if (Record->isInvalidDecl()) { DelayedDefaultedMemberExceptionSpecs.clear(); DelayedExceptionSpecChecks.clear(); return; } } } static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) { // Don't do anything for template patterns. if (Class->getDescribedClassTemplate()) return; for (Decl *Member : Class->decls()) { auto *CD = dyn_cast<CXXConstructorDecl>(Member); if (!CD) { // Recurse on nested classes. if (auto *NestedRD = dyn_cast<CXXRecordDecl>(Member)) getDefaultArgExprsForConstructors(S, NestedRD); continue; } else if (!CD->isDefaultConstructor() || !CD->hasAttr<DLLExportAttr>()) { continue; } for (unsigned I = 0, E = CD->getNumParams(); I != E; ++I) { // Skip any default arguments that we've already instantiated. if (S.Context.getDefaultArgExprForConstructor(CD, I)) continue; Expr *DefaultArg = S.BuildCXXDefaultArgExpr(Class->getLocation(), CD, CD->getParamDecl(I)).get(); S.DiscardCleanupsInEvaluationContext(); S.Context.addDefaultArgExprForConstructor(CD, I, DefaultArg); } } } void Sema::ActOnFinishCXXMemberDefaultArgs(Decl *D) { auto *RD = dyn_cast<CXXRecordDecl>(D); // Default constructors that are annotated with __declspec(dllexport) which // have default arguments or don't use the standard calling convention are // wrapped with a thunk called the default constructor closure. if (RD && Context.getTargetInfo().getCXXABI().isMicrosoft()) getDefaultArgExprsForConstructors(*this, RD); } void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor) { assert(getLangOpts().CPlusPlus11 && "adjusting dtor exception specs was introduced in c++11"); // C++11 [class.dtor]p3: // A declaration of a destructor that does not have an exception- // specification is implicitly considered to have the same exception- // specification as an implicit declaration. const FunctionProtoType *DtorType = Destructor->getType()-> getAs<FunctionProtoType>(); if (DtorType->hasExceptionSpec()) return; // Replace the destructor's type, building off the existing one. Fortunately, // the only thing of interest in the destructor type is its extended info. // The return and arguments are fixed. FunctionProtoType::ExtProtoInfo EPI = DtorType->getExtProtoInfo(); EPI.ExceptionSpec.Type = EST_Unevaluated; EPI.ExceptionSpec.SourceDecl = Destructor; Destructor->setType(Context.getFunctionType(Context.VoidTy, None, EPI, None)); // HLSL Change - add param mods // FIXME: If the destructor has a body that could throw, and the newly created // spec doesn't allow exceptions, we should emit a warning, because this // change in behavior can break conforming C++03 programs at runtime. // However, we don't have a body or an exception specification yet, so it // needs to be done somewhere else. } namespace { /// \brief An abstract base class for all helper classes used in building the // copy/move operators. These classes serve as factory functions and help us // avoid using the same Expr* in the AST twice. 
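/// Each builder is a factory: build() constructs a fresh Expr tree on every
/// call, so the same source entity can be materialized at several points in a
/// synthesized body without two statements sharing AST nodes.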
class ExprBuilder { ExprBuilder(const ExprBuilder&) = delete; ExprBuilder &operator=(const ExprBuilder&) = delete; protected: static Expr *assertNotNull(Expr *E) { assert(E && "Expression construction must not fail."); return E; } public: ExprBuilder() {} virtual ~ExprBuilder() {} virtual Expr *build(Sema &S, SourceLocation Loc) const = 0; }; class RefBuilder: public ExprBuilder { VarDecl *Var; QualType VarType; public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull(S.BuildDeclRefExpr(Var, VarType, VK_LValue, Loc).get()); } RefBuilder(VarDecl *Var, QualType VarType) : Var(Var), VarType(VarType) {} }; class ThisBuilder: public ExprBuilder { public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull(S.ActOnCXXThis(Loc).getAs<Expr>()); } }; class CastBuilder: public ExprBuilder { const ExprBuilder &Builder; QualType Type; ExprValueKind Kind; const CXXCastPath &Path; public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull(S.ImpCastExprToType(Builder.build(S, Loc), Type, CK_UncheckedDerivedToBase, Kind, &Path).get()); } CastBuilder(const ExprBuilder &Builder, QualType Type, ExprValueKind Kind, const CXXCastPath &Path) : Builder(Builder), Type(Type), Kind(Kind), Path(Path) {} }; class DerefBuilder: public ExprBuilder { const ExprBuilder &Builder; public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull( S.CreateBuiltinUnaryOp(Loc, UO_Deref, Builder.build(S, Loc)).get()); } DerefBuilder(const ExprBuilder &Builder) : Builder(Builder) {} }; class MemberBuilder: public ExprBuilder { const ExprBuilder &Builder; QualType Type; CXXScopeSpec SS; bool IsArrow; LookupResult &MemberLookup; public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull(S.BuildMemberReferenceExpr( Builder.build(S, Loc), Type, Loc, IsArrow, SS, SourceLocation(), nullptr, MemberLookup, nullptr).get()); } MemberBuilder(const ExprBuilder &Builder, QualType Type, bool IsArrow, LookupResult &MemberLookup) : Builder(Builder), Type(Type), IsArrow(IsArrow), MemberLookup(MemberLookup) {} }; class MoveCastBuilder: public ExprBuilder { const ExprBuilder &Builder; public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull(CastForMoving(S, Builder.build(S, Loc))); } MoveCastBuilder(const ExprBuilder &Builder) : Builder(Builder) {} }; class LvalueConvBuilder: public ExprBuilder { const ExprBuilder &Builder; public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull( S.DefaultLvalueConversion(Builder.build(S, Loc)).get()); } LvalueConvBuilder(const ExprBuilder &Builder) : Builder(Builder) {} }; class SubscriptBuilder: public ExprBuilder { const ExprBuilder &Base; const ExprBuilder &Index; public: Expr *build(Sema &S, SourceLocation Loc) const override { return assertNotNull(S.CreateBuiltinArraySubscriptExpr( Base.build(S, Loc), Loc, Index.build(S, Loc), Loc).get()); } SubscriptBuilder(const ExprBuilder &Base, const ExprBuilder &Index) : Base(Base), Index(Index) {} }; } // end anonymous namespace /// When generating a defaulted copy or move assignment operator, if a field /// should be copied with __builtin_memcpy rather than via explicit assignments, /// do so. This optimization only applies for arrays of scalars, and for arrays /// of class type where the selected copy/move-assignment operator is trivial. 
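/// The emitted call is roughly of the form
///   __builtin_memcpy(&to, &from, sizeof(T));
/// except that __builtin_objc_memmove_collectable is used instead when the
/// element type contains Objective-C object members.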
static StmtResult buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T, const ExprBuilder &ToB, const ExprBuilder &FromB) { // Compute the size of the memory buffer to be copied. QualType SizeType = S.Context.getSizeType(); llvm::APInt Size(S.Context.getTypeSize(SizeType), S.Context.getTypeSizeInChars(T).getQuantity()); // Take the address of the field references for "from" and "to". We // directly construct UnaryOperators here because semantic analysis // does not permit us to take the address of an xvalue. Expr *From = FromB.build(S, Loc); From = new (S.Context) UnaryOperator(From, UO_AddrOf, S.Context.getPointerType(From->getType()), VK_RValue, OK_Ordinary, Loc); Expr *To = ToB.build(S, Loc); To = new (S.Context) UnaryOperator(To, UO_AddrOf, S.Context.getPointerType(To->getType()), VK_RValue, OK_Ordinary, Loc); const Type *E = T->getBaseElementTypeUnsafe(); bool NeedsCollectableMemCpy = E->isRecordType() && E->getAs<RecordType>()->getDecl()->hasObjectMember(); // Create a reference to the __builtin_objc_memmove_collectable function StringRef MemCpyName = NeedsCollectableMemCpy ? "__builtin_objc_memmove_collectable" : "__builtin_memcpy"; LookupResult R(S, &S.Context.Idents.get(MemCpyName), Loc, Sema::LookupOrdinaryName); S.LookupName(R, S.TUScope, true); FunctionDecl *MemCpy = R.getAsSingle<FunctionDecl>(); if (!MemCpy) // Something went horribly wrong earlier, and we will have complained // about it. return StmtError(); ExprResult MemCpyRef = S.BuildDeclRefExpr(MemCpy, S.Context.BuiltinFnTy, VK_RValue, Loc, nullptr); assert(MemCpyRef.isUsable() && "Builtin reference cannot fail"); Expr *CallArgs[] = { To, From, IntegerLiteral::Create(S.Context, Size, SizeType, Loc) }; ExprResult Call = S.ActOnCallExpr(/*Scope=*/nullptr, MemCpyRef.get(), Loc, CallArgs, Loc); assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!"); return Call.getAs<Stmt>(); } /// \brief Builds a statement that copies/moves the given entity from \p From to /// \c To. /// /// This routine is used to copy/move the members of a class with an /// implicitly-declared copy/move assignment operator. When the entities being /// copied are arrays, this routine builds for loops to copy them. /// /// \param S The Sema object used for type-checking. /// /// \param Loc The location where the implicit copy/move is being generated. /// /// \param T The type of the expressions being copied/moved. Both expressions /// must have this type. /// /// \param To The expression we are copying/moving to. /// /// \param From The expression we are copying/moving from. /// /// \param CopyingBaseSubobject Whether we're copying/moving a base subobject. /// Otherwise, it's a non-static member subobject. /// /// \param Copying Whether we're copying or moving. /// /// \param Depth Internal parameter recording the depth of the recursion. /// /// \returns A statement or a loop that copies the expressions, or StmtResult(0) /// if a memcpy should be used instead. 
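/// For an array subobject at depth N, the generated loop is roughly
///   for (__SIZE_TYPE__ __iN = 0; __iN != array-size; ++__iN)
///     to[__iN] = from[__iN];
/// with one nested loop (and one iteration variable) per array dimension.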
static StmtResult buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T, const ExprBuilder &To, const ExprBuilder &From, bool CopyingBaseSubobject, bool Copying, unsigned Depth = 0) { // C++11 [class.copy]p28: // Each subobject is assigned in the manner appropriate to its type: // // - if the subobject is of class type, as if by a call to operator= with // the subobject as the object expression and the corresponding // subobject of x as a single function argument (as if by explicit // qualification; that is, ignoring any possible virtual overriding // functions in more derived classes); // // C++03 [class.copy]p13: // - if the subobject is of class type, the copy assignment operator for // the class is used (as if by explicit qualification; that is, // ignoring any possible virtual overriding functions in more derived // classes); if (const RecordType *RecordTy = T->getAs<RecordType>()) { CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RecordTy->getDecl()); // Look for operator=. DeclarationName Name = S.Context.DeclarationNames.getCXXOperatorName(OO_Equal); LookupResult OpLookup(S, Name, Loc, Sema::LookupOrdinaryName); S.LookupQualifiedName(OpLookup, ClassDecl, false); // Prior to C++11, filter out any result that isn't a copy/move-assignment // operator. if (!S.getLangOpts().CPlusPlus11) { LookupResult::Filter F = OpLookup.makeFilter(); while (F.hasNext()) { NamedDecl *D = F.next(); if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) if (Method->isCopyAssignmentOperator() || (!Copying && Method->isMoveAssignmentOperator())) continue; F.erase(); } F.done(); } // Suppress the protected check (C++ [class.protected]) for each of the // assignment operators we found. This strange dance is required when // we're assigning via a base classes's copy-assignment operator. To // ensure that we're getting the right base class subobject (without // ambiguities), we need to cast "this" to that subobject type; to // ensure that we don't go through the virtual call mechanism, we need // to qualify the operator= name with the base class (see below). However, // this means that if the base class has a protected copy assignment // operator, the protected member access check will fail. So, we // rewrite "protected" access to "public" access in this case, since we // know by construction that we're calling from a derived class. if (CopyingBaseSubobject) { for (LookupResult::iterator L = OpLookup.begin(), LEnd = OpLookup.end(); L != LEnd; ++L) { if (L.getAccess() == AS_protected) L.setAccess(AS_public); } } // Create the nested-name-specifier that will be used to qualify the // reference to operator=; this is required to suppress the virtual // call mechanism. CXXScopeSpec SS; const Type *CanonicalT = S.Context.getCanonicalType(T.getTypePtr()); SS.MakeTrivial(S.Context, NestedNameSpecifier::Create(S.Context, nullptr, false, CanonicalT), Loc); // Create the reference to operator=. ExprResult OpEqualRef = S.BuildMemberReferenceExpr(To.build(S, Loc), T, Loc, /*isArrow=*/false, SS, /*TemplateKWLoc=*/SourceLocation(), /*FirstQualifierInScope=*/nullptr, OpLookup, /*TemplateArgs=*/nullptr, /*SuppressQualifierCheck=*/true); if (OpEqualRef.isInvalid()) return StmtError(); // Build the call to the assignment operator. Expr *FromInst = From.build(S, Loc); ExprResult Call = S.BuildCallToMemberFunction(/*Scope=*/nullptr, OpEqualRef.getAs<Expr>(), Loc, FromInst, Loc); if (Call.isInvalid()) return StmtError(); // If we built a call to a trivial 'operator=' while copying an array, // bail out. 
We'll replace the whole shebang with a memcpy. CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Call.get()); if (CE && CE->getMethodDecl()->isTrivial() && Depth) return StmtResult((Stmt*)nullptr); // Convert to an expression-statement, and clean up any produced // temporaries. return S.ActOnExprStmt(Call); } // - if the subobject is of scalar type, the built-in assignment // operator is used. const ConstantArrayType *ArrayTy = S.Context.getAsConstantArrayType(T); if (!ArrayTy) { ExprResult Assignment = S.CreateBuiltinBinOp( Loc, BO_Assign, To.build(S, Loc), From.build(S, Loc)); if (Assignment.isInvalid()) return StmtError(); return S.ActOnExprStmt(Assignment); } // - if the subobject is an array, each element is assigned, in the // manner appropriate to the element type; // Construct a loop over the array bounds, e.g., // // for (__SIZE_TYPE__ i0 = 0; i0 != array-size; ++i0) // // that will copy each of the array elements. QualType SizeType = S.Context.getSizeType(); // Create the iteration variable. IdentifierInfo *IterationVarName = nullptr; { SmallString<8> Str; llvm::raw_svector_ostream OS(Str); OS << "__i" << Depth; IterationVarName = &S.Context.Idents.get(OS.str()); } VarDecl *IterationVar = VarDecl::Create(S.Context, S.CurContext, Loc, Loc, IterationVarName, SizeType, S.Context.getTrivialTypeSourceInfo(SizeType, Loc), SC_None); // Initialize the iteration variable to zero. llvm::APInt Zero(S.Context.getTypeSize(SizeType), 0); IterationVar->setInit(IntegerLiteral::Create(S.Context, Zero, SizeType, Loc)); // Creates a reference to the iteration variable. RefBuilder IterationVarRef(IterationVar, SizeType); LvalueConvBuilder IterationVarRefRVal(IterationVarRef); // Create the DeclStmt that holds the iteration variable. Stmt *InitStmt = new (S.Context) DeclStmt(DeclGroupRef(IterationVar),Loc,Loc); // Subscript the "from" and "to" expressions with the iteration variable. SubscriptBuilder FromIndexCopy(From, IterationVarRefRVal); MoveCastBuilder FromIndexMove(FromIndexCopy); const ExprBuilder *FromIndex; if (Copying) FromIndex = &FromIndexCopy; else FromIndex = &FromIndexMove; SubscriptBuilder ToIndex(To, IterationVarRefRVal); // Build the copy/move for an individual element of the array. StmtResult Copy = buildSingleCopyAssignRecursively(S, Loc, ArrayTy->getElementType(), ToIndex, *FromIndex, CopyingBaseSubobject, Copying, Depth + 1); // Bail out if copying fails or if we determined that we should use memcpy. if (Copy.isInvalid() || !Copy.get()) return Copy; // Create the comparison against the array bound. llvm::APInt Upper = ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType)); Expr *Comparison = new (S.Context) BinaryOperator(IterationVarRefRVal.build(S, Loc), IntegerLiteral::Create(S.Context, Upper, SizeType, Loc), BO_NE, S.Context.BoolTy, VK_RValue, OK_Ordinary, Loc, false); // Create the pre-increment of the iteration variable. Expr *Increment = new (S.Context) UnaryOperator(IterationVarRef.build(S, Loc), UO_PreInc, SizeType, VK_LValue, OK_Ordinary, Loc); // Construct the loop that copies all elements of this array. return S.ActOnForStmt(Loc, Loc, InitStmt, S.MakeFullExpr(Comparison), nullptr, S.MakeFullDiscardedValueExpr(Increment), Loc, Copy.get()); } static StmtResult buildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T, const ExprBuilder &To, const ExprBuilder &From, bool CopyingBaseSubobject, bool Copying) { // Maybe we should use a memcpy? 
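// A memcpy is only used directly for a non-const, non-volatile array of
// trivially copyable elements; otherwise we build element-wise assignments,
// falling back to memcpy below if those assignments turn out to be trivial.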
if (T->isArrayType() && !T.isConstQualified() && !T.isVolatileQualified() && T.isTriviallyCopyableType(S.Context)) return buildMemcpyForAssignmentOp(S, Loc, T, To, From); StmtResult Result(buildSingleCopyAssignRecursively(S, Loc, T, To, From, CopyingBaseSubobject, Copying, 0)); // If we ended up picking a trivial assignment operator for an array of a // non-trivially-copyable class type, just emit a memcpy. if (!Result.isInvalid() && !Result.get()) return buildMemcpyForAssignmentOp(S, Loc, T, To, From); return Result; } Sema::ImplicitExceptionSpecification Sema::ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD) { CXXRecordDecl *ClassDecl = MD->getParent(); ImplicitExceptionSpecification ExceptSpec(*this); if (ClassDecl->isInvalidDecl()) return ExceptSpec; const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>(); assert(T->getNumParams() == 1 && "not a copy assignment op"); unsigned ArgQuals = T->getParamType(0).getNonReferenceType().getCVRQualifiers(); // C++ [except.spec]p14: // An implicitly declared special member function (Clause 12) shall have an // exception-specification. [...] // It is unspecified whether an implicit copy assignment operator // deduplicates calls to the assignment operators of virtual bases. As such, // this exception specification is effectively unspecified. Based on a // similar decision made for constness in C++0x, we err on the side of // assuming such calls are made regardless of whether they actually happen. for (const auto &Base : ClassDecl->bases()) { if (Base.isVirtual()) continue; CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl, ArgQuals, false, 0)) ExceptSpec.CalledDecl(Base.getLocStart(), CopyAssign); } for (const auto &Base : ClassDecl->vbases()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl, ArgQuals, false, 0)) ExceptSpec.CalledDecl(Base.getLocStart(), CopyAssign); } for (const auto *Field : ClassDecl->fields()) { QualType FieldType = Context.getBaseElementType(Field->getType()); if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) { if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(FieldClassDecl, ArgQuals | FieldType.getCVRQualifiers(), false, 0)) ExceptSpec.CalledDecl(Field->getLocation(), CopyAssign); } } return ExceptSpec; } CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) { // Note: The following rules are largely analogous to the copy // constructor rules. Note that virtual bases are not taken into account // for determining the argument type of the operator. Note also that // operators taking an object instead of a reference are allowed. assert(ClassDecl->needsImplicitCopyAssignment()); DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyAssignment); if (DSM.isAlreadyBeingDeclared()) return nullptr; QualType ArgType = Context.getTypeDeclType(ClassDecl); QualType RetType = Context.getLValueReferenceType(ArgType); bool Const = ClassDecl->implicitCopyAssignmentHasConstParam(); if (Const) ArgType = ArgType.withConst(); ArgType = Context.getLValueReferenceType(ArgType); bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, CXXCopyAssignment, Const); // An implicitly-declared copy assignment operator is an inline public // member of its class.
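// The implicit operator is therefore of the form 'X &X::operator=(const X &)'
// when every subobject admits a const-qualified right-hand side (per
// implicitCopyAssignmentHasConstParam above), and 'X &X::operator=(X &)'
// otherwise.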
DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal); SourceLocation ClassLoc = ClassDecl->getLocation(); DeclarationNameInfo NameInfo(Name, ClassLoc); CXXMethodDecl *CopyAssignment = CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr, /*StorageClass=*/SC_None, /*isInline=*/true, Constexpr, SourceLocation()); CopyAssignment->setAccess(AS_public); CopyAssignment->setDefaulted(); CopyAssignment->setImplicit(); if (getLangOpts().CUDA) { inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyAssignment, CopyAssignment, /* ConstRHS */ Const, /* Diagnose */ false); } // Build an exception specification pointing back at this member. FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, CopyAssignment); CopyAssignment->setType(Context.getFunctionType(RetType, ArgType, EPI, None)); // HLSL Change - add param mods // Add the parameter to the operator. ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment, ClassLoc, ClassLoc, /*Id=*/nullptr, ArgType, /*TInfo=*/nullptr, SC_None, nullptr); CopyAssignment->setParams(FromParam); AddOverriddenMethods(ClassDecl, CopyAssignment); CopyAssignment->setTrivial( ClassDecl->needsOverloadResolutionForCopyAssignment() ? SpecialMemberIsTrivial(CopyAssignment, CXXCopyAssignment) : ClassDecl->hasTrivialCopyAssignment()); if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment)) SetDeclDeleted(CopyAssignment, ClassLoc); // Note that we have added this copy-assignment operator. ++ASTContext::NumImplicitCopyAssignmentOperatorsDeclared; if (Scope *S = getScopeForContext(ClassDecl)) PushOnScopeChains(CopyAssignment, S, false); ClassDecl->addDecl(CopyAssignment); return CopyAssignment; } /// Diagnose an implicit copy operation for a class which is odr-used, but /// which is deprecated because the class has a user-declared copy constructor, /// copy assignment operator, or destructor. static void diagnoseDeprecatedCopyOperation(Sema &S, CXXMethodDecl *CopyOp, SourceLocation UseLoc) { assert(CopyOp->isImplicit()); CXXRecordDecl *RD = CopyOp->getParent(); CXXMethodDecl *UserDeclaredOperation = nullptr; // In Microsoft mode, assignment operations don't affect constructors and // vice versa. if (RD->hasUserDeclaredDestructor()) { UserDeclaredOperation = RD->getDestructor(); } else if (!isa<CXXConstructorDecl>(CopyOp) && RD->hasUserDeclaredCopyConstructor() && !S.getLangOpts().MSVCCompat) { // Find any user-declared copy constructor. for (auto *I : RD->ctors()) { if (I->isCopyConstructor()) { UserDeclaredOperation = I; break; } } assert(UserDeclaredOperation); } else if (isa<CXXConstructorDecl>(CopyOp) && RD->hasUserDeclaredCopyAssignment() && !S.getLangOpts().MSVCCompat) { // Find any user-declared copy assignment operator. for (auto *I : RD->methods()) { if (I->isCopyAssignmentOperator()) { UserDeclaredOperation = I; break; } } assert(UserDeclaredOperation); } if (UserDeclaredOperation) { S.Diag(UserDeclaredOperation->getLocation(), diag::warn_deprecated_copy_operation) << RD << /*copy assignment*/!isa<CXXConstructorDecl>(CopyOp) << /*destructor*/isa<CXXDestructorDecl>(UserDeclaredOperation); S.Diag(UseLoc, diag::note_member_synthesized_at) << (isa<CXXConstructorDecl>(CopyOp) ?
Sema::CXXCopyConstructor : Sema::CXXCopyAssignment) << RD; } } void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *CopyAssignOperator) { assert((CopyAssignOperator->isDefaulted() && CopyAssignOperator->isOverloadedOperator() && CopyAssignOperator->getOverloadedOperator() == OO_Equal && !CopyAssignOperator->doesThisDeclarationHaveABody() && !CopyAssignOperator->isDeleted()) && "DefineImplicitCopyAssignment called for wrong function"); CXXRecordDecl *ClassDecl = CopyAssignOperator->getParent(); if (ClassDecl->isInvalidDecl() || CopyAssignOperator->isInvalidDecl()) { CopyAssignOperator->setInvalidDecl(); return; } // C++11 [class.copy]p18: // The [definition of an implicitly declared copy assignment operator] is // deprecated if the class has a user-declared copy constructor or a // user-declared destructor. if (getLangOpts().CPlusPlus11 && CopyAssignOperator->isImplicit()) diagnoseDeprecatedCopyOperation(*this, CopyAssignOperator, CurrentLocation); CopyAssignOperator->markUsed(Context); SynthesizedFunctionScope Scope(*this, CopyAssignOperator); DiagnosticErrorTrap Trap(Diags); // C++0x [class.copy]p30: // The implicitly-defined or explicitly-defaulted copy assignment operator // for a non-union class X performs memberwise copy assignment of its // subobjects. The direct base classes of X are assigned first, in the // order of their declaration in the base-specifier-list, and then the // immediate non-static data members of X are assigned, in the order in // which they were declared in the class definition. // The statements that form the synthesized function body. SmallVector<Stmt*, 8> Statements; // The parameter for the "other" object, which we are copying from. ParmVarDecl *Other = CopyAssignOperator->getParamDecl(0); Qualifiers OtherQuals = Other->getType().getQualifiers(); QualType OtherRefType = Other->getType(); if (const LValueReferenceType *OtherRef = OtherRefType->getAs<LValueReferenceType>()) { OtherRefType = OtherRef->getPointeeType(); OtherQuals = OtherRefType.getQualifiers(); } // Our location for everything implicitly-generated. SourceLocation Loc = CopyAssignOperator->getLocEnd().isValid() ? CopyAssignOperator->getLocEnd() : CopyAssignOperator->getLocation(); // Builds a DeclRefExpr for the "other" object. RefBuilder OtherRef(Other, OtherRefType); // Builds the "this" pointer. ThisBuilder This; // Assign base classes. bool Invalid = false; for (auto &Base : ClassDecl->bases()) { // Form the assignment: // static_cast<Base*>(this)->Base::operator=(static_cast<Base&>(other)); QualType BaseType = Base.getType().getUnqualifiedType(); if (!BaseType->isRecordType()) { Invalid = true; continue; } CXXCastPath BasePath; BasePath.push_back(&Base); // Construct the "from" expression, which is an implicit cast to the // appropriately-qualified base type. CastBuilder From(OtherRef, Context.getQualifiedType(BaseType, OtherQuals), VK_LValue, BasePath); // Dereference "this". DerefBuilder DerefThis(This); CastBuilder To(DerefThis, Context.getCVRQualifiedType( BaseType, CopyAssignOperator->getTypeQualifiers()), VK_LValue, BasePath); // Build the copy. StmtResult Copy = buildSingleCopyAssign(*this, Loc, BaseType, To, From, /*CopyingBaseSubobject=*/true, /*Copying=*/true); if (Copy.isInvalid()) { Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXCopyAssignment << Context.getTagDeclType(ClassDecl); CopyAssignOperator->setInvalidDecl(); return; } // Success! Record the copy. Statements.push_back(Copy.getAs<Expr>()); } // Assign non-static members. 
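  // Illustrative sketch (types 'B' and 'D' are made up for exposition): for
  //   struct D : B { int x; };
  // the base loop above built the equivalent of
  //   static_cast<B &>(*this) = static_cast<const B &>(other);
  // and the field loop below adds the equivalent of
  //   this->x = other.x;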
for (auto *Field : ClassDecl->fields()) { // FIXME: We should form some kind of AST representation for the implied // memcpy in a union copy operation. if (Field->isUnnamedBitfield() || Field->getParent()->isUnion()) continue; if (Field->isInvalidDecl()) { Invalid = true; continue; } // Check for members of reference type; we can't copy those. if (Field->getType()->isReferenceType()) { Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign) << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName(); Diag(Field->getLocation(), diag::note_declared_at); Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXCopyAssignment << Context.getTagDeclType(ClassDecl); Invalid = true; continue; } // Check for members of const-qualified, non-class type. QualType BaseType = Context.getBaseElementType(Field->getType()); if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) { Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign) << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName(); Diag(Field->getLocation(), diag::note_declared_at); Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXCopyAssignment << Context.getTagDeclType(ClassDecl); Invalid = true; continue; } // Suppress assigning zero-width bitfields. if (Field->isBitField() && Field->getBitWidthValue(Context) == 0) continue; QualType FieldType = Field->getType().getNonReferenceType(); if (FieldType->isIncompleteArrayType()) { assert(ClassDecl->hasFlexibleArrayMember() && "Incomplete array type is not valid"); continue; } // Build references to the field in the object we're copying from and to. CXXScopeSpec SS; // Intentionally empty LookupResult MemberLookup(*this, Field->getDeclName(), Loc, LookupMemberName); MemberLookup.addDecl(Field); MemberLookup.resolveKind(); MemberBuilder From(OtherRef, OtherRefType, /*IsArrow=*/false, MemberLookup); MemberBuilder To(This, getCurrentThisType(), /*IsArrow=*/true, MemberLookup); // Build the copy of this field. StmtResult Copy = buildSingleCopyAssign(*this, Loc, FieldType, To, From, /*CopyingBaseSubobject=*/false, /*Copying=*/true); if (Copy.isInvalid()) { Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXCopyAssignment << Context.getTagDeclType(ClassDecl); CopyAssignOperator->setInvalidDecl(); return; } // Success! Record the copy. Statements.push_back(Copy.getAs<Stmt>()); } if (!Invalid) { // Add a "return *this;" ExprResult ThisObj = CreateBuiltinUnaryOp(Loc, UO_Deref, This.build(*this, Loc)); StmtResult Return = BuildReturnStmt(Loc, ThisObj.get()); if (Return.isInvalid()) Invalid = true; else { Statements.push_back(Return.getAs<Stmt>()); if (Trap.hasErrorOccurred()) { Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXCopyAssignment << Context.getTagDeclType(ClassDecl); Invalid = true; } } } // The exception specification is needed because we are defining the // function. 
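  // (Implicit special members are declared with an unevaluated exception
  // specification, via getImplicitMethodEPI; resolving it here computes it
  // on demand now that a definition exists.)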
  ResolveExceptionSpec(CurrentLocation,
                       CopyAssignOperator->getType()->castAs<FunctionProtoType>());

  if (Invalid) {
    CopyAssignOperator->setInvalidDecl();
    return;
  }

  StmtResult Body;
  {
    CompoundScopeRAII CompoundScope(*this);
    Body = ActOnCompoundStmt(Loc, Loc, Statements,
                             /*isStmtExpr=*/false);
    assert(!Body.isInvalid() && "Compound statement creation cannot fail");
  }
  CopyAssignOperator->setBody(Body.getAs<Stmt>());

  if (ASTMutationListener *L = getASTMutationListener()) {
    L->CompletedImplicitDefinition(CopyAssignOperator);
  }
}

Sema::ImplicitExceptionSpecification
Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD) {
  CXXRecordDecl *ClassDecl = MD->getParent();

  ImplicitExceptionSpecification ExceptSpec(*this);
  if (ClassDecl->isInvalidDecl())
    return ExceptSpec;

  // C++0x [except.spec]p14:
  //   An implicitly declared special member function (Clause 12) shall have an
  //   exception-specification. [...]
  // It is unspecified whether an implicit move assignment operator attempts
  // to deduplicate calls to assignment operators of virtual bases. As such,
  // this exception specification is effectively unspecified.
  // Based on a similar decision made for constness in C++0x, we're erring on
  // the side of assuming such calls to be made regardless of whether they
  // actually happen.
  // Note that a move constructor is not implicitly declared when there are
  // virtual bases, but it can still be user-declared and explicitly defaulted.
  for (const auto &Base : ClassDecl->bases()) {
    if (Base.isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl
      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
    if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
                                                           0, false, 0))
      ExceptSpec.CalledDecl(Base.getLocStart(), MoveAssign);
  }

  for (const auto &Base : ClassDecl->vbases()) {
    CXXRecordDecl *BaseClassDecl
      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
    if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
                                                           0, false, 0))
      ExceptSpec.CalledDecl(Base.getLocStart(), MoveAssign);
  }

  for (const auto *Field : ClassDecl->fields()) {
    QualType FieldType = Context.getBaseElementType(Field->getType());
    if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
      if (CXXMethodDecl *MoveAssign =
              LookupMovingAssignment(FieldClassDecl,
                                     FieldType.getCVRQualifiers(),
                                     false, 0))
        ExceptSpec.CalledDecl(Field->getLocation(), MoveAssign);
    }
  }

  return ExceptSpec;
}

CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
  assert(ClassDecl->needsImplicitMoveAssignment());

  DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveAssignment);
  if (DSM.isAlreadyBeingDeclared())
    return nullptr;

  // Note: The following rules are largely analogous to the move
  // constructor rules.

  QualType ArgType = Context.getTypeDeclType(ClassDecl);
  QualType RetType = Context.getLValueReferenceType(ArgType);
  ArgType = Context.getRValueReferenceType(ArgType);

  bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl,
                                                     CXXMoveAssignment,
                                                     false);

  //   An implicitly-declared move assignment operator is an inline public
  //   member of its class.
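  // Illustrative sketch: in contrast to the copy assignment operator declared
  // above, the declaration built below takes an rvalue reference, i.e. for a
  // class 'S' it is equivalent to
  //   S &S::operator=(S &&);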
  DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
  SourceLocation ClassLoc = ClassDecl->getLocation();
  DeclarationNameInfo NameInfo(Name, ClassLoc);
  CXXMethodDecl *MoveAssignment =
      CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, QualType(),
                            /*TInfo=*/nullptr, /*StorageClass=*/SC_None,
                            /*isInline=*/true, Constexpr, SourceLocation());
  MoveAssignment->setAccess(AS_public);
  MoveAssignment->setDefaulted();
  MoveAssignment->setImplicit();

  if (getLangOpts().CUDA) {
    inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveAssignment,
                                            MoveAssignment,
                                            /* ConstRHS */ false,
                                            /* Diagnose */ false);
  }

  // Build an exception specification pointing back at this member.
  FunctionProtoType::ExtProtoInfo EPI =
      getImplicitMethodEPI(*this, MoveAssignment);
  MoveAssignment->setType(Context.getFunctionType(RetType, ArgType, EPI, None)); // HLSL Change - add param mods

  // Add the parameter to the operator.
  ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
                                               ClassLoc, ClassLoc,
                                               /*Id=*/nullptr, ArgType,
                                               /*TInfo=*/nullptr, SC_None,
                                               nullptr);
  MoveAssignment->setParams(FromParam);

  AddOverriddenMethods(ClassDecl, MoveAssignment);

  MoveAssignment->setTrivial(
    ClassDecl->needsOverloadResolutionForMoveAssignment()
      ? SpecialMemberIsTrivial(MoveAssignment, CXXMoveAssignment)
      : ClassDecl->hasTrivialMoveAssignment());

  if (ShouldDeleteSpecialMember(MoveAssignment, CXXMoveAssignment)) {
    ClassDecl->setImplicitMoveAssignmentIsDeleted();
    SetDeclDeleted(MoveAssignment, ClassLoc);
  }

  // Note that we have added this move assignment operator.
  ++ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;

  if (Scope *S = getScopeForContext(ClassDecl))
    PushOnScopeChains(MoveAssignment, S, false);
  ClassDecl->addDecl(MoveAssignment);

  return MoveAssignment;
}

/// Check if we're implicitly defining a move assignment operator for a class
/// with virtual bases. Such a move assignment might move-assign the virtual
/// base multiple times.
static void checkMoveAssignmentForRepeatedMove(Sema &S, CXXRecordDecl *Class,
                                               SourceLocation CurrentLocation) {
  assert(!Class->isDependentContext() && "should not define dependent move");

  // Only a virtual base could get implicitly move-assigned multiple times.
  // Only a non-trivial move assignment can observe this. We only want to
  // diagnose if we implicitly define an assignment operator that assigns
  // two base classes, both of which move-assign the same virtual base.
  if (Class->getNumVBases() == 0 || Class->hasTrivialMoveAssignment() ||
      Class->getNumBases() < 2)
    return;

  llvm::SmallVector<CXXBaseSpecifier *, 16> Worklist;
  typedef llvm::DenseMap<CXXRecordDecl*, CXXBaseSpecifier*> VBaseMap;
  VBaseMap VBases;

  for (auto &BI : Class->bases()) {
    Worklist.push_back(&BI);
    while (!Worklist.empty()) {
      CXXBaseSpecifier *BaseSpec = Worklist.pop_back_val();
      CXXRecordDecl *Base = BaseSpec->getType()->getAsCXXRecordDecl();

      // If the base has no non-trivial move assignment operators,
      // we don't care about moves from it.
      if (!Base->hasNonTrivialMoveAssignment())
        continue;

      // If there's nothing virtual here, skip it.
      if (!BaseSpec->isVirtual() && !Base->getNumVBases())
        continue;

      // If we're not actually going to call a move assignment for this base,
      // or the selected move assignment is trivial, skip it.
      Sema::SpecialMemberOverloadResult *SMOR =
        S.LookupSpecialMember(Base, Sema::CXXMoveAssignment,
                              /*ConstArg*/false, /*VolatileArg*/false,
                              /*RValueThis*/true, /*ConstThis*/false,
                              /*VolatileThis*/false);
      if (!SMOR->getMethod() || SMOR->getMethod()->isTrivial() ||
          !SMOR->getMethod()->isMoveAssignmentOperator())
        continue;

      if (BaseSpec->isVirtual()) {
        // We're going to move-assign this virtual base, and its move
        // assignment operator is not trivial. If this can happen for
        // multiple distinct direct bases of Class, diagnose it. (If it
        // only happens in one base, we'll diagnose it when synthesizing
        // that base class's move assignment operator.)
        CXXBaseSpecifier *&Existing =
            VBases.insert(std::make_pair(Base->getCanonicalDecl(), &BI))
                .first->second;
        if (Existing && Existing != &BI) {
          S.Diag(CurrentLocation, diag::warn_vbase_moved_multiple_times)
            << Class << Base;
          S.Diag(Existing->getLocStart(), diag::note_vbase_moved_here)
            << (Base->getCanonicalDecl() ==
                Existing->getType()->getAsCXXRecordDecl()->getCanonicalDecl())
            << Base << Existing->getType() << Existing->getSourceRange();
          S.Diag(BI.getLocStart(), diag::note_vbase_moved_here)
            << (Base->getCanonicalDecl() ==
                BI.getType()->getAsCXXRecordDecl()->getCanonicalDecl())
            << Base << BI.getType() << BaseSpec->getSourceRange();

          // Only diagnose each vbase once.
          Existing = nullptr;
        }
      } else {
        // Only walk over bases that have defaulted move assignment operators.
        // We assume that any user-provided move assignment operator handles
        // the multiple-moves-of-vbase case itself somehow.
        if (!SMOR->getMethod()->isDefaulted())
          continue;

        // We're going to move the base classes of Base. Add them to the list.
        for (auto &BI : Base->bases())
          Worklist.push_back(&BI);
      }
    }
  }
}

void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                        CXXMethodDecl *MoveAssignOperator) {
  assert((MoveAssignOperator->isDefaulted() &&
          MoveAssignOperator->isOverloadedOperator() &&
          MoveAssignOperator->getOverloadedOperator() == OO_Equal &&
          !MoveAssignOperator->doesThisDeclarationHaveABody() &&
          !MoveAssignOperator->isDeleted()) &&
         "DefineImplicitMoveAssignment called for wrong function");

  CXXRecordDecl *ClassDecl = MoveAssignOperator->getParent();

  if (ClassDecl->isInvalidDecl() || MoveAssignOperator->isInvalidDecl()) {
    MoveAssignOperator->setInvalidDecl();
    return;
  }

  MoveAssignOperator->markUsed(Context);

  SynthesizedFunctionScope Scope(*this, MoveAssignOperator);
  DiagnosticErrorTrap Trap(Diags);

  // C++0x [class.copy]p28:
  //   The implicitly-defined move assignment operator for a non-union class
  //   X performs memberwise move assignment of its subobjects. The direct base
  //   classes of X are assigned first, in the order of their declaration in
  //   the base-specifier-list, and then the immediate non-static data members
  //   of X are assigned, in the order in which they were declared in the class
  //   definition.

  // Issue a warning if our implicit move assignment operator will move
  // from a virtual base more than once.
  checkMoveAssignmentForRepeatedMove(*this, ClassDecl, CurrentLocation);

  // The statements that form the synthesized function body.
  SmallVector<Stmt*, 8> Statements;

  // The parameter for the "other" object, which we are moving from.
  ParmVarDecl *Other = MoveAssignOperator->getParamDecl(0);
  QualType OtherRefType = Other->getType()->
      getAs<RValueReferenceType>()->getPointeeType();
  assert(!OtherRefType.getQualifiers() &&
         "Bad argument type of defaulted move assignment");

  // Our location for everything implicitly-generated.
SourceLocation Loc = MoveAssignOperator->getLocEnd().isValid() ? MoveAssignOperator->getLocEnd() : MoveAssignOperator->getLocation(); // Builds a reference to the "other" object. RefBuilder OtherRef(Other, OtherRefType); // Cast to rvalue. MoveCastBuilder MoveOther(OtherRef); // Builds the "this" pointer. ThisBuilder This; // Assign base classes. bool Invalid = false; for (auto &Base : ClassDecl->bases()) { // C++11 [class.copy]p28: // It is unspecified whether subobjects representing virtual base classes // are assigned more than once by the implicitly-defined copy assignment // operator. // FIXME: Do not assign to a vbase that will be assigned by some other base // class. For a move-assignment, this can result in the vbase being moved // multiple times. // Form the assignment: // static_cast<Base*>(this)->Base::operator=(static_cast<Base&&>(other)); QualType BaseType = Base.getType().getUnqualifiedType(); if (!BaseType->isRecordType()) { Invalid = true; continue; } CXXCastPath BasePath; BasePath.push_back(&Base); // Construct the "from" expression, which is an implicit cast to the // appropriately-qualified base type. CastBuilder From(OtherRef, BaseType, VK_XValue, BasePath); // Dereference "this". DerefBuilder DerefThis(This); // Implicitly cast "this" to the appropriately-qualified base type. CastBuilder To(DerefThis, Context.getCVRQualifiedType( BaseType, MoveAssignOperator->getTypeQualifiers()), VK_LValue, BasePath); // Build the move. StmtResult Move = buildSingleCopyAssign(*this, Loc, BaseType, To, From, /*CopyingBaseSubobject=*/true, /*Copying=*/false); if (Move.isInvalid()) { Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXMoveAssignment << Context.getTagDeclType(ClassDecl); MoveAssignOperator->setInvalidDecl(); return; } // Success! Record the move. Statements.push_back(Move.getAs<Expr>()); } // Assign non-static members. for (auto *Field : ClassDecl->fields()) { // FIXME: We should form some kind of AST representation for the implied // memcpy in a union copy operation. if (Field->isUnnamedBitfield() || Field->getParent()->isUnion()) continue; if (Field->isInvalidDecl()) { Invalid = true; continue; } // Check for members of reference type; we can't move those. if (Field->getType()->isReferenceType()) { Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign) << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName(); Diag(Field->getLocation(), diag::note_declared_at); Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXMoveAssignment << Context.getTagDeclType(ClassDecl); Invalid = true; continue; } // Check for members of const-qualified, non-class type. QualType BaseType = Context.getBaseElementType(Field->getType()); if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) { Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign) << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName(); Diag(Field->getLocation(), diag::note_declared_at); Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXMoveAssignment << Context.getTagDeclType(ClassDecl); Invalid = true; continue; } // Suppress assigning zero-width bitfields. if (Field->isBitField() && Field->getBitWidthValue(Context) == 0) continue; QualType FieldType = Field->getType().getNonReferenceType(); if (FieldType->isIncompleteArrayType()) { assert(ClassDecl->hasFlexibleArrayMember() && "Incomplete array type is not valid"); continue; } // Build references to the field in the object we're copying from and to. 
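    // Illustrative sketch (field name 'x' and class name 'X' are made up for
    // exposition): 'From' below is built on 'MoveOther', so it is roughly
    // 'static_cast<X &&>(other).x' (an xvalue), 'To' is 'this->x', and the
    // statement built is effectively 'this->x = static_cast<X &&>(other).x;'.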
    LookupResult MemberLookup(*this, Field->getDeclName(), Loc,
                              LookupMemberName);
    MemberLookup.addDecl(Field);
    MemberLookup.resolveKind();
    MemberBuilder From(MoveOther, OtherRefType,
                       /*IsArrow=*/false, MemberLookup);
    MemberBuilder To(This, getCurrentThisType(),
                     /*IsArrow=*/true, MemberLookup);

    assert(!From.build(*this, Loc)->isLValue() && // could be xvalue or prvalue
        "Member reference with rvalue base must be rvalue except for reference "
        "members, which aren't allowed for move assignment.");

    // Build the move of this field.
    StmtResult Move = buildSingleCopyAssign(*this, Loc, FieldType,
                                            To, From,
                                            /*CopyingBaseSubobject=*/false,
                                            /*Copying=*/false);
    if (Move.isInvalid()) {
      Diag(CurrentLocation, diag::note_member_synthesized_at)
        << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
      MoveAssignOperator->setInvalidDecl();
      return;
    }

    // Success! Record the move.
    Statements.push_back(Move.getAs<Stmt>());
  }

  if (!Invalid) {
    // Add a "return *this;"
    ExprResult ThisObj =
        CreateBuiltinUnaryOp(Loc, UO_Deref, This.build(*this, Loc));
    StmtResult Return = BuildReturnStmt(Loc, ThisObj.get());
    if (Return.isInvalid())
      Invalid = true;
    else {
      Statements.push_back(Return.getAs<Stmt>());

      if (Trap.hasErrorOccurred()) {
        Diag(CurrentLocation, diag::note_member_synthesized_at)
          << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
        Invalid = true;
      }
    }
  }

  // The exception specification is needed because we are defining the
  // function.
  ResolveExceptionSpec(CurrentLocation,
                       MoveAssignOperator->getType()->castAs<FunctionProtoType>());

  if (Invalid) {
    MoveAssignOperator->setInvalidDecl();
    return;
  }

  StmtResult Body;
  {
    CompoundScopeRAII CompoundScope(*this);
    Body = ActOnCompoundStmt(Loc, Loc, Statements,
                             /*isStmtExpr=*/false);
    assert(!Body.isInvalid() && "Compound statement creation cannot fail");
  }
  MoveAssignOperator->setBody(Body.getAs<Stmt>());

  if (ASTMutationListener *L = getASTMutationListener()) {
    L->CompletedImplicitDefinition(MoveAssignOperator);
  }
}

Sema::ImplicitExceptionSpecification
Sema::ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD) {
  CXXRecordDecl *ClassDecl = MD->getParent();

  ImplicitExceptionSpecification ExceptSpec(*this);
  if (ClassDecl->isInvalidDecl())
    return ExceptSpec;

  const FunctionProtoType *T = MD->getType()->castAs<FunctionProtoType>();
  assert(T->getNumParams() >= 1 && "not a copy ctor");
  unsigned Quals = T->getParamType(0).getNonReferenceType().getCVRQualifiers();

  // C++ [except.spec]p14:
  //   An implicitly declared special member function (Clause 12) shall have an
  //   exception-specification. [...]
  for (const auto &Base : ClassDecl->bases()) {
    // Virtual bases are handled below.
if (Base.isVirtual()) continue; CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); if (CXXConstructorDecl *CopyConstructor = LookupCopyingConstructor(BaseClassDecl, Quals)) ExceptSpec.CalledDecl(Base.getLocStart(), CopyConstructor); } for (const auto &Base : ClassDecl->vbases()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); if (CXXConstructorDecl *CopyConstructor = LookupCopyingConstructor(BaseClassDecl, Quals)) ExceptSpec.CalledDecl(Base.getLocStart(), CopyConstructor); } for (const auto *Field : ClassDecl->fields()) { QualType FieldType = Context.getBaseElementType(Field->getType()); if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) { if (CXXConstructorDecl *CopyConstructor = LookupCopyingConstructor(FieldClassDecl, Quals | FieldType.getCVRQualifiers())) ExceptSpec.CalledDecl(Field->getLocation(), CopyConstructor); } } return ExceptSpec; } CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor( CXXRecordDecl *ClassDecl) { // C++ [class.copy]p4: // If the class definition does not explicitly declare a copy // constructor, one is declared implicitly. assert(ClassDecl->needsImplicitCopyConstructor()); DeclaringSpecialMember DSM(*this, ClassDecl, CXXCopyConstructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; QualType ClassType = Context.getTypeDeclType(ClassDecl); QualType ArgType = ClassType; bool Const = ClassDecl->implicitCopyConstructorHasConstParam(); if (Const) ArgType = ArgType.withConst(); ArgType = Context.getLValueReferenceType(ArgType); bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, CXXCopyConstructor, Const); DeclarationName Name = Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(ClassType)); SourceLocation ClassLoc = ClassDecl->getLocation(); DeclarationNameInfo NameInfo(Name, ClassLoc); // An implicitly-declared copy constructor is an inline public // member of its class. CXXConstructorDecl *CopyConstructor = CXXConstructorDecl::Create( Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr, /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true, Constexpr); CopyConstructor->setAccess(AS_public); CopyConstructor->setDefaulted(); if (getLangOpts().CUDA) { inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyConstructor, CopyConstructor, /* ConstRHS */ Const, /* Diagnose */ false); } // Build an exception specification pointing back at this member. FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, CopyConstructor); CopyConstructor->setType( Context.getFunctionType(Context.VoidTy, ArgType, EPI, None)); // HLSL Change - add param mods // Add the parameter to the constructor. ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor, ClassLoc, ClassLoc, /*IdentifierInfo=*/nullptr, ArgType, /*TInfo=*/nullptr, SC_None, nullptr); CopyConstructor->setParams(FromParam); CopyConstructor->setTrivial( ClassDecl->needsOverloadResolutionForCopyConstructor() ? SpecialMemberIsTrivial(CopyConstructor, CXXCopyConstructor) : ClassDecl->hasTrivialCopyConstructor()); if (ShouldDeleteSpecialMember(CopyConstructor, CXXCopyConstructor)) SetDeclDeleted(CopyConstructor, ClassLoc); // Note that we have declared this constructor. 
++ASTContext::NumImplicitCopyConstructorsDeclared; if (Scope *S = getScopeForContext(ClassDecl)) PushOnScopeChains(CopyConstructor, S, false); ClassDecl->addDecl(CopyConstructor); return CopyConstructor; } void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *CopyConstructor) { assert((CopyConstructor->isDefaulted() && CopyConstructor->isCopyConstructor() && !CopyConstructor->doesThisDeclarationHaveABody() && !CopyConstructor->isDeleted()) && "DefineImplicitCopyConstructor - call it for implicit copy ctor"); CXXRecordDecl *ClassDecl = CopyConstructor->getParent(); assert(ClassDecl && "DefineImplicitCopyConstructor - invalid constructor"); // C++11 [class.copy]p7: // The [definition of an implicitly declared copy constructor] is // deprecated if the class has a user-declared copy assignment operator // or a user-declared destructor. if (getLangOpts().CPlusPlus11 && CopyConstructor->isImplicit()) diagnoseDeprecatedCopyOperation(*this, CopyConstructor, CurrentLocation); SynthesizedFunctionScope Scope(*this, CopyConstructor); DiagnosticErrorTrap Trap(Diags); if (SetCtorInitializers(CopyConstructor, /*AnyErrors=*/false) || Trap.hasErrorOccurred()) { Diag(CurrentLocation, diag::note_member_synthesized_at) << CXXCopyConstructor << Context.getTagDeclType(ClassDecl); CopyConstructor->setInvalidDecl(); } else { SourceLocation Loc = CopyConstructor->getLocEnd().isValid() ? CopyConstructor->getLocEnd() : CopyConstructor->getLocation(); Sema::CompoundScopeRAII CompoundScope(*this); CopyConstructor->setBody( ActOnCompoundStmt(Loc, Loc, None, /*isStmtExpr=*/false).getAs<Stmt>()); } // The exception specification is needed because we are defining the // function. ResolveExceptionSpec(CurrentLocation, CopyConstructor->getType()->castAs<FunctionProtoType>()); CopyConstructor->markUsed(Context); MarkVTableUsed(CurrentLocation, ClassDecl); if (ASTMutationListener *L = getASTMutationListener()) { L->CompletedImplicitDefinition(CopyConstructor); } } Sema::ImplicitExceptionSpecification Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD) { CXXRecordDecl *ClassDecl = MD->getParent(); // C++ [except.spec]p14: // An implicitly declared special member function (Clause 12) shall have an // exception-specification. [...] ImplicitExceptionSpecification ExceptSpec(*this); if (ClassDecl->isInvalidDecl()) return ExceptSpec; // Direct base-class constructors. for (const auto &B : ClassDecl->bases()) { if (B.isVirtual()) // Handled below. continue; if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); CXXConstructorDecl *Constructor = LookupMovingConstructor(BaseClassDecl, 0); // If this is a deleted function, add it anyway. This might be conformant // with the standard. This might not. I'm not sure. It might not matter. if (Constructor) ExceptSpec.CalledDecl(B.getLocStart(), Constructor); } } // Virtual base-class constructors. for (const auto &B : ClassDecl->vbases()) { if (const RecordType *BaseType = B.getType()->getAs<RecordType>()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); CXXConstructorDecl *Constructor = LookupMovingConstructor(BaseClassDecl, 0); // If this is a deleted function, add it anyway. This might be conformant // with the standard. This might not. I'm not sure. It might not matter. if (Constructor) ExceptSpec.CalledDecl(B.getLocStart(), Constructor); } } // Field constructors. 
for (const auto *F : ClassDecl->fields()) { QualType FieldType = Context.getBaseElementType(F->getType()); if (CXXRecordDecl *FieldRecDecl = FieldType->getAsCXXRecordDecl()) { CXXConstructorDecl *Constructor = LookupMovingConstructor(FieldRecDecl, FieldType.getCVRQualifiers()); // If this is a deleted function, add it anyway. This might be conformant // with the standard. This might not. I'm not sure. It might not matter. // In particular, the problem is that this function never gets called. It // might just be ill-formed because this function attempts to refer to // a deleted function here. if (Constructor) ExceptSpec.CalledDecl(F->getLocation(), Constructor); } } return ExceptSpec; } CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor( CXXRecordDecl *ClassDecl) { assert(ClassDecl->needsImplicitMoveConstructor()); DeclaringSpecialMember DSM(*this, ClassDecl, CXXMoveConstructor); if (DSM.isAlreadyBeingDeclared()) return nullptr; QualType ClassType = Context.getTypeDeclType(ClassDecl); QualType ArgType = Context.getRValueReferenceType(ClassType); bool Constexpr = defaultedSpecialMemberIsConstexpr(*this, ClassDecl, CXXMoveConstructor, false); DeclarationName Name = Context.DeclarationNames.getCXXConstructorName( Context.getCanonicalType(ClassType)); SourceLocation ClassLoc = ClassDecl->getLocation(); DeclarationNameInfo NameInfo(Name, ClassLoc); // C++11 [class.copy]p11: // An implicitly-declared copy/move constructor is an inline public // member of its class. CXXConstructorDecl *MoveConstructor = CXXConstructorDecl::Create( Context, ClassDecl, ClassLoc, NameInfo, QualType(), /*TInfo=*/nullptr, /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true, Constexpr); MoveConstructor->setAccess(AS_public); MoveConstructor->setDefaulted(); if (getLangOpts().CUDA) { inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveConstructor, MoveConstructor, /* ConstRHS */ false, /* Diagnose */ false); } // Build an exception specification pointing back at this member. FunctionProtoType::ExtProtoInfo EPI = getImplicitMethodEPI(*this, MoveConstructor); MoveConstructor->setType( Context.getFunctionType(Context.VoidTy, ArgType, EPI, None)); // HLSL Change - all in-args // Add the parameter to the constructor. ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor, ClassLoc, ClassLoc, /*IdentifierInfo=*/nullptr, ArgType, /*TInfo=*/nullptr, SC_None, nullptr); MoveConstructor->setParams(FromParam); MoveConstructor->setTrivial( ClassDecl->needsOverloadResolutionForMoveConstructor() ? SpecialMemberIsTrivial(MoveConstructor, CXXMoveConstructor) : ClassDecl->hasTrivialMoveConstructor()); if (ShouldDeleteSpecialMember(MoveConstructor, CXXMoveConstructor)) { ClassDecl->setImplicitMoveConstructorIsDeleted(); SetDeclDeleted(MoveConstructor, ClassLoc); } // Note that we have declared this constructor. 
  ++ASTContext::NumImplicitMoveConstructorsDeclared;

  if (Scope *S = getScopeForContext(ClassDecl))
    PushOnScopeChains(MoveConstructor, S, false);
  ClassDecl->addDecl(MoveConstructor);

  return MoveConstructor;
}

void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                         CXXConstructorDecl *MoveConstructor) {
  assert((MoveConstructor->isDefaulted() &&
          MoveConstructor->isMoveConstructor() &&
          !MoveConstructor->doesThisDeclarationHaveABody() &&
          !MoveConstructor->isDeleted()) &&
         "DefineImplicitMoveConstructor - call it for implicit move ctor");

  CXXRecordDecl *ClassDecl = MoveConstructor->getParent();
  assert(ClassDecl && "DefineImplicitMoveConstructor - invalid constructor");

  SynthesizedFunctionScope Scope(*this, MoveConstructor);
  DiagnosticErrorTrap Trap(Diags);

  if (SetCtorInitializers(MoveConstructor, /*AnyErrors=*/false) ||
      Trap.hasErrorOccurred()) {
    Diag(CurrentLocation, diag::note_member_synthesized_at)
      << CXXMoveConstructor << Context.getTagDeclType(ClassDecl);
    MoveConstructor->setInvalidDecl();
  } else {
    SourceLocation Loc = MoveConstructor->getLocEnd().isValid()
                             ? MoveConstructor->getLocEnd()
                             : MoveConstructor->getLocation();
    Sema::CompoundScopeRAII CompoundScope(*this);
    MoveConstructor->setBody(ActOnCompoundStmt(
        Loc, Loc, None, /*isStmtExpr=*/ false).getAs<Stmt>());
  }

  // The exception specification is needed because we are defining the
  // function.
  ResolveExceptionSpec(CurrentLocation,
                       MoveConstructor->getType()->castAs<FunctionProtoType>());

  MoveConstructor->markUsed(Context);
  MarkVTableUsed(CurrentLocation, ClassDecl);

  if (ASTMutationListener *L = getASTMutationListener()) {
    L->CompletedImplicitDefinition(MoveConstructor);
  }
}

bool Sema::isImplicitlyDeleted(FunctionDecl *FD) {
  return FD->isDeleted() && FD->isDefaulted() && isa<CXXMethodDecl>(FD);
}

void Sema::DefineImplicitLambdaToFunctionPointerConversion(
    SourceLocation CurrentLocation, CXXConversionDecl *Conv) {
  CXXRecordDecl *Lambda = Conv->getParent();
  CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
  // If we are defining a specialization of a conversion to function-ptr
  // cache the deduced template arguments for this specialization
  // so that we can use them to retrieve the corresponding call-operator
  // and static-invoker.
  const TemplateArgumentList *DeducedTemplateArgs = nullptr;

  // Retrieve the corresponding call-operator specialization.
  if (Lambda->isGenericLambda()) {
    assert(Conv->isFunctionTemplateSpecialization());
    FunctionTemplateDecl *CallOpTemplate =
        CallOp->getDescribedFunctionTemplate();
    DeducedTemplateArgs = Conv->getTemplateSpecializationArgs();
    void *InsertPos = nullptr;
    FunctionDecl *CallOpSpec = CallOpTemplate->findSpecialization(
        DeducedTemplateArgs->asArray(), InsertPos);
    assert(CallOpSpec &&
           "Conversion operator must have a corresponding call operator");
    CallOp = cast<CXXMethodDecl>(CallOpSpec);
  }

  // Mark the call operator referenced (and add to pending instantiations
  // if necessary).
  // For both the conversion and static-invoker template specializations
  // we construct their bodies in this function, so no need to add them
  // to the PendingInstantiations.
  MarkFunctionReferenced(CurrentLocation, CallOp);

  SynthesizedFunctionScope Scope(*this, Conv);
  DiagnosticErrorTrap Trap(Diags);

  // Retrieve the static invoker...
  CXXMethodDecl *Invoker = Lambda->getLambdaStaticInvoker();
  // ... and get the corresponding specialization for a generic lambda.
if (Lambda->isGenericLambda()) { assert(DeducedTemplateArgs && "Must have deduced template arguments from Conversion Operator"); FunctionTemplateDecl *InvokeTemplate = Invoker->getDescribedFunctionTemplate(); void *InsertPos = nullptr; FunctionDecl *InvokeSpec = InvokeTemplate->findSpecialization( DeducedTemplateArgs->asArray(), InsertPos); assert(InvokeSpec && "Must have a corresponding static invoker specialization"); Invoker = cast<CXXMethodDecl>(InvokeSpec); } // Construct the body of the conversion function { return __invoke; }. Expr *FunctionRef = BuildDeclRefExpr(Invoker, Invoker->getType(), VK_LValue, Conv->getLocation()).get(); assert(FunctionRef && "Can't refer to __invoke function?"); Stmt *Return = BuildReturnStmt(Conv->getLocation(), FunctionRef).get(); Conv->setBody(new (Context) CompoundStmt(Context, Return, Conv->getLocation(), Conv->getLocation())); Conv->markUsed(Context); Conv->setReferenced(); // Fill in the __invoke function with a dummy implementation. IR generation // will fill in the actual details. Invoker->markUsed(Context); Invoker->setReferenced(); Invoker->setBody(new (Context) CompoundStmt(Conv->getLocation())); if (ASTMutationListener *L = getASTMutationListener()) { L->CompletedImplicitDefinition(Conv); L->CompletedImplicitDefinition(Invoker); } } void Sema::DefineImplicitLambdaToBlockPointerConversion( SourceLocation CurrentLocation, CXXConversionDecl *Conv) { assert(!Conv->getParent()->isGenericLambda()); Conv->markUsed(Context); SynthesizedFunctionScope Scope(*this, Conv); DiagnosticErrorTrap Trap(Diags); // Copy-initialize the lambda object as needed to capture it. Expr *This = ActOnCXXThis(CurrentLocation).get(); Expr *DerefThis =CreateBuiltinUnaryOp(CurrentLocation, UO_Deref, This).get(); ExprResult BuildBlock = BuildBlockForLambdaConversion(CurrentLocation, Conv->getLocation(), Conv, DerefThis); // If we're not under ARC, make sure we still get the _Block_copy/autorelease // behavior. Note that only the general conversion function does this // (since it's unusable otherwise); in the case where we inline the // block literal, it has block literal lifetime semantics. if (!BuildBlock.isInvalid() && !getLangOpts().ObjCAutoRefCount) BuildBlock = ImplicitCastExpr::Create(Context, BuildBlock.get()->getType(), CK_CopyAndAutoreleaseBlockObject, BuildBlock.get(), nullptr, VK_RValue); if (BuildBlock.isInvalid()) { Diag(CurrentLocation, diag::note_lambda_to_block_conv); Conv->setInvalidDecl(); return; } // Create the return statement that returns the block from the conversion // function. StmtResult Return = BuildReturnStmt(Conv->getLocation(), BuildBlock.get()); if (Return.isInvalid()) { Diag(CurrentLocation, diag::note_lambda_to_block_conv); Conv->setInvalidDecl(); return; } // Set the body of the conversion function. Stmt *ReturnS = Return.get(); Conv->setBody(new (Context) CompoundStmt(Context, ReturnS, Conv->getLocation(), Conv->getLocation())); // We're done; notify the mutation listener, if any. if (ASTMutationListener *L = getASTMutationListener()) { L->CompletedImplicitDefinition(Conv); } } /// \brief Determine whether the given list arguments contains exactly one /// "real" (non-default) argument. 
static bool hasOneRealArgument(MultiExprArg Args) { switch (Args.size()) { case 0: return false; default: if (!Args[1]->isDefaultArgument()) return false; LLVM_FALLTHROUGH; // HLSL Change case 1: return !Args[0]->isDefaultArgument(); } return false; } ExprResult Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg ExprArgs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange) { bool Elidable = false; // C++0x [class.copy]p34: // When certain criteria are met, an implementation is allowed to // omit the copy/move construction of a class object, even if the // copy/move constructor and/or destructor for the object have // side effects. [...] // - when a temporary class object that has not been bound to a // reference (12.2) would be copied/moved to a class object // with the same cv-unqualified type, the copy/move operation // can be omitted by constructing the temporary object // directly into the target of the omitted copy/move if (ConstructKind == CXXConstructExpr::CK_Complete && Constructor->isCopyOrMoveConstructor() && hasOneRealArgument(ExprArgs)) { Expr *SubExpr = ExprArgs[0]; Elidable = SubExpr->isTemporaryObject(Context, Constructor->getParent()); } return BuildCXXConstructExpr(ConstructLoc, DeclInitType, Constructor, Elidable, ExprArgs, HadMultipleCandidates, IsListInitialization, IsStdInitListInitialization, RequiresZeroInit, ConstructKind, ParenRange); } /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. ExprResult Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg ExprArgs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange) { MarkFunctionReferenced(ConstructLoc, Constructor); return CXXConstructExpr::Create( Context, DeclInitType, ConstructLoc, Constructor, Elidable, ExprArgs, HadMultipleCandidates, IsListInitialization, IsStdInitListInitialization, RequiresZeroInit, static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind), ParenRange); } ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) { assert(Field->hasInClassInitializer()); // If we already have the in-class initializer nothing needs to be done. if (Field->getInClassInitializer()) return CXXDefaultInitExpr::Create(Context, Loc, Field); // Maybe we haven't instantiated the in-class initializer. Go check the // pattern FieldDecl to see if it has one. CXXRecordDecl *ParentRD = cast<CXXRecordDecl>(Field->getParent()); if (isTemplateInstantiation(ParentRD->getTemplateSpecializationKind())) { CXXRecordDecl *ClassPattern = ParentRD->getTemplateInstantiationPattern(); DeclContext::lookup_result Lookup = ClassPattern->lookup(Field->getDeclName()); assert(Lookup.size() == 1); FieldDecl *Pattern = cast<FieldDecl>(Lookup[0]); if (InstantiateInClassInitializer(Loc, Field, Pattern, getTemplateInstantiationArgs(Field))) return ExprError(); return CXXDefaultInitExpr::Create(Context, Loc, Field); } // DR1351: // If the brace-or-equal-initializer of a non-static data member // invokes a defaulted default constructor of its class or of an // enclosing class in a potentially evaluated subexpression, the // program is ill-formed. 
// // This resolution is unworkable: the exception specification of the // default constructor can be needed in an unevaluated context, in // particular, in the operand of a noexcept-expression, and we can be // unable to compute an exception specification for an enclosed class. // // Any attempt to resolve the exception specification of a defaulted default // constructor before the initializer is lexically complete will ultimately // come here at which point we can diagnose it. RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext(); if (OutermostClass == ParentRD) { Diag(Field->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed) << ParentRD << Field; } else { Diag(Field->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed_outer_class) << ParentRD << OutermostClass << Field; } return ExprError(); } void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) { if (VD->isInvalidDecl()) return; CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl()); if (ClassDecl->isInvalidDecl()) return; if (ClassDecl->hasIrrelevantDestructor()) return; if (ClassDecl->isDependentContext()) return; CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); MarkFunctionReferenced(VD->getLocation(), Destructor); CheckDestructorAccess(VD->getLocation(), Destructor, PDiag(diag::err_access_dtor_var) << VD->getDeclName() << VD->getType()); DiagnoseUseOfDecl(Destructor, VD->getLocation()); if (Destructor->isTrivial()) return; if (!VD->hasGlobalStorage()) return; // Emit warning for non-trivial dtor in global scope (a real global, // class-static, function-static). Diag(VD->getLocation(), diag::warn_exit_time_destructor); // TODO: this should be re-enabled for static locals by !CXAAtExit if (!VD->isStaticLocal()) Diag(VD->getLocation(), diag::warn_global_destructor); } /// \brief Given a constructor and the set of arguments provided for the /// constructor, convert the arguments and add any required default arguments /// to form a proper call to this constructor. /// /// \returns true if an error occurred, false otherwise. bool Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit, bool IsListInitialization) { // FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall. unsigned NumArgs = ArgsPtr.size(); Expr **Args = ArgsPtr.data(); const FunctionProtoType *Proto = Constructor->getType()->getAs<FunctionProtoType>(); assert(Proto && "Constructor without a prototype?"); unsigned NumParams = Proto->getNumParams(); // If too few arguments are available, we'll fill in the rest with defaults. if (NumArgs < NumParams) ConvertedArgs.reserve(NumParams); else ConvertedArgs.reserve(NumArgs); VariadicCallType CallType = Proto->isVariadic() ? 
VariadicConstructor : VariadicDoesNotApply; SmallVector<Expr *, 8> AllArgs; bool Invalid = GatherArgumentsForCall(Loc, Constructor, Proto, 0, llvm::makeArrayRef(Args, NumArgs), AllArgs, CallType, AllowExplicit, IsListInitialization); ConvertedArgs.append(AllArgs.begin(), AllArgs.end()); DiagnoseSentinelCalls(Constructor, Loc, AllArgs); CheckConstructorCall(Constructor, llvm::makeArrayRef(AllArgs.data(), AllArgs.size()), Proto, Loc); return Invalid; } static inline bool CheckOperatorNewDeleteDeclarationScope(Sema &SemaRef, const FunctionDecl *FnDecl) { const DeclContext *DC = FnDecl->getDeclContext()->getRedeclContext(); if (isa<NamespaceDecl>(DC)) { return SemaRef.Diag(FnDecl->getLocation(), diag::err_operator_new_delete_declared_in_namespace) << FnDecl->getDeclName(); } if (isa<TranslationUnitDecl>(DC) && FnDecl->getStorageClass() == SC_Static) { return SemaRef.Diag(FnDecl->getLocation(), diag::err_operator_new_delete_declared_static) << FnDecl->getDeclName(); } return false; } static inline bool CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl, CanQualType ExpectedResultType, CanQualType ExpectedFirstParamType, unsigned DependentParamTypeDiag, unsigned InvalidParamTypeDiag) { QualType ResultType = FnDecl->getType()->getAs<FunctionType>()->getReturnType(); // Check that the result type is not dependent. if (ResultType->isDependentType()) return SemaRef.Diag(FnDecl->getLocation(), diag::err_operator_new_delete_dependent_result_type) << FnDecl->getDeclName() << ExpectedResultType; // Check that the result type is what we expect. if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType) return SemaRef.Diag(FnDecl->getLocation(), diag::err_operator_new_delete_invalid_result_type) << FnDecl->getDeclName() << ExpectedResultType; // A function template must have at least 2 parameters. if (FnDecl->getDescribedFunctionTemplate() && FnDecl->getNumParams() < 2) return SemaRef.Diag(FnDecl->getLocation(), diag::err_operator_new_delete_template_too_few_parameters) << FnDecl->getDeclName(); // The function decl must have at least 1 parameter. if (FnDecl->getNumParams() == 0) return SemaRef.Diag(FnDecl->getLocation(), diag::err_operator_new_delete_too_few_parameters) << FnDecl->getDeclName(); // Check the first parameter type is not dependent. QualType FirstParamType = FnDecl->getParamDecl(0)->getType(); if (FirstParamType->isDependentType()) return SemaRef.Diag(FnDecl->getLocation(), DependentParamTypeDiag) << FnDecl->getDeclName() << ExpectedFirstParamType; // Check that the first parameter type is what we expect. if (SemaRef.Context.getCanonicalType(FirstParamType).getUnqualifiedType() != ExpectedFirstParamType) return SemaRef.Diag(FnDecl->getLocation(), InvalidParamTypeDiag) << FnDecl->getDeclName() << ExpectedFirstParamType; return false; } static bool CheckOperatorNewDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) { // C++ [basic.stc.dynamic.allocation]p1: // A program is ill-formed if an allocation function is declared in a // namespace scope other than global scope or declared static in global // scope. if (CheckOperatorNewDeleteDeclarationScope(SemaRef, FnDecl)) return true; CanQualType SizeTy = SemaRef.Context.getCanonicalType(SemaRef.Context.getSizeType()); // C++ [basic.stc.dynamic.allocation]p1: // The return type shall be void*. The first parameter shall have type // std::size_t. 
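  // e.g. 'void *operator new(std::size_t);' satisfies both requirements,
  // while declarations such as 'int *operator new(std::size_t);' or
  // 'void *operator new(int);' are rejected by the checks below.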
if (CheckOperatorNewDeleteTypes(SemaRef, FnDecl, SemaRef.Context.VoidPtrTy, SizeTy, diag::err_operator_new_dependent_param_type, diag::err_operator_new_param_type)) return true; // C++ [basic.stc.dynamic.allocation]p1: // The first parameter shall not have an associated default argument. if (FnDecl->getParamDecl(0)->hasDefaultArg()) return SemaRef.Diag(FnDecl->getLocation(), diag::err_operator_new_default_arg) << FnDecl->getDeclName() << FnDecl->getParamDecl(0)->getDefaultArgRange(); return false; } static bool CheckOperatorDeleteDeclaration(Sema &SemaRef, FunctionDecl *FnDecl) { // C++ [basic.stc.dynamic.deallocation]p1: // A program is ill-formed if deallocation functions are declared in a // namespace scope other than global scope or declared static in global // scope. if (CheckOperatorNewDeleteDeclarationScope(SemaRef, FnDecl)) return true; // C++ [basic.stc.dynamic.deallocation]p2: // Each deallocation function shall return void and its first parameter // shall be void*. if (CheckOperatorNewDeleteTypes(SemaRef, FnDecl, SemaRef.Context.VoidTy, SemaRef.Context.VoidPtrTy, diag::err_operator_delete_dependent_param_type, diag::err_operator_delete_param_type)) return true; return false; } /// CheckOverloadedOperatorDeclaration - Check whether the declaration /// of this overloaded operator is well-formed. If so, returns false; /// otherwise, emits appropriate diagnostics and returns true. bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) { assert(FnDecl && FnDecl->isOverloadedOperator() && "Expected an overloaded operator declaration"); OverloadedOperatorKind Op = FnDecl->getOverloadedOperator(); // HLSL Change Starts if (LangOpts.HLSL) { if (Op == OO_Delete || Op == OO_Array_Delete || Op == OO_New || Op == OO_Array_New || Op == OO_Equal || (Op >= OO_PlusEqual && Op <= OO_GreaterGreaterEqual) || Op == OO_PlusPlus || Op == OO_MinusMinus || Op == OO_ArrowStar || Op == OO_Arrow) { return Diag(FnDecl->getLocation(), diag::err_hlsl_overloading_operator_disallowed) << FnDecl->getDeclName() << 0; } if (!isa<CXXMethodDecl>(FnDecl)) return Diag(FnDecl->getLocation(), diag::err_hlsl_overloading_operator_disallowed) << FnDecl->getDeclName() << 1; } // HLSL Change Ends // C++ [over.oper]p5: // The allocation and deallocation functions, operator new, // operator new[], operator delete and operator delete[], are // described completely in 3.7.3. The attributes and restrictions // found in the rest of this subclause do not apply to them unless // explicitly stated in 3.7.3. if (Op == OO_Delete || Op == OO_Array_Delete) return CheckOperatorDeleteDeclaration(*this, FnDecl); if (Op == OO_New || Op == OO_Array_New) return CheckOperatorNewDeclaration(*this, FnDecl); // C++ [over.oper]p6: // An operator function shall either be a non-static member // function or be a non-member function and have at least one // parameter whose type is a class, a reference to a class, an // enumeration, or a reference to an enumeration. 
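  // e.g. 'S operator+(const S &, const S &);' is acceptable as a non-member
  // for a class type 'S', whereas 'int operator+(int, int);' is rejected
  // below because no parameter has class or enumeration type.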
if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(FnDecl)) { if (MethodDecl->isStatic()) return Diag(FnDecl->getLocation(), diag::err_operator_overload_static) << FnDecl->getDeclName(); } else { bool ClassOrEnumParam = false; for (auto Param : FnDecl->params()) { QualType ParamType = Param->getType().getNonReferenceType(); if (ParamType->isDependentType() || ParamType->isRecordType() || ParamType->isEnumeralType()) { ClassOrEnumParam = true; break; } } if (!ClassOrEnumParam) return Diag(FnDecl->getLocation(), diag::err_operator_overload_needs_class_or_enum) << FnDecl->getDeclName(); } // C++ [over.oper]p8: // An operator function cannot have default arguments (8.3.6), // except where explicitly stated below. // // Only the function-call operator allows default arguments // (C++ [over.call]p1). if (Op != OO_Call) { for (auto Param : FnDecl->params()) { if (Param->hasDefaultArg()) return Diag(Param->getLocation(), diag::err_operator_overload_default_arg) << FnDecl->getDeclName() << Param->getDefaultArgRange(); } } static const bool OperatorUses[NUM_OVERLOADED_OPERATORS][3] = { { false, false, false } #define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \ , { Unary, Binary, MemberOnly } #include "clang/Basic/OperatorKinds.def" }; bool CanBeUnaryOperator = OperatorUses[Op][0]; bool CanBeBinaryOperator = OperatorUses[Op][1]; bool MustBeMemberOperator = OperatorUses[Op][2]; // C++ [over.oper]p8: // [...] Operator functions cannot have more or fewer parameters // than the number required for the corresponding operator, as // described in the rest of this subclause. unsigned NumParams = FnDecl->getNumParams() + (isa<CXXMethodDecl>(FnDecl)? 1 : 0); if (Op != OO_Call && ((NumParams == 1 && !CanBeUnaryOperator) || (NumParams == 2 && !CanBeBinaryOperator) || (NumParams < 1) || (NumParams > 2))) { // We have the wrong number of parameters. unsigned ErrorKind; if (CanBeUnaryOperator && CanBeBinaryOperator) { ErrorKind = 2; // 2 -> unary or binary. } else if (CanBeUnaryOperator) { ErrorKind = 0; // 0 -> unary } else { assert(CanBeBinaryOperator && "All non-call overloaded operators are unary or binary!"); ErrorKind = 1; // 1 -> binary } return Diag(FnDecl->getLocation(), diag::err_operator_overload_must_be) << FnDecl->getDeclName() << NumParams << ErrorKind; } // Overloaded operators other than operator() cannot be variadic. if (Op != OO_Call && FnDecl->getType()->getAs<FunctionProtoType>()->isVariadic()) { return Diag(FnDecl->getLocation(), diag::err_operator_overload_variadic) << FnDecl->getDeclName(); } // Some operators must be non-static member functions. if (MustBeMemberOperator && !isa<CXXMethodDecl>(FnDecl)) { return Diag(FnDecl->getLocation(), diag::err_operator_overload_must_be_member) << FnDecl->getDeclName(); } // C++ [over.inc]p1: // The user-defined function called operator++ implements the // prefix and postfix ++ operator. If this function is a member // function with no parameters, or a non-member function with one // parameter of class or enumeration type, it defines the prefix // increment operator ++ for objects of that type. If the function // is a member function with one parameter (which shall be of type // int) or a non-member function with two parameters (the second // of which shall be of type int), it defines the postfix // increment operator ++ for objects of that type. 
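  // e.g. 'S &S::operator++();' declares the prefix form, while
  // 'S S::operator++(int);' declares the postfix form; the dummy 'int'
  // parameter merely distinguishes the two and is validated below.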
if ((Op == OO_PlusPlus || Op == OO_MinusMinus) && NumParams == 2) { ParmVarDecl *LastParam = FnDecl->getParamDecl(FnDecl->getNumParams() - 1); QualType ParamType = LastParam->getType(); if (!ParamType->isSpecificBuiltinType(BuiltinType::Int) && !ParamType->isDependentType()) return Diag(LastParam->getLocation(), diag::err_operator_overload_post_incdec_must_be_int) << LastParam->getType() << (Op == OO_MinusMinus); } return false; } /// CheckLiteralOperatorDeclaration - Check whether the declaration /// of this literal operator function is well-formed. If so, returns /// false; otherwise, emits appropriate diagnostics and returns true. bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) { if (isa<CXXMethodDecl>(FnDecl)) { Diag(FnDecl->getLocation(), diag::err_literal_operator_outside_namespace) << FnDecl->getDeclName(); return true; } if (FnDecl->isExternC()) { Diag(FnDecl->getLocation(), diag::err_literal_operator_extern_c); return true; } bool Valid = false; // This might be the definition of a literal operator template. FunctionTemplateDecl *TpDecl = FnDecl->getDescribedFunctionTemplate(); // This might be a specialization of a literal operator template. if (!TpDecl) TpDecl = FnDecl->getPrimaryTemplate(); // template <char...> type operator "" name() and // template <class T, T...> type operator "" name() are the only valid // template signatures, and the only valid signatures with no parameters. if (TpDecl) { if (FnDecl->param_size() == 0) { // Must have one or two template parameters TemplateParameterList *Params = TpDecl->getTemplateParameters(); if (Params->size() == 1) { NonTypeTemplateParmDecl *PmDecl = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(0)); // The template parameter must be a char parameter pack. if (PmDecl && PmDecl->isTemplateParameterPack() && Context.hasSameType(PmDecl->getType(), Context.CharTy)) Valid = true; } else if (Params->size() == 2) { TemplateTypeParmDecl *PmType = dyn_cast<TemplateTypeParmDecl>(Params->getParam(0)); NonTypeTemplateParmDecl *PmArgs = dyn_cast<NonTypeTemplateParmDecl>(Params->getParam(1)); // The second template parameter must be a parameter pack with the // first template parameter as its type. if (PmType && PmArgs && !PmType->isTemplateParameterPack() && PmArgs->isTemplateParameterPack()) { const TemplateTypeParmType *TArgs = PmArgs->getType()->getAs<TemplateTypeParmType>(); if (TArgs && TArgs->getDepth() == PmType->getDepth() && TArgs->getIndex() == PmType->getIndex()) { Valid = true; if (ActiveTemplateInstantiations.empty()) Diag(FnDecl->getLocation(), diag::ext_string_literal_operator_template); } } } } } else if (FnDecl->param_size()) { // Check the first parameter FunctionDecl::param_iterator Param = FnDecl->param_begin(); QualType T = (*Param)->getType().getUnqualifiedType(); // unsigned long long int, long double, and any character type are allowed // as the only parameters. if (Context.hasSameType(T, Context.UnsignedLongLongTy) || Context.hasSameType(T, Context.LongDoubleTy) || Context.hasSameType(T, Context.CharTy) || Context.hasSameType(T, Context.WideCharTy) || Context.hasSameType(T, Context.Char16Ty) || Context.hasSameType(T, Context.Char32Ty)) { if (++Param == FnDecl->param_end()) Valid = true; goto FinishedParams; } // Otherwise it must be a pointer to const; let's strip those qualifiers. 
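  // e.g. the raw-literal form 'T operator"" _x(const char *);' and the
  // string-literal form 'T operator"" _x(const char *, std::size_t);'
  // (the suffix '_x' is made up for exposition) both reach this path with a
  // pointer-to-const parameter.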
const PointerType *PT = T->getAs<PointerType>(); if (!PT) goto FinishedParams; T = PT->getPointeeType(); if (!T.isConstQualified() || T.isVolatileQualified()) goto FinishedParams; T = T.getUnqualifiedType(); // Move on to the second parameter; ++Param; // If there is no second parameter, the first must be a const char * if (Param == FnDecl->param_end()) { if (Context.hasSameType(T, Context.CharTy)) Valid = true; goto FinishedParams; } // const char *, const wchar_t*, const char16_t*, and const char32_t* // are allowed as the first parameter to a two-parameter function if (!(Context.hasSameType(T, Context.CharTy) || Context.hasSameType(T, Context.WideCharTy) || Context.hasSameType(T, Context.Char16Ty) || Context.hasSameType(T, Context.Char32Ty))) goto FinishedParams; // The second and final parameter must be an std::size_t T = (*Param)->getType().getUnqualifiedType(); if (Context.hasSameType(T, Context.getSizeType()) && ++Param == FnDecl->param_end()) Valid = true; } // FIXME: This diagnostic is absolutely terrible. FinishedParams: if (!Valid) { Diag(FnDecl->getLocation(), diag::err_literal_operator_params) << FnDecl->getDeclName(); return true; } // A parameter-declaration-clause containing a default argument is not // equivalent to any of the permitted forms. for (auto Param : FnDecl->params()) { if (Param->hasDefaultArg()) { Diag(Param->getDefaultArgRange().getBegin(), diag::err_literal_operator_default_argument) << Param->getDefaultArgRange(); break; } } StringRef LiteralName = FnDecl->getDeclName().getCXXLiteralIdentifier()->getName(); if (LiteralName[0] != '_') { // C++11 [usrlit.suffix]p1: // Literal suffix identifiers that do not start with an underscore // are reserved for future standardization. Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved) << NumericLiteralParser::isValidUDSuffix(getLangOpts(), LiteralName); } return false; } /// ActOnStartLinkageSpecification - Parsed the beginning of a C++ /// linkage specification, including the language and (if present) /// the '{'. ExternLoc is the location of the 'extern', Lang is the /// language string literal. LBraceLoc, if valid, provides the location of /// the '{' brace. Otherwise, this linkage specification does not /// have any braces. Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc) { StringLiteral *Lit = cast<StringLiteral>(LangStr); if (!Lit->isAscii()) { Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_not_ascii) << LangStr->getSourceRange(); return nullptr; } StringRef Lang = Lit->getString(); LinkageSpecDecl::LanguageIDs Language; if (Lang == "C") Language = LinkageSpecDecl::lang_c; else if (Lang == "C++") Language = LinkageSpecDecl::lang_cxx; else { Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_unknown) << LangStr->getSourceRange(); return nullptr; } // FIXME: Add all the various semantics of linkage specifications LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext, ExternLoc, LangStr->getExprLoc(), Language, LBraceLoc.isValid()); CurContext->addDecl(D); PushDeclContext(S, D); return D; } /// ActOnFinishLinkageSpecification - Complete the definition of /// the C++ linkage specification LinkageSpec. If RBraceLoc is /// valid, it's the position of the closing '}' brace in a linkage /// specification that uses braces. 
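///
/// Illustrative forms (hypothetical user code):
/// \code
///   extern "C" { void f(int); }   // braced form; RBraceLoc is valid
///   extern "C" void g(int);       // single-declaration form; no '}'
/// \endcode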
Decl *Sema::ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc) { if (RBraceLoc.isValid()) { LinkageSpecDecl* LSDecl = cast<LinkageSpecDecl>(LinkageSpec); LSDecl->setRBraceLoc(RBraceLoc); } PopDeclContext(); return LinkageSpec; } Decl *Sema::ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc) { Decl *ED = EmptyDecl::Create(Context, CurContext, SemiLoc); // Attribute declarations appertain to empty declaration so we handle // them here. if (AttrList) ProcessDeclAttributeList(S, ED, AttrList); CurContext->addDecl(ED); return ED; } /// \brief Perform semantic analysis for the variable declaration that /// occurs within a C++ catch clause, returning the newly-created /// variable. VarDecl *Sema::BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation Loc, IdentifierInfo *Name) { bool Invalid = false; QualType ExDeclType = TInfo->getType(); // Arrays and functions decay. if (ExDeclType->isArrayType()) ExDeclType = Context.getArrayDecayedType(ExDeclType); else if (ExDeclType->isFunctionType()) ExDeclType = Context.getPointerType(ExDeclType); // C++ 15.3p1: The exception-declaration shall not denote an incomplete type. // The exception-declaration shall not denote a pointer or reference to an // incomplete type, other than [cv] void*. // N2844 forbids rvalue references. if (!ExDeclType->isDependentType() && ExDeclType->isRValueReferenceType()) { Diag(Loc, diag::err_catch_rvalue_ref); Invalid = true; } QualType BaseType = ExDeclType; int Mode = 0; // 0 for direct type, 1 for pointer, 2 for reference unsigned DK = diag::err_catch_incomplete; if (const PointerType *Ptr = BaseType->getAs<PointerType>()) { BaseType = Ptr->getPointeeType(); Mode = 1; DK = diag::err_catch_incomplete_ptr; } else if (const ReferenceType *Ref = BaseType->getAs<ReferenceType>()) { // For the purpose of error recovery, we treat rvalue refs like lvalue refs. BaseType = Ref->getPointeeType(); Mode = 2; DK = diag::err_catch_incomplete_ref; } if (!Invalid && (Mode == 0 || !BaseType->isVoidType()) && !BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK)) Invalid = true; if (!Invalid && !ExDeclType->isDependentType() && RequireNonAbstractType(Loc, ExDeclType, diag::err_abstract_type_in_decl, AbstractVariableType)) Invalid = true; // Only the non-fragile NeXT runtime currently supports C++ catches // of ObjC types, and no runtime supports catching ObjC types by value. if (!Invalid && getLangOpts().ObjC1) { QualType T = ExDeclType; if (const ReferenceType *RT = T->getAs<ReferenceType>()) T = RT->getPointeeType(); if (T->isObjCObjectType()) { Diag(Loc, diag::err_objc_object_catch); Invalid = true; } else if (T->isObjCObjectPointerType()) { // FIXME: should this be a test for macosx-fragile specifically? if (getLangOpts().ObjCRuntime.isFragile()) Diag(Loc, diag::warn_objc_pointer_cxx_catch_fragile); } } VarDecl *ExDecl = VarDecl::Create(Context, CurContext, StartLoc, Loc, Name, ExDeclType, TInfo, SC_None); ExDecl->setExceptionVariable(true); // In ARC, infer 'retaining' for variables of retainable type. if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(ExDecl)) Invalid = true; if (!Invalid && !ExDeclType->isDependentType()) { if (const RecordType *recordType = ExDeclType->getAs<RecordType>()) { // Insulate this from anything else we might currently be parsing. 
EnterExpressionEvaluationContext scope(*this, PotentiallyEvaluated); // C++ [except.handle]p16: // The object declared in an exception-declaration or, if the // exception-declaration does not specify a name, a temporary (12.2) is // copy-initialized (8.5) from the exception object. [...] // The object is destroyed when the handler exits, after the destruction // of any automatic objects initialized within the handler. // // We just pretend to initialize the object with itself, then make sure // it can be destroyed later. QualType initType = Context.getExceptionObjectType(ExDeclType); InitializedEntity entity = InitializedEntity::InitializeVariable(ExDecl); InitializationKind initKind = InitializationKind::CreateCopy(Loc, SourceLocation()); Expr *opaqueValue = new (Context) OpaqueValueExpr(Loc, initType, VK_LValue, OK_Ordinary); InitializationSequence sequence(*this, entity, initKind, opaqueValue); ExprResult result = sequence.Perform(*this, entity, initKind, opaqueValue); if (result.isInvalid()) Invalid = true; else { // If the constructor used was non-trivial, set this as the // "initializer". CXXConstructExpr *construct = result.getAs<CXXConstructExpr>(); if (!construct->getConstructor()->isTrivial()) { Expr *init = MaybeCreateExprWithCleanups(construct); ExDecl->setInit(init); } // And make sure it's destructable. FinalizeVarWithDestructor(ExDecl, recordType); } } } if (Invalid) ExDecl->setInvalidDecl(); return ExDecl; } /// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch /// handler. Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) { TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); bool Invalid = D.isInvalidType(); // Check for unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo, UPPC_ExceptionType)) { TInfo = Context.getTrivialTypeSourceInfo(Context.IntTy, D.getIdentifierLoc()); Invalid = true; } IdentifierInfo *II = D.getIdentifier(); if (NamedDecl *PrevDecl = LookupSingleName(S, II, D.getIdentifierLoc(), LookupOrdinaryName, ForRedeclaration)) { // The scope should be freshly made just for us. There is just no way // it contains any previous declaration, except for function parameters in // a function-try-block's catch statement. assert(!S->isDeclScope(PrevDecl)); if (isDeclInScope(PrevDecl, CurContext, S)) { Diag(D.getIdentifierLoc(), diag::err_redefinition) << D.getIdentifier(); Diag(PrevDecl->getLocation(), diag::note_previous_definition); Invalid = true; } else if (PrevDecl->isTemplateParameter()) // Maybe we will complain about the shadowed template parameter. DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl); } if (D.getCXXScopeSpec().isSet() && !Invalid) { Diag(D.getIdentifierLoc(), diag::err_qualified_catch_declarator) << D.getCXXScopeSpec().getRange(); Invalid = true; } VarDecl *ExDecl = BuildExceptionDeclaration(S, TInfo, D.getLocStart(), D.getIdentifierLoc(), D.getIdentifier()); if (Invalid) ExDecl->setInvalidDecl(); // Add the exception declaration into this scope. if (II) PushOnScopeChains(ExDecl, S); else CurContext->addDecl(ExDecl); ProcessDeclAttributes(S, ExDecl, D); return ExDecl; } Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc) { StringLiteral *AssertMessage = AssertMessageExpr ? 
cast<StringLiteral>(AssertMessageExpr) : nullptr;

  if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
    return nullptr;

  return BuildStaticAssertDeclaration(StaticAssertLoc, AssertExpr,
                                      AssertMessage, RParenLoc, false);
}

Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                         Expr *AssertExpr,
                                         StringLiteral *AssertMessage,
                                         SourceLocation RParenLoc,
                                         bool Failed) {
  assert(AssertExpr != nullptr && "Expected non-null condition");
  if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent() &&
      !Failed) {
    // In a static_assert-declaration, the constant-expression shall be a
    // constant expression that can be contextually converted to bool.
    ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
    if (Converted.isInvalid())
      Failed = true;

    llvm::APSInt Cond;
    if (!Failed && VerifyIntegerConstantExpression(Converted.get(), &Cond,
          diag::err_static_assert_expression_is_not_constant,
          /*AllowFold=*/false).isInvalid())
      Failed = true;

    if (!Failed && !Cond) {
      SmallString<256> MsgBuffer;
      llvm::raw_svector_ostream Msg(MsgBuffer);
      if (AssertMessage)
        AssertMessage->printPretty(Msg, nullptr, getPrintingPolicy());
      Diag(StaticAssertLoc, diag::err_static_assert_failed)
          << !AssertMessage << Msg.str() << AssertExpr->getSourceRange();
      Failed = true;
    }
  }

  Decl *Decl = StaticAssertDecl::Create(Context, CurContext, StaticAssertLoc,
                                        AssertExpr, AssertMessage, RParenLoc,
                                        Failed);

  CurContext->addDecl(Decl);
  return Decl;
}

/// \brief Perform semantic analysis of the given friend type declaration.
///
/// \returns A friend declaration corresponding to the given type.
FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation LocStart,
                                      SourceLocation FriendLoc,
                                      TypeSourceInfo *TSInfo) {
  assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");

  QualType T = TSInfo->getType();
  SourceRange TypeRange = TSInfo->getTypeLoc().getLocalSourceRange();

  // C++03 [class.friend]p2:
  //   An elaborated-type-specifier shall be used in a friend declaration
  //   for a class.*
  //
  //   * The class-key of the elaborated-type-specifier is required.
  if (!ActiveTemplateInstantiations.empty()) {
    // Do not complain about the form of friend template types during
    // template instantiation; we will already have complained when the
    // template was declared.
  } else {
    if (!T->isElaboratedTypeSpecifier()) {
      // If we evaluated the type to a record type, suggest putting
      // a tag in front.
      if (const RecordType *RT = T->getAs<RecordType>()) {
        RecordDecl *RD = RT->getDecl();

        SmallString<16> InsertionText(" ");
        InsertionText += RD->getKindName();

        Diag(TypeRange.getBegin(),
             getLangOpts().CPlusPlus11 ?
               diag::warn_cxx98_compat_unelaborated_friend_type :
               diag::ext_unelaborated_friend_type)
            << (unsigned) RD->getTagKind() << T
            << FixItHint::CreateInsertion(PP.getLocForEndOfToken(FriendLoc),
                                          InsertionText);
      } else {
        Diag(FriendLoc,
             getLangOpts().CPlusPlus11 ?
               diag::warn_cxx98_compat_nonclass_type_friend :
               diag::ext_nonclass_type_friend)
            << T << TypeRange;
      }
    } else if (T->getAs<EnumType>()) {
      Diag(FriendLoc,
           getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_enum_friend : diag::ext_enum_friend) << T << TypeRange; } // C++11 [class.friend]p3: // A friend declaration that does not declare a function shall have one // of the following forms: // friend elaborated-type-specifier ; // friend simple-type-specifier ; // friend typename-specifier ; if (getLangOpts().CPlusPlus11 && LocStart != FriendLoc) Diag(FriendLoc, diag::err_friend_not_first_in_declaration) << T; } // If the type specifier in a friend declaration designates a (possibly // cv-qualified) class type, that class is declared as a friend; otherwise, // the friend declaration is ignored. return FriendDecl::Create(Context, CurContext, TSInfo->getTypeLoc().getLocStart(), TSInfo, FriendLoc); } /// Handle a friend tag declaration where the scope specifier was /// templated. Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists) { TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec); bool isExplicitSpecialization = false; bool Invalid = false; if (TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier( TagLoc, NameLoc, SS, nullptr, TempParamLists, /*friend*/ true, isExplicitSpecialization, Invalid)) { if (TemplateParams->size() > 0) { // This is a declaration of a class template. if (Invalid) return nullptr; return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc, Attr, TemplateParams, AS_public, /*ModulePrivateLoc=*/SourceLocation(), FriendLoc, TempParamLists.size() - 1, TempParamLists.data()).get(); } else { // The "template<>" header is extraneous. Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams) << TypeWithKeyword::getTagTypeKindName(Kind) << Name; isExplicitSpecialization = true; } } if (Invalid) return nullptr; bool isAllExplicitSpecializations = true; for (unsigned I = TempParamLists.size(); I-- > 0; ) { if (TempParamLists[I]->size()) { isAllExplicitSpecializations = false; break; } } // FIXME: don't ignore attributes. // If it's explicit specializations all the way down, just forget // about the template header and build an appropriate non-templated // friend. TODO: for source fidelity, remember the headers. 
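  // Illustrative sketch (hypothetical user code): every supplied header is an
  // empty 'template <>', so the declaration befriends one concrete
  // specialization rather than a template:
  //
  //   template <class T> struct A { template <class U> struct B; };
  //   class C {
  //     template <> template <> friend class A<int>::B<int>;
  //   };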
if (isAllExplicitSpecializations) { if (SS.isEmpty()) { bool Owned = false; bool IsDependent = false; return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc, Attr, AS_public, /*ModulePrivateLoc=*/SourceLocation(), MultiTemplateParamsArg(), Owned, IsDependent, /*ScopedEnumKWLoc=*/SourceLocation(), /*ScopedEnumUsesClassTag=*/false, /*UnderlyingType=*/TypeResult(), /*IsTypeSpecifier=*/false); } NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context); ElaboratedTypeKeyword Keyword = TypeWithKeyword::getKeywordForTagTypeKind(Kind); QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc, *Name, NameLoc); if (T.isNull()) return nullptr; TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T); if (isa<DependentNameType>(T)) { DependentNameTypeLoc TL = TSI->getTypeLoc().castAs<DependentNameTypeLoc>(); TL.setElaboratedKeywordLoc(TagLoc); TL.setQualifierLoc(QualifierLoc); TL.setNameLoc(NameLoc); } else { ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>(); TL.setElaboratedKeywordLoc(TagLoc); TL.setQualifierLoc(QualifierLoc); TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(NameLoc); } FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc, TSI, FriendLoc, TempParamLists); Friend->setAccess(AS_public); CurContext->addDecl(Friend); return Friend; } assert(SS.isNotEmpty() && "valid templated tag with no SS and no direct?"); // Handle the case of a templated-scope friend class. e.g. // template <class T> class A<T>::B; // FIXME: we don't support these right now. Diag(NameLoc, diag::warn_template_qualified_friend_unsupported) << SS.getScopeRep() << SS.getRange() << cast<CXXRecordDecl>(CurContext); ElaboratedTypeKeyword ETK = TypeWithKeyword::getKeywordForTagTypeKind(Kind); QualType T = Context.getDependentNameType(ETK, SS.getScopeRep(), Name); TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T); DependentNameTypeLoc TL = TSI->getTypeLoc().castAs<DependentNameTypeLoc>(); TL.setElaboratedKeywordLoc(TagLoc); TL.setQualifierLoc(SS.getWithLocInContext(Context)); TL.setNameLoc(NameLoc); FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc, TSI, FriendLoc, TempParamLists); Friend->setAccess(AS_public); Friend->setUnsupportedFriend(true); CurContext->addDecl(Friend); return Friend; } /// Handle a friend type declaration. This works in tandem with /// ActOnTag. /// /// Notes on friend class templates: /// /// We generally treat friend class declarations as if they were /// declaring a class. So, for example, the elaborated type specifier /// in a friend declaration is required to obey the restrictions of a /// class-head (i.e. no typedefs in the scope chain), template /// parameters are required to match up with simple template-ids, &c. /// However, unlike when declaring a template specialization, it's /// okay to refer to a template specialization without an empty /// template parameter declaration, e.g. /// friend class A<T>::B<unsigned>; /// We permit this as a special case; if there are any template /// parameters present at all, require proper matching, i.e. /// template <> template \<class T> friend class A<int>::B; Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TempParams) { SourceLocation Loc = DS.getLocStart(); assert(DS.isFriendSpecified()); assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified); // Try to convert the decl specifier to a type. This works for // friend templates because ActOnTag never produces a ClassTemplateDecl // for a TUK_Friend. 
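  // Illustrative sketch (hypothetical user code) of friend type declarations
  // that flow through here:
  //
  //   class C {
  //     friend class D;  // friend elaborated-type-specifier
  //     friend D;        // C++11 friend simple-type-specifier
  //   };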
Declarator TheDeclarator(DS, Declarator::MemberContext); TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S); QualType T = TSI->getType(); if (TheDeclarator.isInvalidType()) return nullptr; if (DiagnoseUnexpandedParameterPack(Loc, TSI, UPPC_FriendDeclaration)) return nullptr; // This is definitely an error in C++98. It's probably meant to // be forbidden in C++0x, too, but the specification is just // poorly written. // // The problem is with declarations like the following: // template <T> friend A<T>::foo; // where deciding whether a class C is a friend or not now hinges // on whether there exists an instantiation of A that causes // 'foo' to equal C. There are restrictions on class-heads // (which we declare (by fiat) elaborated friend declarations to // be) that makes this tractable. // // FIXME: handle "template <> friend class A<T>;", which // is possibly well-formed? Who even knows? if (TempParams.size() && !T->isElaboratedTypeSpecifier()) { Diag(Loc, diag::err_tagless_friend_type_template) << DS.getSourceRange(); return nullptr; } // C++98 [class.friend]p1: A friend of a class is a function // or class that is not a member of the class . . . // This is fixed in DR77, which just barely didn't make the C++03 // deadline. It's also a very silly restriction that seriously // affects inner classes and which nobody else seems to implement; // thus we never diagnose it, not even in -pedantic. // // But note that we could warn about it: it's always useless to // friend one of your own members (it's not, however, worthless to // friend a member of an arbitrary specialization of your template). Decl *D; if (unsigned NumTempParamLists = TempParams.size()) D = FriendTemplateDecl::Create(Context, CurContext, Loc, NumTempParamLists, TempParams.data(), TSI, DS.getFriendSpecLoc()); else D = CheckFriendTypeDecl(Loc, DS.getFriendSpecLoc(), TSI); if (!D) return nullptr; D->setAccess(AS_public); CurContext->addDecl(D); return D; } NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams) { const DeclSpec &DS = D.getDeclSpec(); assert(DS.isFriendSpecified()); assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified); SourceLocation Loc = D.getIdentifierLoc(); TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); // C++ [class.friend]p1 // A friend of a class is a function or class.... // Note that this sees through typedefs, which is intended. // It *doesn't* see through dependent types, which is correct // according to [temp.arg.type]p3: // If a declaration acquires a function type through a // type dependent on a template-parameter and this causes // a declaration that does not use the syntactic form of a // function declarator to have a function type, the program // is ill-formed. if (!TInfo->getType()->isFunctionType()) { Diag(Loc, diag::err_unexpected_friend); // It might be worthwhile to try to recover by creating an // appropriate declaration. return nullptr; } // C++ [namespace.memdef]p3 // - If a friend declaration in a non-local class first declares a // class or function, the friend class or function is a member // of the innermost enclosing namespace. // - The name of the friend is not found by simple name lookup // until a matching declaration is provided in that namespace // scope (either before or after the class declaration granting // friendship). 
// - If a friend function is called, its name may be found by the // name lookup that considers functions from namespaces and // classes associated with the types of the function arguments. // - When looking for a prior declaration of a class or a function // declared as a friend, scopes outside the innermost enclosing // namespace scope are not considered. CXXScopeSpec &SS = D.getCXXScopeSpec(); DeclarationNameInfo NameInfo = GetNameForDeclarator(D); DeclarationName Name = NameInfo.getName(); assert(Name); // Check for unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(Loc, TInfo, UPPC_FriendDeclaration) || DiagnoseUnexpandedParameterPack(NameInfo, UPPC_FriendDeclaration) || DiagnoseUnexpandedParameterPack(SS, UPPC_FriendDeclaration)) return nullptr; // The context we found the declaration in, or in which we should // create the declaration. DeclContext *DC; Scope *DCScope = S; LookupResult Previous(*this, NameInfo, LookupOrdinaryName, ForRedeclaration); // There are five cases here. // - There's no scope specifier and we're in a local class. Only look // for functions declared in the immediately-enclosing block scope. // We recover from invalid scope qualifiers as if they just weren't there. FunctionDecl *FunctionContainingLocalClass = nullptr; if ((SS.isInvalid() || !SS.isSet()) && (FunctionContainingLocalClass = cast<CXXRecordDecl>(CurContext)->isLocalClass())) { // C++11 [class.friend]p11: // If a friend declaration appears in a local class and the name // specified is an unqualified name, a prior declaration is // looked up without considering scopes that are outside the // innermost enclosing non-class scope. For a friend function // declaration, if there is no prior declaration, the program is // ill-formed. // Find the innermost enclosing non-class scope. This is the block // scope containing the local class definition (or for a nested class, // the outer local class). DCScope = S->getFnParent(); // Look up the function name in the scope. Previous.clear(LookupLocalFriendName); LookupName(Previous, S, /*AllowBuiltinCreation*/false); if (!Previous.empty()) { // All possible previous declarations must have the same context: // either they were declared at block scope or they are members of // one of the enclosing local classes. DC = Previous.getRepresentativeDecl()->getDeclContext(); } else { // This is ill-formed, but provide the context that we would have // declared the function in, if we were permitted to, for error recovery. DC = FunctionContainingLocalClass; } adjustContextForLocalExternDecl(DC); // C++ [class.friend]p6: // A function can be defined in a friend declaration of a class if and // only if the class is a non-local class (9.8), the function name is // unqualified, and the function has namespace scope. if (D.isFunctionDefinition()) { Diag(NameInfo.getBeginLoc(), diag::err_friend_def_in_local_class); } // - There's no scope specifier, in which case we just go to the // appropriate scope and look for a function or function template // there as appropriate. } else if (SS.isInvalid() || !SS.isSet()) { // C++11 [namespace.memdef]p3: // If the name in a friend declaration is neither qualified nor // a template-id and the declaration is a function or an // elaborated-type-specifier, the lookup to determine whether // the entity has been previously declared shall not consider // any scopes outside the innermost enclosing namespace. bool isTemplateId = D.getName().getKind() == UnqualifiedId::IK_TemplateId; // Find the appropriate context according to the above. 
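    // Illustrative sketch (hypothetical user code) for this unqualified case:
    //
    //   namespace N {
    //     class C { friend void f(); };  // first declares N::f, but N::f
    //   }                                // stays invisible to ordinary
    //   namespace N { void f() {} }      // lookup until redeclared at
    //                                    // namespace scope.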
DC = CurContext; // Skip class contexts. If someone can cite chapter and verse // for this behavior, that would be nice --- it's what GCC and // EDG do, and it seems like a reasonable intent, but the spec // really only says that checks for unqualified existing // declarations should stop at the nearest enclosing namespace, // not that they should only consider the nearest enclosing // namespace. while (DC->isRecord()) DC = DC->getParent(); DeclContext *LookupDC = DC; while (LookupDC->isTransparentContext()) LookupDC = LookupDC->getParent(); while (true) { LookupQualifiedName(Previous, LookupDC); if (!Previous.empty()) { DC = LookupDC; break; } if (isTemplateId) { if (isa<TranslationUnitDecl>(LookupDC)) break; } else { if (LookupDC->isFileContext()) break; } LookupDC = LookupDC->getParent(); } DCScope = getScopeForDeclContext(S, DC); // - There's a non-dependent scope specifier, in which case we // compute it and do a previous lookup there for a function // or function template. } else if (!SS.getScopeRep()->isDependent()) { DC = computeDeclContext(SS); if (!DC) return nullptr; if (RequireCompleteDeclContext(SS, DC)) return nullptr; LookupQualifiedName(Previous, DC); // Ignore things found implicitly in the wrong scope. // TODO: better diagnostics for this case. Suggesting the right // qualified scope would be nice... LookupResult::Filter F = Previous.makeFilter(); while (F.hasNext()) { NamedDecl *D = F.next(); if (!DC->InEnclosingNamespaceSetOf( D->getDeclContext()->getRedeclContext())) F.erase(); } F.done(); if (Previous.empty()) { D.setInvalidType(); Diag(Loc, diag::err_qualified_friend_not_found) << Name << TInfo->getType(); return nullptr; } // C++ [class.friend]p1: A friend of a class is a function or // class that is not a member of the class . . . if (DC->Equals(CurContext)) Diag(DS.getFriendSpecLoc(), getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_friend_is_member : diag::err_friend_is_member); if (D.isFunctionDefinition()) { // C++ [class.friend]p6: // A function can be defined in a friend declaration of a class if and // only if the class is a non-local class (9.8), the function name is // unqualified, and the function has namespace scope. SemaDiagnosticBuilder DB = Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def); DB << SS.getScopeRep(); if (DC->isFileContext()) DB << FixItHint::CreateRemoval(SS.getRange()); SS.clear(); } // - There's a scope specifier that does not match any template // parameter lists, in which case we use some arbitrary context, // create a method or method template, and wait for instantiation. // - There's a scope specifier that does match some template // parameter lists, which we don't handle right now. } else { if (D.isFunctionDefinition()) { // C++ [class.friend]p6: // A function can be defined in a friend declaration of a class if and // only if the class is a non-local class (9.8), the function name is // unqualified, and the function has namespace scope. Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def) << SS.getScopeRep(); } DC = CurContext; assert(isa<CXXRecordDecl>(DC) && "friend declaration not in class?"); } if (!DC->isRecord()) { // This implies that it has to be an operator or function. if (D.getName().getKind() == UnqualifiedId::IK_ConstructorName || D.getName().getKind() == UnqualifiedId::IK_DestructorName || D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId) { Diag(Loc, diag::err_introducing_special_friend) << (D.getName().getKind() == UnqualifiedId::IK_ConstructorName ? 
0 : D.getName().getKind() == UnqualifiedId::IK_DestructorName ? 1 : 2); return nullptr; } } // FIXME: This is an egregious hack to cope with cases where the scope stack // does not contain the declaration context, i.e., in an out-of-line // definition of a class. Scope FakeDCScope(S, Scope::DeclScope, Diags); if (!DCScope) { FakeDCScope.setEntity(DC); DCScope = &FakeDCScope; } bool AddToScope = true; NamedDecl *ND = ActOnFunctionDeclarator(DCScope, D, DC, TInfo, Previous, TemplateParams, AddToScope); if (!ND) return nullptr; assert(ND->getLexicalDeclContext() == CurContext); // If we performed typo correction, we might have added a scope specifier // and changed the decl context. DC = ND->getDeclContext(); // Add the function declaration to the appropriate lookup tables, // adjusting the redeclarations list as necessary. We don't // want to do this yet if the friending class is dependent. // // Also update the scope-based lookup if the target context's // lookup context is in lexical scope. if (!CurContext->isDependentContext()) { DC = DC->getRedeclContext(); DC->makeDeclVisibleInContext(ND); if (Scope *EnclosingScope = getScopeForDeclContext(S, DC)) PushOnScopeChains(ND, EnclosingScope, /*AddToContext=*/ false); } FriendDecl *FrD = FriendDecl::Create(Context, CurContext, D.getIdentifierLoc(), ND, DS.getFriendSpecLoc()); FrD->setAccess(AS_public); CurContext->addDecl(FrD); if (ND->isInvalidDecl()) { FrD->setInvalidDecl(); } else { if (DC->isRecord()) CheckFriendAccess(ND); FunctionDecl *FD; if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND)) FD = FTD->getTemplatedDecl(); else FD = cast<FunctionDecl>(ND); // C++11 [dcl.fct.default]p4: If a friend declaration specifies a // default argument expression, that declaration shall be a definition // and shall be the only declaration of the function or function // template in the translation unit. if (functionDeclHasDefaultArgument(FD)) { if (FunctionDecl *OldFD = FD->getPreviousDecl()) { Diag(FD->getLocation(), diag::err_friend_decl_with_def_arg_redeclared); Diag(OldFD->getLocation(), diag::note_previous_declaration); } else if (!D.isFunctionDefinition()) Diag(FD->getLocation(), diag::err_friend_decl_with_def_arg_must_be_def); } // Mark templated-scope function declarations as unsupported. if (FD->getNumTemplateParameterLists() && SS.isValid()) { Diag(FD->getLocation(), diag::warn_template_qualified_friend_unsupported) << SS.getScopeRep() << SS.getRange() << cast<CXXRecordDecl>(CurContext); FrD->setUnsupportedFriend(true); } } return ND; } void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) { AdjustDeclIfTemplate(Dcl); FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(Dcl); if (!Fn) { Diag(DelLoc, diag::err_deleted_non_function); return; } if (const FunctionDecl *Prev = Fn->getPreviousDecl()) { // Don't consider the implicit declaration we generate for explicit // specializations. FIXME: Do not generate these implicit declarations. if ((Prev->getTemplateSpecializationKind() != TSK_ExplicitSpecialization || Prev->getPreviousDecl()) && !Prev->isDefined()) { Diag(DelLoc, diag::err_deleted_decl_not_first); Diag(Prev->getLocation().isInvalid() ? DelLoc : Prev->getLocation(), Prev->isImplicit() ? diag::note_previous_implicit_declaration : diag::note_previous_declaration); } // If the declaration wasn't the first, we delete the function anyway for // recovery. Fn = Fn->getCanonicalDecl(); } // dllimport/dllexport cannot be deleted. 
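  // Illustrative sketch (hypothetical user code) of what the check below
  // rejects:
  //
  //   __declspec(dllimport) void f();
  //   void f() = delete;  // error: a dllimport function cannot be deleted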
if (const InheritableAttr *DLLAttr = getDLLAttr(Fn)) {
    Diag(Fn->getLocation(), diag::err_attribute_dll_deleted) << DLLAttr;
    Fn->setInvalidDecl();
  }

  if (Fn->isDeleted())
    return;

  // See if we're deleting a function which is already known to override a
  // non-deleted virtual function.
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn)) {
    bool IssuedDiagnostic = false;
    for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
                                        E = MD->end_overridden_methods();
         I != E; ++I) {
      // Check the overridden method visited by this iteration, not just the
      // first overridden method.
      if (!(*I)->isDeleted()) {
        if (!IssuedDiagnostic) {
          Diag(DelLoc, diag::err_deleted_override) << MD->getDeclName();
          IssuedDiagnostic = true;
        }
        Diag((*I)->getLocation(), diag::note_overridden_virtual_function);
      }
    }
  }

  // C++11 [basic.start.main]p3:
  //   A program that defines main as deleted [...] is ill-formed.
  if (Fn->isMain())
    Diag(DelLoc, diag::err_deleted_main);

  Fn->setDeletedAsWritten();
}

void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
  CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Dcl);

  if (MD) {
    if (MD->getParent()->isDependentType()) {
      MD->setDefaulted();
      MD->setExplicitlyDefaulted();
      return;
    }

    CXXSpecialMember Member = getSpecialMember(MD);
    if (Member == CXXInvalid) {
      if (!MD->isInvalidDecl())
        Diag(DefaultLoc, diag::err_default_special_members);
      return;
    }

    MD->setDefaulted();
    MD->setExplicitlyDefaulted();

    // If this definition appears within the record, do the checking when
    // the record is complete.
    const FunctionDecl *Primary = MD;
    if (const FunctionDecl *Pattern = MD->getTemplateInstantiationPattern())
      // Find the uninstantiated declaration that actually had the '= default'
      // on it.
      Pattern->isDefined(Primary);

    // If the method was defaulted on its first declaration, we will have
    // already performed the checking in CheckCompletedCXXClass. Such a
    // declaration doesn't trigger an implicit definition.
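    // Illustrative sketch (hypothetical): only the out-of-line form reaches
    // the checking and implicit definition below.
    //
    //   struct A { A() = default; };  // defaulted on its first declaration
    //   struct B { B(); };
    //   B::B() = default;             // defaulted on a later declaration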
if (Primary == Primary->getCanonicalDecl()) return; CheckExplicitlyDefaultedSpecialMember(MD); if (MD->isInvalidDecl()) return; switch (Member) { case CXXDefaultConstructor: DefineImplicitDefaultConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD)); break; case CXXCopyConstructor: DefineImplicitCopyConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD)); break; case CXXCopyAssignment: DefineImplicitCopyAssignment(DefaultLoc, MD); break; case CXXDestructor: DefineImplicitDestructor(DefaultLoc, cast<CXXDestructorDecl>(MD)); break; case CXXMoveConstructor: DefineImplicitMoveConstructor(DefaultLoc, cast<CXXConstructorDecl>(MD)); break; case CXXMoveAssignment: DefineImplicitMoveAssignment(DefaultLoc, MD); break; case CXXInvalid: llvm_unreachable("Invalid special member."); } } else { Diag(DefaultLoc, diag::err_default_special_members); } } static void SearchForReturnInStmt(Sema &Self, Stmt *S) { for (Stmt *SubStmt : S->children()) { if (!SubStmt) continue; if (isa<ReturnStmt>(SubStmt)) Self.Diag(SubStmt->getLocStart(), diag::err_return_in_constructor_handler); if (!isa<Expr>(SubStmt)) SearchForReturnInStmt(Self, SubStmt); } } void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) { for (unsigned I = 0, E = TryBlock->getNumHandlers(); I != E; ++I) { CXXCatchStmt *Handler = TryBlock->getHandler(I); SearchForReturnInStmt(*this, Handler); } } bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old) { const FunctionType *NewFT = New->getType()->getAs<FunctionType>(); const FunctionType *OldFT = Old->getType()->getAs<FunctionType>(); CallingConv NewCC = NewFT->getCallConv(), OldCC = OldFT->getCallConv(); // If the calling conventions match, everything is fine if (NewCC == OldCC) return false; // If the calling conventions mismatch because the new function is static, // suppress the calling convention mismatch error; the error about static // function override (err_static_overrides_virtual from // Sema::CheckFunctionDeclaration) is more clear. if (New->getStorageClass() == SC_Static) return false; Diag(New->getLocation(), diag::err_conflicting_overriding_cc_attributes) << New->getDeclName() << New->getType() << Old->getType(); Diag(Old->getLocation(), diag::note_overridden_virtual_function); return true; } bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old) { QualType NewTy = New->getType()->getAs<FunctionType>()->getReturnType(); QualType OldTy = Old->getType()->getAs<FunctionType>()->getReturnType(); if (Context.hasSameType(NewTy, OldTy) || NewTy->isDependentType() || OldTy->isDependentType()) return false; // Check if the return types are covariant QualType NewClassTy, OldClassTy; /// Both types must be pointers or references to classes. if (const PointerType *NewPT = NewTy->getAs<PointerType>()) { if (const PointerType *OldPT = OldTy->getAs<PointerType>()) { NewClassTy = NewPT->getPointeeType(); OldClassTy = OldPT->getPointeeType(); } } else if (const ReferenceType *NewRT = NewTy->getAs<ReferenceType>()) { if (const ReferenceType *OldRT = OldTy->getAs<ReferenceType>()) { if (NewRT->getTypeClass() == OldRT->getTypeClass()) { NewClassTy = NewRT->getPointeeType(); OldClassTy = OldRT->getPointeeType(); } } } // The return types aren't either both pointers or references to a class type. 
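  // Illustrative sketch (hypothetical user code) of the covariant case the
  // checks below validate:
  //
  //   struct Base    { virtual Base *clone() const; };
  //   struct Derived : Base { Derived *clone() const override; };  // OK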
  if (NewClassTy.isNull()) {
    Diag(New->getLocation(),
         diag::err_different_return_type_for_overriding_virtual_function)
        << New->getDeclName() << NewTy << OldTy
        << New->getReturnTypeSourceRange();
    Diag(Old->getLocation(), diag::note_overridden_virtual_function)
        << Old->getReturnTypeSourceRange();

    return true;
  }

  // C++ [class.virtual]p6:
  //   If the return type of D::f differs from the return type of B::f, the
  //   class type in the return type of D::f shall be complete at the point of
  //   declaration of D::f or shall be the class type D.
  if (const RecordType *RT = NewClassTy->getAs<RecordType>()) {
    if (!RT->isBeingDefined() &&
        RequireCompleteType(New->getLocation(), NewClassTy,
                            diag::err_covariant_return_incomplete,
                            New->getDeclName()))
      return true;
  }

  if (!Context.hasSameUnqualifiedType(NewClassTy, OldClassTy)) {
    // Check if the new class derives from the old class.
    if (!IsDerivedFrom(NewClassTy, OldClassTy)) {
      Diag(New->getLocation(), diag::err_covariant_return_not_derived)
          << New->getDeclName() << NewTy << OldTy
          << New->getReturnTypeSourceRange();
      Diag(Old->getLocation(), diag::note_overridden_virtual_function)
          << Old->getReturnTypeSourceRange();
      return true;
    }

    // Check that the conversion from derived to base is valid.
    if (CheckDerivedToBaseConversion(
            NewClassTy, OldClassTy,
            diag::err_covariant_return_inaccessible_base,
            diag::err_covariant_return_ambiguous_derived_to_base_conv,
            New->getLocation(), New->getReturnTypeSourceRange(),
            New->getDeclName(), nullptr)) {
      // FIXME: this note won't trigger for delayed access control
      // diagnostics, and it's impossible to get an undelayed error
      // here from access control during the original parse because
      // the ParsingDeclSpec/ParsingDeclarator are still in scope.
      Diag(Old->getLocation(), diag::note_overridden_virtual_function)
          << Old->getReturnTypeSourceRange();
      return true;
    }
  }

  // The qualifiers of the return types must be the same.
  if (NewTy.getLocalCVRQualifiers() != OldTy.getLocalCVRQualifiers()) {
    Diag(New->getLocation(),
         diag::err_covariant_return_type_different_qualifications)
        << New->getDeclName() << NewTy << OldTy
        << New->getReturnTypeSourceRange();
    Diag(Old->getLocation(), diag::note_overridden_virtual_function)
        << Old->getReturnTypeSourceRange();
    return true;
  }

  // The new class type must have the same or fewer qualifiers than the old
  // class type.
  if (NewClassTy.isMoreQualifiedThan(OldClassTy)) {
    Diag(New->getLocation(),
         diag::err_covariant_return_type_class_type_more_qualified)
        << New->getDeclName() << NewTy << OldTy
        << New->getReturnTypeSourceRange();
    Diag(Old->getLocation(), diag::note_overridden_virtual_function)
        << Old->getReturnTypeSourceRange();
    return true;
  }

  return false;
}

/// \brief Mark the given method pure.
///
/// \param Method the method to be marked pure.
///
/// \param InitRange the source range that covers the "0" initializer.
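///
/// Illustrative declaration that reaches this check (hypothetical):
/// \code
///   struct Shape { virtual void draw() = 0; };  // InitRange covers the "0"
/// \endcode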
bool Sema::CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange) { SourceLocation EndLoc = InitRange.getEnd(); if (EndLoc.isValid()) Method->setRangeEnd(EndLoc); if (Method->isVirtual() || Method->getParent()->isDependentContext()) { Method->setPure(); return false; } if (!Method->isInvalidDecl()) Diag(Method->getLocation(), diag::err_non_virtual_pure) << Method->getDeclName() << InitRange; return true; } void Sema::ActOnPureSpecifier(Decl *D, SourceLocation ZeroLoc) { if (D->getFriendObjectKind()) Diag(D->getLocation(), diag::err_pure_friend); else if (auto *M = dyn_cast<CXXMethodDecl>(D)) CheckPureMethod(M, ZeroLoc); else Diag(D->getLocation(), diag::err_illegal_initializer); } /// \brief Determine whether the given declaration is a static data member. static bool isStaticDataMember(const Decl *D) { if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(D)) return Var->isStaticDataMember(); return false; } /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse /// an initializer for the out-of-line declaration 'Dcl'. The scope /// is a fresh scope pushed for just this purpose. /// /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void Sema::ActOnCXXEnterDeclInitializer(Scope *S, Decl *D) { // If there is no declaration, there was an error parsing it. if (!D || D->isInvalidDecl()) return; // We will always have a nested name specifier here, but this declaration // might not be out of line if the specifier names the current namespace: // extern int n; // int ::n = 0; if (D->isOutOfLine()) EnterDeclaratorContext(S, D->getDeclContext()); // If we are parsing the initializer for a static data member, push a // new expression evaluation context that is associated with this static // data member. if (isStaticDataMember(D)) PushExpressionEvaluationContext(PotentiallyEvaluated, D); } /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the out-of-line declaration 'D'. void Sema::ActOnCXXExitDeclInitializer(Scope *S, Decl *D) { // If there is no declaration, there was an error parsing it. if (!D || D->isInvalidDecl()) return; if (isStaticDataMember(D)) PopExpressionEvaluationContext(); if (D->isOutOfLine()) ExitDeclaratorContext(S); } /// ActOnCXXConditionDeclarationExpr - Parsed a condition declaration of a /// C++ if/switch/while/for statement. /// e.g: "if (int x = f()) {...}" DeclResult Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) { // C++ 6.4p2: // The declarator shall not specify a function or an array. // The type-specifier-seq shall not contain typedef and shall not declare a // new class or enumeration. assert(D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef && "Parser allowed 'typedef' as storage class of condition decl."); Decl *Dcl = ActOnDeclarator(S, D); if (!Dcl) return true; if (isa<FunctionDecl>(Dcl)) { // The declarator shall not specify a function. Diag(Dcl->getLocation(), diag::err_invalid_use_of_function_type) << D.getSourceRange(); return true; } return Dcl; } void Sema::LoadExternalVTableUses() { if (!ExternalSource) return; SmallVector<ExternalVTableUse, 4> VTables; ExternalSource->ReadUsedVTables(VTables); SmallVector<VTableUse, 4> NewUses; for (unsigned I = 0, N = VTables.size(); I != N; ++I) { llvm::DenseMap<CXXRecordDecl *, bool>::iterator Pos = VTablesUsed.find(VTables[I].Record); // Even if a definition wasn't required before, it may be required now. 
if (Pos != VTablesUsed.end()) { if (!Pos->second && VTables[I].DefinitionRequired) Pos->second = true; continue; } VTablesUsed[VTables[I].Record] = VTables[I].DefinitionRequired; NewUses.push_back(VTableUse(VTables[I].Record, VTables[I].Location)); } VTableUses.insert(VTableUses.begin(), NewUses.begin(), NewUses.end()); } void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired) { // Ignore any vtable uses in unevaluated operands or for classes that do // not have a vtable. if (!Class->isDynamicClass() || Class->isDependentContext() || CurContext->isDependentContext() || isUnevaluatedContext()) return; // Try to insert this class into the map. LoadExternalVTableUses(); Class = cast<CXXRecordDecl>(Class->getCanonicalDecl()); std::pair<llvm::DenseMap<CXXRecordDecl *, bool>::iterator, bool> Pos = VTablesUsed.insert(std::make_pair(Class, DefinitionRequired)); if (!Pos.second) { // If we already had an entry, check to see if we are promoting this vtable // to require a definition. If so, we need to reappend to the VTableUses // list, since we may have already processed the first entry. if (DefinitionRequired && !Pos.first->second) { Pos.first->second = true; } else { // Otherwise, we can early exit. return; } } else { // The Microsoft ABI requires that we perform the destructor body // checks (i.e. operator delete() lookup) when the vtable is marked used, as // the deleting destructor is emitted with the vtable, not with the // destructor definition as in the Itanium ABI. // If it has a definition, we do the check at that point instead. if (Context.getTargetInfo().getCXXABI().isMicrosoft() && Class->hasUserDeclaredDestructor() && !Class->getDestructor()->isDefined() && !Class->getDestructor()->isDeleted()) { CXXDestructorDecl *DD = Class->getDestructor(); ContextRAII SavedContext(*this, DD); CheckDestructor(DD); } } // Local classes need to have their virtual members marked // immediately. For all other classes, we mark their virtual members // at the end of the translation unit. if (Class->isLocalClass()) MarkVirtualMembersReferenced(Loc, Class); else VTableUses.push_back(std::make_pair(Class, Loc)); } bool Sema::DefineUsedVTables() { LoadExternalVTableUses(); if (VTableUses.empty()) return false; // Note: The VTableUses vector could grow as a result of marking // the members of a class as "used", so we check the size each // time through the loop and prefer indices (which are stable) to // iterators (which are not). bool DefinedAnything = false; for (unsigned I = 0; I != VTableUses.size(); ++I) { CXXRecordDecl *Class = VTableUses[I].first->getDefinition(); if (!Class) continue; SourceLocation Loc = VTableUses[I].second; bool DefineVTable = true; // If this class has a key function, but that key function is // defined in another translation unit, we don't need to emit the // vtable even though we're using it. const CXXMethodDecl *KeyFunction = Context.getCurrentKeyFunction(Class); if (KeyFunction && !KeyFunction->hasBody()) { // The key function is in another translation unit. DefineVTable = false; TemplateSpecializationKind TSK = KeyFunction->getTemplateSpecializationKind(); assert(TSK != TSK_ExplicitInstantiationDefinition && TSK != TSK_ImplicitInstantiation && "Instantiations don't have key functions"); (void)TSK; } else if (!KeyFunction) { // If we have a class with no key function that is the subject // of an explicit instantiation declaration, suppress the // vtable; it will live with the explicit instantiation // definition. 
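      // Illustrative sketch (hypothetical):
      //
      //   extern template class List<int>;  // instantiation declaration:
      //   template class List<int>;         // suppress the vtable here; it
      //                                     // accompanies this definition,
      //                                     // possibly in another TU.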
bool IsExplicitInstantiationDeclaration = Class->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration; for (auto R : Class->redecls()) { TemplateSpecializationKind TSK = cast<CXXRecordDecl>(R)->getTemplateSpecializationKind(); if (TSK == TSK_ExplicitInstantiationDeclaration) IsExplicitInstantiationDeclaration = true; else if (TSK == TSK_ExplicitInstantiationDefinition) { IsExplicitInstantiationDeclaration = false; break; } } if (IsExplicitInstantiationDeclaration) DefineVTable = false; } // The exception specifications for all virtual members may be needed even // if we are not providing an authoritative form of the vtable in this TU. // We may choose to emit it available_externally anyway. if (!DefineVTable) { MarkVirtualMemberExceptionSpecsNeeded(Loc, Class); continue; } // Mark all of the virtual members of this class as referenced, so // that we can build a vtable. Then, tell the AST consumer that a // vtable for this class is required. DefinedAnything = true; MarkVirtualMembersReferenced(Loc, Class); CXXRecordDecl *Canonical = cast<CXXRecordDecl>(Class->getCanonicalDecl()); if (VTablesUsed[Canonical]) Consumer.HandleVTable(Class); // Optionally warn if we're emitting a weak vtable. if (Class->isExternallyVisible() && Class->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) { const FunctionDecl *KeyFunctionDef = nullptr; if (!KeyFunction || (KeyFunction->hasBody(KeyFunctionDef) && KeyFunctionDef->isInlined())) Diag(Class->getLocation(), Class->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition ? diag::warn_weak_template_vtable : diag::warn_weak_vtable) << Class; } } VTableUses.clear(); return DefinedAnything; } void Sema::MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD) { for (const auto *I : RD->methods()) if (I->isVirtual() && !I->isPure()) ResolveExceptionSpec(Loc, I->getType()->castAs<FunctionProtoType>()); } void Sema::MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD) { // Mark all functions which will appear in RD's vtable as used. CXXFinalOverriderMap FinalOverriders; RD->getFinalOverriders(FinalOverriders); for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(), E = FinalOverriders.end(); I != E; ++I) { for (OverridingMethods::const_iterator OI = I->second.begin(), OE = I->second.end(); OI != OE; ++OI) { assert(OI->second.size() > 0 && "no final overrider"); CXXMethodDecl *Overrider = OI->second.front().Method; // C++ [basic.def.odr]p2: // [...] A virtual member function is used if it is not pure. [...] if (!Overrider->isPure()) MarkFunctionReferenced(Loc, Overrider); } } // Only classes that have virtual bases need a VTT. if (RD->getNumVBases() == 0) return; for (const auto &I : RD->bases()) { const CXXRecordDecl *Base = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); if (Base->getNumVBases() == 0) continue; MarkVirtualMembersReferenced(Loc, Base); } } /// SetIvarInitializers - This routine builds initialization ASTs for the /// Objective-C implementation whose ivars need be initialized. 
void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) { if (!getLangOpts().CPlusPlus) return; if (ObjCInterfaceDecl *OID = ObjCImplementation->getClassInterface()) { SmallVector<ObjCIvarDecl*, 8> ivars; CollectIvarsToConstructOrDestruct(OID, ivars); if (ivars.empty()) return; SmallVector<CXXCtorInitializer*, 32> AllToInit; for (unsigned i = 0; i < ivars.size(); i++) { FieldDecl *Field = ivars[i]; if (Field->isInvalidDecl()) continue; CXXCtorInitializer *Member; InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field); InitializationKind InitKind = InitializationKind::CreateDefault(ObjCImplementation->getLocation()); InitializationSequence InitSeq(*this, InitEntity, InitKind, None); ExprResult MemberInit = InitSeq.Perform(*this, InitEntity, InitKind, None); MemberInit = MaybeCreateExprWithCleanups(MemberInit); // Note, MemberInit could actually come back empty if no initialization // is required (e.g., because it would call a trivial default constructor) if (!MemberInit.get() || MemberInit.isInvalid()) continue; Member = new (Context) CXXCtorInitializer(Context, Field, SourceLocation(), SourceLocation(), MemberInit.getAs<Expr>(), SourceLocation()); AllToInit.push_back(Member); // Be sure that the destructor is accessible and is marked as referenced. if (const RecordType *RecordTy = Context.getBaseElementType(Field->getType()) ->getAs<RecordType>()) { CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) { MarkFunctionReferenced(Field->getLocation(), Destructor); CheckDestructorAccess(Field->getLocation(), Destructor, PDiag(diag::err_access_dtor_ivar) << Context.getBaseElementType(Field->getType())); } } } ObjCImplementation->setIvarInitializers(Context, AllToInit.data(), AllToInit.size()); } } static void DelegatingCycleHelper(CXXConstructorDecl* Ctor, llvm::SmallSet<CXXConstructorDecl*, 4> &Valid, llvm::SmallSet<CXXConstructorDecl*, 4> &Invalid, llvm::SmallSet<CXXConstructorDecl*, 4> &Current, Sema &S) { if (Ctor->isInvalidDecl()) return; CXXConstructorDecl *Target = Ctor->getTargetConstructor(); // Target may not be determinable yet, for instance if this is a dependent // call in an uninstantiated template. if (Target) { const FunctionDecl *FNTarget = nullptr; (void)Target->hasBody(FNTarget); Target = const_cast<CXXConstructorDecl*>( cast_or_null<CXXConstructorDecl>(FNTarget)); } CXXConstructorDecl *Canonical = Ctor->getCanonicalDecl(), // Avoid dereferencing a null pointer here. *TCanonical = Target? Target->getCanonicalDecl() : nullptr; if (!Current.insert(Canonical).second) return; // We know that beyond here, we aren't chaining into a cycle. if (!Target || !Target->isDelegatingConstructor() || Target->isInvalidDecl() || Valid.count(TCanonical)) { Valid.insert(Current.begin(), Current.end()); Current.clear(); // We've hit a cycle. } else if (TCanonical == Canonical || Invalid.count(TCanonical) || Current.count(TCanonical)) { // If we haven't diagnosed this cycle yet, do so now. if (!Invalid.count(TCanonical)) { S.Diag((*Ctor->init_begin())->getSourceLocation(), diag::warn_delegating_ctor_cycle) << Ctor; // Don't add a note for a function delegating directly to itself. 
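      // Illustrative sketch (hypothetical user code) of a cycle this
      // diagnoses:
      //
      //   struct S {
      //     S()      : S(0) {}  // delegates to S(int)...
      //     S(int i) : S()  {}  // ...which delegates back to S()
      //   };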
if (TCanonical != Canonical) S.Diag(Target->getLocation(), diag::note_it_delegates_to); CXXConstructorDecl *C = Target; while (C->getCanonicalDecl() != Canonical) { const FunctionDecl *FNTarget = nullptr; (void)C->getTargetConstructor()->hasBody(FNTarget); assert(FNTarget && "Ctor cycle through bodiless function"); C = const_cast<CXXConstructorDecl*>( cast<CXXConstructorDecl>(FNTarget)); S.Diag(C->getLocation(), diag::note_which_delegates_to); } } Invalid.insert(Current.begin(), Current.end()); Current.clear(); } else { DelegatingCycleHelper(Target, Valid, Invalid, Current, S); } } void Sema::CheckDelegatingCtorCycles() { llvm::SmallSet<CXXConstructorDecl*, 4> Valid, Invalid, Current; for (DelegatingCtorDeclsType::iterator I = DelegatingCtorDecls.begin(ExternalSource), E = DelegatingCtorDecls.end(); I != E; ++I) DelegatingCycleHelper(*I, Valid, Invalid, Current, *this); for (llvm::SmallSet<CXXConstructorDecl *, 4>::iterator CI = Invalid.begin(), CE = Invalid.end(); CI != CE; ++CI) (*CI)->setInvalidDecl(); } namespace { /// \brief AST visitor that finds references to the 'this' expression. class FindCXXThisExpr : public RecursiveASTVisitor<FindCXXThisExpr> { Sema &S; public: explicit FindCXXThisExpr(Sema &S) : S(S) { } bool VisitCXXThisExpr(CXXThisExpr *E) { S.Diag(E->getLocation(), diag::err_this_static_member_func) << E->isImplicit(); return false; } }; } bool Sema::checkThisInStaticMemberFunctionType(CXXMethodDecl *Method) { TypeSourceInfo *TSInfo = Method->getTypeSourceInfo(); if (!TSInfo) return false; TypeLoc TL = TSInfo->getTypeLoc(); FunctionProtoTypeLoc ProtoTL = TL.getAs<FunctionProtoTypeLoc>(); if (!ProtoTL) return false; // C++11 [expr.prim.general]p3: // [The expression this] shall not appear before the optional // cv-qualifier-seq and it shall not appear within the declaration of a // static member function (although its type and value category are defined // within a static member function as they are within a non-static member // function). [ Note: this is because declaration matching does not occur // until the complete declarator is known. - end note ] const FunctionProtoType *Proto = ProtoTL.getTypePtr(); FindCXXThisExpr Finder(*this); // If the return type came after the cv-qualifier-seq, check it now. if (Proto->hasTrailingReturn() && !Finder.TraverseTypeLoc(ProtoTL.getReturnLoc())) return true; // Check the exception specification. if (checkThisInStaticMemberFunctionExceptionSpec(Method)) return true; return checkThisInStaticMemberFunctionAttributes(Method); } bool Sema::checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method) { TypeSourceInfo *TSInfo = Method->getTypeSourceInfo(); if (!TSInfo) return false; TypeLoc TL = TSInfo->getTypeLoc(); FunctionProtoTypeLoc ProtoTL = TL.getAs<FunctionProtoTypeLoc>(); if (!ProtoTL) return false; const FunctionProtoType *Proto = ProtoTL.getTypePtr(); FindCXXThisExpr Finder(*this); switch (Proto->getExceptionSpecType()) { case EST_Unparsed: case EST_Uninstantiated: case EST_Unevaluated: case EST_BasicNoexcept: case EST_DynamicNone: case EST_MSAny: case EST_None: break; case EST_ComputedNoexcept: if (!Finder.TraverseStmt(Proto->getNoexceptExpr())) return true; LLVM_FALLTHROUGH; // HLSL Change case EST_Dynamic: for (const auto &E : Proto->exceptions()) { if (!Finder.TraverseType(E)) return true; } break; } return false; } bool Sema::checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method) { FindCXXThisExpr Finder(*this); // Check attributes. 
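  // Illustrative sketch (hypothetical, using the thread-safety attributes
  // scanned below): in a static member function the argument 'Mu' denotes an
  // implicit 'this->Mu', which FindCXXThisExpr rejects.
  //
  //   struct S {
  //     Mutex Mu;
  //     static void f() EXCLUSIVE_LOCKS_REQUIRED(Mu);  // error: uses 'this'
  //   };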
for (const auto *A : Method->attrs()) { // FIXME: This should be emitted by tblgen. Expr *Arg = nullptr; ArrayRef<Expr *> Args; if (const auto *G = dyn_cast<GuardedByAttr>(A)) Arg = G->getArg(); else if (const auto *G = dyn_cast<PtGuardedByAttr>(A)) Arg = G->getArg(); else if (const auto *AA = dyn_cast<AcquiredAfterAttr>(A)) Args = llvm::makeArrayRef(AA->args_begin(), AA->args_size()); else if (const auto *AB = dyn_cast<AcquiredBeforeAttr>(A)) Args = llvm::makeArrayRef(AB->args_begin(), AB->args_size()); else if (const auto *ETLF = dyn_cast<ExclusiveTrylockFunctionAttr>(A)) { Arg = ETLF->getSuccessValue(); Args = llvm::makeArrayRef(ETLF->args_begin(), ETLF->args_size()); } else if (const auto *STLF = dyn_cast<SharedTrylockFunctionAttr>(A)) { Arg = STLF->getSuccessValue(); Args = llvm::makeArrayRef(STLF->args_begin(), STLF->args_size()); } else if (const auto *LR = dyn_cast<LockReturnedAttr>(A)) Arg = LR->getArg(); else if (const auto *LE = dyn_cast<LocksExcludedAttr>(A)) Args = llvm::makeArrayRef(LE->args_begin(), LE->args_size()); else if (const auto *RC = dyn_cast<RequiresCapabilityAttr>(A)) Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size()); else if (const auto *AC = dyn_cast<AcquireCapabilityAttr>(A)) Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size()); else if (const auto *AC = dyn_cast<TryAcquireCapabilityAttr>(A)) Args = llvm::makeArrayRef(AC->args_begin(), AC->args_size()); else if (const auto *RC = dyn_cast<ReleaseCapabilityAttr>(A)) Args = llvm::makeArrayRef(RC->args_begin(), RC->args_size()); if (Arg && !Finder.TraverseStmt(Arg)) return true; for (unsigned I = 0, N = Args.size(); I != N; ++I) { if (!Finder.TraverseStmt(Args[I])) return true; } } return false; } void Sema::checkExceptionSpecification( bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI) { // HLSL Change Starts if (LangOpts.HLSL) { assert(DynamicExceptions.size() == 0); assert(DynamicExceptionRanges.size() == 0); assert(NoexceptExpr == nullptr); assert(Exceptions.size() == 0); return; } // HLSL Change Ends Exceptions.clear(); ESI.Type = EST; if (EST == EST_Dynamic) { Exceptions.reserve(DynamicExceptions.size()); for (unsigned ei = 0, ee = DynamicExceptions.size(); ei != ee; ++ei) { // FIXME: Preserve type source info. QualType ET = GetTypeFromParser(DynamicExceptions[ei]); if (IsTopLevel) { SmallVector<UnexpandedParameterPack, 2> Unexpanded; collectUnexpandedParameterPacks(ET, Unexpanded); if (!Unexpanded.empty()) { DiagnoseUnexpandedParameterPacks( DynamicExceptionRanges[ei].getBegin(), UPPC_ExceptionType, Unexpanded); continue; } } // Check that the type is valid for an exception spec, and // drop it if not. if (!CheckSpecifiedExceptionType(ET, DynamicExceptionRanges[ei])) Exceptions.push_back(ET); } ESI.Exceptions = Exceptions; return; } if (EST == EST_ComputedNoexcept) { // If an error occurred, there's no expression here. 
if (NoexceptExpr) { assert((NoexceptExpr->isTypeDependent() || NoexceptExpr->getType()->getCanonicalTypeUnqualified() == Context.BoolTy) && "Parser should have made sure that the expression is boolean"); if (IsTopLevel && NoexceptExpr && DiagnoseUnexpandedParameterPack(NoexceptExpr)) { ESI.Type = EST_BasicNoexcept; return; } if (!NoexceptExpr->isValueDependent()) NoexceptExpr = VerifyIntegerConstantExpression(NoexceptExpr, nullptr, diag::err_noexcept_needs_constant_expression, /*AllowFold*/ false).get(); ESI.NoexceptExpr = NoexceptExpr; } return; } } void Sema::actOnDelayedExceptionSpecification(Decl *MethodD, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr) { if (!MethodD) return; // Dig out the method we're referring to. if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(MethodD)) MethodD = FunTmpl->getTemplatedDecl(); CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(MethodD); if (!Method) return; // Check the exception specification. llvm::SmallVector<QualType, 4> Exceptions; FunctionProtoType::ExceptionSpecInfo ESI; checkExceptionSpecification(/*IsTopLevel*/true, EST, DynamicExceptions, DynamicExceptionRanges, NoexceptExpr, Exceptions, ESI); // Update the exception specification on the function type. Context.adjustExceptionSpec(Method, ESI, /*AsWritten*/true); if (Method->isStatic()) checkThisInStaticMemberFunctionExceptionSpec(Method); if (Method->isVirtual()) { // Check overrides, which we previously had to delay. for (CXXMethodDecl::method_iterator O = Method->begin_overridden_methods(), OEnd = Method->end_overridden_methods(); O != OEnd; ++O) CheckOverridingFunctionExceptionSpec(Method, *O); } } /// HandleMSProperty - Analyze a __declspec(property) field of a C++ class. /// MSPropertyDecl *Sema::HandleMSProperty(Scope *S, RecordDecl *Record, SourceLocation DeclStart, Declarator &D, Expr *BitWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr) { IdentifierInfo *II = D.getIdentifier(); if (!II) { Diag(DeclStart, diag::err_anonymous_property); return nullptr; } SourceLocation Loc = D.getIdentifierLoc(); TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); QualType T = TInfo->getType(); if (getLangOpts().CPlusPlus) { CheckExtraCXXDefaultArguments(D); if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo, UPPC_DataMemberType)) { D.setInvalidType(); T = Context.IntTy; TInfo = Context.getTrivialTypeSourceInfo(T, Loc); } } DiagnoseFunctionSpecifiers(D.getDeclSpec()); if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), diag::err_invalid_thread) << DeclSpec::getSpecifierName(TSCS); // Check to see if this name was declared as a member previously NamedDecl *PrevDecl = nullptr; LookupResult Previous(*this, II, Loc, LookupMemberName, ForRedeclaration); LookupName(Previous, S); switch (Previous.getResultKind()) { case LookupResult::Found: case LookupResult::FoundUnresolvedValue: PrevDecl = Previous.getAsSingle<NamedDecl>(); break; case LookupResult::FoundOverloaded: PrevDecl = Previous.getRepresentativeDecl(); break; case LookupResult::NotFound: case LookupResult::NotFoundInCurrentInstantiation: case LookupResult::Ambiguous: break; } if (PrevDecl && PrevDecl->isTemplateParameter()) { // Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl); // Just pretend that we didn't see the previous declaration. PrevDecl = nullptr; } if (PrevDecl && !isDeclInScope(PrevDecl, Record, S)) PrevDecl = nullptr; SourceLocation TSSL = D.getLocStart(); const AttributeList::PropertyData &Data = MSPropertyAttr->getPropertyData(); MSPropertyDecl *NewPD = MSPropertyDecl::Create( Context, Record, Loc, II, T, TInfo, TSSL, Data.GetterId, Data.SetterId); ProcessDeclAttributes(TUScope, NewPD, D); NewPD->setAccess(AS); if (NewPD->isInvalidDecl()) Record->setInvalidDecl(); if (D.getDeclSpec().isModulePrivateSpecified()) NewPD->setModulePrivate(); if (NewPD->isInvalidDecl() && PrevDecl) { // Don't introduce NewPD into scope; there's already something // with the same name in the same scope. } else if (II) { PushOnScopeChains(NewPD, S); } else Record->addDecl(NewPD); return NewPD; }
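// --- Illustrative sketch (not part of the original file) ---------------------
// The routines above enforce two rules: delegating-constructor chains must
// terminate (DelegatingCycleHelper/CheckDelegatingCtorCycles thread the Valid,
// Invalid, and Current sets to detect cycles), and 'this' may not appear in
// the declaration of a static member function (FindCXXThisExpr). A minimal
// sketch of the user-level C++ involved; the struct names here are
// hypothetical examples, not clang APIs.
struct OkDelegation {
  OkDelegation() : OkDelegation(42) {} // fine: the chain ends at OkDelegation(int)
  OkDelegation(int) {}
};
// The ill-formed counterparts appear in comments because a conforming compiler
// rejects them:
//
//   struct CycleDelegation {
//     CycleDelegation() : CycleDelegation(0) {}   // delegates to (int)...
//     CycleDelegation(int) : CycleDelegation() {} // ...which delegates back:
//   };                                            // a cycle, diagnosed above
//
//   struct StaticThis {
//     static auto f() -> decltype(this); // 'this' in a static member function
//   };                                   // declaration: caught by FindCXXThisExpr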
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/TypeLocBuilder.cpp
//===--- TypeLocBuilder.cpp - Type Source Info collector ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines TypeLocBuilder, a class for building TypeLocs // bottom-up. // //===----------------------------------------------------------------------===// #include "TypeLocBuilder.h" using namespace clang; void TypeLocBuilder::pushFullCopy(TypeLoc L) { size_t Size = L.getFullDataSize(); reserve(Size); SmallVector<TypeLoc, 4> TypeLocs; TypeLoc CurTL = L; while (CurTL) { TypeLocs.push_back(CurTL); CurTL = CurTL.getNextTypeLoc(); } for (unsigned i = 0, e = TypeLocs.size(); i < e; ++i) { TypeLoc CurTL = TypeLocs[e-i-1]; switch (CurTL.getTypeLocClass()) { #define ABSTRACT_TYPELOC(CLASS, PARENT) #define TYPELOC(CLASS, PARENT) \ case TypeLoc::CLASS: { \ CLASS##TypeLoc NewTL = push<class CLASS##TypeLoc>(CurTL.getType()); \ memcpy(NewTL.getOpaqueData(), CurTL.getOpaqueData(), NewTL.getLocalDataSize()); \ break; \ } #include "clang/AST/TypeLocNodes.def" } } } void TypeLocBuilder::grow(size_t NewCapacity) { assert(NewCapacity > Capacity); // Allocate the new buffer and copy the old data into it. char *NewBuffer = new char[NewCapacity]; unsigned NewIndex = Index + NewCapacity - Capacity; memcpy(&NewBuffer[NewIndex], &Buffer[Index], Capacity - Index); if (Buffer != InlineBuffer.buffer) delete[] Buffer; Buffer = NewBuffer; Capacity = NewCapacity; Index = NewIndex; } TypeLoc TypeLocBuilder::pushImpl(QualType T, size_t LocalSize, unsigned LocalAlignment) { #ifndef NDEBUG QualType TLast = TypeLoc(T, nullptr).getNextTypeLoc().getType(); assert(TLast == LastTy && "mismatch between last type and new type's inner type"); LastTy = T; #endif assert(LocalAlignment <= BufferMaxAlignment && "Unexpected alignment"); // If we need to grow, grow by a factor of 2. if (LocalSize > Index) { size_t RequiredCapacity = Capacity + (LocalSize - Index); size_t NewCapacity = Capacity * 2; while (RequiredCapacity > NewCapacity) NewCapacity *= 2; grow(NewCapacity); } // Because we're adding elements to the TypeLoc backwards, we have to // do some extra work to keep everything aligned appropriately. // FIXME: This algorithm is an absolute mess because every TypeLoc returned // needs to be valid. Partial TypeLocs are a terrible idea. // FIXME: 4 and 8 are sufficient at the moment, but it's pretty ugly to // hardcode them. if (LocalAlignment == 4) { if (NumBytesAtAlign8 == 0) { NumBytesAtAlign4 += LocalSize; } else { unsigned Padding = NumBytesAtAlign4 % 8; if (Padding == 0) { if (LocalSize % 8 == 0) { // Everything is set: there's no padding and we don't need to add // any. } else { assert(LocalSize % 8 == 4); // No existing padding; add in 4 bytes padding memmove(&Buffer[Index - 4], &Buffer[Index], NumBytesAtAlign4); Index -= 4; } } else { assert(Padding == 4); if (LocalSize % 8 == 0) { // Everything is set: there's 4 bytes padding and we don't need // to add any. } else { assert(LocalSize % 8 == 4); // There are 4 bytes padding, but we don't need any; remove it.
memmove(&Buffer[Index + 4], &Buffer[Index], NumBytesAtAlign4); Index += 4; } } NumBytesAtAlign4 += LocalSize; } } else if (LocalAlignment == 8) { if (!NumBytesAtAlign8 && NumBytesAtAlign4 % 8 != 0) { // No existing padding and misaligned members; add in 4 bytes padding memmove(&Buffer[Index - 4], &Buffer[Index], NumBytesAtAlign4); Index -= 4; } // Forget about any padding. NumBytesAtAlign4 = 0; NumBytesAtAlign8 += LocalSize; } else { assert(LocalSize == 0); } Index -= LocalSize; assert(Capacity - Index == TypeLoc::getFullDataSizeForType(T) && "incorrect data size provided to CreateTypeSourceInfo!"); return getTemporaryTypeLoc(T); }
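// --- Illustrative sketch (not part of the original file) ---------------------
// TypeLocBuilder fills its buffer from the end toward the front: pushImpl
// moves Index downward, so the innermost TypeLoc pushed first ends up at the
// highest address and each outer record lands in front of it. Below is a
// standalone toy model of just that back-to-front layout (simplified: fixed
// capacity, no alignment bookkeeping); ReverseBuilder is a hypothetical name,
// not a clang class.
#include <cassert>
#include <cstdio>
#include <cstring>
#include <vector>

struct ReverseBuilder {
  std::vector<char> Buf;
  size_t Index; // grows downward as records are pushed
  explicit ReverseBuilder(size_t Cap) : Buf(Cap), Index(Cap) {}

  // Reserve Size bytes in front of everything pushed so far.
  char *push(size_t Size) {
    assert(Size <= Index && "toy model: no regrowth");
    Index -= Size;
    return &Buf[Index];
  }
};

int main() {
  ReverseBuilder B(32);
  std::memcpy(B.push(8), "inner..,", 8); // innermost record written first
  std::memcpy(B.push(8), "outer..,", 8); // outer record lands in front of it
  std::printf("layout: %.16s\n", &B.Buf[B.Index]); // prints "outer..,inner..,"
}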
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/Scope.cpp
//===- Scope.cpp - Lexical scope information --------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the Scope class, which is used for recording // information about a lexical scope. // //===----------------------------------------------------------------------===// #include "clang/Sema/Scope.h" #include "clang/AST/Decl.h" #include "llvm/Support/raw_ostream.h" using namespace clang; void Scope::Init(Scope *parent, unsigned flags) { AnyParent = parent; Flags = flags; if (parent && !(flags & FnScope)) { BreakParent = parent->BreakParent; ContinueParent = parent->ContinueParent; } else { // Control scopes do not contain the contents of nested function scopes for // control flow purposes. BreakParent = ContinueParent = nullptr; } if (parent) { Depth = parent->Depth + 1; PrototypeDepth = parent->PrototypeDepth; PrototypeIndex = 0; FnParent = parent->FnParent; BlockParent = parent->BlockParent; TemplateParamParent = parent->TemplateParamParent; MSLastManglingParent = parent->MSLastManglingParent; MSCurManglingNumber = getMSLastManglingNumber(); if ((Flags & (FnScope | ClassScope | BlockScope | TemplateParamScope | FunctionPrototypeScope | AtCatchScope | ObjCMethodScope)) == 0) Flags |= parent->getFlags() & OpenMPSimdDirectiveScope; } else { Depth = 0; PrototypeDepth = 0; PrototypeIndex = 0; MSLastManglingParent = FnParent = BlockParent = nullptr; TemplateParamParent = nullptr; MSLastManglingNumber = 1; MSCurManglingNumber = 1; } // If this scope is a function or contains breaks/continues, remember it. if (flags & FnScope) FnParent = this; // The MS mangler uses the number of scopes that can hold declarations as // part of an external name. if (Flags & (ClassScope | FnScope)) { MSLastManglingNumber = getMSLastManglingNumber(); MSLastManglingParent = this; MSCurManglingNumber = 1; } if (flags & BreakScope) BreakParent = this; if (flags & ContinueScope) ContinueParent = this; if (flags & BlockScope) BlockParent = this; if (flags & TemplateParamScope) TemplateParamParent = this; // If this is a prototype scope, record that. if (flags & FunctionPrototypeScope) PrototypeDepth++; if (flags & DeclScope) { if (flags & FunctionPrototypeScope) ; // Prototype scopes are uninteresting. else if ((flags & ClassScope) && getParent()->isClassScope()) ; // Nested class scopes aren't ambiguous. else if ((flags & ClassScope) && getParent()->getFlags() == DeclScope) ; // Classes inside of namespaces aren't ambiguous. else if ((flags & EnumScope)) ; // Don't increment for enum scopes. 
else incrementMSManglingNumber(); } DeclsInScope.clear(); UsingDirectives.clear(); Entity = nullptr; ErrorTrap.reset(); NRVO.setPointerAndInt(nullptr, 0); } bool Scope::containedInPrototypeScope() const { const Scope *S = this; while (S) { if (S->isFunctionPrototypeScope()) return true; S = S->getParent(); } return false; } void Scope::AddFlags(unsigned FlagsToSet) { assert((FlagsToSet & ~(BreakScope | ContinueScope)) == 0 && "Unsupported scope flags"); if (FlagsToSet & BreakScope) { assert((Flags & BreakScope) == 0 && "Already set"); BreakParent = this; } if (FlagsToSet & ContinueScope) { assert((Flags & ContinueScope) == 0 && "Already set"); ContinueParent = this; } Flags |= FlagsToSet; } void Scope::mergeNRVOIntoParent() { if (VarDecl *Candidate = NRVO.getPointer()) { if (isDeclScope(Candidate)) Candidate->setNRVOVariable(true); } if (getEntity()) return; if (NRVO.getInt()) getParent()->setNoNRVO(); else if (NRVO.getPointer()) getParent()->addNRVOCandidate(NRVO.getPointer()); } void Scope::dump() const { dumpImpl(llvm::errs()); } void Scope::dumpImpl(raw_ostream &OS) const { unsigned Flags = getFlags(); bool HasFlags = Flags != 0; if (HasFlags) OS << "Flags: "; while (Flags) { if (Flags & FnScope) { OS << "FnScope"; Flags &= ~FnScope; } else if (Flags & BreakScope) { OS << "BreakScope"; Flags &= ~BreakScope; } else if (Flags & ContinueScope) { OS << "ContinueScope"; Flags &= ~ContinueScope; } else if (Flags & DeclScope) { OS << "DeclScope"; Flags &= ~DeclScope; } else if (Flags & ControlScope) { OS << "ControlScope"; Flags &= ~ControlScope; } else if (Flags & ClassScope) { OS << "ClassScope"; Flags &= ~ClassScope; } else if (Flags & BlockScope) { OS << "BlockScope"; Flags &= ~BlockScope; } else if (Flags & TemplateParamScope) { OS << "TemplateParamScope"; Flags &= ~TemplateParamScope; } else if (Flags & FunctionPrototypeScope) { OS << "FunctionPrototypeScope"; Flags &= ~FunctionPrototypeScope; } else if (Flags & FunctionDeclarationScope) { OS << "FunctionDeclarationScope"; Flags &= ~FunctionDeclarationScope; } else if (Flags & AtCatchScope) { OS << "AtCatchScope"; Flags &= ~AtCatchScope; } else if (Flags & ObjCMethodScope) { OS << "ObjCMethodScope"; Flags &= ~ObjCMethodScope; } else if (Flags & SwitchScope) { OS << "SwitchScope"; Flags &= ~SwitchScope; } else if (Flags & TryScope) { OS << "TryScope"; Flags &= ~TryScope; } else if (Flags & FnTryCatchScope) { OS << "FnTryCatchScope"; Flags &= ~FnTryCatchScope; } else if (Flags & SEHTryScope) { OS << "SEHTryScope"; Flags &= ~SEHTryScope; } else if (Flags & SEHExceptScope) { OS << "SEHExceptScope"; Flags &= ~SEHExceptScope; } else if (Flags & OpenMPDirectiveScope) { OS << "OpenMPDirectiveScope"; Flags &= ~OpenMPDirectiveScope; } else if (Flags & OpenMPLoopDirectiveScope) { OS << "OpenMPLoopDirectiveScope"; Flags &= ~OpenMPLoopDirectiveScope; } else if (Flags & OpenMPSimdDirectiveScope) { OS << "OpenMPSimdDirectiveScope"; Flags &= ~OpenMPSimdDirectiveScope; } if (Flags) OS << " | "; } if (HasFlags) OS << '\n'; if (const Scope *Parent = getParent()) OS << "Parent: (clang::Scope*)" << Parent << '\n'; OS << "Depth: " << Depth << '\n'; OS << "MSLastManglingNumber: " << getMSLastManglingNumber() << '\n'; OS << "MSCurManglingNumber: " << getMSCurManglingNumber() << '\n'; if (const DeclContext *DC = getEntity()) OS << "Entity : (clang::DeclContext*)" << DC << '\n'; if (NRVO.getInt()) OS << "NRVO not allowed\n"; else if (NRVO.getPointer()) OS << "NRVO candidate : (clang::VarDecl*)" << NRVO.getPointer() << '\n'; }
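// --- Illustrative sketch (not part of the original file) ---------------------
// Scope::Init threads break/continue targets through nested scopes: a new
// scope inherits its parent's BreakParent/ContinueParent unless it is a
// function scope, then overrides whichever targets it provides itself
// (BreakScope for loops and switches, ContinueScope for loops only). Below is
// a standalone toy model of just that propagation; ToyScope is a hypothetical
// stand-in, not the real clang::Scope.
#include <cstdio>

struct ToyScope {
  enum { FnScope = 1, BreakScope = 2, ContinueScope = 4 };
  unsigned Flags = 0;
  ToyScope *BreakParent = nullptr;
  ToyScope *ContinueParent = nullptr;

  void Init(ToyScope *Parent, unsigned F) {
    Flags = F;
    if (Parent && !(F & FnScope)) { // function scopes do not leak targets in
      BreakParent = Parent->BreakParent;
      ContinueParent = Parent->ContinueParent;
    } else {
      BreakParent = ContinueParent = nullptr;
    }
    if (F & BreakScope) BreakParent = this;
    if (F & ContinueScope) ContinueParent = this;
  }
};

int main() {
  ToyScope Fn, Sw, If;
  Fn.Init(nullptr, ToyScope::FnScope);
  Sw.Init(&Fn, ToyScope::BreakScope); // a switch catches break, not continue
  If.Init(&Sw, 0);                    // a plain control scope inherits both
  std::printf("break target is switch: %d, continue target exists: %d\n",
              If.BreakParent == &Sw, If.ContinueParent != nullptr);
}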
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
//===--- SemaTemplateInstantiateDecl.cpp - C++ Template Decl Instantiation ===/ // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. //===----------------------------------------------------------------------===/ // // This file implements C++ template instantiation for declarations. // //===----------------------------------------------------------------------===/ #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclVisitor.h" #include "clang/AST/DependentDiagnostic.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/PrettyDeclStackTrace.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change #include "clang/Sema/Template.h" #include "llvm/Support/TimeProfiler.h" // HLSL Change using namespace clang; static bool isDeclWithinFunction(const Decl *D) { const DeclContext *DC = D->getDeclContext(); if (DC->isFunctionOrMethod()) return true; if (DC->isRecord()) return cast<CXXRecordDecl>(DC)->isLocalClass(); return false; } template<typename DeclT> static bool SubstQualifier(Sema &SemaRef, const DeclT *OldDecl, DeclT *NewDecl, const MultiLevelTemplateArgumentList &TemplateArgs) { if (!OldDecl->getQualifierLoc()) return false; assert((NewDecl->getFriendObjectKind() || !OldDecl->getLexicalDeclContext()->isDependentContext()) && "non-friend with qualified name defined in dependent context"); Sema::ContextRAII SavedContext( SemaRef, const_cast<DeclContext *>(NewDecl->getFriendObjectKind() ? NewDecl->getLexicalDeclContext() : OldDecl->getLexicalDeclContext())); NestedNameSpecifierLoc NewQualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(OldDecl->getQualifierLoc(), TemplateArgs); if (!NewQualifierLoc) return true; NewDecl->setQualifierInfo(NewQualifierLoc); return false; } bool TemplateDeclInstantiator::SubstQualifier(const DeclaratorDecl *OldDecl, DeclaratorDecl *NewDecl) { return ::SubstQualifier(SemaRef, OldDecl, NewDecl, TemplateArgs); } bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl, TagDecl *NewDecl) { return ::SubstQualifier(SemaRef, OldDecl, NewDecl, TemplateArgs); } // Include attribute instantiation code. #include "clang/Sema/AttrTemplateInstantiate.inc" static void instantiateDependentAlignedAttr( Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs, const AlignedAttr *Aligned, Decl *New, bool IsPackExpansion) { if (Aligned->isAlignmentExpr()) { // The alignment expression is a constant expression. 
EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated); ExprResult Result = S.SubstExpr(Aligned->getAlignmentExpr(), TemplateArgs); if (!Result.isInvalid()) S.AddAlignedAttr(Aligned->getLocation(), New, Result.getAs<Expr>(), Aligned->getSpellingListIndex(), IsPackExpansion); } else { TypeSourceInfo *Result = S.SubstType(Aligned->getAlignmentType(), TemplateArgs, Aligned->getLocation(), DeclarationName()); if (Result) S.AddAlignedAttr(Aligned->getLocation(), New, Result, Aligned->getSpellingListIndex(), IsPackExpansion); } } static void instantiateDependentAlignedAttr( Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs, const AlignedAttr *Aligned, Decl *New) { if (!Aligned->isPackExpansion()) { instantiateDependentAlignedAttr(S, TemplateArgs, Aligned, New, false); return; } SmallVector<UnexpandedParameterPack, 2> Unexpanded; if (Aligned->isAlignmentExpr()) S.collectUnexpandedParameterPacks(Aligned->getAlignmentExpr(), Unexpanded); else S.collectUnexpandedParameterPacks(Aligned->getAlignmentType()->getTypeLoc(), Unexpanded); assert(!Unexpanded.empty() && "Pack expansion without parameter packs?"); // Determine whether we can expand this attribute pack yet. bool Expand = true, RetainExpansion = false; Optional<unsigned> NumExpansions; // FIXME: Use the actual location of the ellipsis. SourceLocation EllipsisLoc = Aligned->getLocation(); if (S.CheckParameterPacksForExpansion(EllipsisLoc, Aligned->getRange(), Unexpanded, TemplateArgs, Expand, RetainExpansion, NumExpansions)) return; if (!Expand) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, -1); instantiateDependentAlignedAttr(S, TemplateArgs, Aligned, New, true); } else { for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, I); instantiateDependentAlignedAttr(S, TemplateArgs, Aligned, New, false); } } } static void instantiateDependentAssumeAlignedAttr( Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs, const AssumeAlignedAttr *Aligned, Decl *New) { // The alignment expression is a constant expression. EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated); Expr *E, *OE = nullptr; ExprResult Result = S.SubstExpr(Aligned->getAlignment(), TemplateArgs); if (Result.isInvalid()) return; E = Result.getAs<Expr>(); if (Aligned->getOffset()) { Result = S.SubstExpr(Aligned->getOffset(), TemplateArgs); if (Result.isInvalid()) return; OE = Result.getAs<Expr>(); } S.AddAssumeAlignedAttr(Aligned->getLocation(), New, E, OE, Aligned->getSpellingListIndex()); } static void instantiateDependentAlignValueAttr( Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs, const AlignValueAttr *Aligned, Decl *New) { // The alignment expression is a constant expression. 
EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated); ExprResult Result = S.SubstExpr(Aligned->getAlignment(), TemplateArgs); if (!Result.isInvalid()) S.AddAlignValueAttr(Aligned->getLocation(), New, Result.getAs<Expr>(), Aligned->getSpellingListIndex()); } static void instantiateDependentEnableIfAttr( Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs, const EnableIfAttr *A, const Decl *Tmpl, Decl *New) { Expr *Cond = nullptr; { EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated); ExprResult Result = S.SubstExpr(A->getCond(), TemplateArgs); if (Result.isInvalid()) return; Cond = Result.getAs<Expr>(); } if (A->getCond()->isTypeDependent() && !Cond->isTypeDependent()) { ExprResult Converted = S.PerformContextuallyConvertToBool(Cond); if (Converted.isInvalid()) return; Cond = Converted.get(); } SmallVector<PartialDiagnosticAt, 8> Diags; if (A->getCond()->isValueDependent() && !Cond->isValueDependent() && !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(Tmpl), Diags)) { S.Diag(A->getLocation(), diag::err_enable_if_never_constant_expr); for (int I = 0, N = Diags.size(); I != N; ++I) S.Diag(Diags[I].first, Diags[I].second); return; } EnableIfAttr *EIA = new (S.getASTContext()) EnableIfAttr(A->getLocation(), S.getASTContext(), Cond, A->getMessage(), A->getSpellingListIndex()); New->addAttr(EIA); } // Constructs and adds to New a new instance of CUDALaunchBoundsAttr using // template A as the base and arguments from TemplateArgs. static void instantiateDependentCUDALaunchBoundsAttr( Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs, const CUDALaunchBoundsAttr &Attr, Decl *New) { // The alignment expression is a constant expression. EnterExpressionEvaluationContext Unevaluated(S, Sema::ConstantEvaluated); ExprResult Result = S.SubstExpr(Attr.getMaxThreads(), TemplateArgs); if (Result.isInvalid()) return; Expr *MaxThreads = Result.getAs<Expr>(); Expr *MinBlocks = nullptr; if (Attr.getMinBlocks()) { Result = S.SubstExpr(Attr.getMinBlocks(), TemplateArgs); if (Result.isInvalid()) return; MinBlocks = Result.getAs<Expr>(); } S.AddLaunchBoundsAttr(Attr.getLocation(), New, MaxThreads, MinBlocks, Attr.getSpellingListIndex()); } void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Tmpl, Decl *New, LateInstantiatedAttrVec *LateAttrs, LocalInstantiationScope *OuterMostScope) { for (const auto *TmplAttr : Tmpl->attrs()) { // FIXME: This should be generalized to more than just the AlignedAttr. 
const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr); if (Aligned && Aligned->isAlignmentDependent()) { instantiateDependentAlignedAttr(*this, TemplateArgs, Aligned, New); continue; } const AssumeAlignedAttr *AssumeAligned = dyn_cast<AssumeAlignedAttr>(TmplAttr); if (AssumeAligned) { instantiateDependentAssumeAlignedAttr(*this, TemplateArgs, AssumeAligned, New); continue; } const AlignValueAttr *AlignValue = dyn_cast<AlignValueAttr>(TmplAttr); if (AlignValue) { instantiateDependentAlignValueAttr(*this, TemplateArgs, AlignValue, New); continue; } const EnableIfAttr *EnableIf = dyn_cast<EnableIfAttr>(TmplAttr); if (EnableIf && EnableIf->getCond()->isValueDependent()) { instantiateDependentEnableIfAttr(*this, TemplateArgs, EnableIf, Tmpl, New); continue; } if (const CUDALaunchBoundsAttr *CUDALaunchBounds = dyn_cast<CUDALaunchBoundsAttr>(TmplAttr)) { instantiateDependentCUDALaunchBoundsAttr(*this, TemplateArgs, *CUDALaunchBounds, New); continue; } // HLSL Change Begin - Validate post-instantiation attributes DiagnoseHLSLDeclAttr(New, TmplAttr); // HLSL Change End // Existing DLL attribute on the instantiation takes precedence. if (TmplAttr->getKind() == attr::DLLExport || TmplAttr->getKind() == attr::DLLImport) { if (New->hasAttr<DLLExportAttr>() || New->hasAttr<DLLImportAttr>()) { continue; } } assert(!TmplAttr->isPackExpansion()); if (TmplAttr->isLateParsed() && LateAttrs) { // Late parsed attributes must be instantiated and attached after the // enclosing class has been instantiated. See Sema::InstantiateClass. LocalInstantiationScope *Saved = nullptr; if (CurrentInstantiationScope) Saved = CurrentInstantiationScope->cloneScopes(OuterMostScope); LateAttrs->push_back(LateInstantiatedAttribute(TmplAttr, Saved, New)); } else { // Allow 'this' within late-parsed attributes. NamedDecl *ND = dyn_cast<NamedDecl>(New); CXXRecordDecl *ThisContext = dyn_cast_or_null<CXXRecordDecl>(ND->getDeclContext()); CXXThisScopeRAII ThisScope(*this, ThisContext, /*TypeQuals*/0, ND && ND->isCXXInstanceMember()); Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context, *this, TemplateArgs); if (NewAttr) New->addAttr(NewAttr); } } // HLSL Change - Begin // When instantiating a NamedDecl, we need to carry over the HLSL annotations if (auto Decl = dyn_cast<NamedDecl>(Tmpl)) { auto NDecl = cast<NamedDecl>(New); NDecl->setUnusualAnnotations(Decl->getUnusualAnnotations()); } // HLSL Change - End } /// Get the previous declaration of a declaration for the purposes of template /// instantiation. If this finds a previous declaration, then the previous /// declaration of the instantiation of D should be an instantiation of the /// result of this function. template<typename DeclT> static DeclT *getPreviousDeclForInstantiation(DeclT *D) { DeclT *Result = D->getPreviousDecl(); // If the declaration is within a class, and the previous declaration was // merged from a different definition of that class, then we don't have a // previous declaration for the purpose of template instantiation. 
if (Result && isa<CXXRecordDecl>(D->getDeclContext()) && D->getLexicalDeclContext() != Result->getLexicalDeclContext()) return nullptr; return Result; } Decl * TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) { llvm_unreachable("Translation units cannot be instantiated"); } // HLSL Change Starts Decl * TemplateDeclInstantiator::VisitHLSLBufferDecl(HLSLBufferDecl *Decl) { llvm_unreachable("HLSL buffer declarations cannot be instantiated"); } // HLSL Change Ends Decl * TemplateDeclInstantiator::VisitExternCContextDecl(ExternCContextDecl *D) { llvm_unreachable("extern \"C\" context cannot be instantiated"); } Decl * TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) { LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(), D->getIdentifier()); Owner->addDecl(Inst); return Inst; } Decl * TemplateDeclInstantiator::VisitNamespaceDecl(NamespaceDecl *D) { llvm_unreachable("Namespaces cannot be instantiated"); } Decl * TemplateDeclInstantiator::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) { NamespaceAliasDecl *Inst = NamespaceAliasDecl::Create(SemaRef.Context, Owner, D->getNamespaceLoc(), D->getAliasLoc(), D->getIdentifier(), D->getQualifierLoc(), D->getTargetNameLoc(), D->getNamespace()); Owner->addDecl(Inst); return Inst; } Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D, bool IsTypeAlias) { bool Invalid = false; TypeSourceInfo *DI = D->getTypeSourceInfo(); if (DI->getType()->isInstantiationDependentType() || DI->getType()->isVariablyModifiedType()) { DI = SemaRef.SubstType(DI, TemplateArgs, D->getLocation(), D->getDeclName()); if (!DI) { Invalid = true; DI = SemaRef.Context.getTrivialTypeSourceInfo(SemaRef.Context.IntTy); } } else { SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType()); } // HACK: g++ has a bug where it gets the value kind of ?: wrong. // libstdc++ relies upon this bug in its implementation of common_type. // If we happen to be processing that implementation, fake up the g++ ?: // semantics. See LWG issue 2141 for more information on the bug. const DecltypeType *DT = DI->getType()->getAs<DecltypeType>(); CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); if (DT && RD && isa<ConditionalOperator>(DT->getUnderlyingExpr()) && DT->isReferenceType() && RD->getEnclosingNamespaceContext() == SemaRef.getStdNamespace() && RD->getIdentifier() && RD->getIdentifier()->isStr("common_type") && D->getIdentifier() && D->getIdentifier()->isStr("type") && SemaRef.getSourceManager().isInSystemHeader(D->getLocStart())) // Fold it to the (non-reference) type which g++ would have produced. DI = SemaRef.Context.getTrivialTypeSourceInfo( DI->getType().getNonReferenceType()); // Create the new typedef TypedefNameDecl *Typedef; if (IsTypeAlias) Typedef = TypeAliasDecl::Create(SemaRef.Context, Owner, D->getLocStart(), D->getLocation(), D->getIdentifier(), DI); else Typedef = TypedefDecl::Create(SemaRef.Context, Owner, D->getLocStart(), D->getLocation(), D->getIdentifier(), DI); if (Invalid) Typedef->setInvalidDecl(); // If the old typedef was the name for linkage purposes of an anonymous // tag decl, re-establish that relationship for the new typedef. 
if (const TagType *oldTagType = D->getUnderlyingType()->getAs<TagType>()) { TagDecl *oldTag = oldTagType->getDecl(); if (oldTag->getTypedefNameForAnonDecl() == D && !Invalid) { TagDecl *newTag = DI->getType()->castAs<TagType>()->getDecl(); assert(!newTag->hasNameForLinkage()); newTag->setTypedefNameForAnonDecl(Typedef); } } if (TypedefNameDecl *Prev = getPreviousDeclForInstantiation(D)) { NamedDecl *InstPrev = SemaRef.FindInstantiatedDecl(D->getLocation(), Prev, TemplateArgs); if (!InstPrev) return nullptr; TypedefNameDecl *InstPrevTypedef = cast<TypedefNameDecl>(InstPrev); // If the typedef types are not identical, reject them. SemaRef.isIncompatibleTypedef(InstPrevTypedef, Typedef); Typedef->setPreviousDecl(InstPrevTypedef); } SemaRef.InstantiateAttrs(TemplateArgs, D, Typedef); Typedef->setAccess(D->getAccess()); return Typedef; } Decl *TemplateDeclInstantiator::VisitTypedefDecl(TypedefDecl *D) { Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/false); if (Typedef) Owner->addDecl(Typedef); return Typedef; } Decl *TemplateDeclInstantiator::VisitTypeAliasDecl(TypeAliasDecl *D) { Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/true); if (Typedef) Owner->addDecl(Typedef); return Typedef; } Decl * TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) { // Create a local instantiation scope for this type alias template, which // will contain the instantiations of the template parameters. LocalInstantiationScope Scope(SemaRef); TemplateParameterList *TempParams = D->getTemplateParameters(); TemplateParameterList *InstParams = SubstTemplateParams(TempParams); if (!InstParams) return nullptr; TypeAliasDecl *Pattern = D->getTemplatedDecl(); TypeAliasTemplateDecl *PrevAliasTemplate = nullptr; if (getPreviousDeclForInstantiation<TypedefNameDecl>(Pattern)) { DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName()); if (!Found.empty()) { PrevAliasTemplate = dyn_cast<TypeAliasTemplateDecl>(Found.front()); } } TypeAliasDecl *AliasInst = cast_or_null<TypeAliasDecl>( InstantiateTypedefNameDecl(Pattern, /*IsTypeAlias=*/true)); if (!AliasInst) return nullptr; TypeAliasTemplateDecl *Inst = TypeAliasTemplateDecl::Create(SemaRef.Context, Owner, D->getLocation(), D->getDeclName(), InstParams, AliasInst); AliasInst->setDescribedAliasTemplate(Inst); if (PrevAliasTemplate) Inst->setPreviousDecl(PrevAliasTemplate); Inst->setAccess(D->getAccess()); if (!PrevAliasTemplate) Inst->setInstantiatedFromMemberTemplate(D); Owner->addDecl(Inst); return Inst; } Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) { return VisitVarDecl(D, /*InstantiatingVarTemplate=*/false); } Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D, bool InstantiatingVarTemplate) { // If this is the variable for an anonymous struct or union, // instantiate the anonymous struct/union type first. 
if (const RecordType *RecordTy = D->getType()->getAs<RecordType>()) if (RecordTy->getDecl()->isAnonymousStructOrUnion()) if (!VisitCXXRecordDecl(cast<CXXRecordDecl>(RecordTy->getDecl()))) return nullptr; // Do substitution on the type of the declaration TypeSourceInfo *DI = SemaRef.SubstType(D->getTypeSourceInfo(), TemplateArgs, D->getTypeSpecStartLoc(), D->getDeclName()); if (!DI) return nullptr; if (DI->getType()->isFunctionType()) { SemaRef.Diag(D->getLocation(), diag::err_variable_instantiates_to_function) << D->isStaticDataMember() << DI->getType(); return nullptr; } DeclContext *DC = Owner; if (D->isLocalExternDecl()) SemaRef.adjustContextForLocalExternDecl(DC); // Build the instantiated declaration. VarDecl *Var = VarDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(), D->getLocation(), D->getIdentifier(), DI->getType(), DI, D->getStorageClass()); // In ARC, infer 'retaining' for variables of retainable type. if (SemaRef.getLangOpts().ObjCAutoRefCount && SemaRef.inferObjCARCLifetime(Var)) Var->setInvalidDecl(); // Substitute the nested name specifier, if any. if (SubstQualifier(D, Var)) return nullptr; SemaRef.BuildVariableInstantiation(Var, D, TemplateArgs, LateAttrs, Owner, StartingScope, InstantiatingVarTemplate); if (D->isNRVOVariable()) { QualType ReturnType = cast<FunctionDecl>(DC)->getReturnType(); if (SemaRef.isCopyElisionCandidate(ReturnType, Var, false)) Var->setNRVOVariable(true); } Var->setImplicit(D->isImplicit()); return Var; } Decl *TemplateDeclInstantiator::VisitAccessSpecDecl(AccessSpecDecl *D) { AccessSpecDecl* AD = AccessSpecDecl::Create(SemaRef.Context, D->getAccess(), Owner, D->getAccessSpecifierLoc(), D->getColonLoc()); Owner->addHiddenDecl(AD); return AD; } Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) { bool Invalid = false; TypeSourceInfo *DI = D->getTypeSourceInfo(); if (DI->getType()->isInstantiationDependentType() || DI->getType()->isVariablyModifiedType()) { DI = SemaRef.SubstType(DI, TemplateArgs, D->getLocation(), D->getDeclName()); if (!DI) { DI = D->getTypeSourceInfo(); Invalid = true; } else if (DI->getType()->isFunctionType()) { // C++ [temp.arg.type]p3: // If a declaration acquires a function type through a type // dependent on a template-parameter and this causes a // declaration that does not use the syntactic form of a // function declarator to have function type, the program is // ill-formed. SemaRef.Diag(D->getLocation(), diag::err_field_instantiates_to_function) << DI->getType(); Invalid = true; } } else { SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType()); } Expr *BitWidth = D->getBitWidth(); if (Invalid) BitWidth = nullptr; else if (BitWidth) { // The bit-width expression is a constant expression. 
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::ConstantEvaluated); ExprResult InstantiatedBitWidth = SemaRef.SubstExpr(BitWidth, TemplateArgs); if (InstantiatedBitWidth.isInvalid()) { Invalid = true; BitWidth = nullptr; } else BitWidth = InstantiatedBitWidth.getAs<Expr>(); } FieldDecl *Field = SemaRef.CheckFieldDecl(D->getDeclName(), DI->getType(), DI, cast<RecordDecl>(Owner), D->getLocation(), D->isMutable(), BitWidth, D->getInClassInitStyle(), D->getInnerLocStart(), D->getAccess(), nullptr); if (!Field) { cast<Decl>(Owner)->setInvalidDecl(); return nullptr; } SemaRef.InstantiateAttrs(TemplateArgs, D, Field, LateAttrs, StartingScope); if (Field->hasAttrs()) SemaRef.CheckAlignasUnderalignment(Field); if (Invalid) Field->setInvalidDecl(); if (!Field->getDeclName()) { // Keep track of where this decl came from. SemaRef.Context.setInstantiatedFromUnnamedFieldDecl(Field, D); } if (CXXRecordDecl *Parent= dyn_cast<CXXRecordDecl>(Field->getDeclContext())) { if (Parent->isAnonymousStructOrUnion() && Parent->getRedeclContext()->isFunctionOrMethod()) SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Field); } Field->setImplicit(D->isImplicit()); Field->setAccess(D->getAccess()); Owner->addDecl(Field); return Field; } Decl *TemplateDeclInstantiator::VisitMSPropertyDecl(MSPropertyDecl *D) { bool Invalid = false; TypeSourceInfo *DI = D->getTypeSourceInfo(); if (DI->getType()->isVariablyModifiedType()) { SemaRef.Diag(D->getLocation(), diag::err_property_is_variably_modified) << D; Invalid = true; } else if (DI->getType()->isInstantiationDependentType()) { DI = SemaRef.SubstType(DI, TemplateArgs, D->getLocation(), D->getDeclName()); if (!DI) { DI = D->getTypeSourceInfo(); Invalid = true; } else if (DI->getType()->isFunctionType()) { // C++ [temp.arg.type]p3: // If a declaration acquires a function type through a type // dependent on a template-parameter and this causes a // declaration that does not use the syntactic form of a // function declarator to have function type, the program is // ill-formed. 
SemaRef.Diag(D->getLocation(), diag::err_field_instantiates_to_function) << DI->getType(); Invalid = true; } } else { SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType()); } MSPropertyDecl *Property = MSPropertyDecl::Create( SemaRef.Context, Owner, D->getLocation(), D->getDeclName(), DI->getType(), DI, D->getLocStart(), D->getGetterId(), D->getSetterId()); SemaRef.InstantiateAttrs(TemplateArgs, D, Property, LateAttrs, StartingScope); if (Invalid) Property->setInvalidDecl(); Property->setAccess(D->getAccess()); Owner->addDecl(Property); return Property; } Decl *TemplateDeclInstantiator::VisitIndirectFieldDecl(IndirectFieldDecl *D) { NamedDecl **NamedChain = new (SemaRef.Context)NamedDecl*[D->getChainingSize()]; int i = 0; for (auto *PI : D->chain()) { NamedDecl *Next = SemaRef.FindInstantiatedDecl(D->getLocation(), PI, TemplateArgs); if (!Next) return nullptr; NamedChain[i++] = Next; } QualType T = cast<FieldDecl>(NamedChain[i-1])->getType(); IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create( SemaRef.Context, Owner, D->getLocation(), D->getIdentifier(), T, NamedChain, D->getChainingSize()); for (const auto *Attr : D->attrs()) IndirectField->addAttr(Attr->clone(SemaRef.Context)); IndirectField->setImplicit(D->isImplicit()); IndirectField->setAccess(D->getAccess()); Owner->addDecl(IndirectField); return IndirectField; } Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) { // Handle friend type expressions by simply substituting template // parameters into the pattern type and checking the result. if (TypeSourceInfo *Ty = D->getFriendType()) { TypeSourceInfo *InstTy; // If this is an unsupported friend, don't bother substituting template // arguments into it. The actual type referred to won't be used by any // parts of Clang, and may not be valid for instantiating. Just use the // same info for the instantiated friend. if (D->isUnsupportedFriend()) { InstTy = Ty; } else { InstTy = SemaRef.SubstType(Ty, TemplateArgs, D->getLocation(), DeclarationName()); } if (!InstTy) return nullptr; FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocStart(), D->getFriendLoc(), InstTy); if (!FD) return nullptr; FD->setAccess(AS_public); FD->setUnsupportedFriend(D->isUnsupportedFriend()); Owner->addDecl(FD); return FD; } NamedDecl *ND = D->getFriendDecl(); assert(ND && "friend decl must be a decl or a type!"); // All of the Visit implementations for the various potential friend // declarations have to be carefully written to work for friend // objects, with the most important detail being that the target // decl should almost certainly not be placed in Owner. Decl *NewND = Visit(ND); if (!NewND) return nullptr; FriendDecl *FD = FriendDecl::Create(SemaRef.Context, Owner, D->getLocation(), cast<NamedDecl>(NewND), D->getFriendLoc()); FD->setAccess(AS_public); FD->setUnsupportedFriend(D->isUnsupportedFriend()); Owner->addDecl(FD); return FD; } Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) { Expr *AssertExpr = D->getAssertExpr(); // The expression in a static assertion is a constant expression. 
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::ConstantEvaluated); ExprResult InstantiatedAssertExpr = SemaRef.SubstExpr(AssertExpr, TemplateArgs); if (InstantiatedAssertExpr.isInvalid()) return nullptr; return SemaRef.BuildStaticAssertDeclaration(D->getLocation(), InstantiatedAssertExpr.get(), D->getMessage(), D->getRParenLoc(), D->isFailed()); } Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) { EnumDecl *PrevDecl = nullptr; if (EnumDecl *PatternPrev = getPreviousDeclForInstantiation(D)) { NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(), PatternPrev, TemplateArgs); if (!Prev) return nullptr; PrevDecl = cast<EnumDecl>(Prev); } EnumDecl *Enum = EnumDecl::Create(SemaRef.Context, Owner, D->getLocStart(), D->getLocation(), D->getIdentifier(), PrevDecl, D->isScoped(), D->isScopedUsingClassTag(), D->isFixed()); if (D->isFixed()) { if (TypeSourceInfo *TI = D->getIntegerTypeSourceInfo()) { // If we have type source information for the underlying type, it means it // has been explicitly set by the user. Perform substitution on it before // moving on. SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc(); TypeSourceInfo *NewTI = SemaRef.SubstType(TI, TemplateArgs, UnderlyingLoc, DeclarationName()); if (!NewTI || SemaRef.CheckEnumUnderlyingType(NewTI)) Enum->setIntegerType(SemaRef.Context.IntTy); else Enum->setIntegerTypeSourceInfo(NewTI); } else { assert(!D->getIntegerType()->isDependentType() && "Dependent type without type source info"); Enum->setIntegerType(D->getIntegerType()); } } SemaRef.InstantiateAttrs(TemplateArgs, D, Enum); Enum->setInstantiationOfMemberEnum(D, TSK_ImplicitInstantiation); Enum->setAccess(D->getAccess()); // Forward the mangling number from the template to the instantiated decl. SemaRef.Context.setManglingNumber(Enum, SemaRef.Context.getManglingNumber(D)); if (SubstQualifier(D, Enum)) return nullptr; Owner->addDecl(Enum); EnumDecl *Def = D->getDefinition(); if (Def && Def != D) { // If this is an out-of-line definition of an enum member template, check // that the underlying types match in the instantiation of both // declarations. if (TypeSourceInfo *TI = Def->getIntegerTypeSourceInfo()) { SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc(); QualType DefnUnderlying = SemaRef.SubstType(TI->getType(), TemplateArgs, UnderlyingLoc, DeclarationName()); SemaRef.CheckEnumRedeclaration(Def->getLocation(), Def->isScoped(), DefnUnderlying, Enum); } } // C++11 [temp.inst]p1: The implicit instantiation of a class template // specialization causes the implicit instantiation of the declarations, but // not the definitions of scoped member enumerations. // // DR1484 clarifies that enumeration definitions inside of a template // declaration aren't considered entities that can be separately instantiated // from the rest of the entity they are declared inside of. if (isDeclWithinFunction(D) ? D == Def : Def && !Enum->isScoped()) { SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Enum); InstantiateEnumDefinition(Enum, Def); } return Enum; } void TemplateDeclInstantiator::InstantiateEnumDefinition( EnumDecl *Enum, EnumDecl *Pattern) { Enum->startDefinition(); // Update the location to refer to the definition. Enum->setLocation(Pattern->getLocation()); SmallVector<Decl*, 4> Enumerators; EnumConstantDecl *LastEnumConst = nullptr; for (auto *EC : Pattern->enumerators()) { // The specified value for the enumerator. 
ExprResult Value((Expr *)nullptr); if (Expr *UninstValue = EC->getInitExpr()) { // The enumerator's value expression is a constant expression. EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::ConstantEvaluated); Value = SemaRef.SubstExpr(UninstValue, TemplateArgs); } // Drop the initial value and continue. bool isInvalid = false; if (Value.isInvalid()) { Value = nullptr; isInvalid = true; } EnumConstantDecl *EnumConst = SemaRef.CheckEnumConstant(Enum, LastEnumConst, EC->getLocation(), EC->getIdentifier(), Value.get()); if (isInvalid) { if (EnumConst) EnumConst->setInvalidDecl(); Enum->setInvalidDecl(); } if (EnumConst) { SemaRef.InstantiateAttrs(TemplateArgs, EC, EnumConst); EnumConst->setAccess(Enum->getAccess()); Enum->addDecl(EnumConst); Enumerators.push_back(EnumConst); LastEnumConst = EnumConst; if (Pattern->getDeclContext()->isFunctionOrMethod() && !Enum->isScoped()) { // If the enumeration is within a function or method, record the enum // constant as a local. SemaRef.CurrentInstantiationScope->InstantiatedLocal(EC, EnumConst); } } } // FIXME: Fixup LBraceLoc SemaRef.ActOnEnumBody(Enum->getLocation(), SourceLocation(), Enum->getRBraceLoc(), Enum, Enumerators, nullptr, nullptr); } Decl *TemplateDeclInstantiator::VisitEnumConstantDecl(EnumConstantDecl *D) { llvm_unreachable("EnumConstantDecls can only occur within EnumDecls."); } Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) { bool isFriend = (D->getFriendObjectKind() != Decl::FOK_None); // Create a local instantiation scope for this class template, which // will contain the instantiations of the template parameters. LocalInstantiationScope Scope(SemaRef); TemplateParameterList *TempParams = D->getTemplateParameters(); TemplateParameterList *InstParams = SubstTemplateParams(TempParams); if (!InstParams) return nullptr; CXXRecordDecl *Pattern = D->getTemplatedDecl(); // Instantiate the qualifier. We have to do this first in case // we're a friend declaration, because if we are then we need to put // the new declaration in the appropriate context. NestedNameSpecifierLoc QualifierLoc = Pattern->getQualifierLoc(); if (QualifierLoc) { QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgs); if (!QualifierLoc) return nullptr; } CXXRecordDecl *PrevDecl = nullptr; ClassTemplateDecl *PrevClassTemplate = nullptr; if (!isFriend && getPreviousDeclForInstantiation(Pattern)) { DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName()); if (!Found.empty()) { PrevClassTemplate = dyn_cast<ClassTemplateDecl>(Found.front()); if (PrevClassTemplate) PrevDecl = PrevClassTemplate->getTemplatedDecl(); } } // If this isn't a friend, then it's a member template, in which // case we just want to build the instantiation in the // specialization. If it is a friend, we want to build it in // the appropriate context. DeclContext *DC = Owner; if (isFriend) { if (QualifierLoc) { CXXScopeSpec SS; SS.Adopt(QualifierLoc); DC = SemaRef.computeDeclContext(SS); if (!DC) return nullptr; } else { DC = SemaRef.FindInstantiatedContext(Pattern->getLocation(), Pattern->getDeclContext(), TemplateArgs); } // Look for a previous declaration of the template in the owning // context. 
LookupResult R(SemaRef, Pattern->getDeclName(), Pattern->getLocation(), Sema::LookupOrdinaryName, Sema::ForRedeclaration); SemaRef.LookupQualifiedName(R, DC); if (R.isSingleResult()) { PrevClassTemplate = R.getAsSingle<ClassTemplateDecl>(); if (PrevClassTemplate) PrevDecl = PrevClassTemplate->getTemplatedDecl(); } if (!PrevClassTemplate && QualifierLoc) { SemaRef.Diag(Pattern->getLocation(), diag::err_not_tag_in_scope) << D->getTemplatedDecl()->getTagKind() << Pattern->getDeclName() << DC << QualifierLoc.getSourceRange(); return nullptr; } bool AdoptedPreviousTemplateParams = false; if (PrevClassTemplate) { bool Complain = true; // HACK: libstdc++ 4.2.1 contains an ill-formed friend class // template for struct std::tr1::__detail::_Map_base, where the // template parameters of the friend declaration don't match the // template parameters of the original declaration. In this one // case, we don't complain about the ill-formed friend // declaration. if (isFriend && Pattern->getIdentifier() && Pattern->getIdentifier()->isStr("_Map_base") && DC->isNamespace() && cast<NamespaceDecl>(DC)->getIdentifier() && cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__detail")) { DeclContext *DCParent = DC->getParent(); if (DCParent->isNamespace() && cast<NamespaceDecl>(DCParent)->getIdentifier() && cast<NamespaceDecl>(DCParent)->getIdentifier()->isStr("tr1")) { if (cast<Decl>(DCParent)->isInStdNamespace()) Complain = false; } } TemplateParameterList *PrevParams = PrevClassTemplate->getTemplateParameters(); // Make sure the parameter lists match. if (!SemaRef.TemplateParameterListsAreEqual(InstParams, PrevParams, Complain, Sema::TPL_TemplateMatch)) { if (Complain) return nullptr; AdoptedPreviousTemplateParams = true; InstParams = PrevParams; } // Do some additional validation, then merge default arguments // from the existing declarations. if (!AdoptedPreviousTemplateParams && SemaRef.CheckTemplateParameterList(InstParams, PrevParams, Sema::TPC_ClassTemplate)) return nullptr; } } CXXRecordDecl *RecordInst = CXXRecordDecl::Create(SemaRef.Context, Pattern->getTagKind(), DC, Pattern->getLocStart(), Pattern->getLocation(), Pattern->getIdentifier(), PrevDecl, /*DelayTypeCreation=*/true); if (QualifierLoc) RecordInst->setQualifierInfo(QualifierLoc); ClassTemplateDecl *Inst = ClassTemplateDecl::Create(SemaRef.Context, DC, D->getLocation(), D->getIdentifier(), InstParams, RecordInst, PrevClassTemplate); RecordInst->setDescribedClassTemplate(Inst); if (isFriend) { if (PrevClassTemplate) Inst->setAccess(PrevClassTemplate->getAccess()); else Inst->setAccess(D->getAccess()); Inst->setObjectOfFriendDecl(); // TODO: do we want to track the instantiation progeny of this // friend target decl? } else { Inst->setAccess(D->getAccess()); if (!PrevClassTemplate) Inst->setInstantiatedFromMemberTemplate(D); } // Trigger creation of the type for the instantiation. SemaRef.Context.getInjectedClassNameType(RecordInst, Inst->getInjectedClassNameSpecialization()); // Finish handling of friends. if (isFriend) { DC->makeDeclVisibleInContext(Inst); Inst->setLexicalDeclContext(Owner); RecordInst->setLexicalDeclContext(Owner); return Inst; } if (D->isOutOfLine()) { Inst->setLexicalDeclContext(D->getLexicalDeclContext()); RecordInst->setLexicalDeclContext(D->getLexicalDeclContext()); } Owner->addDecl(Inst); if (!PrevClassTemplate) { // Queue up any out-of-line partial specializations of this member // class template; the client will force their instantiation once // the enclosing class has been instantiated. 
SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs; D->getPartialSpecializations(PartialSpecs); for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) if (PartialSpecs[I]->getFirstDecl()->isOutOfLine()) OutOfLinePartialSpecs.push_back(std::make_pair(Inst, PartialSpecs[I])); } return Inst; } Decl * TemplateDeclInstantiator::VisitClassTemplatePartialSpecializationDecl( ClassTemplatePartialSpecializationDecl *D) { ClassTemplateDecl *ClassTemplate = D->getSpecializedTemplate(); // Lookup the already-instantiated declaration in the instantiation // of the class template and return that. DeclContext::lookup_result Found = Owner->lookup(ClassTemplate->getDeclName()); if (Found.empty()) return nullptr; ClassTemplateDecl *InstClassTemplate = dyn_cast<ClassTemplateDecl>(Found.front()); if (!InstClassTemplate) return nullptr; if (ClassTemplatePartialSpecializationDecl *Result = InstClassTemplate->findPartialSpecInstantiatedFromMember(D)) return Result; return InstantiateClassTemplatePartialSpecialization(InstClassTemplate, D); } Decl *TemplateDeclInstantiator::VisitVarTemplateDecl(VarTemplateDecl *D) { assert(D->getTemplatedDecl()->isStaticDataMember() && "Only static data member templates are allowed."); // Create a local instantiation scope for this variable template, which // will contain the instantiations of the template parameters. LocalInstantiationScope Scope(SemaRef); TemplateParameterList *TempParams = D->getTemplateParameters(); TemplateParameterList *InstParams = SubstTemplateParams(TempParams); if (!InstParams) return nullptr; VarDecl *Pattern = D->getTemplatedDecl(); VarTemplateDecl *PrevVarTemplate = nullptr; if (getPreviousDeclForInstantiation(Pattern)) { DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName()); if (!Found.empty()) PrevVarTemplate = dyn_cast<VarTemplateDecl>(Found.front()); } VarDecl *VarInst = cast_or_null<VarDecl>(VisitVarDecl(Pattern, /*InstantiatingVarTemplate=*/true)); DeclContext *DC = Owner; VarTemplateDecl *Inst = VarTemplateDecl::Create( SemaRef.Context, DC, D->getLocation(), D->getIdentifier(), InstParams, VarInst); VarInst->setDescribedVarTemplate(Inst); Inst->setPreviousDecl(PrevVarTemplate); Inst->setAccess(D->getAccess()); if (!PrevVarTemplate) Inst->setInstantiatedFromMemberTemplate(D); if (D->isOutOfLine()) { Inst->setLexicalDeclContext(D->getLexicalDeclContext()); VarInst->setLexicalDeclContext(D->getLexicalDeclContext()); } Owner->addDecl(Inst); if (!PrevVarTemplate) { // Queue up any out-of-line partial specializations of this member // variable template; the client will force their instantiation once // the enclosing class has been instantiated. SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs; D->getPartialSpecializations(PartialSpecs); for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) if (PartialSpecs[I]->getFirstDecl()->isOutOfLine()) OutOfLineVarPartialSpecs.push_back( std::make_pair(Inst, PartialSpecs[I])); } return Inst; } Decl *TemplateDeclInstantiator::VisitVarTemplatePartialSpecializationDecl( VarTemplatePartialSpecializationDecl *D) { assert(D->isStaticDataMember() && "Only static data member templates are allowed."); VarTemplateDecl *VarTemplate = D->getSpecializedTemplate(); // Lookup the already-instantiated declaration and return that. 
DeclContext::lookup_result Found = Owner->lookup(VarTemplate->getDeclName()); assert(!Found.empty() && "Instantiation found nothing?"); VarTemplateDecl *InstVarTemplate = dyn_cast<VarTemplateDecl>(Found.front()); assert(InstVarTemplate && "Instantiation did not find a variable template?"); if (VarTemplatePartialSpecializationDecl *Result = InstVarTemplate->findPartialSpecInstantiatedFromMember(D)) return Result; return InstantiateVarTemplatePartialSpecialization(InstVarTemplate, D); } Decl * TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) { // Create a local instantiation scope for this function template, which // will contain the instantiations of the template parameters and then get // merged with the local instantiation scope for the function template // itself. LocalInstantiationScope Scope(SemaRef); TemplateParameterList *TempParams = D->getTemplateParameters(); TemplateParameterList *InstParams = SubstTemplateParams(TempParams); if (!InstParams) return nullptr; FunctionDecl *Instantiated = nullptr; if (CXXMethodDecl *DMethod = dyn_cast<CXXMethodDecl>(D->getTemplatedDecl())) Instantiated = cast_or_null<FunctionDecl>(VisitCXXMethodDecl(DMethod, InstParams)); else Instantiated = cast_or_null<FunctionDecl>(VisitFunctionDecl( D->getTemplatedDecl(), InstParams)); if (!Instantiated) return nullptr; // Link the instantiated function template declaration to the function // template from which it was instantiated. FunctionTemplateDecl *InstTemplate = Instantiated->getDescribedFunctionTemplate(); InstTemplate->setAccess(D->getAccess()); assert(InstTemplate && "VisitFunctionDecl/CXXMethodDecl didn't create a template!"); bool isFriend = (InstTemplate->getFriendObjectKind() != Decl::FOK_None); // Link the instantiation back to the pattern *unless* this is a // non-definition friend declaration. if (!InstTemplate->getInstantiatedFromMemberTemplate() && !(isFriend && !D->getTemplatedDecl()->isThisDeclarationADefinition())) InstTemplate->setInstantiatedFromMemberTemplate(D); // Make declarations visible in the appropriate context. if (!isFriend) { Owner->addDecl(InstTemplate); } else if (InstTemplate->getDeclContext()->isRecord() && !getPreviousDeclForInstantiation(D)) { SemaRef.CheckFriendAccess(InstTemplate); } return InstTemplate; } Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) { CXXRecordDecl *PrevDecl = nullptr; if (D->isInjectedClassName()) PrevDecl = cast<CXXRecordDecl>(Owner); else if (CXXRecordDecl *PatternPrev = getPreviousDeclForInstantiation(D)) { NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(), PatternPrev, TemplateArgs); if (!Prev) return nullptr; PrevDecl = cast<CXXRecordDecl>(Prev); } CXXRecordDecl *Record = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner, D->getLocStart(), D->getLocation(), D->getIdentifier(), PrevDecl); // Substitute the nested name specifier, if any. if (SubstQualifier(D, Record)) return nullptr; Record->setImplicit(D->isImplicit()); // FIXME: Check against AS_none is an ugly hack to work around the issue that // the tag decls introduced by friend class declarations don't have an access // specifier. Remove once this area of the code gets sorted out. if (D->getAccess() != AS_none) Record->setAccess(D->getAccess()); if (!D->isInjectedClassName()) Record->setInstantiationOfMemberClass(D, TSK_ImplicitInstantiation); // If the original function was part of a friend declaration, // inherit its namespace state. 
  if (D->getFriendObjectKind())
    Record->setObjectOfFriendDecl();

  // Make sure that anonymous structs and unions are recorded.
  if (D->isAnonymousStructOrUnion())
    Record->setAnonymousStructOrUnion(true);

  if (D->isLocalClass())
    SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Record);

  // Forward the mangling number from the template to the instantiated decl.
  SemaRef.Context.setManglingNumber(Record,
                                    SemaRef.Context.getManglingNumber(D));

  Owner->addDecl(Record);

  // DR1484 clarifies that the members of a local class are instantiated as
  // part of the instantiation of their enclosing entity.
  if (D->isCompleteDefinition() && D->isLocalClass()) {
    Sema::SavePendingLocalImplicitInstantiationsRAII
        SavedPendingLocalImplicitInstantiations(SemaRef);

    SemaRef.InstantiateClass(D->getLocation(), Record, D, TemplateArgs,
                             TSK_ImplicitInstantiation,
                             /*Complain=*/true);

    SemaRef.InstantiateClassMembers(D->getLocation(), Record, TemplateArgs,
                                    TSK_ImplicitInstantiation);

    // This class may have local implicit instantiations that need to be
    // performed within this scope.
    SemaRef.PerformPendingInstantiations(/*LocalOnly=*/true);
  }

  SemaRef.DiagnoseUnusedNestedTypedefs(Record);

  return Record;
}

/// \brief Adjust the given function type for an instantiation of the
/// given declaration, to cope with modifications to the function's type that
/// aren't reflected in the type-source information.
///
/// \param D The declaration we're instantiating.
/// \param TInfo The already-instantiated type.
static QualType adjustFunctionTypeForInstantiation(ASTContext &Context,
                                                   FunctionDecl *D,
                                                   TypeSourceInfo *TInfo) {
  const FunctionProtoType *OrigFunc
    = D->getType()->castAs<FunctionProtoType>();
  const FunctionProtoType *NewFunc
    = TInfo->getType()->castAs<FunctionProtoType>();
  if (OrigFunc->getExtInfo() == NewFunc->getExtInfo())
    return TInfo->getType();

  FunctionProtoType::ExtProtoInfo NewEPI = NewFunc->getExtProtoInfo();
  NewEPI.ExtInfo = OrigFunc->getExtInfo();
  return Context.getFunctionType(NewFunc->getReturnType(),
                                 NewFunc->getParamTypes(), NewEPI,
                                 NewFunc->getParamMods()); // HLSL Change
}

/// Normal class members are of more specific types and therefore
/// don't make it here. This function serves two purposes:
///   1) instantiating function templates
///   2) substituting friend declarations
Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
                                      TemplateParameterList *TemplateParams) {
  // Check whether there is already a function template specialization for
  // this declaration.
  FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
  if (FunctionTemplate && !TemplateParams) {
    ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();

    void *InsertPos = nullptr;
    FunctionDecl *SpecFunc
      = FunctionTemplate->findSpecialization(Innermost, InsertPos);

    // If we already have a function template specialization, return it.
    if (SpecFunc)
      return SpecFunc;
  }

  bool isFriend;
  if (FunctionTemplate)
    isFriend = (FunctionTemplate->getFriendObjectKind() != Decl::FOK_None);
  else
    isFriend = (D->getFriendObjectKind() != Decl::FOK_None);

  bool MergeWithParentScope = (TemplateParams != nullptr) ||
    Owner->isFunctionOrMethod() ||
    !(isa<Decl>(Owner) &&
      cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
  LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);

  SmallVector<ParmVarDecl *, 4> Params;
  TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
  if (!TInfo)
    return nullptr;
  QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);

  NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
  if (QualifierLoc) {
    QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
                                                       TemplateArgs);
    if (!QualifierLoc)
      return nullptr;
  }

  // If we're instantiating a local function declaration, put the result
  // in the enclosing namespace; otherwise we need to find the instantiated
  // context.
  DeclContext *DC;
  if (D->isLocalExternDecl()) {
    DC = Owner;
    SemaRef.adjustContextForLocalExternDecl(DC);
  } else if (isFriend && QualifierLoc) {
    CXXScopeSpec SS;
    SS.Adopt(QualifierLoc);
    DC = SemaRef.computeDeclContext(SS);
    if (!DC)
      return nullptr;
  } else {
    DC = SemaRef.FindInstantiatedContext(D->getLocation(), D->getDeclContext(),
                                         TemplateArgs);
  }

  FunctionDecl *Function =
      FunctionDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
                           D->getNameInfo(), T, TInfo,
                           D->getCanonicalDecl()->getStorageClass(),
                           D->isInlineSpecified(), D->hasWrittenPrototype(),
                           D->isConstexpr());
  Function->setRangeEnd(D->getSourceRange().getEnd());

  if (D->isInlined())
    Function->setImplicitlyInline();

  if (QualifierLoc)
    Function->setQualifierInfo(QualifierLoc);

  if (D->isLocalExternDecl())
    Function->setLocalExternDecl();

  DeclContext *LexicalDC = Owner;
  if (!isFriend && D->isOutOfLine() && !D->isLocalExternDecl()) {
    assert(D->getDeclContext()->isFileContext());
    LexicalDC = D->getDeclContext();
  }

  Function->setLexicalDeclContext(LexicalDC);

  // Attach the parameters
  for (unsigned P = 0; P < Params.size(); ++P)
    if (Params[P])
      Params[P]->setOwningFunction(Function);
  Function->setParams(Params);

  SourceLocation InstantiateAtPOI;
  if (TemplateParams) {
    // Our resulting instantiation is actually a function template, since we
    // are substituting only the outer template parameters. For example, given
    //
    //   template<typename T>
    //   struct X {
    //     template<typename U> friend void f(T, U);
    //   };
    //
    //   X<int> x;
    //
    // We are instantiating the friend function template "f" within X<int>,
    // which means substituting int for T, but leaving "f" as a friend function
    // template.

    // Build the function template itself.
    FunctionTemplate = FunctionTemplateDecl::Create(SemaRef.Context, DC,
                                                    Function->getLocation(),
                                                    Function->getDeclName(),
                                                    TemplateParams, Function);
    Function->setDescribedFunctionTemplate(FunctionTemplate);

    FunctionTemplate->setLexicalDeclContext(LexicalDC);

    if (isFriend && D->isThisDeclarationADefinition()) {
      // TODO: should we remember this connection regardless of whether
      // the friend declaration provided a body?
      FunctionTemplate->setInstantiatedFromMemberTemplate(
                                          D->getDescribedFunctionTemplate());
    }
  } else if (FunctionTemplate) {
    // Record this function template specialization.
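    // The instantiation is keyed on the innermost template arguments so that
    // later requests for the same specialization find this declaration.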
    ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();
    Function->setFunctionTemplateSpecialization(FunctionTemplate,
                        TemplateArgumentList::CreateCopy(SemaRef.Context,
                                                         Innermost.begin(),
                                                         Innermost.size()),
                                                /*InsertPos=*/nullptr);
  } else if (isFriend) {
    // Note, we need this connection even if the friend doesn't have a body.
    // Its body may exist but not have been attached yet due to deferred
    // parsing.
    // FIXME: It might be cleaner to set this when attaching the body to the
    // friend function declaration, however that would require finding all the
    // instantiations and modifying them.
    Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
  }

  if (InitFunctionInstantiation(Function, D))
    Function->setInvalidDecl();

  bool isExplicitSpecialization = false;

  LookupResult Previous(
      SemaRef, Function->getDeclName(), SourceLocation(),
      D->isLocalExternDecl() ? Sema::LookupRedeclarationWithLinkage
                             : Sema::LookupOrdinaryName,
      Sema::ForRedeclaration);

  if (DependentFunctionTemplateSpecializationInfo *Info
        = D->getDependentSpecializationInfo()) {
    assert(isFriend && "non-friend has dependent specialization info?");

    // This needs to be set now for future sanity.
    Function->setObjectOfFriendDecl();

    // Instantiate the explicit template arguments.
    TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
                                          Info->getRAngleLoc());
    if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
                      ExplicitArgs, TemplateArgs))
      return nullptr;

    // Map the candidate templates to their instantiations.
    for (unsigned I = 0, E = Info->getNumTemplates(); I != E; ++I) {
      Decl *Temp = SemaRef.FindInstantiatedDecl(D->getLocation(),
                                                Info->getTemplate(I),
                                                TemplateArgs);
      if (!Temp)
        return nullptr;

      Previous.addDecl(cast<FunctionTemplateDecl>(Temp));
    }

    if (SemaRef.CheckFunctionTemplateSpecialization(Function,
                                                    &ExplicitArgs,
                                                    Previous))
      Function->setInvalidDecl();

    isExplicitSpecialization = true;
  } else if (TemplateParams || !FunctionTemplate) {
    // Look only into the namespace where the friend would be declared to
    // find a previous declaration. This is the innermost enclosing namespace,
    // as described in ActOnFriendFunctionDecl.
    SemaRef.LookupQualifiedName(Previous, DC);

    // In C++, the previous declaration we find might be a tag type
    // (class or enum). In this case, the new declaration will hide the
    // tag type. Note that this does not apply if we're declaring a
    // typedef (C++ [dcl.typedef]p4).
    if (Previous.isSingleTagDecl())
      Previous.clear();
  }

  // HLSL Change Begin - back ported from llvm-project/4409a83c2935.
  // Per [temp.inst], default arguments in function declarations at local scope
  // are instantiated along with the enclosing declaration. For example:
  //
  //   template<typename T>
  //   void ft() {
  //     void f(int = []{ return T::value; }());
  //   }
  //   template void ft<int>(); // error: type 'int' cannot be used prior
  //                            // to '::' because it has no members
  //
  // The error is issued during instantiation of ft<int>() because substitution
  // into the default argument fails; the default argument is instantiated even
  // though it is never used.
  if (Function->isLocalExternDecl()) {
    for (ParmVarDecl *PVD : Function->parameters()) {
      if (!PVD->hasDefaultArg())
        continue;
      if (SemaRef.SubstDefaultArgument(D->getInnerLocStart(), PVD,
                                       TemplateArgs)) {
        Function->setInvalidDecl();
        return nullptr;
      }
    }
  }
  // HLSL Change End - back ported from llvm-project/4409a83c2935.

  SemaRef.CheckFunctionDeclaration(/*Scope*/ nullptr, Function, Previous,
                                   isExplicitSpecialization);

  NamedDecl *PrincipalDecl = (TemplateParams
                              ? cast<NamedDecl>(FunctionTemplate)
                              : Function);

  // If the original function was part of a friend declaration,
  // inherit its namespace state and add it to the owner.
  if (isFriend) {
    PrincipalDecl->setObjectOfFriendDecl();
    DC->makeDeclVisibleInContext(PrincipalDecl);

    bool QueuedInstantiation = false;

    // C++11 [temp.friend]p4 (DR329):
    //   When a function is defined in a friend function declaration in a class
    //   template, the function is instantiated when the function is odr-used.
    //   The same restrictions on multiple declarations and definitions that
    //   apply to non-template function declarations and definitions also apply
    //   to these implicit definitions.
    if (D->isThisDeclarationADefinition()) {
      // Check for a function body.
      const FunctionDecl *Definition = nullptr;
      if (Function->isDefined(Definition) &&
          Definition->getTemplateSpecializationKind() == TSK_Undeclared) {
        SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
          << Function->getDeclName();
        SemaRef.Diag(Definition->getLocation(), diag::note_previous_definition);
      }
      // Check for redefinitions due to other instantiations of this or
      // a similar friend function.
      else for (auto R : Function->redecls()) {
        if (R == Function)
          continue;

        // If some prior declaration of this function has been used, we need
        // to instantiate its definition.
        if (!QueuedInstantiation && R->isUsed(false)) {
          if (MemberSpecializationInfo *MSInfo
                = Function->getMemberSpecializationInfo()) {
            if (MSInfo->getPointOfInstantiation().isInvalid()) {
              SourceLocation Loc = R->getLocation(); // FIXME
              MSInfo->setPointOfInstantiation(Loc);
              SemaRef.PendingLocalImplicitInstantiations.push_back(
                                              std::make_pair(Function, Loc));
              QueuedInstantiation = true;
            }
          }
        }

        // If some prior declaration of this function was a friend with an
        // uninstantiated definition, reject it.
        if (R->getFriendObjectKind()) {
          if (const FunctionDecl *RPattern
                = R->getTemplateInstantiationPattern()) {
            if (RPattern->isDefined(RPattern)) {
              SemaRef.Diag(Function->getLocation(), diag::err_redefinition)
                << Function->getDeclName();
              SemaRef.Diag(R->getLocation(), diag::note_previous_definition);
              break;
            }
          }
        }
      }
    }
  }

  if (Function->isLocalExternDecl() && !Function->getPreviousDecl())
    DC->makeDeclVisibleInContext(PrincipalDecl);

  if (Function->isOverloadedOperator() && !DC->isRecord() &&
      PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
    PrincipalDecl->setNonMemberOperator();

  assert(!D->isDefaulted() && "only methods should be defaulted");
  return Function;
}

Decl *
TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
                                      TemplateParameterList *TemplateParams,
                                      bool IsClassScopeSpecialization) {
  FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
  if (FunctionTemplate && !TemplateParams) {
    // We are creating a function template specialization from a function
    // template. Check whether there is already a function template
    // specialization for this particular set of template arguments.
    ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();

    void *InsertPos = nullptr;
    FunctionDecl *SpecFunc
      = FunctionTemplate->findSpecialization(Innermost, InsertPos);

    // If we already have a function template specialization, return it.
    if (SpecFunc)
      return SpecFunc;
  }

  bool isFriend;
  if (FunctionTemplate)
    isFriend = (FunctionTemplate->getFriendObjectKind() != Decl::FOK_None);
  else
    isFriend = (D->getFriendObjectKind() != Decl::FOK_None);

  bool MergeWithParentScope = (TemplateParams != nullptr) ||
    !(isa<Decl>(Owner) &&
      cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
  LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);

  // Instantiate enclosing template arguments for friends.
  SmallVector<TemplateParameterList *, 4> TempParamLists;
  unsigned NumTempParamLists = 0;
  if (isFriend && (NumTempParamLists = D->getNumTemplateParameterLists())) {
    TempParamLists.set_size(NumTempParamLists);
    for (unsigned I = 0; I != NumTempParamLists; ++I) {
      TemplateParameterList *TempParams = D->getTemplateParameterList(I);
      TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
      if (!InstParams)
        return nullptr;
      TempParamLists[I] = InstParams;
    }
  }

  SmallVector<ParmVarDecl *, 4> Params;
  TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
  if (!TInfo)
    return nullptr;
  QualType T = adjustFunctionTypeForInstantiation(SemaRef.Context, D, TInfo);

  NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
  if (QualifierLoc) {
    QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
                                                       TemplateArgs);
    if (!QualifierLoc)
      return nullptr;
  }

  DeclContext *DC = Owner;
  if (isFriend) {
    if (QualifierLoc) {
      CXXScopeSpec SS;
      SS.Adopt(QualifierLoc);
      DC = SemaRef.computeDeclContext(SS);

      if (DC && SemaRef.RequireCompleteDeclContext(SS, DC))
        return nullptr;
    } else {
      DC = SemaRef.FindInstantiatedContext(D->getLocation(),
                                           D->getDeclContext(),
                                           TemplateArgs);
    }
    if (!DC)
      return nullptr;
  }

  // Build the instantiated method declaration.
  CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
  CXXMethodDecl *Method = nullptr;

  SourceLocation StartLoc = D->getInnerLocStart();
  DeclarationNameInfo NameInfo
    = SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
  if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
    Method = CXXConstructorDecl::Create(SemaRef.Context, Record,
                                        StartLoc, NameInfo, T, TInfo,
                                        Constructor->isExplicit(),
                                        Constructor->isInlineSpecified(),
                                        false, Constructor->isConstexpr());

    // Claim that the instantiation of a constructor or constructor template
    // inherits the same constructor that the template does.
    if (CXXConstructorDecl *Inh = const_cast<CXXConstructorDecl *>(
            Constructor->getInheritedConstructor())) {
      // If we're instantiating a specialization of a function template, our
      // "inherited constructor" will actually itself be a function template.
      // Instantiate a declaration of it, too.
      if (FunctionTemplate) {
        assert(!TemplateParams && Inh->getDescribedFunctionTemplate() &&
               !Inh->getParent()->isDependentContext() &&
               "inheriting constructor template in dependent context?");
        Sema::InstantiatingTemplate Inst(SemaRef, Constructor->getLocation(),
                                         Inh);
        if (Inst.isInvalid())
          return nullptr;
        Sema::ContextRAII SavedContext(SemaRef, Inh->getDeclContext());
        LocalInstantiationScope LocalScope(SemaRef);

        // Use the same template arguments that we deduced for the inheriting
        // constructor. There's no way they could be deduced differently.
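        // Build a one-level argument list from the innermost deduced
        // arguments and substitute the inherited constructor in its own
        // declaration context.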
        MultiLevelTemplateArgumentList InheritedArgs;
        InheritedArgs.addOuterTemplateArguments(TemplateArgs.getInnermost());
        Inh = cast_or_null<CXXConstructorDecl>(
            SemaRef.SubstDecl(Inh, Inh->getDeclContext(), InheritedArgs));
        if (!Inh)
          return nullptr;
      }
      cast<CXXConstructorDecl>(Method)->setInheritedConstructor(Inh);
    }
  } else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
    Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
                                       StartLoc, NameInfo, T, TInfo,
                                       Destructor->isInlineSpecified(),
                                       false);
  } else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
    Method = CXXConversionDecl::Create(SemaRef.Context, Record,
                                       StartLoc, NameInfo, T, TInfo,
                                       Conversion->isInlineSpecified(),
                                       Conversion->isExplicit(),
                                       Conversion->isConstexpr(),
                                       Conversion->getLocEnd());
  } else {
    StorageClass SC = D->isStatic() ? SC_Static : SC_None;
    Method = CXXMethodDecl::Create(SemaRef.Context, Record,
                                   StartLoc, NameInfo, T, TInfo,
                                   SC, D->isInlineSpecified(),
                                   D->isConstexpr(), D->getLocEnd());
  }

  if (D->isInlined())
    Method->setImplicitlyInline();

  if (QualifierLoc)
    Method->setQualifierInfo(QualifierLoc);

  if (TemplateParams) {
    // Our resulting instantiation is actually a function template, since we
    // are substituting only the outer template parameters. For example, given
    //
    //   template<typename T>
    //   struct X {
    //     template<typename U> void f(T, U);
    //   };
    //
    //   X<int> x;
    //
    // We are instantiating the member template "f" within X<int>, which means
    // substituting int for T, but leaving "f" as a member function template.

    // Build the function template itself.
    FunctionTemplate = FunctionTemplateDecl::Create(SemaRef.Context, Record,
                                                    Method->getLocation(),
                                                    Method->getDeclName(),
                                                    TemplateParams, Method);
    if (isFriend) {
      FunctionTemplate->setLexicalDeclContext(Owner);
      FunctionTemplate->setObjectOfFriendDecl();
    } else if (D->isOutOfLine())
      FunctionTemplate->setLexicalDeclContext(D->getLexicalDeclContext());
    Method->setDescribedFunctionTemplate(FunctionTemplate);
  } else if (FunctionTemplate) {
    // Record this function template specialization.
    ArrayRef<TemplateArgument> Innermost = TemplateArgs.getInnermost();
    Method->setFunctionTemplateSpecialization(FunctionTemplate,
                        TemplateArgumentList::CreateCopy(SemaRef.Context,
                                                         Innermost.begin(),
                                                         Innermost.size()),
                                              /*InsertPos=*/nullptr);
  } else if (!isFriend) {
    // Record that this is an instantiation of a member function.
    Method->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
  }

  // If we are instantiating a member function defined
  // out-of-line, the instantiation will have the same lexical
  // context (which will be a namespace scope) as the template.
  if (isFriend) {
    if (NumTempParamLists)
      Method->setTemplateParameterListsInfo(SemaRef.Context,
                                            NumTempParamLists,
                                            TempParamLists.data());

    Method->setLexicalDeclContext(Owner);
    Method->setObjectOfFriendDecl();
  } else if (D->isOutOfLine())
    Method->setLexicalDeclContext(D->getLexicalDeclContext());

  // Attach the parameters
  for (unsigned P = 0; P < Params.size(); ++P)
    Params[P]->setOwningFunction(Method);
  Method->setParams(Params);

  if (InitMethodInstantiation(Method, D))
    Method->setInvalidDecl();

  LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName,
                        Sema::ForRedeclaration);

  if (!FunctionTemplate || TemplateParams || isFriend) {
    SemaRef.LookupQualifiedName(Previous, Record);

    // In C++, the previous declaration we find might be a tag type
    // (class or enum). In this case, the new declaration will hide the
    // tag type. Note that this does not apply if we're declaring a
    // typedef (C++ [dcl.typedef]p4).
    if (Previous.isSingleTagDecl())
      Previous.clear();
  }

  // HLSL Change Begin - back ported from llvm-project/4409a83c2935.
  // Per [temp.inst], default arguments in member functions of local classes
  // are instantiated along with the member function declaration. For example:
  //
  //   template<typename T>
  //   void ft() {
  //     struct lc {
  //       int operator()(int p = []{ return T::value; }());
  //     };
  //   }
  //   template void ft<int>(); // error: type 'int' cannot be used prior
  //                            // to '::' because it has no members
  //
  // The error is issued during instantiation of ft<int>()::lc::operator()
  // because substitution into the default argument fails; the default argument
  // is instantiated even though it is never used.
  if (D->isInLocalScopeForInstantiation()) {
    for (unsigned P = 0; P < Params.size(); ++P) {
      if (!Params[P]->hasDefaultArg())
        continue;
      if (SemaRef.SubstDefaultArgument(StartLoc, Params[P], TemplateArgs)) {
        return nullptr;
      }
    }
  }
  // HLSL Change End - back ported from llvm-project/4409a83c2935.

  if (!IsClassScopeSpecialization)
    SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous, false);

  if (D->isPure())
    SemaRef.CheckPureMethod(Method, SourceRange());

  // Propagate access. For a non-friend declaration, the access is
  // whatever we're propagating from. For a friend, it should be the
  // previous declaration we just found.
  if (isFriend && Method->getPreviousDecl())
    Method->setAccess(Method->getPreviousDecl()->getAccess());
  else
    Method->setAccess(D->getAccess());
  if (FunctionTemplate)
    FunctionTemplate->setAccess(Method->getAccess());

  SemaRef.CheckOverrideControl(Method);

  // If a function is defined as defaulted or deleted, mark it as such now.
  if (D->isExplicitlyDefaulted())
    SemaRef.SetDeclDefaulted(Method, Method->getLocation());
  if (D->isDeletedAsWritten())
    SemaRef.SetDeclDeleted(Method, Method->getLocation());

  // If there's a function template, let our caller handle it.
  if (FunctionTemplate) {
    // do nothing

  // Don't hide a (potentially) valid declaration with an invalid one.
  } else if (Method->isInvalidDecl() && !Previous.empty()) {
    // do nothing

  // Otherwise, check access to friends and make them visible.
  } else if (isFriend) {
    // We only need to re-check access for methods which we didn't
    // manage to match during parsing.
    if (!D->getPreviousDecl())
      SemaRef.CheckFriendAccess(Method);

    Record->makeDeclVisibleInContext(Method);

  // Otherwise, add the declaration. We don't need to do this for
  // class-scope specializations because we'll have matched them with
  // the appropriate template.
  } else if (!IsClassScopeSpecialization) {
    Owner->addDecl(Method);
  }

  return Method;
}

Decl *TemplateDeclInstantiator::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
  return VisitCXXMethodDecl(D);
}

Decl *TemplateDeclInstantiator::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
  return VisitCXXMethodDecl(D);
}

Decl *TemplateDeclInstantiator::VisitCXXConversionDecl(CXXConversionDecl *D) {
  return VisitCXXMethodDecl(D);
}

Decl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
  return SemaRef.SubstParmVarDecl(D, TemplateArgs, /*indexAdjustment*/ 0, None,
                                  /*ExpectParameterPack=*/ false);
}

Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
                                                    TemplateTypeParmDecl *D) {
  // TODO: don't always clone when decls are refcounted.
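  // Template parameters of a member template are instantiated at a reduced
  // depth: each level of supplied template arguments strips one level of
  // template nesting, hence the getDepth() - getNumLevels() adjustment below.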
  assert(D->getTypeForDecl()->isTemplateTypeParmType());

  TemplateTypeParmDecl *Inst =
    TemplateTypeParmDecl::Create(SemaRef.Context, Owner,
                                 D->getLocStart(), D->getLocation(),
                                 D->getDepth() - TemplateArgs.getNumLevels(),
                                 D->getIndex(), D->getIdentifier(),
                                 D->wasDeclaredWithTypename(),
                                 D->isParameterPack());
  Inst->setAccess(AS_public);

  if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
    TypeSourceInfo *InstantiatedDefaultArg =
        SemaRef.SubstType(D->getDefaultArgumentInfo(), TemplateArgs,
                          D->getDefaultArgumentLoc(), D->getDeclName());
    if (InstantiatedDefaultArg)
      Inst->setDefaultArgument(InstantiatedDefaultArg);
  }

  // Introduce this template parameter's instantiation into the instantiation
  // scope.
  SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Inst);

  return Inst;
}

Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
                                                 NonTypeTemplateParmDecl *D) {
  // Substitute into the type of the non-type template parameter.
  TypeLoc TL = D->getTypeSourceInfo()->getTypeLoc();
  SmallVector<TypeSourceInfo *, 4> ExpandedParameterPackTypesAsWritten;
  SmallVector<QualType, 4> ExpandedParameterPackTypes;
  bool IsExpandedParameterPack = false;
  TypeSourceInfo *DI;
  QualType T;
  bool Invalid = false;

  if (D->isExpandedParameterPack()) {
    // The non-type template parameter pack is an already-expanded pack
    // expansion of types. Substitute into each of the expanded types.
    ExpandedParameterPackTypes.reserve(D->getNumExpansionTypes());
    ExpandedParameterPackTypesAsWritten.reserve(D->getNumExpansionTypes());
    for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
      TypeSourceInfo *NewDI =
          SemaRef.SubstType(D->getExpansionTypeSourceInfo(I), TemplateArgs,
                            D->getLocation(), D->getDeclName());
      if (!NewDI)
        return nullptr;

      ExpandedParameterPackTypesAsWritten.push_back(NewDI);
      QualType NewT =
          SemaRef.CheckNonTypeTemplateParameterType(NewDI->getType(),
                                                    D->getLocation());
      if (NewT.isNull())
        return nullptr;
      ExpandedParameterPackTypes.push_back(NewT);
    }

    IsExpandedParameterPack = true;
    DI = D->getTypeSourceInfo();
    T = DI->getType();
  } else if (D->isPackExpansion()) {
    // The non-type template parameter pack's type is a pack expansion of
    // types. Determine whether we need to expand this parameter pack into
    // separate types.
    PackExpansionTypeLoc Expansion = TL.castAs<PackExpansionTypeLoc>();
    TypeLoc Pattern = Expansion.getPatternLoc();
    SmallVector<UnexpandedParameterPack, 2> Unexpanded;
    SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);

    // Determine whether the set of unexpanded parameter packs can and should
    // be expanded.
    bool Expand = true;
    bool RetainExpansion = false;
    Optional<unsigned> OrigNumExpansions
      = Expansion.getTypePtr()->getNumExpansions();
    Optional<unsigned> NumExpansions = OrigNumExpansions;
    if (SemaRef.CheckParameterPacksForExpansion(Expansion.getEllipsisLoc(),
                                                Pattern.getSourceRange(),
                                                Unexpanded,
                                                TemplateArgs,
                                                Expand, RetainExpansion,
                                                NumExpansions))
      return nullptr;

    if (Expand) {
      for (unsigned I = 0; I != *NumExpansions; ++I) {
        Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
        TypeSourceInfo *NewDI = SemaRef.SubstType(Pattern, TemplateArgs,
                                                  D->getLocation(),
                                                  D->getDeclName());
        if (!NewDI)
          return nullptr;

        ExpandedParameterPackTypesAsWritten.push_back(NewDI);
        QualType NewT = SemaRef.CheckNonTypeTemplateParameterType(
            NewDI->getType(), D->getLocation());
        if (NewT.isNull())
          return nullptr;
        ExpandedParameterPackTypes.push_back(NewT);
      }

      // Note that we have an expanded parameter pack. The "type" of this
      // expanded parameter pack is the original expansion type, but callers
      // will end up using the expanded parameter pack types for type-checking.
      IsExpandedParameterPack = true;
      DI = D->getTypeSourceInfo();
      T = DI->getType();
    } else {
      // We cannot fully expand the pack expansion now, so substitute into the
      // pattern and create a new pack expansion type.
      Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
      TypeSourceInfo *NewPattern = SemaRef.SubstType(Pattern, TemplateArgs,
                                                     D->getLocation(),
                                                     D->getDeclName());
      if (!NewPattern)
        return nullptr;

      DI = SemaRef.CheckPackExpansion(NewPattern, Expansion.getEllipsisLoc(),
                                      NumExpansions);
      if (!DI)
        return nullptr;

      T = DI->getType();
    }
  } else {
    // Simple case: substitution into a parameter that is not a parameter pack.
    DI = SemaRef.SubstType(D->getTypeSourceInfo(), TemplateArgs,
                           D->getLocation(), D->getDeclName());
    if (!DI)
      return nullptr;

    // Check that this type is acceptable for a non-type template parameter.
    T = SemaRef.CheckNonTypeTemplateParameterType(DI->getType(),
                                                  D->getLocation());
    if (T.isNull()) {
      T = SemaRef.Context.IntTy;
      Invalid = true;
    }
  }

  NonTypeTemplateParmDecl *Param;
  if (IsExpandedParameterPack)
    Param = NonTypeTemplateParmDecl::Create(
        SemaRef.Context, Owner, D->getInnerLocStart(), D->getLocation(),
        D->getDepth() - TemplateArgs.getNumLevels(), D->getPosition(),
        D->getIdentifier(), T, DI, ExpandedParameterPackTypes.data(),
        ExpandedParameterPackTypes.size(),
        ExpandedParameterPackTypesAsWritten.data());
  else
    Param = NonTypeTemplateParmDecl::Create(
        SemaRef.Context, Owner, D->getInnerLocStart(), D->getLocation(),
        D->getDepth() - TemplateArgs.getNumLevels(), D->getPosition(),
        D->getIdentifier(), T, D->isParameterPack(), DI);

  Param->setAccess(AS_public);
  if (Invalid)
    Param->setInvalidDecl();

  if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
    ExprResult Value = SemaRef.SubstExpr(D->getDefaultArgument(),
                                         TemplateArgs);
    if (!Value.isInvalid())
      Param->setDefaultArgument(Value.get());
  }

  // Introduce this template parameter's instantiation into the instantiation
  // scope.
  SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Param);
  return Param;
}

static void collectUnexpandedParameterPacks(
    Sema &S,
    TemplateParameterList *Params,
    SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
  for (const auto &P : *Params) {
    if (P->isTemplateParameterPack())
      continue;
    if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P))
      S.collectUnexpandedParameterPacks(NTTP->getTypeSourceInfo()->getTypeLoc(),
                                        Unexpanded);
    if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(P))
      collectUnexpandedParameterPacks(S, TTP->getTemplateParameters(),
                                      Unexpanded);
  }
}

Decl *
TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
                                                 TemplateTemplateParmDecl *D) {
  // Instantiate the template parameter list of the template template
  // parameter.
  TemplateParameterList *TempParams = D->getTemplateParameters();
  TemplateParameterList *InstParams;
  SmallVector<TemplateParameterList *, 8> ExpandedParams;

  bool IsExpandedParameterPack = false;

  if (D->isExpandedParameterPack()) {
    // The template template parameter pack is an already-expanded pack
    // expansion of template parameters. Substitute into each of the expanded
    // parameters.
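    // This happens when an outer instantiation has already fixed the number
    // of expansions; each expansion carries its own template parameter list,
    // and each list must be substituted separately.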
    ExpandedParams.reserve(D->getNumExpansionTemplateParameters());
    for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
         I != N; ++I) {
      LocalInstantiationScope Scope(SemaRef);
      TemplateParameterList *Expansion =
        SubstTemplateParams(D->getExpansionTemplateParameters(I));
      if (!Expansion)
        return nullptr;
      ExpandedParams.push_back(Expansion);
    }

    IsExpandedParameterPack = true;
    InstParams = TempParams;
  } else if (D->isPackExpansion()) {
    // The template template parameter pack expands to a pack of template
    // template parameters. Determine whether we need to expand this parameter
    // pack into separate parameters.
    SmallVector<UnexpandedParameterPack, 2> Unexpanded;
    collectUnexpandedParameterPacks(SemaRef, D->getTemplateParameters(),
                                    Unexpanded);

    // Determine whether the set of unexpanded parameter packs can and should
    // be expanded.
    bool Expand = true;
    bool RetainExpansion = false;
    Optional<unsigned> NumExpansions;
    if (SemaRef.CheckParameterPacksForExpansion(D->getLocation(),
                                                TempParams->getSourceRange(),
                                                Unexpanded,
                                                TemplateArgs,
                                                Expand, RetainExpansion,
                                                NumExpansions))
      return nullptr;

    if (Expand) {
      for (unsigned I = 0; I != *NumExpansions; ++I) {
        Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
        LocalInstantiationScope Scope(SemaRef);
        TemplateParameterList *Expansion = SubstTemplateParams(TempParams);
        if (!Expansion)
          return nullptr;
        ExpandedParams.push_back(Expansion);
      }

      // Note that we have an expanded parameter pack. The "type" of this
      // expanded parameter pack is the original expansion type, but callers
      // will end up using the expanded parameter pack types for type-checking.
      IsExpandedParameterPack = true;
      InstParams = TempParams;
    } else {
      // We cannot fully expand the pack expansion now, so just substitute
      // into the pattern.
      Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);

      LocalInstantiationScope Scope(SemaRef);
      InstParams = SubstTemplateParams(TempParams);
      if (!InstParams)
        return nullptr;
    }
  } else {
    // Perform the actual substitution of template parameters within a new,
    // local instantiation scope.
    LocalInstantiationScope Scope(SemaRef);
    InstParams = SubstTemplateParams(TempParams);
    if (!InstParams)
      return nullptr;
  }

  // Build the template template parameter.
  TemplateTemplateParmDecl *Param;
  if (IsExpandedParameterPack)
    Param = TemplateTemplateParmDecl::Create(
        SemaRef.Context, Owner, D->getLocation(),
        D->getDepth() - TemplateArgs.getNumLevels(), D->getPosition(),
        D->getIdentifier(), InstParams, ExpandedParams);
  else
    Param = TemplateTemplateParmDecl::Create(
        SemaRef.Context, Owner, D->getLocation(),
        D->getDepth() - TemplateArgs.getNumLevels(), D->getPosition(),
        D->isParameterPack(), D->getIdentifier(), InstParams);
  if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
    NestedNameSpecifierLoc QualifierLoc =
        D->getDefaultArgument().getTemplateQualifierLoc();
    QualifierLoc =
        SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgs);
    TemplateName TName = SemaRef.SubstTemplateName(
        QualifierLoc, D->getDefaultArgument().getArgument().getAsTemplate(),
        D->getDefaultArgument().getTemplateNameLoc(), TemplateArgs);
    if (!TName.isNull())
      Param->setDefaultArgument(
          SemaRef.Context,
          TemplateArgumentLoc(TemplateArgument(TName),
                              D->getDefaultArgument().getTemplateQualifierLoc(),
                              D->getDefaultArgument().getTemplateNameLoc()));
  }
  Param->setAccess(AS_public);

  // Introduce this template parameter's instantiation into the instantiation
  // scope.
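  // Later references to this template template parameter in the pattern will
  // then resolve to the freshly created Param.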
  SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Param);

  return Param;
}

Decl *TemplateDeclInstantiator::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
  // Using directives are never dependent (and never contain any types or
  // expressions), so they require no explicit instantiation work.

  UsingDirectiveDecl *Inst
    = UsingDirectiveDecl::Create(SemaRef.Context, Owner, D->getLocation(),
                                 D->getNamespaceKeyLocation(),
                                 D->getQualifierLoc(),
                                 D->getIdentLocation(),
                                 D->getNominatedNamespace(),
                                 D->getCommonAncestor());

  // Add the using directive to its declaration context
  // only if this is not a function or method.
  if (!Owner->isFunctionOrMethod())
    Owner->addDecl(Inst);

  return Inst;
}

Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {

  // The nested name specifier may be dependent, for example
  //     template <typename T> struct t {
  //       struct s1 { T f1(); };
  //       struct s2 : s1 { using s1::f1; };
  //     };
  //     template struct t<int>;
  // Here, in using s1::f1, s1 refers to t<T>::s1;
  // we need to substitute for t<int>::s1.
  NestedNameSpecifierLoc QualifierLoc
    = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
                                          TemplateArgs);
  if (!QualifierLoc)
    return nullptr;

  // The name info is non-dependent, so no transformation
  // is required.
  DeclarationNameInfo NameInfo = D->getNameInfo();

  // We only need to do redeclaration lookups if we're in a class
  // scope (in fact, it's not really even possible in non-class
  // scopes).
  bool CheckRedeclaration = Owner->isRecord();

  LookupResult Prev(SemaRef, NameInfo, Sema::LookupUsingDeclName,
                    Sema::ForRedeclaration);

  UsingDecl *NewUD = UsingDecl::Create(SemaRef.Context, Owner,
                                       D->getUsingLoc(),
                                       QualifierLoc,
                                       NameInfo,
                                       D->hasTypename());

  CXXScopeSpec SS;
  SS.Adopt(QualifierLoc);
  if (CheckRedeclaration) {
    Prev.setHideTags(false);
    SemaRef.LookupQualifiedName(Prev, Owner);

    // Check for invalid redeclarations.
    if (SemaRef.CheckUsingDeclRedeclaration(D->getUsingLoc(),
                                            D->hasTypename(), SS,
                                            D->getLocation(), Prev))
      NewUD->setInvalidDecl();
  }

  if (!NewUD->isInvalidDecl() &&
      SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), SS, NameInfo,
                                      D->getLocation()))
    NewUD->setInvalidDecl();

  SemaRef.Context.setInstantiatedFromUsingDecl(NewUD, D);
  NewUD->setAccess(D->getAccess());
  Owner->addDecl(NewUD);

  // Don't process the shadow decls for an invalid decl.
  if (NewUD->isInvalidDecl())
    return NewUD;

  if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
    SemaRef.CheckInheritingConstructorUsingDecl(NewUD);
    return NewUD;
  }

  bool isFunctionScope = Owner->isFunctionOrMethod();

  // Process the shadow decls.
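  // Each shadow declaration in the pattern tracks one entity named by the
  // using-declaration; map its target into this instantiation and rebuild
  // the corresponding shadow declaration in the instantiated context.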
  for (auto *Shadow : D->shadows()) {
    NamedDecl *InstTarget =
        cast_or_null<NamedDecl>(SemaRef.FindInstantiatedDecl(
            Shadow->getLocation(), Shadow->getTargetDecl(), TemplateArgs));
    if (!InstTarget)
      return nullptr;

    UsingShadowDecl *PrevDecl = nullptr;
    if (CheckRedeclaration) {
      if (SemaRef.CheckUsingShadowDecl(NewUD, InstTarget, Prev, PrevDecl))
        continue;
    } else if (UsingShadowDecl *OldPrev =
                   getPreviousDeclForInstantiation(Shadow)) {
      PrevDecl = cast_or_null<UsingShadowDecl>(SemaRef.FindInstantiatedDecl(
          Shadow->getLocation(), OldPrev, TemplateArgs));
    }

    UsingShadowDecl *InstShadow =
        SemaRef.BuildUsingShadowDecl(/*Scope*/nullptr, NewUD, InstTarget,
                                     PrevDecl);
    SemaRef.Context.setInstantiatedFromUsingShadowDecl(InstShadow, Shadow);

    if (isFunctionScope)
      SemaRef.CurrentInstantiationScope->InstantiatedLocal(Shadow, InstShadow);
  }

  return NewUD;
}

Decl *TemplateDeclInstantiator::VisitUsingShadowDecl(UsingShadowDecl *D) {
  // Ignore these; we handle them in bulk when processing the UsingDecl.
  return nullptr;
}

Decl * TemplateDeclInstantiator
    ::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
  NestedNameSpecifierLoc QualifierLoc
    = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
                                          TemplateArgs);
  if (!QualifierLoc)
    return nullptr;

  CXXScopeSpec SS;
  SS.Adopt(QualifierLoc);

  // Since NameInfo refers to a typename, it cannot be a C++ special name.
  // Hence, no transformation is required for it.
  DeclarationNameInfo NameInfo(D->getDeclName(), D->getLocation());
  NamedDecl *UD =
    SemaRef.BuildUsingDeclaration(/*Scope*/ nullptr, D->getAccess(),
                                  D->getUsingLoc(), SS, NameInfo, nullptr,
                                  /*instantiation*/ true,
                                  /*typename*/ true, D->getTypenameLoc());
  if (UD)
    SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);

  return UD;
}

Decl * TemplateDeclInstantiator
    ::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
  NestedNameSpecifierLoc QualifierLoc
      = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
                                            TemplateArgs);
  if (!QualifierLoc)
    return nullptr;

  CXXScopeSpec SS;
  SS.Adopt(QualifierLoc);

  DeclarationNameInfo NameInfo
    = SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);

  NamedDecl *UD =
    SemaRef.BuildUsingDeclaration(/*Scope*/ nullptr, D->getAccess(),
                                  D->getUsingLoc(), SS, NameInfo, nullptr,
                                  /*instantiation*/ true,
                                  /*typename*/ false, SourceLocation());
  if (UD)
    SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);

  return UD;
}

Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
    ClassScopeFunctionSpecializationDecl *Decl) {
  CXXMethodDecl *OldFD = Decl->getSpecialization();
  CXXMethodDecl *NewFD =
    cast_or_null<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, nullptr, true));
  if (!NewFD)
    return nullptr;

  LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
                        Sema::ForRedeclaration);

  TemplateArgumentListInfo TemplateArgs;
  TemplateArgumentListInfo *TemplateArgsPtr = nullptr;
  if (Decl->hasExplicitTemplateArgs()) {
    TemplateArgs = Decl->templateArgs();
    TemplateArgsPtr = &TemplateArgs;
  }

  SemaRef.LookupQualifiedName(Previous, SemaRef.CurContext);
  if (SemaRef.CheckFunctionTemplateSpecialization(NewFD, TemplateArgsPtr,
                                                  Previous)) {
    NewFD->setInvalidDecl();
    return NewFD;
  }

  // Associate the specialization with the pattern.
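  // CheckFunctionTemplateSpecialization left the matched specialization in
  // Previous; remember that its definition is the class-scope body OldFD.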
  FunctionDecl *Specialization = cast<FunctionDecl>(Previous.getFoundDecl());
  assert(Specialization && "Class scope Specialization is null");
  SemaRef.Context.setClassScopeSpecializationPattern(Specialization, OldFD);

  return NewFD;
}

Decl *TemplateDeclInstantiator::VisitOMPThreadPrivateDecl(
                                                   OMPThreadPrivateDecl *D) {
  SmallVector<Expr *, 5> Vars;
  for (auto *I : D->varlists()) {
    Expr *Var = SemaRef.SubstExpr(I, TemplateArgs).get();
    assert(isa<DeclRefExpr>(Var) && "threadprivate arg is not a DeclRefExpr");
    Vars.push_back(Var);
  }

  OMPThreadPrivateDecl *TD =
    SemaRef.CheckOMPThreadPrivateDecl(D->getLocation(), Vars);

  TD->setAccess(AS_public);
  Owner->addDecl(TD);

  return TD;
}

Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D) {
  return VisitFunctionDecl(D, nullptr);
}

Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D) {
  return VisitCXXMethodDecl(D, nullptr);
}

Decl *TemplateDeclInstantiator::VisitRecordDecl(RecordDecl *D) {
  llvm_unreachable("There are only CXXRecordDecls in C++");
}

Decl *
TemplateDeclInstantiator::VisitClassTemplateSpecializationDecl(
    ClassTemplateSpecializationDecl *D) {
  // As a MS extension, we permit class-scope explicit specialization
  // of member class templates.
  ClassTemplateDecl *ClassTemplate = D->getSpecializedTemplate();
  assert(ClassTemplate->getDeclContext()->isRecord() &&
         D->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
         "can only instantiate an explicit specialization "
         "for a member class template");

  // Lookup the already-instantiated declaration in the instantiation
  // of the class template. FIXME: Diagnose or assert if this fails?
  DeclContext::lookup_result Found
    = Owner->lookup(ClassTemplate->getDeclName());
  if (Found.empty())
    return nullptr;
  ClassTemplateDecl *InstClassTemplate
    = dyn_cast<ClassTemplateDecl>(Found.front());
  if (!InstClassTemplate)
    return nullptr;

  // Substitute into the template arguments of the class template explicit
  // specialization.
  TemplateSpecializationTypeLoc Loc = D->getTypeAsWritten()->getTypeLoc().
                                       castAs<TemplateSpecializationTypeLoc>();
  TemplateArgumentListInfo InstTemplateArgs(Loc.getLAngleLoc(),
                                            Loc.getRAngleLoc());
  SmallVector<TemplateArgumentLoc, 4> ArgLocs;
  for (unsigned I = 0; I != Loc.getNumArgs(); ++I)
    ArgLocs.push_back(Loc.getArgLoc(I));
  if (SemaRef.Subst(ArgLocs.data(), ArgLocs.size(),
                    InstTemplateArgs, TemplateArgs))
    return nullptr;

  // Check that the template argument list is well-formed for this
  // class template.
  SmallVector<TemplateArgument, 4> Converted;
  if (SemaRef.CheckTemplateArgumentList(InstClassTemplate,
                                        D->getLocation(),
                                        InstTemplateArgs,
                                        false,
                                        Converted))
    return nullptr;

  // Figure out where to insert this class template explicit specialization
  // in the member template's set of class template explicit specializations.
  void *InsertPos = nullptr;
  ClassTemplateSpecializationDecl *PrevDecl =
      InstClassTemplate->findSpecialization(Converted, InsertPos);

  // Check whether we've already seen a conflicting instantiation of this
  // declaration (for instance, if there was a prior implicit instantiation).
  bool Ignored;
  if (PrevDecl &&
      SemaRef.CheckSpecializationInstantiationRedecl(
          D->getLocation(),
          D->getSpecializationKind(),
          PrevDecl,
          PrevDecl->getSpecializationKind(),
          PrevDecl->getPointOfInstantiation(),
          Ignored))
    return nullptr;

  // If PrevDecl was a definition and D is also a definition, diagnose.
  // This happens in cases like:
  //
  //   template<typename T, typename U>
  //   struct Outer {
  //     template<typename X> struct Inner;
  //     template<> struct Inner<T> {};
  //     template<> struct Inner<U> {};
  //   };
  //
  //   Outer<int, int> outer; // error: the explicit specializations of Inner
  //                          // have the same signature.
  if (PrevDecl && PrevDecl->getDefinition() &&
      D->isThisDeclarationADefinition()) {
    SemaRef.Diag(D->getLocation(), diag::err_redefinition) << PrevDecl;
    SemaRef.Diag(PrevDecl->getDefinition()->getLocation(),
                 diag::note_previous_definition);
    return nullptr;
  }

  // Create the class template explicit specialization declaration.
  ClassTemplateSpecializationDecl *InstD
    = ClassTemplateSpecializationDecl::Create(SemaRef.Context,
                                              D->getTagKind(),
                                              Owner,
                                              D->getLocStart(),
                                              D->getLocation(),
                                              InstClassTemplate,
                                              Converted.data(),
                                              Converted.size(),
                                              PrevDecl);

  // Add this explicit specialization to the set of class template
  // specializations.
  if (!PrevDecl)
    InstClassTemplate->AddSpecialization(InstD, InsertPos);

  // Substitute the nested name specifier, if any.
  if (SubstQualifier(D, InstD))
    return nullptr;

  // Build the canonical type that describes the converted template
  // arguments of the class template explicit specialization.
  QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
      TemplateName(InstClassTemplate), Converted.data(), Converted.size(),
      SemaRef.Context.getRecordType(InstD));

  // Build the fully-sugared type for this class template
  // specialization as the user wrote in the specialization
  // itself. This means that we'll pretty-print the type retrieved
  // from the specialization's declaration the way that the user
  // actually wrote the specialization, rather than formatting the
  // name based on the "canonical" representation used to store the
  // template arguments in the specialization.
  TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
      TemplateName(InstClassTemplate), D->getLocation(), InstTemplateArgs,
      CanonType);

  InstD->setAccess(D->getAccess());
  InstD->setInstantiationOfMemberClass(D, TSK_ImplicitInstantiation);
  InstD->setSpecializationKind(D->getSpecializationKind());
  InstD->setTypeAsWritten(WrittenTy);
  InstD->setExternLoc(D->getExternLoc());
  InstD->setTemplateKeywordLoc(D->getTemplateKeywordLoc());

  Owner->addDecl(InstD);

  // Instantiate the members of the class-scope explicit specialization
  // eagerly. We don't have support for lazy instantiation of an explicit
  // specialization yet, and MSVC eagerly instantiates in this case.
  if (D->isThisDeclarationADefinition() &&
      SemaRef.InstantiateClass(D->getLocation(), InstD, D, TemplateArgs,
                               TSK_ImplicitInstantiation,
                               /*Complain=*/true))
    return nullptr;

  return InstD;
}

Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *D) {

  TemplateArgumentListInfo VarTemplateArgsInfo;
  VarTemplateDecl *VarTemplate = D->getSpecializedTemplate();
  assert(VarTemplate &&
         "A template specialization without specialized template?");

  // Substitute the current template arguments.
  const TemplateArgumentListInfo &TemplateArgsInfo = D->getTemplateArgsInfo();
  VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo.getLAngleLoc());
  VarTemplateArgsInfo.setRAngleLoc(TemplateArgsInfo.getRAngleLoc());

  if (SemaRef.Subst(TemplateArgsInfo.getArgumentArray(),
                    TemplateArgsInfo.size(),
                    VarTemplateArgsInfo, TemplateArgs))
    return nullptr;

  // Check that the template argument list is well-formed for this template.
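  // The converted (canonical) arguments produced here are the key used by
  // findSpecialization below.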
  SmallVector<TemplateArgument, 4> Converted;
  if (SemaRef.CheckTemplateArgumentList(
          VarTemplate, VarTemplate->getLocStart(),
          const_cast<TemplateArgumentListInfo &>(VarTemplateArgsInfo), false,
          Converted))
    return nullptr;

  // Find the variable template specialization declaration that
  // corresponds to these arguments.
  void *InsertPos = nullptr;
  if (VarTemplateSpecializationDecl *VarSpec = VarTemplate->findSpecialization(
          Converted, InsertPos))
    // If we already have a variable template specialization, return it.
    return VarSpec;

  return VisitVarTemplateSpecializationDecl(VarTemplate, D, InsertPos,
                                            VarTemplateArgsInfo, Converted);
}

Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
    VarTemplateDecl *VarTemplate, VarDecl *D, void *InsertPos,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    ArrayRef<TemplateArgument> Converted) {

  // If this is the variable for an anonymous struct or union,
  // instantiate the anonymous struct/union type first.
  if (const RecordType *RecordTy = D->getType()->getAs<RecordType>())
    if (RecordTy->getDecl()->isAnonymousStructOrUnion())
      if (!VisitCXXRecordDecl(cast<CXXRecordDecl>(RecordTy->getDecl())))
        return nullptr;

  // Do substitution on the type of the declaration
  TypeSourceInfo *DI =
      SemaRef.SubstType(D->getTypeSourceInfo(), TemplateArgs,
                        D->getTypeSpecStartLoc(), D->getDeclName());
  if (!DI)
    return nullptr;

  if (DI->getType()->isFunctionType()) {
    SemaRef.Diag(D->getLocation(), diag::err_variable_instantiates_to_function)
        << D->isStaticDataMember() << DI->getType();
    return nullptr;
  }

  // Build the instantiated declaration
  VarTemplateSpecializationDecl *Var = VarTemplateSpecializationDecl::Create(
      SemaRef.Context, Owner, D->getInnerLocStart(), D->getLocation(),
      VarTemplate, DI->getType(), DI, D->getStorageClass(), Converted.data(),
      Converted.size());
  Var->setTemplateArgsInfo(TemplateArgsInfo);
  if (InsertPos)
    VarTemplate->AddSpecialization(Var, InsertPos);

  // Substitute the nested name specifier, if any.
  if (SubstQualifier(D, Var))
    return nullptr;

  SemaRef.BuildVariableInstantiation(Var, D, TemplateArgs, LateAttrs, Owner,
                                     StartingScope);

  return Var;
}

Decl *TemplateDeclInstantiator::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D) {
  llvm_unreachable("@defs is not supported in Objective-C++");
}

Decl *TemplateDeclInstantiator::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
  // FIXME: We need to be able to instantiate FriendTemplateDecls.
  unsigned DiagID = SemaRef.getDiagnostics().getCustomDiagID(
                                                 DiagnosticsEngine::Error,
                                                 "cannot instantiate %0 yet");
  SemaRef.Diag(D->getLocation(), DiagID)
    << D->getDeclKindName();

  return nullptr;
}

Decl *TemplateDeclInstantiator::VisitDecl(Decl *D) {
  llvm_unreachable("Unexpected decl");
}

Decl *Sema::SubstDecl(Decl *D, DeclContext *Owner,
                      const MultiLevelTemplateArgumentList &TemplateArgs) {
  TemplateDeclInstantiator Instantiator(*this, Owner, TemplateArgs);
  if (D->isInvalidDecl())
    return nullptr;

  return Instantiator.Visit(D);
}

/// \brief Instantiates a nested template parameter list in the current
/// instantiation context.
///
/// \param L The parameter list to instantiate
///
/// \returns NULL if there was an error
TemplateParameterList *
TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
  // Get errors for all the parameters before bailing out.
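  // Visit every parameter even after one fails so that the diagnostics for
  // the whole parameter list are emitted in a single pass.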
  bool Invalid = false;
  unsigned N = L->size();
  typedef SmallVector<NamedDecl *, 8> ParamVector;
  ParamVector Params;
  Params.reserve(N);
  for (auto &P : *L) {
    NamedDecl *D = cast_or_null<NamedDecl>(Visit(P));
    Params.push_back(D);
    Invalid = Invalid || !D || D->isInvalidDecl();
  }

  // Clean up if we had an error.
  if (Invalid)
    return nullptr;

  TemplateParameterList *InstL
    = TemplateParameterList::Create(SemaRef.Context, L->getTemplateLoc(),
                                    L->getLAngleLoc(), &Params.front(), N,
                                    L->getRAngleLoc());
  return InstL;
}

/// \brief Instantiate the declaration of a class template partial
/// specialization.
///
/// \param ClassTemplate the (instantiated) class template that is partially
/// specialized by the instantiation of \p PartialSpec.
///
/// \param PartialSpec the (uninstantiated) class template partial
/// specialization that we are instantiating.
///
/// \returns The instantiated partial specialization, if successful; otherwise,
/// NULL to indicate an error.
ClassTemplatePartialSpecializationDecl *
TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
                                           ClassTemplateDecl *ClassTemplate,
                         ClassTemplatePartialSpecializationDecl *PartialSpec) {
  // Create a local instantiation scope for this class template partial
  // specialization, which will contain the instantiations of the template
  // parameters.
  LocalInstantiationScope Scope(SemaRef);

  // Substitute into the template parameters of the class template partial
  // specialization.
  TemplateParameterList *TempParams = PartialSpec->getTemplateParameters();
  TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
  if (!InstParams)
    return nullptr;

  // Substitute into the template arguments of the class template partial
  // specialization.
  const ASTTemplateArgumentListInfo *TemplArgInfo
    = PartialSpec->getTemplateArgsAsWritten();
  TemplateArgumentListInfo InstTemplateArgs(TemplArgInfo->LAngleLoc,
                                            TemplArgInfo->RAngleLoc);
  if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
                    TemplArgInfo->NumTemplateArgs,
                    InstTemplateArgs, TemplateArgs))
    return nullptr;

  // Check that the template argument list is well-formed for this
  // class template.
  SmallVector<TemplateArgument, 4> Converted;
  if (SemaRef.CheckTemplateArgumentList(ClassTemplate,
                                        PartialSpec->getLocation(),
                                        InstTemplateArgs,
                                        false,
                                        Converted))
    return nullptr;

  // Figure out where to insert this class template partial specialization
  // in the member template's set of class template partial specializations.
  void *InsertPos = nullptr;
  ClassTemplateSpecializationDecl *PrevDecl
    = ClassTemplate->findPartialSpecialization(Converted, InsertPos);

  // Build the canonical type that describes the converted template
  // arguments of the class template partial specialization.
  QualType CanonType
    = SemaRef.Context.getTemplateSpecializationType(
                                                  TemplateName(ClassTemplate),
                                                  Converted.data(),
                                                  Converted.size());

  // Build the fully-sugared type for this class template
  // specialization as the user wrote in the specialization
  // itself. This means that we'll pretty-print the type retrieved
  // from the specialization's declaration the way that the user
  // actually wrote the specialization, rather than formatting the
  // name based on the "canonical" representation used to store the
  // template arguments in the specialization.
  TypeSourceInfo *WrittenTy
    = SemaRef.Context.getTemplateSpecializationTypeInfo(
                                                  TemplateName(ClassTemplate),
                                                  PartialSpec->getLocation(),
                                                  InstTemplateArgs,
                                                  CanonType);

  if (PrevDecl) {
    // We've already seen a partial specialization with the same template
    // parameters and template arguments. This can happen, for example, when
    // substituting the outer template arguments ends up causing two
    // class template partial specializations of a member class template
    // to have identical forms, e.g.,
    //
    //   template<typename T, typename U>
    //   struct Outer {
    //     template<typename X, typename Y> struct Inner;
    //     template<typename Y> struct Inner<T, Y>;
    //     template<typename Y> struct Inner<U, Y>;
    //   };
    //
    //   Outer<int, int> outer; // error: the partial specializations of Inner
    //                          // have the same signature.
    SemaRef.Diag(PartialSpec->getLocation(), diag::err_partial_spec_redeclared)
      << WrittenTy->getType();
    SemaRef.Diag(PrevDecl->getLocation(), diag::note_prev_partial_spec_here)
      << SemaRef.Context.getTypeDeclType(PrevDecl);
    return nullptr;
  }

  // Create the class template partial specialization declaration.
  ClassTemplatePartialSpecializationDecl *InstPartialSpec
    = ClassTemplatePartialSpecializationDecl::Create(SemaRef.Context,
                                                    PartialSpec->getTagKind(),
                                                    Owner,
                                                    PartialSpec->getLocStart(),
                                                    PartialSpec->getLocation(),
                                                    InstParams,
                                                    ClassTemplate,
                                                    Converted.data(),
                                                    Converted.size(),
                                                    InstTemplateArgs,
                                                    CanonType,
                                                    nullptr);
  // Substitute the nested name specifier, if any.
  if (SubstQualifier(PartialSpec, InstPartialSpec))
    return nullptr;

  InstPartialSpec->setInstantiatedFromMember(PartialSpec);
  InstPartialSpec->setTypeAsWritten(WrittenTy);

  // Add this partial specialization to the set of class template partial
  // specializations.
  ClassTemplate->AddPartialSpecialization(InstPartialSpec,
                                          /*InsertPos=*/nullptr);
  return InstPartialSpec;
}

/// \brief Instantiate the declaration of a variable template partial
/// specialization.
///
/// \param VarTemplate the (instantiated) variable template that is partially
/// specialized by the instantiation of \p PartialSpec.
///
/// \param PartialSpec the (uninstantiated) variable template partial
/// specialization that we are instantiating.
///
/// \returns The instantiated partial specialization, if successful; otherwise,
/// NULL to indicate an error.
VarTemplatePartialSpecializationDecl *
TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
    VarTemplateDecl *VarTemplate,
    VarTemplatePartialSpecializationDecl *PartialSpec) {
  // Create a local instantiation scope for this variable template partial
  // specialization, which will contain the instantiations of the template
  // parameters.
  LocalInstantiationScope Scope(SemaRef);

  // Substitute into the template parameters of the variable template partial
  // specialization.
  TemplateParameterList *TempParams = PartialSpec->getTemplateParameters();
  TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
  if (!InstParams)
    return nullptr;

  // Substitute into the template arguments of the variable template partial
  // specialization.
  const ASTTemplateArgumentListInfo *TemplArgInfo
    = PartialSpec->getTemplateArgsAsWritten();
  TemplateArgumentListInfo InstTemplateArgs(TemplArgInfo->LAngleLoc,
                                            TemplArgInfo->RAngleLoc);
  if (SemaRef.Subst(TemplArgInfo->getTemplateArgs(),
                    TemplArgInfo->NumTemplateArgs,
                    InstTemplateArgs, TemplateArgs))
    return nullptr;

  // Check that the template argument list is well-formed for this
  // variable template.
  SmallVector<TemplateArgument, 4> Converted;
  if (SemaRef.CheckTemplateArgumentList(VarTemplate, PartialSpec->getLocation(),
                                        InstTemplateArgs, false, Converted))
    return nullptr;

  // Figure out where to insert this variable template partial specialization
  // in the member template's set of variable template partial specializations.
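  // findPartialSpecialization keys on the canonical converted arguments, so
  // a non-null PrevDecl below means another member partial specialization
  // already instantiated to the same form.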
  void *InsertPos = nullptr;
  VarTemplateSpecializationDecl *PrevDecl =
      VarTemplate->findPartialSpecialization(Converted, InsertPos);

  // Build the canonical type that describes the converted template
  // arguments of the variable template partial specialization.
  QualType CanonType = SemaRef.Context.getTemplateSpecializationType(
      TemplateName(VarTemplate), Converted.data(), Converted.size());

  // Build the fully-sugared type for this variable template
  // specialization as the user wrote in the specialization
  // itself. This means that we'll pretty-print the type retrieved
  // from the specialization's declaration the way that the user
  // actually wrote the specialization, rather than formatting the
  // name based on the "canonical" representation used to store the
  // template arguments in the specialization.
  TypeSourceInfo *WrittenTy = SemaRef.Context.getTemplateSpecializationTypeInfo(
      TemplateName(VarTemplate), PartialSpec->getLocation(), InstTemplateArgs,
      CanonType);

  if (PrevDecl) {
    // We've already seen a partial specialization with the same template
    // parameters and template arguments. This can happen, for example, when
    // substituting the outer template arguments ends up causing two
    // variable template partial specializations of a member variable template
    // to have identical forms, e.g.,
    //
    //   template<typename T, typename U>
    //   struct Outer {
    //     template<typename X, typename Y> pair<X,Y> p;
    //     template<typename Y> pair<T, Y> p;
    //     template<typename Y> pair<U, Y> p;
    //   };
    //
    //   Outer<int, int> outer; // error: the partial specializations of p
    //                          // have the same signature.
    SemaRef.Diag(PartialSpec->getLocation(),
                 diag::err_var_partial_spec_redeclared)
        << WrittenTy->getType();
    SemaRef.Diag(PrevDecl->getLocation(),
                 diag::note_var_prev_partial_spec_here);
    return nullptr;
  }

  // Do substitution on the type of the declaration
  TypeSourceInfo *DI = SemaRef.SubstType(
      PartialSpec->getTypeSourceInfo(), TemplateArgs,
      PartialSpec->getTypeSpecStartLoc(), PartialSpec->getDeclName());
  if (!DI)
    return nullptr;

  if (DI->getType()->isFunctionType()) {
    SemaRef.Diag(PartialSpec->getLocation(),
                 diag::err_variable_instantiates_to_function)
        << PartialSpec->isStaticDataMember() << DI->getType();
    return nullptr;
  }

  // Create the variable template partial specialization declaration.
  VarTemplatePartialSpecializationDecl *InstPartialSpec =
      VarTemplatePartialSpecializationDecl::Create(
          SemaRef.Context, Owner, PartialSpec->getInnerLocStart(),
          PartialSpec->getLocation(), InstParams, VarTemplate, DI->getType(),
          DI, PartialSpec->getStorageClass(), Converted.data(),
          Converted.size(), InstTemplateArgs);

  // Substitute the nested name specifier, if any.
  if (SubstQualifier(PartialSpec, InstPartialSpec))
    return nullptr;

  InstPartialSpec->setInstantiatedFromMember(PartialSpec);
  InstPartialSpec->setTypeAsWritten(WrittenTy);

  // Add this partial specialization to the set of variable template partial
  // specializations. The instantiation of the initializer is not necessary.
VarTemplate->AddPartialSpecialization(InstPartialSpec, /*InsertPos=*/nullptr); SemaRef.BuildVariableInstantiation(InstPartialSpec, PartialSpec, TemplateArgs, LateAttrs, Owner, StartingScope); return InstPartialSpec; } TypeSourceInfo* TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D, SmallVectorImpl<ParmVarDecl *> &Params) { TypeSourceInfo *OldTInfo = D->getTypeSourceInfo(); assert(OldTInfo && "substituting function without type source info"); assert(Params.empty() && "parameter vector is non-empty at start"); CXXRecordDecl *ThisContext = nullptr; unsigned ThisTypeQuals = 0; if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) { ThisContext = cast<CXXRecordDecl>(Owner); ThisTypeQuals = Method->getTypeQualifiers(); } TypeSourceInfo *NewTInfo = SemaRef.SubstFunctionDeclType(OldTInfo, TemplateArgs, D->getTypeSpecStartLoc(), D->getDeclName(), ThisContext, ThisTypeQuals); if (!NewTInfo) return nullptr; TypeLoc OldTL = OldTInfo->getTypeLoc().IgnoreParens(); if (FunctionProtoTypeLoc OldProtoLoc = OldTL.getAs<FunctionProtoTypeLoc>()) { if (NewTInfo != OldTInfo) { // Get parameters from the new type info. TypeLoc NewTL = NewTInfo->getTypeLoc().IgnoreParens(); FunctionProtoTypeLoc NewProtoLoc = NewTL.castAs<FunctionProtoTypeLoc>(); unsigned NewIdx = 0; for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc.getNumParams(); OldIdx != NumOldParams; ++OldIdx) { ParmVarDecl *OldParam = OldProtoLoc.getParam(OldIdx); LocalInstantiationScope *Scope = SemaRef.CurrentInstantiationScope; Optional<unsigned> NumArgumentsInExpansion; if (OldParam->isParameterPack()) NumArgumentsInExpansion = SemaRef.getNumArgumentsInExpansion(OldParam->getType(), TemplateArgs); if (!NumArgumentsInExpansion) { // Simple case: normal parameter, or a parameter pack that's // instantiated to a (still-dependent) parameter pack. ParmVarDecl *NewParam = NewProtoLoc.getParam(NewIdx++); Params.push_back(NewParam); Scope->InstantiatedLocal(OldParam, NewParam); } else { // Parameter pack expansion: make the instantiation an argument pack. Scope->MakeInstantiatedLocalArgPack(OldParam); for (unsigned I = 0; I != *NumArgumentsInExpansion; ++I) { ParmVarDecl *NewParam = NewProtoLoc.getParam(NewIdx++); Params.push_back(NewParam); Scope->InstantiatedLocalPackArg(OldParam, NewParam); } } } } else { // The function type itself was not dependent and therefore no // substitution occurred. However, we still need to instantiate // the function parameters themselves. const FunctionProtoType *OldProto = cast<FunctionProtoType>(OldProtoLoc.getType()); for (unsigned i = 0, i_end = OldProtoLoc.getNumParams(); i != i_end; ++i) { ParmVarDecl *OldParam = OldProtoLoc.getParam(i); if (!OldParam) { Params.push_back(SemaRef.BuildParmVarDeclForTypedef( D, D->getLocation(), OldProto->getParamType(i))); continue; } ParmVarDecl *Parm = cast_or_null<ParmVarDecl>(VisitParmVarDecl(OldParam)); if (!Parm) return nullptr; Params.push_back(Parm); } } } else { // If the type of this function, after ignoring parentheses, is not // *directly* a function type, then we're instantiating a function that // was declared via a typedef or with attributes, e.g., // // typedef int functype(int, int); // functype func; // int __cdecl meth(int, int); // // In this case, we'll just go instantiate the ParmVarDecls that we // synthesized in the method declaration. 
SmallVector<QualType, 4> ParamTypes; if (SemaRef.SubstParmTypes(D->getLocation(), D->param_begin(), D->getNumParams(), TemplateArgs, ParamTypes, &Params)) return nullptr; } return NewTInfo; } /// Introduce the instantiated function parameters into the local /// instantiation scope, and set the parameter names to those used /// in the template. /// HLSL Change Begin - back ported from llvm-project/601377b23767. bool Sema::addInstantiatedParametersToScope( FunctionDecl *Function, const FunctionDecl *PatternDecl, LocalInstantiationScope &Scope, const MultiLevelTemplateArgumentList &TemplateArgs) { unsigned FParamIdx = 0; for (unsigned I = 0, N = PatternDecl->getNumParams(); I != N; ++I) { const ParmVarDecl *PatternParam = PatternDecl->getParamDecl(I); if (!PatternParam->isParameterPack()) { // Simple case: not a parameter pack. assert(FParamIdx < Function->getNumParams()); ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx); FunctionParam->setDeclName(PatternParam->getDeclName()); // If the parameter's type is not dependent, update it to match the type // in the pattern. They can differ in top-level cv-qualifiers, and we want // the pattern's type here. If the type is dependent, they can't differ, // per core issue 1668. Substitute into the type from the pattern, in case // it's instantiation-dependent. // FIXME: Updating the type to work around this is at best fragile. if (!PatternDecl->getType()->isDependentType()) { QualType T = SubstType(PatternParam->getType(), TemplateArgs, FunctionParam->getLocation(), FunctionParam->getDeclName()); if (T.isNull()) return true; FunctionParam->setType(T); } Scope.InstantiatedLocal(PatternParam, FunctionParam); ++FParamIdx; continue; } // Expand the parameter pack. Scope.MakeInstantiatedLocalArgPack(PatternParam); Optional<unsigned> NumArgumentsInExpansion = getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs); assert(NumArgumentsInExpansion && "should only be called when all template arguments are known"); QualType PatternType = PatternParam->getType()->castAs<PackExpansionType>()->getPattern(); for (unsigned Arg = 0; Arg < *NumArgumentsInExpansion; ++Arg) { ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx); FunctionParam->setDeclName(PatternParam->getDeclName()); if (!PatternDecl->getType()->isDependentType()) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, Arg); QualType T = SubstType(PatternType, TemplateArgs, FunctionParam->getLocation(), FunctionParam->getDeclName()); if (T.isNull()) return true; FunctionParam->setType(T); } Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam); ++FParamIdx; } } return false; } /// HLSL Change End - back ported from llvm-project/601377b23767. void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Decl) { const FunctionProtoType *Proto = Decl->getType()->castAs<FunctionProtoType>(); if (Proto->getExceptionSpecType() != EST_Uninstantiated) return; InstantiatingTemplate Inst(*this, PointOfInstantiation, Decl, InstantiatingTemplate::ExceptionSpecification()); if (Inst.isInvalid()) { // We hit the instantiation depth limit. Clear the exception specification // so that our callers don't have to cope with EST_Uninstantiated. UpdateExceptionSpec(Decl, EST_None); return; } // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. 
Sema::ContextRAII savedContext(*this, Decl); LocalInstantiationScope Scope(*this); MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(Decl, nullptr, /*RelativeToPrimary*/true); FunctionDecl *Template = Proto->getExceptionSpecTemplate(); // HLSL Change - back ported from llvm-project/601377b23767. if (addInstantiatedParametersToScope(Decl, Template, Scope, TemplateArgs)) { UpdateExceptionSpec(Decl, EST_None); return; } SubstExceptionSpec(Decl, Template->getType()->castAs<FunctionProtoType>(), TemplateArgs); } /// \brief Initializes the common fields of an instantiated function /// declaration (New) from the corresponding fields of its template (Tmpl). /// /// \returns true if there was an error bool TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New, FunctionDecl *Tmpl) { if (Tmpl->isDeleted()) New->setDeletedAsWritten(); // Forward the mangling number from the template to the instantiated decl. SemaRef.Context.setManglingNumber(New, SemaRef.Context.getManglingNumber(Tmpl)); // If we are substituting explicitly-specified template arguments // or deduced template arguments into a function template and we reach this // point, we are now past the point where SFINAE applies and have committed // to keeping the new function template specialization. We therefore // convert the active template instantiation for the function template // into a template instantiation for this specific function template // specialization, which is not a SFINAE context, so that we diagnose any // further errors in the declaration itself. typedef Sema::ActiveTemplateInstantiation ActiveInstType; ActiveInstType &ActiveInst = SemaRef.ActiveTemplateInstantiations.back(); if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution || ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) { if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ActiveInst.Entity)) { assert(FunTmpl->getTemplatedDecl() == Tmpl && "Deduction from the wrong function template?"); (void) FunTmpl; ActiveInst.Kind = ActiveInstType::TemplateInstantiation; ActiveInst.Entity = New; } } const FunctionProtoType *Proto = Tmpl->getType()->getAs<FunctionProtoType>(); assert(Proto && "Function template without prototype?"); if (Proto->hasExceptionSpec() || Proto->getNoReturnAttr()) { FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo(); // DR1330: In C++11, defer instantiation of a non-trivial // exception specification. // DR1484: Local classes and their members are instantiated along with the // containing function. bool RequireInstantiation = false; if (CXXRecordDecl *Cls = dyn_cast<CXXRecordDecl>(Tmpl->getDeclContext())) { if (Cls->isLocalClass()) RequireInstantiation = true; } if (SemaRef.getLangOpts().CPlusPlus11 && EPI.ExceptionSpec.Type != EST_None && EPI.ExceptionSpec.Type != EST_DynamicNone && EPI.ExceptionSpec.Type != EST_BasicNoexcept && !RequireInstantiation) { FunctionDecl *ExceptionSpecTemplate = Tmpl; if (EPI.ExceptionSpec.Type == EST_Uninstantiated) ExceptionSpecTemplate = EPI.ExceptionSpec.SourceTemplate; ExceptionSpecificationType NewEST = EST_Uninstantiated; if (EPI.ExceptionSpec.Type == EST_Unevaluated) NewEST = EST_Unevaluated; // Mark the function as having an uninstantiated exception specification.
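// For example (a minimal sketch; 'f' is hypothetical): given // template<typename T> void f() noexcept(noexcept(T())); // the noexcept predicate of f<int> is not substituted at this point; the // specification stays EST_Uninstantiated until something, such as a call, // forces it to be resolved via InstantiateExceptionSpec above.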
const FunctionProtoType *NewProto = New->getType()->getAs<FunctionProtoType>(); assert(NewProto && "Template instantiation without function prototype?"); EPI = NewProto->getExtProtoInfo(); EPI.ExceptionSpec.Type = NewEST; EPI.ExceptionSpec.SourceDecl = New; EPI.ExceptionSpec.SourceTemplate = ExceptionSpecTemplate; New->setType(SemaRef.Context.getFunctionType( NewProto->getReturnType(), NewProto->getParamTypes(), EPI, NewProto->getParamMods())); // HLSL Change } else { SemaRef.SubstExceptionSpec(New, Proto, TemplateArgs); } } // Get the definition. Leaves the variable unchanged if undefined. const FunctionDecl *Definition = Tmpl; Tmpl->isDefined(Definition); SemaRef.InstantiateAttrs(TemplateArgs, Definition, New, LateAttrs, StartingScope); return false; } /// \brief Initializes common fields of an instantiated method /// declaration (New) from the corresponding fields of its template /// (Tmpl). /// /// \returns true if there was an error bool TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New, CXXMethodDecl *Tmpl) { if (InitFunctionInstantiation(New, Tmpl)) return true; New->setAccess(Tmpl->getAccess()); if (Tmpl->isVirtualAsWritten()) New->setVirtualAsWritten(true); // FIXME: New needs a pointer to Tmpl return false; } /// \brief Instantiate the definition of the given function from its /// template. /// /// \param PointOfInstantiation the point at which the instantiation was /// required. Note that this is not precisely a "point of instantiation" /// for the function, but it's close. /// /// \param Function the already-instantiated declaration of a /// function template specialization or member function of a class template /// specialization. /// /// \param Recursive if true, recursively instantiates any functions that /// are required by this instantiation. /// /// \param DefinitionRequired if true, then we are performing an explicit /// instantiation where the body of the function is required. Complain if /// there is no such body. void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive, bool DefinitionRequired) { if (Function->isInvalidDecl() || Function->isDefined()) return; // HLSL Change Begin - Support hierarchical time tracing. llvm::TimeTraceScope TimeScope("InstantiateFunction", [&]() { return Function->getQualifiedNameAsString(); }); // HLSL Change End - Support hierarchical time tracing. // Never instantiate an explicit specialization except if it is a class scope // explicit specialization. if (Function->getTemplateSpecializationKind() == TSK_ExplicitSpecialization && !Function->getClassScopeSpecializationPattern()) return; // Find the function body that we'll be substituting. const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern(); assert(PatternDecl && "instantiating a non-template"); Stmt *Pattern = PatternDecl->getBody(PatternDecl); assert(PatternDecl && "template definition is not a template"); if (!Pattern) { // Try to find a defaulted definition PatternDecl->isDefined(PatternDecl); } assert(PatternDecl && "template definition is not a template"); // Postpone late parsed template instantiations. if (PatternDecl->isLateTemplateParsed() && !LateTemplateParser) { PendingInstantiations.push_back( std::make_pair(Function, PointOfInstantiation)); return; } // If we're performing recursive template instantiation, create our own // queue of pending implicit instantiations that we will instantiate later, // while we're still within our own instantiation context.
// This has to happen before LateTemplateParser below is called, so that // it marks vtables used in late parsed templates as used. SavePendingLocalImplicitInstantiationsRAII SavedPendingLocalImplicitInstantiations(*this); SavePendingInstantiationsAndVTableUsesRAII SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive); // Call the LateTemplateParser callback if there is a need to late parse // a templated function definition. if (!Pattern && PatternDecl->isLateTemplateParsed() && LateTemplateParser) { // FIXME: Optimize to allow individual templates to be deserialized. if (PatternDecl->isFromASTFile()) ExternalSource->ReadLateParsedTemplates(LateParsedTemplateMap); LateParsedTemplate *LPT = LateParsedTemplateMap.lookup(PatternDecl); assert(LPT && "missing LateParsedTemplate"); LateTemplateParser(OpaqueParser, *LPT); Pattern = PatternDecl->getBody(PatternDecl); } if (!Pattern && !PatternDecl->isDefaulted()) { if (DefinitionRequired) { if (Function->getPrimaryTemplate()) Diag(PointOfInstantiation, diag::err_explicit_instantiation_undefined_func_template) << Function->getPrimaryTemplate(); else Diag(PointOfInstantiation, diag::err_explicit_instantiation_undefined_member) << 1 << Function->getDeclName() << Function->getDeclContext(); if (PatternDecl) Diag(PatternDecl->getLocation(), diag::note_explicit_instantiation_here); Function->setInvalidDecl(); } else if (Function->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition) { assert(!Recursive); PendingInstantiations.push_back( std::make_pair(Function, PointOfInstantiation)); } return; } // C++1y [temp.explicit]p10: // Except for inline functions, declarations with types deduced from their // initializer or return value, and class template specializations, other // explicit instantiation declarations have the effect of suppressing the // implicit instantiation of the entity to which they refer. if (Function->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration && !PatternDecl->isInlined() && !PatternDecl->getReturnType()->getContainedAutoType()) return; if (PatternDecl->isInlined()) { // Function, and all later redeclarations of it (from imported modules, // for instance), are now implicitly inline. for (auto *D = Function->getMostRecentDecl(); /**/; D = D->getPreviousDecl()) { D->setImplicitlyInline(); if (D == Function) break; } } InstantiatingTemplate Inst(*this, PointOfInstantiation, Function); if (Inst.isInvalid()) return; // Copy the inner loc start from the pattern. Function->setInnerLocStart(PatternDecl->getInnerLocStart()); EnterExpressionEvaluationContext EvalContext(*this, Sema::PotentiallyEvaluated); // Introduce a new scope where local variable instantiations will be // recorded, unless we're actually a member function within a local // class, in which case we need to merge our results with the parent // scope (of the enclosing function). bool MergeWithParentScope = false; if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Function->getDeclContext())) MergeWithParentScope = Rec->isLocalClass(); LocalInstantiationScope Scope(*this, MergeWithParentScope); if (PatternDecl->isDefaulted()) SetDeclDefaulted(Function, PatternDecl->getLocation()); else { MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(Function, nullptr, false, PatternDecl); // Substitute into the qualifier; we can get a substitution failure here // through evil use of alias templates. // FIXME: Is CurContext correct for this? 
Should we go to the (instantiation // of the) lexical context of the pattern? SubstQualifier(*this, PatternDecl, Function, TemplateArgs); ActOnStartOfFunctionDef(nullptr, Function); // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. Sema::ContextRAII savedContext(*this, Function); // HLSL Change - back ported from llvm-project/601377b23767. if (addInstantiatedParametersToScope(Function, PatternDecl, Scope, TemplateArgs)) return; // If this is a constructor, instantiate the member initializers. if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(PatternDecl)) { InstantiateMemInitializers(cast<CXXConstructorDecl>(Function), Ctor, TemplateArgs); } // Instantiate the function body. StmtResult Body = SubstStmt(Pattern, TemplateArgs); if (Body.isInvalid()) Function->setInvalidDecl(); ActOnFinishFunctionBody(Function, Body.get(), /*IsInstantiation=*/true); PerformDependentDiagnostics(PatternDecl, TemplateArgs); if (auto *Listener = getASTMutationListener()) Listener->FunctionDefinitionInstantiated(Function); savedContext.pop(); } DeclGroupRef DG(Function); Consumer.HandleTopLevelDecl(DG); // This class may have local implicit instantiations that need to be // instantiated within this scope. PerformPendingInstantiations(/*LocalOnly=*/true); Scope.Exit(); if (Recursive) { // Define any pending vtables. DefineUsedVTables(); // Instantiate any pending implicit instantiations found during the // instantiation of this template. PerformPendingInstantiations(); // PendingInstantiations and VTableUses are restored through // SavePendingInstantiationsAndVTableUses's destructor. } } VarTemplateSpecializationDecl *Sema::BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs, LocalInstantiationScope *StartingScope) { if (FromVar->isInvalidDecl()) return nullptr; InstantiatingTemplate Inst(*this, PointOfInstantiation, FromVar); if (Inst.isInvalid()) return nullptr; MultiLevelTemplateArgumentList TemplateArgLists; TemplateArgLists.addOuterTemplateArguments(&TemplateArgList); // Instantiate the first declaration of the variable template: for a partial // specialization of a static data member template, the first declaration may // or may not be the declaration in the class; if it's in the class, we want // to instantiate a member in the class (a declaration), and if it's outside, // we want to instantiate a definition. // // If we're instantiating an explicitly-specialized member template or member // partial specialization, don't do this. The member specialization completely // replaces the original declaration in this case. bool IsMemberSpec = false; if (VarTemplatePartialSpecializationDecl *PartialSpec = dyn_cast<VarTemplatePartialSpecializationDecl>(FromVar)) IsMemberSpec = PartialSpec->isMemberSpecialization(); else if (VarTemplateDecl *FromTemplate = FromVar->getDescribedVarTemplate()) IsMemberSpec = FromTemplate->isMemberSpecialization(); if (!IsMemberSpec) FromVar = FromVar->getFirstDecl(); MultiLevelTemplateArgumentList MultiLevelList(TemplateArgList); TemplateDeclInstantiator Instantiator(*this, FromVar->getDeclContext(), MultiLevelList); // TODO: Set LateAttrs and StartingScope ...
return cast_or_null<VarTemplateSpecializationDecl>( Instantiator.VisitVarTemplateSpecializationDecl( VarTemplate, FromVar, InsertPos, TemplateArgsInfo, Converted)); } /// \brief Instantiates a variable template specialization by completing it /// with appropriate type information and initializer. VarTemplateSpecializationDecl *Sema::CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs) { // Do substitution on the type of the declaration TypeSourceInfo *DI = SubstType(PatternDecl->getTypeSourceInfo(), TemplateArgs, PatternDecl->getTypeSpecStartLoc(), PatternDecl->getDeclName()); if (!DI) return nullptr; // Update the type of this variable template specialization. VarSpec->setType(DI->getType()); // Instantiate the initializer. InstantiateVariableInitializer(VarSpec, PatternDecl, TemplateArgs); return VarSpec; } /// BuildVariableInstantiation - Used after a new variable has been created. /// Sets basic variable data and decides whether to postpone the /// variable instantiation. void Sema::BuildVariableInstantiation( VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate) { // If we are instantiating a local extern declaration, the // instantiation belongs lexically to the containing function. // If we are instantiating a static data member defined // out-of-line, the instantiation will have the same lexical // context (which will be a namespace scope) as the template. if (OldVar->isLocalExternDecl()) { NewVar->setLocalExternDecl(); NewVar->setLexicalDeclContext(Owner); } else if (OldVar->isOutOfLine()) NewVar->setLexicalDeclContext(OldVar->getLexicalDeclContext()); NewVar->setTSCSpec(OldVar->getTSCSpec()); NewVar->setInitStyle(OldVar->getInitStyle()); NewVar->setCXXForRangeDecl(OldVar->isCXXForRangeDecl()); NewVar->setConstexpr(OldVar->isConstexpr()); NewVar->setInitCapture(OldVar->isInitCapture()); NewVar->setPreviousDeclInSameBlockScope( OldVar->isPreviousDeclInSameBlockScope()); NewVar->setAccess(OldVar->getAccess()); if (!OldVar->isStaticDataMember()) { if (OldVar->isUsed(false)) NewVar->setIsUsed(); NewVar->setReferenced(OldVar->isReferenced()); } // See if the old variable had a type-specifier that defined an anonymous tag. // If it did, mark the new variable as being the declarator for the new // anonymous tag. if (const TagType *OldTagType = OldVar->getType()->getAs<TagType>()) { TagDecl *OldTag = OldTagType->getDecl(); if (OldTag->getDeclaratorForAnonDecl() == OldVar) { TagDecl *NewTag = NewVar->getType()->castAs<TagType>()->getDecl(); assert(!NewTag->hasNameForLinkage() && !NewTag->hasDeclaratorForAnonDecl()); NewTag->setDeclaratorForAnonDecl(NewVar); } } InstantiateAttrs(TemplateArgs, OldVar, NewVar, LateAttrs, StartingScope); LookupResult Previous( *this, NewVar->getDeclName(), NewVar->getLocation(), NewVar->isLocalExternDecl() ? Sema::LookupRedeclarationWithLinkage : Sema::LookupOrdinaryName, Sema::ForRedeclaration); if (NewVar->isLocalExternDecl() && OldVar->getPreviousDecl() && (!OldVar->getPreviousDecl()->getDeclContext()->isDependentContext() || OldVar->getPreviousDecl()->getDeclContext()==OldVar->getDeclContext())) { // We have a previous declaration. Use that one, so we merge with the // right type. 
if (NamedDecl *NewPrev = FindInstantiatedDecl( NewVar->getLocation(), OldVar->getPreviousDecl(), TemplateArgs)) Previous.addDecl(NewPrev); } else if (!isa<VarTemplateSpecializationDecl>(NewVar) && OldVar->hasLinkage()) LookupQualifiedName(Previous, NewVar->getDeclContext(), false); CheckVariableDeclaration(NewVar, Previous); if (!InstantiatingVarTemplate) { NewVar->getLexicalDeclContext()->addHiddenDecl(NewVar); if (!NewVar->isLocalExternDecl() || !NewVar->getPreviousDecl()) NewVar->getDeclContext()->makeDeclVisibleInContext(NewVar); } if (!OldVar->isOutOfLine()) { if (NewVar->getDeclContext()->isFunctionOrMethod()) CurrentInstantiationScope->InstantiatedLocal(OldVar, NewVar); } // Link instantiations of static data members back to the template from // which they were instantiated. if (NewVar->isStaticDataMember() && !InstantiatingVarTemplate) NewVar->setInstantiationOfStaticDataMember(OldVar, TSK_ImplicitInstantiation); // Forward the mangling number from the template to the instantiated decl. Context.setManglingNumber(NewVar, Context.getManglingNumber(OldVar)); Context.setStaticLocalNumber(NewVar, Context.getStaticLocalNumber(OldVar)); // Delay instantiation of the initializer for variable templates until a // definition of the variable is needed. We need it right away if the type // contains 'auto'. if ((!isa<VarTemplateSpecializationDecl>(NewVar) && !InstantiatingVarTemplate) || NewVar->getType()->isUndeducedType()) InstantiateVariableInitializer(NewVar, OldVar, TemplateArgs); // Diagnose unused local variables with dependent types, where the diagnostic // will have been deferred. if (!NewVar->isInvalidDecl() && NewVar->getDeclContext()->isFunctionOrMethod() && OldVar->getType()->isDependentType()) DiagnoseUnusedDecl(NewVar); } /// \brief Instantiate the initializer of a variable. void Sema::InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs) { if (Var->getAnyInitializer()) // We already have an initializer in the class. return; if (OldVar->getInit()) { if (Var->isStaticDataMember() && !OldVar->isOutOfLine()) PushExpressionEvaluationContext(Sema::ConstantEvaluated, OldVar); else PushExpressionEvaluationContext(Sema::PotentiallyEvaluated, OldVar); // Instantiate the initializer. ExprResult Init = SubstInitializer(OldVar->getInit(), TemplateArgs, OldVar->getInitStyle() == VarDecl::CallInit); if (!Init.isInvalid()) { bool TypeMayContainAuto = true; Expr *InitExpr = Init.get(); if (Var->hasAttr<DLLImportAttr>() && (!InitExpr || !InitExpr->isConstantInitializer(getASTContext(), false))) { // Do not dynamically initialize dllimport variables. } else if (InitExpr) { bool DirectInit = OldVar->isDirectInit(); AddInitializerToDecl(Var, InitExpr, DirectInit, TypeMayContainAuto); } else ActOnUninitializedDecl(Var, TypeMayContainAuto); } else { // FIXME: Not too happy about invalidating the declaration // because of a bogus initializer. Var->setInvalidDecl(); } PopExpressionEvaluationContext(); } else if ((!Var->isStaticDataMember() || Var->isOutOfLine()) && !Var->isCXXForRangeDecl()) ActOnUninitializedDecl(Var, false); } /// \brief Instantiate the definition of the given variable from its /// template. /// /// \param PointOfInstantiation the point at which the instantiation was /// required. Note that this is not precisely a "point of instantiation" /// for the function, but it's close. /// /// \param Var the already-instantiated declaration of a static member /// variable of a class template specialization. 
/// /// \param Recursive if true, recursively instantiates any functions that /// are required by this instantiation. /// /// \param DefinitionRequired if true, then we are performing an explicit /// instantiation where an out-of-line definition of the member variable /// is required. Complain if there is no such definition. void Sema::InstantiateStaticDataMemberDefinition( SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive, bool DefinitionRequired) { InstantiateVariableDefinition(PointOfInstantiation, Var, Recursive, DefinitionRequired); } void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive, bool DefinitionRequired) { if (Var->isInvalidDecl()) return; VarTemplateSpecializationDecl *VarSpec = dyn_cast<VarTemplateSpecializationDecl>(Var); VarDecl *PatternDecl = nullptr, *Def = nullptr; MultiLevelTemplateArgumentList TemplateArgs = getTemplateInstantiationArgs(Var); if (VarSpec) { // If this is a variable template specialization, make sure that it is // non-dependent, then find its instantiation pattern. bool InstantiationDependent = false; assert(!TemplateSpecializationType::anyDependentTemplateArguments( VarSpec->getTemplateArgsInfo(), InstantiationDependent) && "Only instantiate variable template specializations that are " "not type-dependent"); (void)InstantiationDependent; // Find the variable initialization that we'll be substituting. If the // pattern was instantiated from a member template, look back further to // find the real pattern. assert(VarSpec->getSpecializedTemplate() && "Specialization without specialized template?"); llvm::PointerUnion<VarTemplateDecl *, VarTemplatePartialSpecializationDecl *> PatternPtr = VarSpec->getSpecializedTemplateOrPartial(); if (PatternPtr.is<VarTemplatePartialSpecializationDecl *>()) { VarTemplatePartialSpecializationDecl *Tmpl = PatternPtr.get<VarTemplatePartialSpecializationDecl *>(); while (VarTemplatePartialSpecializationDecl *From = Tmpl->getInstantiatedFromMember()) { if (Tmpl->isMemberSpecialization()) break; Tmpl = From; } PatternDecl = Tmpl; } else { VarTemplateDecl *Tmpl = PatternPtr.get<VarTemplateDecl *>(); while (VarTemplateDecl *From = Tmpl->getInstantiatedFromMemberTemplate()) { if (Tmpl->isMemberSpecialization()) break; Tmpl = From; } PatternDecl = Tmpl->getTemplatedDecl(); } // If this is a static data member template, there might be an // uninstantiated initializer on the declaration. If so, instantiate // it now. if (PatternDecl->isStaticDataMember() && (PatternDecl = PatternDecl->getFirstDecl())->hasInit() && !Var->hasInit()) { // FIXME: Factor out the duplicated instantiation context setup/tear down // code here. InstantiatingTemplate Inst(*this, PointOfInstantiation, Var); if (Inst.isInvalid()) return; // If we're performing recursive template instantiation, create our own // queue of pending implicit instantiations that we will instantiate // later, while we're still within our own instantiation context. SavePendingInstantiationsAndVTableUsesRAII SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive); LocalInstantiationScope Local(*this); // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. ContextRAII PreviousContext(*this, Var->getDeclContext()); InstantiateVariableInitializer(Var, PatternDecl, TemplateArgs); PreviousContext.pop(); // FIXME: Need to inform the ASTConsumer that we instantiated the // initializer? 
// This variable may have local implicit instantiations that need to be // instantiated within this scope. PerformPendingInstantiations(/*LocalOnly=*/true); Local.Exit(); if (Recursive) { // Define any newly required vtables. DefineUsedVTables(); // Instantiate any pending implicit instantiations found during the // instantiation of this template. PerformPendingInstantiations(); // PendingInstantiations and VTableUses are restored through // SavePendingInstantiationsAndVTableUses's destructor. } } // Find actual definition Def = PatternDecl->getDefinition(getASTContext()); } else { // If this is a static data member, find its out-of-line definition. assert(Var->isStaticDataMember() && "not a static data member?"); PatternDecl = Var->getInstantiatedFromStaticDataMember(); assert(PatternDecl && "data member was not instantiated from a template?"); assert(PatternDecl->isStaticDataMember() && "not a static data member?"); Def = PatternDecl->getOutOfLineDefinition(); } // If we don't have a definition of the variable template, we won't perform // any instantiation. Rather, we rely on the user to instantiate this // definition (or provide a specialization for it) in another translation // unit. if (!Def) { if (DefinitionRequired) { if (VarSpec) Diag(PointOfInstantiation, diag::err_explicit_instantiation_undefined_var_template) << Var; else Diag(PointOfInstantiation, diag::err_explicit_instantiation_undefined_member) << 2 << Var->getDeclName() << Var->getDeclContext(); Diag(PatternDecl->getLocation(), diag::note_explicit_instantiation_here); if (VarSpec) Var->setInvalidDecl(); } else if (Var->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition) { PendingInstantiations.push_back( std::make_pair(Var, PointOfInstantiation)); } return; } TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind(); // Never instantiate an explicit specialization. if (TSK == TSK_ExplicitSpecialization) return; // C++11 [temp.explicit]p10: // Except for inline functions, [...] explicit instantiation declarations // have the effect of suppressing the implicit instantiation of the entity // to which they refer. if (TSK == TSK_ExplicitInstantiationDeclaration) return; // Make sure to pass the instantiated variable to the consumer at the end. struct PassToConsumerRAII { ASTConsumer &Consumer; VarDecl *Var; PassToConsumerRAII(ASTConsumer &Consumer, VarDecl *Var) : Consumer(Consumer), Var(Var) { } ~PassToConsumerRAII() { Consumer.HandleCXXStaticMemberVarInstantiation(Var); } } PassToConsumerRAII(Consumer, Var); // If we already have a definition, we're done. if (VarDecl *Def = Var->getDefinition()) { // We may be explicitly instantiating something we've already implicitly // instantiated. Def->setTemplateSpecializationKind(Var->getTemplateSpecializationKind(), PointOfInstantiation); return; } InstantiatingTemplate Inst(*this, PointOfInstantiation, Var); if (Inst.isInvalid()) return; // If we're performing recursive template instantiation, create our own // queue of pending implicit instantiations that we will instantiate later, // while we're still within our own instantiation context. SavePendingLocalImplicitInstantiationsRAII SavedPendingLocalImplicitInstantiations(*this); SavePendingInstantiationsAndVTableUsesRAII SavePendingInstantiationsAndVTableUses(*this, /*Enabled=*/Recursive); // Enter the scope of this instantiation. We don't use // PushDeclContext because we don't have a scope. 
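// (ContextRAII below saves CurContext, switches it to the variable's semantic // DeclContext, and restores the saved context when pop() is called.)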
ContextRAII PreviousContext(*this, Var->getDeclContext()); LocalInstantiationScope Local(*this); VarDecl *OldVar = Var; if (!VarSpec) Var = cast_or_null<VarDecl>(SubstDecl(Def, Var->getDeclContext(), TemplateArgs)); else if (Var->isStaticDataMember() && Var->getLexicalDeclContext()->isRecord()) { // We need to instantiate the definition of a static data member template, // and all we have is the in-class declaration of it. Instantiate a separate // declaration of the definition. TemplateDeclInstantiator Instantiator(*this, Var->getDeclContext(), TemplateArgs); Var = cast_or_null<VarDecl>(Instantiator.VisitVarTemplateSpecializationDecl( VarSpec->getSpecializedTemplate(), Def, nullptr, VarSpec->getTemplateArgsInfo(), VarSpec->getTemplateArgs().asArray())); if (Var) { llvm::PointerUnion<VarTemplateDecl *, VarTemplatePartialSpecializationDecl *> PatternPtr = VarSpec->getSpecializedTemplateOrPartial(); if (VarTemplatePartialSpecializationDecl *Partial = PatternPtr.dyn_cast<VarTemplatePartialSpecializationDecl *>()) cast<VarTemplateSpecializationDecl>(Var)->setInstantiationOf( Partial, &VarSpec->getTemplateInstantiationArgs()); // Merge the definition with the declaration. LookupResult R(*this, Var->getDeclName(), Var->getLocation(), LookupOrdinaryName, ForRedeclaration); R.addDecl(OldVar); ShadowMergeState MergeState = ShadowMergeState_Disallowed; // HLSL Change - add merge state MergeVarDecl(Var, R, MergeState); // HLSL Change - add merge state // Attach the initializer. InstantiateVariableInitializer(Var, Def, TemplateArgs); } } else // Complete the existing variable's definition with an appropriately // substituted type and initializer. Var = CompleteVarTemplateSpecializationDecl(VarSpec, Def, TemplateArgs); PreviousContext.pop(); if (Var) { PassToConsumerRAII.Var = Var; Var->setTemplateSpecializationKind(OldVar->getTemplateSpecializationKind(), OldVar->getPointOfInstantiation()); } // This variable may have local implicit instantiations that need to be // instantiated within this scope. PerformPendingInstantiations(/*LocalOnly=*/true); Local.Exit(); if (Recursive) { // Define any newly required vtables. DefineUsedVTables(); // Instantiate any pending implicit instantiations found during the // instantiation of this template. PerformPendingInstantiations(); // PendingInstantiations and VTableUses are restored through // SavePendingInstantiationsAndVTableUses's destructor. } } void Sema::InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs) { SmallVector<CXXCtorInitializer*, 4> NewInits; bool AnyErrors = Tmpl->isInvalidDecl(); // Instantiate all the initializers. for (const auto *Init : Tmpl->inits()) { // Only instantiate written initializers, let Sema re-construct implicit // ones. if (!Init->isWritten()) continue; SourceLocation EllipsisLoc; if (Init->isPackExpansion()) { // This is a pack expansion. We should expand it now. 
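// For instance (a minimal sketch; X, Mixins, A, and B are hypothetical), in // template<typename ...Mixins> struct X : Mixins... { // X() : Mixins()... {} // }; // instantiating X<A, B>::X() expands the single written initializer // 'Mixins()...' into separate base initializers A() and B().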
TypeLoc BaseTL = Init->getTypeSourceInfo()->getTypeLoc(); SmallVector<UnexpandedParameterPack, 4> Unexpanded; collectUnexpandedParameterPacks(BaseTL, Unexpanded); collectUnexpandedParameterPacks(Init->getInit(), Unexpanded); bool ShouldExpand = false; bool RetainExpansion = false; Optional<unsigned> NumExpansions; if (CheckParameterPacksForExpansion(Init->getEllipsisLoc(), BaseTL.getSourceRange(), Unexpanded, TemplateArgs, ShouldExpand, RetainExpansion, NumExpansions)) { AnyErrors = true; New->setInvalidDecl(); continue; } assert(ShouldExpand && "Partial instantiation of base initializer?"); // Loop over all of the arguments in the argument pack(s). for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I); // Instantiate the initializer. ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs, /*CXXDirectInit=*/true); if (TempInit.isInvalid()) { AnyErrors = true; break; } // Instantiate the base type. TypeSourceInfo *BaseTInfo = SubstType(Init->getTypeSourceInfo(), TemplateArgs, Init->getSourceLocation(), New->getDeclName()); if (!BaseTInfo) { AnyErrors = true; break; } // Build the initializer. MemInitResult NewInit = BuildBaseInitializer(BaseTInfo->getType(), BaseTInfo, TempInit.get(), New->getParent(), SourceLocation()); if (NewInit.isInvalid()) { AnyErrors = true; break; } NewInits.push_back(NewInit.get()); } continue; } // Instantiate the initializer. ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs, /*CXXDirectInit=*/true); if (TempInit.isInvalid()) { AnyErrors = true; continue; } MemInitResult NewInit; if (Init->isDelegatingInitializer() || Init->isBaseInitializer()) { TypeSourceInfo *TInfo = SubstType(Init->getTypeSourceInfo(), TemplateArgs, Init->getSourceLocation(), New->getDeclName()); if (!TInfo) { AnyErrors = true; New->setInvalidDecl(); continue; } if (Init->isBaseInitializer()) NewInit = BuildBaseInitializer(TInfo->getType(), TInfo, TempInit.get(), New->getParent(), EllipsisLoc); else NewInit = BuildDelegatingInitializer(TInfo, TempInit.get(), cast<CXXRecordDecl>(CurContext->getParent())); } else if (Init->isMemberInitializer()) { FieldDecl *Member = cast_or_null<FieldDecl>(FindInstantiatedDecl( Init->getMemberLocation(), Init->getMember(), TemplateArgs)); if (!Member) { AnyErrors = true; New->setInvalidDecl(); continue; } NewInit = BuildMemberInitializer(Member, TempInit.get(), Init->getSourceLocation()); } else if (Init->isIndirectMemberInitializer()) { IndirectFieldDecl *IndirectMember = cast_or_null<IndirectFieldDecl>(FindInstantiatedDecl( Init->getMemberLocation(), Init->getIndirectMember(), TemplateArgs)); if (!IndirectMember) { AnyErrors = true; New->setInvalidDecl(); continue; } NewInit = BuildMemberInitializer(IndirectMember, TempInit.get(), Init->getSourceLocation()); } if (NewInit.isInvalid()) { AnyErrors = true; New->setInvalidDecl(); } else { NewInits.push_back(NewInit.get()); } } // Assign all the initializers to the new constructor. ActOnMemInitializers(New, /*FIXME: ColonLoc */ SourceLocation(), NewInits, AnyErrors); } // TODO: this could be templated if the various decl types used the // same method name.
static bool isInstantiationOf(ClassTemplateDecl *Pattern, ClassTemplateDecl *Instance) { Pattern = Pattern->getCanonicalDecl(); do { Instance = Instance->getCanonicalDecl(); if (Pattern == Instance) return true; Instance = Instance->getInstantiatedFromMemberTemplate(); } while (Instance); return false; } static bool isInstantiationOf(FunctionTemplateDecl *Pattern, FunctionTemplateDecl *Instance) { Pattern = Pattern->getCanonicalDecl(); do { Instance = Instance->getCanonicalDecl(); if (Pattern == Instance) return true; Instance = Instance->getInstantiatedFromMemberTemplate(); } while (Instance); return false; } static bool isInstantiationOf(ClassTemplatePartialSpecializationDecl *Pattern, ClassTemplatePartialSpecializationDecl *Instance) { Pattern = cast<ClassTemplatePartialSpecializationDecl>(Pattern->getCanonicalDecl()); do { Instance = cast<ClassTemplatePartialSpecializationDecl>( Instance->getCanonicalDecl()); if (Pattern == Instance) return true; Instance = Instance->getInstantiatedFromMember(); } while (Instance); return false; } static bool isInstantiationOf(CXXRecordDecl *Pattern, CXXRecordDecl *Instance) { Pattern = Pattern->getCanonicalDecl(); do { Instance = Instance->getCanonicalDecl(); if (Pattern == Instance) return true; Instance = Instance->getInstantiatedFromMemberClass(); } while (Instance); return false; } static bool isInstantiationOf(FunctionDecl *Pattern, FunctionDecl *Instance) { Pattern = Pattern->getCanonicalDecl(); do { Instance = Instance->getCanonicalDecl(); if (Pattern == Instance) return true; Instance = Instance->getInstantiatedFromMemberFunction(); } while (Instance); return false; } static bool isInstantiationOf(EnumDecl *Pattern, EnumDecl *Instance) { Pattern = Pattern->getCanonicalDecl(); do { Instance = Instance->getCanonicalDecl(); if (Pattern == Instance) return true; Instance = Instance->getInstantiatedFromMemberEnum(); } while (Instance); return false; } static bool isInstantiationOf(UsingShadowDecl *Pattern, UsingShadowDecl *Instance, ASTContext &C) { return declaresSameEntity(C.getInstantiatedFromUsingShadowDecl(Instance), Pattern); } static bool isInstantiationOf(UsingDecl *Pattern, UsingDecl *Instance, ASTContext &C) { return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern); } static bool isInstantiationOf(UnresolvedUsingValueDecl *Pattern, UsingDecl *Instance, ASTContext &C) { return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern); } static bool isInstantiationOf(UnresolvedUsingTypenameDecl *Pattern, UsingDecl *Instance, ASTContext &C) { return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern); } static bool isInstantiationOfStaticDataMember(VarDecl *Pattern, VarDecl *Instance) { assert(Instance->isStaticDataMember()); Pattern = Pattern->getCanonicalDecl(); do { Instance = Instance->getCanonicalDecl(); if (Pattern == Instance) return true; Instance = Instance->getInstantiatedFromStaticDataMember(); } while (Instance); return false; } // Other is the prospective instantiation // D is the prospective pattern static bool isInstantiationOf(ASTContext &Ctx, NamedDecl *D, Decl *Other) { if (D->getKind() != Other->getKind()) { if (UnresolvedUsingTypenameDecl *UUD = dyn_cast<UnresolvedUsingTypenameDecl>(D)) { if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) { return isInstantiationOf(UUD, UD, Ctx); } } if (UnresolvedUsingValueDecl *UUD = dyn_cast<UnresolvedUsingValueDecl>(D)) { if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) { return isInstantiationOf(UUD, UD, Ctx); } } return false; } if 
(CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Other)) return isInstantiationOf(cast<CXXRecordDecl>(D), Record); if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Other)) return isInstantiationOf(cast<FunctionDecl>(D), Function); if (EnumDecl *Enum = dyn_cast<EnumDecl>(Other)) return isInstantiationOf(cast<EnumDecl>(D), Enum); if (VarDecl *Var = dyn_cast<VarDecl>(Other)) if (Var->isStaticDataMember()) return isInstantiationOfStaticDataMember(cast<VarDecl>(D), Var); if (ClassTemplateDecl *Temp = dyn_cast<ClassTemplateDecl>(Other)) return isInstantiationOf(cast<ClassTemplateDecl>(D), Temp); if (FunctionTemplateDecl *Temp = dyn_cast<FunctionTemplateDecl>(Other)) return isInstantiationOf(cast<FunctionTemplateDecl>(D), Temp); if (ClassTemplatePartialSpecializationDecl *PartialSpec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Other)) return isInstantiationOf(cast<ClassTemplatePartialSpecializationDecl>(D), PartialSpec); if (FieldDecl *Field = dyn_cast<FieldDecl>(Other)) { if (!Field->getDeclName()) { // This is an unnamed field. return declaresSameEntity(Ctx.getInstantiatedFromUnnamedFieldDecl(Field), cast<FieldDecl>(D)); } } if (UsingDecl *Using = dyn_cast<UsingDecl>(Other)) return isInstantiationOf(cast<UsingDecl>(D), Using, Ctx); if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(Other)) return isInstantiationOf(cast<UsingShadowDecl>(D), Shadow, Ctx); return D->getDeclName() && isa<NamedDecl>(Other) && D->getDeclName() == cast<NamedDecl>(Other)->getDeclName(); } template<typename ForwardIterator> static NamedDecl *findInstantiationOf(ASTContext &Ctx, NamedDecl *D, ForwardIterator first, ForwardIterator last) { for (; first != last; ++first) if (isInstantiationOf(Ctx, D, *first)) return cast<NamedDecl>(*first); return nullptr; } /// \brief Finds the instantiation of the given declaration context /// within the current instantiation. /// /// \returns NULL if there was an error DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC, const MultiLevelTemplateArgumentList &TemplateArgs) { if (NamedDecl *D = dyn_cast<NamedDecl>(DC)) { Decl* ID = FindInstantiatedDecl(Loc, D, TemplateArgs); return cast_or_null<DeclContext>(ID); } else return DC; } /// \brief Find the instantiation of the given declaration within the /// current instantiation. /// /// This routine is intended to be used when \p D is a declaration /// referenced from within a template that needs to be mapped into the /// corresponding declaration within an instantiation. For example, /// given: /// /// \code /// template<typename T> /// struct X { /// enum Kind { /// KnownValue = sizeof(T) /// }; /// /// bool getKind() const { return KnownValue; } /// }; /// /// template struct X<int>; /// \endcode /// /// In the instantiation of <tt>X<int>::getKind()</tt>, we need to map the /// \p EnumConstantDecl for \p KnownValue (which refers to /// <tt>X<T>::<Kind>::KnownValue</tt>) to its instantiation /// (<tt>X<int>::<Kind>::KnownValue</tt>). \p FindInstantiatedDecl performs /// this mapping from within the instantiation of <tt>X<int></tt>. NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs) { DeclContext *ParentDC = D->getDeclContext(); // FIXME: Parameters of pointer to functions (y below) that are themselves // parameters (p below) can have their ParentDC set to the translation-unit // - thus we cannot consistently check if the ParentDC of such a parameter // is Dependent and/or a FunctionOrMethod. // For example, in
this code, template argument deduction tries to // find an instantiated decl for (T y) when the ParentDC for y is // the translation unit. // e.g. template <class T> void Foo(auto (*p)(T y) -> decltype(y())) {} // float baz(float(*)()) { return 0.0; } // Foo(baz); // The better fix here is perhaps to ensure that a ParmVarDecl, by the time // it gets here, always has a FunctionOrMethod as its ParentDC?? // For now: // - as long as we have a ParmVarDecl whose parent is non-dependent and // whose type is not instantiation dependent, do nothing to the decl // - otherwise find its instantiated decl. if (isa<ParmVarDecl>(D) && !ParentDC->isDependentContext() && !cast<ParmVarDecl>(D)->getType()->isInstantiationDependentType()) return D; if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) || isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) || (ParentDC->isFunctionOrMethod() && ParentDC->isDependentContext()) || (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) { // D is a local of some kind. Look into the map of local // declarations to their instantiations. if (CurrentInstantiationScope) { if (auto Found = CurrentInstantiationScope->findInstantiationOf(D)) { if (Decl *FD = Found->dyn_cast<Decl *>()) return cast<NamedDecl>(FD); int PackIdx = ArgumentPackSubstitutionIndex; assert(PackIdx != -1 && "found declaration pack but not pack expanding"); typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack; return cast<NamedDecl>((*Found->get<DeclArgumentPack *>())[PackIdx]); } } // If we're performing a partial substitution during template argument // deduction, we may not have values for template parameters yet. They // just map to themselves. if (isa<NonTypeTemplateParmDecl>(D) || isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D)) return D; if (D->isInvalidDecl()) return nullptr; // Normally this function only searches for already instantiated declarations; // however, we have to make an exception for local types used before // definition, as in the code: // // template<typename T> void f1() { // void g1(struct x1); // struct x1 {}; // } // // In this case, instantiation of the type of 'g1' requires the definition of // 'x1', which is defined later. Error recovery may produce an enum used // before definition. In these cases we need to instantiate relevant // declarations here. bool NeedInstantiate = false; if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) NeedInstantiate = RD->isLocalClass(); else NeedInstantiate = isa<EnumDecl>(D); if (NeedInstantiate) { Decl *Inst = SubstDecl(D, CurContext, TemplateArgs); CurrentInstantiationScope->InstantiatedLocal(D, Inst); return cast<TypeDecl>(Inst); } // If we didn't find the decl, then we must have a label decl that hasn't // been found yet. Lazily instantiate it and return it now. assert(isa<LabelDecl>(D)); Decl *Inst = SubstDecl(D, CurContext, TemplateArgs); assert(Inst && "Failed to instantiate label??"); CurrentInstantiationScope->InstantiatedLocal(D, Inst); return cast<LabelDecl>(Inst); } // For variable template specializations, update those that are still // type-dependent.
if (VarTemplateSpecializationDecl *VarSpec = dyn_cast<VarTemplateSpecializationDecl>(D)) { bool InstantiationDependent = false; const TemplateArgumentListInfo &VarTemplateArgs = VarSpec->getTemplateArgsInfo(); if (TemplateSpecializationType::anyDependentTemplateArguments( VarTemplateArgs, InstantiationDependent)) D = cast<NamedDecl>( SubstDecl(D, VarSpec->getDeclContext(), TemplateArgs)); return D; } if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) { if (!Record->isDependentContext()) return D; // Determine whether this record is the "templated" declaration describing // a class template or class template partial specialization. ClassTemplateDecl *ClassTemplate = Record->getDescribedClassTemplate(); if (ClassTemplate) ClassTemplate = ClassTemplate->getCanonicalDecl(); else if (ClassTemplatePartialSpecializationDecl *PartialSpec = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) ClassTemplate = PartialSpec->getSpecializedTemplate()->getCanonicalDecl(); // Walk the current context to find either the record or an instantiation of // it. DeclContext *DC = CurContext; while (!DC->isFileContext()) { // If we're performing substitution while we're inside the template // definition, we'll find our own context. We're done. if (DC->Equals(Record)) return Record; if (CXXRecordDecl *InstRecord = dyn_cast<CXXRecordDecl>(DC)) { // Check whether we're in the process of instantiating a class template // specialization of the template we're mapping. if (ClassTemplateSpecializationDecl *InstSpec = dyn_cast<ClassTemplateSpecializationDecl>(InstRecord)){ ClassTemplateDecl *SpecTemplate = InstSpec->getSpecializedTemplate(); if (ClassTemplate && isInstantiationOf(ClassTemplate, SpecTemplate)) return InstRecord; } // Check whether we're in the process of instantiating a member class. if (isInstantiationOf(Record, InstRecord)) return InstRecord; } // Move to the outer template scope. if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) { if (FD->getFriendObjectKind() && FD->getDeclContext()->isFileContext()){ DC = FD->getLexicalDeclContext(); continue; } } DC = DC->getParent(); } // Fall through to deal with other dependent record types (e.g., // anonymous unions in class templates). } if (!ParentDC->isDependentContext()) return D; ParentDC = FindInstantiatedContext(Loc, ParentDC, TemplateArgs); if (!ParentDC) return nullptr; if (ParentDC != D->getDeclContext()) { // We performed some kind of instantiation in the parent context, // so now we need to look into the instantiated parent context to // find the instantiation of the declaration D. // If our context used to be dependent, we may need to instantiate // it before performing lookup into that context. bool IsBeingInstantiated = false; if (CXXRecordDecl *Spec = dyn_cast<CXXRecordDecl>(ParentDC)) { if (!Spec->isDependentContext()) { QualType T = Context.getTypeDeclType(Spec); const RecordType *Tag = T->getAs<RecordType>(); assert(Tag && "type of non-dependent record is not a RecordType"); if (Tag->isBeingDefined()) IsBeingInstantiated = true; if (!Tag->isBeingDefined() && RequireCompleteType(Loc, T, diag::err_incomplete_type)) return nullptr; ParentDC = Tag->getDecl(); } } NamedDecl *Result = nullptr; if (D->getDeclName()) { DeclContext::lookup_result Found = ParentDC->lookup(D->getDeclName()); Result = findInstantiationOf(Context, D, Found.begin(), Found.end()); } else { // Since we don't have a name for the entity we're looking for, // our only option is to walk through all of the declarations to // find that name. 
This will occur in a few cases: // // - anonymous struct/union within a template // - unnamed class/struct/union/enum within a template // // FIXME: Find a better way to find these instantiations! Result = findInstantiationOf(Context, D, ParentDC->decls_begin(), ParentDC->decls_end()); } if (!Result) { if (isa<UsingShadowDecl>(D)) { // UsingShadowDecls can instantiate to nothing because of using hiding. } else if (Diags.hasErrorOccurred()) { // We've already complained about something, so most likely this // declaration failed to instantiate. There's no point in complaining // further, since this is normal in invalid code. } else if (IsBeingInstantiated) { // The class in which this member exists is currently being // instantiated, and we haven't gotten around to instantiating this // member yet. This can happen when the code uses forward declarations // of member classes, and introduces ordering dependencies via // template instantiation. Diag(Loc, diag::err_member_not_yet_instantiated) << D->getDeclName() << Context.getTypeDeclType(cast<CXXRecordDecl>(ParentDC)); Diag(D->getLocation(), diag::note_non_instantiated_member_here); } else if (EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) { // This enumeration constant was found when the template was defined, // but can't be found in the instantiation. This can happen if an // unscoped enumeration member is explicitly specialized. EnumDecl *Enum = cast<EnumDecl>(ED->getLexicalDeclContext()); EnumDecl *Spec = cast<EnumDecl>(FindInstantiatedDecl(Loc, Enum, TemplateArgs)); assert(Spec->getTemplateSpecializationKind() == TSK_ExplicitSpecialization); Diag(Loc, diag::err_enumerator_does_not_exist) << D->getDeclName() << Context.getTypeDeclType(cast<TypeDecl>(Spec->getDeclContext())); Diag(Spec->getLocation(), diag::note_enum_specialized_here) << Context.getTypeDeclType(Spec); } else { // We should have found something, but didn't. llvm_unreachable("Unable to find instantiation of declaration!"); } } D = Result; } return D; } /// \brief Performs template instantiation for all implicit template /// instantiations we have seen until this point. void Sema::PerformPendingInstantiations(bool LocalOnly) { while (!PendingLocalImplicitInstantiations.empty() || (!LocalOnly && !PendingInstantiations.empty())) { PendingImplicitInstantiation Inst; if (PendingLocalImplicitInstantiations.empty()) { Inst = PendingInstantiations.front(); PendingInstantiations.pop_front(); } else { Inst = PendingLocalImplicitInstantiations.front(); PendingLocalImplicitInstantiations.pop_front(); } // Instantiate function definitions if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Inst.first)) { PrettyDeclStackTraceEntry CrashInfo(*this, Function, SourceLocation(), "instantiating function definition"); bool DefinitionRequired = Function->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition; InstantiateFunctionDefinition(/*FIXME:*/Inst.second, Function, true, DefinitionRequired); continue; } // Instantiate variable definitions VarDecl *Var = cast<VarDecl>(Inst.first); assert((Var->isStaticDataMember() || isa<VarTemplateSpecializationDecl>(Var)) && "Not a static data member, nor a variable template" " specialization?"); // Don't try to instantiate declarations if the most recent redeclaration // is invalid. if (Var->getMostRecentDecl()->isInvalidDecl()) continue; // Check if the most recent declaration has changed the specialization kind // and removed the need for implicit instantiation. 
switch (Var->getMostRecentDecl()->getTemplateSpecializationKind()) { case TSK_Undeclared: llvm_unreachable("Cannot instantiate an undeclared specialization."); case TSK_ExplicitInstantiationDeclaration: case TSK_ExplicitSpecialization: continue; // No longer need to instantiate this type. case TSK_ExplicitInstantiationDefinition: // We only need an instantiation if the pending instantiation *is* the // explicit instantiation. if (Var != Var->getMostRecentDecl()) continue; break; case TSK_ImplicitInstantiation: break; } PrettyDeclStackTraceEntry CrashInfo(*this, Var, SourceLocation(), "instantiating variable definition"); bool DefinitionRequired = Var->getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition; // Instantiate static data member definitions or variable template // specializations. InstantiateVariableDefinition(/*FIXME:*/ Inst.second, Var, true, DefinitionRequired); } } void Sema::PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs) { for (auto DD : Pattern->ddiags()) { switch (DD->getKind()) { case DependentDiagnostic::Access: HandleDependentAccessCheck(*DD, TemplateArgs); break; } } }
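// Editor's sketch (not part of the original file): a minimal, hypothetical
// translation unit showing the two kinds of pending work the routine above
// drains: implicit function-template instantiations and static data member /
// variable-template instantiations. The names Wrap, Limits, and consume are
// invented for illustration.
template <typename T> T Wrap(T v) { return v; }

template <typename T> struct Limits {
  static const T zero; // definition is instantiated lazily, on use
};
template <typename T> const T Limits<T>::zero = T();

int consume() {
  int a = Wrap(41);              // queues an implicit instantiation of Wrap<int>
  return a + Limits<int>::zero;  // queues Limits<int>::zero's definition
}
// Both definitions stay on the pending-instantiation queues until the end of
// the translation unit, when PerformPendingInstantiations(/*LocalOnly=*/false)
// instantiates them.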
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaCUDA.cpp
//===--- SemaCUDA.cpp - Semantic Analysis for CUDA constructs -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// \file /// \brief This file implements semantic analysis for CUDA constructs. /// //===----------------------------------------------------------------------===// #include "clang/Sema/Sema.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SmallVector.h" using namespace clang; ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc) { FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl(); if (!ConfigDecl) return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use) << "cudaConfigureCall"); QualType ConfigQTy = ConfigDecl->getType(); DeclRefExpr *ConfigDR = new (Context) DeclRefExpr(ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc); MarkFunctionReferenced(LLLLoc, ConfigDecl); return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, nullptr, /*IsExecConfig=*/true); } /// IdentifyCUDATarget - Determine the CUDA compilation target for this function Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) { if (D->hasAttr<CUDAInvalidTargetAttr>()) return CFT_InvalidTarget; if (D->hasAttr<CUDAGlobalAttr>()) return CFT_Global; if (D->hasAttr<CUDADeviceAttr>()) { if (D->hasAttr<CUDAHostAttr>()) return CFT_HostDevice; return CFT_Device; } else if (D->hasAttr<CUDAHostAttr>()) { return CFT_Host; } else if (D->isImplicit()) { // Some implicit declarations (like intrinsic functions) are not marked. // Set the most lenient target on them for maximal flexibility. return CFT_HostDevice; } return CFT_Host; } bool Sema::CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) { // The CUDADisableTargetCallChecks short-circuits this check: we assume all // cross-target calls are valid. if (getLangOpts().CUDADisableTargetCallChecks) return false; CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller), CalleeTarget = IdentifyCUDATarget(Callee); // If one of the targets is invalid, the check always fails, no matter what // the other target is. if (CallerTarget == CFT_InvalidTarget || CalleeTarget == CFT_InvalidTarget) return true; // CUDA B.1.1 "The __device__ qualifier declares a function that is [...] // Callable from the device only." if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device) return true; // CUDA B.1.2 "The __global__ qualifier declares a function that is [...] // Callable from the host only." // CUDA B.1.3 "The __host__ qualifier declares a function that is [...] // Callable from the host only." if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) && (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global)) return true; // CUDA B.1.3 "The __device__ and __host__ qualifiers can be used together // however, in which case the function is compiled for both the host and the // device. The __CUDA_ARCH__ macro [...] can be used to differentiate code // paths between host and device." if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice) { // If the caller is implicit then the check always passes. 
if (Caller->isImplicit()) return false; bool InDeviceMode = getLangOpts().CUDAIsDevice; if (!InDeviceMode && CalleeTarget != CFT_Host) return true; if (InDeviceMode && CalleeTarget != CFT_Device) { // Allow host device functions to call host functions if explicitly // requested. if (CalleeTarget == CFT_Host && getLangOpts().CUDAAllowHostCallsFromHostDevice) { Diag(Caller->getLocation(), diag::warn_host_calls_from_host_device) << Callee->getNameAsString() << Caller->getNameAsString(); return false; } return true; } } return false; } /// When an implicitly-declared special member has to invoke more than one /// base/field special member, conflicts may occur in the targets of these /// members. For example, if one base's member is __host__ and another's is /// __device__, it's a conflict. /// This function figures out if the given targets \param Target1 and /// \param Target2 conflict, and if they do not it fills in /// \param ResolvedTarget with a target that resolves for both calls. /// \return true if there's a conflict, false otherwise. static bool resolveCalleeCUDATargetConflict(Sema::CUDAFunctionTarget Target1, Sema::CUDAFunctionTarget Target2, Sema::CUDAFunctionTarget *ResolvedTarget) { if (Target1 == Sema::CFT_Global && Target2 == Sema::CFT_Global) { // TODO: this shouldn't happen, really. Methods cannot be marked __global__. // Clang should detect this earlier and produce an error. Then this // condition can be changed to an assertion. return true; } if (Target1 == Sema::CFT_HostDevice) { *ResolvedTarget = Target2; } else if (Target2 == Sema::CFT_HostDevice) { *ResolvedTarget = Target1; } else if (Target1 != Target2) { return true; } else { *ResolvedTarget = Target1; } return false; } bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose) { llvm::Optional<CUDAFunctionTarget> InferredTarget; // We're going to invoke special member lookup; mark that these special // members are called from this one, and not from its caller. ContextRAII MethodContext(*this, MemberDecl); // Look for special members in base classes that should be invoked from here. // Infer the target of this member based on the ones it should call. // Skip direct and indirect virtual bases for abstract classes.
llvm::SmallVector<const CXXBaseSpecifier *, 16> Bases; for (const auto &B : ClassDecl->bases()) { if (!B.isVirtual()) { Bases.push_back(&B); } } if (!ClassDecl->isAbstract()) { for (const auto &VB : ClassDecl->vbases()) { Bases.push_back(&VB); } } for (const auto *B : Bases) { const RecordType *BaseType = B->getType()->getAs<RecordType>(); if (!BaseType) { continue; } CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl()); Sema::SpecialMemberOverloadResult *SMOR = LookupSpecialMember(BaseClassDecl, CSM, /* ConstArg */ ConstRHS, /* VolatileArg */ false, /* RValueThis */ false, /* ConstThis */ false, /* VolatileThis */ false); if (!SMOR || !SMOR->getMethod()) { continue; } CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR->getMethod()); if (!InferredTarget.hasValue()) { InferredTarget = BaseMethodTarget; } else { bool ResolutionError = resolveCalleeCUDATargetConflict( InferredTarget.getValue(), BaseMethodTarget, InferredTarget.getPointer()); if (ResolutionError) { if (Diagnose) { Diag(ClassDecl->getLocation(), diag::note_implicit_member_target_infer_collision) << (unsigned)CSM << InferredTarget.getValue() << BaseMethodTarget; } MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context)); return true; } } } // Same as for bases, but now for special members of fields. for (const auto *F : ClassDecl->fields()) { if (F->isInvalidDecl()) { continue; } const RecordType *FieldType = Context.getBaseElementType(F->getType())->getAs<RecordType>(); if (!FieldType) { continue; } CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(FieldType->getDecl()); Sema::SpecialMemberOverloadResult *SMOR = LookupSpecialMember(FieldRecDecl, CSM, /* ConstArg */ ConstRHS && !F->isMutable(), /* VolatileArg */ false, /* RValueThis */ false, /* ConstThis */ false, /* VolatileThis */ false); if (!SMOR || !SMOR->getMethod()) { continue; } CUDAFunctionTarget FieldMethodTarget = IdentifyCUDATarget(SMOR->getMethod()); if (!InferredTarget.hasValue()) { InferredTarget = FieldMethodTarget; } else { bool ResolutionError = resolveCalleeCUDATargetConflict( InferredTarget.getValue(), FieldMethodTarget, InferredTarget.getPointer()); if (ResolutionError) { if (Diagnose) { Diag(ClassDecl->getLocation(), diag::note_implicit_member_target_infer_collision) << (unsigned)CSM << InferredTarget.getValue() << FieldMethodTarget; } MemberDecl->addAttr(CUDAInvalidTargetAttr::CreateImplicit(Context)); return true; } } } if (InferredTarget.hasValue()) { if (InferredTarget.getValue() == CFT_Device) { MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context)); } else if (InferredTarget.getValue() == CFT_Host) { MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context)); } else { MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context)); MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context)); } } else { // If no target was inferred, mark this member as __host__ __device__; // it's the least restrictive option that can be invoked from any target. MemberDecl->addAttr(CUDADeviceAttr::CreateImplicit(Context)); MemberDecl->addAttr(CUDAHostAttr::CreateImplicit(Context)); } return false; }
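// Editor's sketch (not from the original file): the call-target matrix that
// CheckCUDATarget enforces, in invented CUDA code. h, d, g, hd, and
// host_caller are hypothetical names.
__host__ void h();
__device__ void d();
__global__ void g();

__host__ __device__ void hd() {
  // In device compilation, a call to h() here is an error unless the
  // CUDAAllowHostCallsFromHostDevice language option downgrades it to the
  // warn_host_calls_from_host_device warning; in host compilation, a call
  // to d() from here is an error.
}

void host_caller() {
  h();            // OK: host calls host
  // d();         // error: host code cannot call a __device__ function
  g<<<1, 1>>>();  // OK: kernel launch; the <<<...>>> execution configuration
                  // is what ActOnCUDAExecConfigExpr lowers to a
                  // cudaConfigureCall invocation
}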
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
//===--- SemaCXXScopeSpec.cpp - Semantic Analysis for C++ scope specifiers-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements C++ semantic analysis for scope specifiers. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "TypeLocBuilder.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Template.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/raw_ostream.h" using namespace clang; /// \brief Find the current instantiation that is associated with the given /// type. static CXXRecordDecl *getCurrentInstantiationOf(QualType T, DeclContext *CurContext) { if (T.isNull()) return nullptr; const Type *Ty = T->getCanonicalTypeInternal().getTypePtr(); if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl()); if (!Record->isDependentContext() || Record->isCurrentInstantiation(CurContext)) return Record; return nullptr; } else if (isa<InjectedClassNameType>(Ty)) return cast<InjectedClassNameType>(Ty)->getDecl(); else return nullptr; } /// \brief Compute the DeclContext that is associated with the given type. /// /// \param T the type for which we are attempting to find a DeclContext. /// /// \returns the declaration context represented by the type T, /// or NULL if the declaration context cannot be computed (e.g., because it is /// dependent and not the current instantiation). DeclContext *Sema::computeDeclContext(QualType T) { if (!T->isDependentType()) if (const TagType *Tag = T->getAs<TagType>()) return Tag->getDecl(); return ::getCurrentInstantiationOf(T, CurContext); } /// \brief Compute the DeclContext that is associated with the given /// scope specifier. /// /// \param SS the C++ scope specifier as it appears in the source /// /// \param EnteringContext when true, we will be entering the context of /// this scope specifier, so we can retrieve the declaration context of a /// class template or class template partial specialization even if it is /// not the current instantiation. /// /// \returns the declaration context represented by the scope specifier @p SS, /// or NULL if the declaration context cannot be computed (e.g., because it is /// dependent and not the current instantiation). DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext) { if (!SS.isSet() || SS.isInvalid()) return nullptr; NestedNameSpecifier *NNS = SS.getScopeRep(); if (NNS->isDependent()) { // If this nested-name-specifier refers to the current // instantiation, return its DeclContext. if (CXXRecordDecl *Record = getCurrentInstantiationOf(NNS)) return Record; if (EnteringContext) { const Type *NNSType = NNS->getAsType(); if (!NNSType) { return nullptr; } // Look through type alias templates, per C++0x [temp.dep.type]p1.
NNSType = Context.getCanonicalType(NNSType); if (const TemplateSpecializationType *SpecType = NNSType->getAs<TemplateSpecializationType>()) { // We are entering the context of the nested name specifier, so try to // match the nested name specifier to either a primary class template // or a class template partial specialization. if (ClassTemplateDecl *ClassTemplate = dyn_cast_or_null<ClassTemplateDecl>( SpecType->getTemplateName().getAsTemplateDecl())) { QualType ContextType = Context.getCanonicalType(QualType(SpecType, 0)); // If the type of the nested name specifier is the same as the // injected class name of the named class template, we're entering // into that class template definition. QualType Injected = ClassTemplate->getInjectedClassNameSpecialization(); if (Context.hasSameType(Injected, ContextType)) return ClassTemplate->getTemplatedDecl(); // If the type of the nested name specifier is the same as the // type of one of the class template's class template partial // specializations, we're entering into the definition of that // class template partial specialization. if (ClassTemplatePartialSpecializationDecl *PartialSpec = ClassTemplate->findPartialSpecialization(ContextType)) return PartialSpec; } } else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) { // The nested name specifier refers to a member of a class template. return RecordT->getDecl(); } } return nullptr; } switch (NNS->getKind()) { case NestedNameSpecifier::Identifier: llvm_unreachable("Dependent nested-name-specifier has no DeclContext"); case NestedNameSpecifier::Namespace: return NNS->getAsNamespace(); case NestedNameSpecifier::NamespaceAlias: return NNS->getAsNamespaceAlias()->getNamespace(); case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: { const TagType *Tag = NNS->getAsType()->getAs<TagType>(); assert(Tag && "Non-tag type in nested-name-specifier"); return Tag->getDecl(); } case NestedNameSpecifier::Global: return Context.getTranslationUnitDecl(); case NestedNameSpecifier::Super: return NNS->getAsRecordDecl(); } llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); } bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) { if (!SS.isSet() || SS.isInvalid()) return false; return SS.getScopeRep()->isDependent(); } /// \brief If the given nested name specifier refers to the current /// instantiation, return the declaration that corresponds to that /// current instantiation (C++0x [temp.dep.type]p1). /// /// \param NNS a dependent nested name specifier. CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) { assert(getLangOpts().CPlusPlus && "Only callable in C++"); assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed"); if (!NNS->getAsType()) return nullptr; QualType T = QualType(NNS->getAsType(), 0); return ::getCurrentInstantiationOf(T, CurContext); } /// \brief Require that the context specified by SS be complete. /// /// If SS refers to a type, this routine checks whether the type is /// complete enough (or can be made complete enough) for name lookup /// into the DeclContext. A type that is not yet completed can be /// considered "complete enough" if it is a class/struct/union/enum /// that is currently being defined. Or, if we have a type that names /// a class template specialization that is not a complete type, we /// will attempt to instantiate that class template. 
bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC) { assert(DC && "given null context"); TagDecl *tag = dyn_cast<TagDecl>(DC); // If this is a dependent type, then we consider it complete. if (!tag || tag->isDependentContext()) return false; // If we're currently defining this type, then lookup into the // type is okay: don't complain that it isn't complete yet. QualType type = Context.getTypeDeclType(tag); const TagType *tagType = type->getAs<TagType>(); if (tagType && tagType->isBeingDefined()) return false; SourceLocation loc = SS.getLastQualifierNameLoc(); if (loc.isInvalid()) loc = SS.getRange().getBegin(); // The type must be complete. if (RequireCompleteType(loc, type, diag::err_incomplete_nested_name_spec, SS.getRange())) { SS.SetInvalid(SS.getRange()); return true; } // Fixed enum types are complete, but they aren't valid as scopes // until we see a definition, so awkwardly pull out this special // case. // FIXME: The definition might not be visible; complain if it is not. const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType); if (!enumType || enumType->getDecl()->isCompleteDefinition()) return false; // Try to instantiate the definition, if this is a specialization of an // enumeration temploid. EnumDecl *ED = enumType->getDecl(); if (EnumDecl *Pattern = ED->getInstantiatedFromMemberEnum()) { MemberSpecializationInfo *MSI = ED->getMemberSpecializationInfo(); if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) { if (InstantiateEnum(loc, ED, Pattern, getTemplateInstantiationArgs(ED), TSK_ImplicitInstantiation)) { SS.SetInvalid(SS.getRange()); return true; } return false; } } Diag(loc, diag::err_incomplete_nested_name_spec) << type << SS.getRange(); SS.SetInvalid(SS.getRange()); return true; } bool Sema::ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS) { SS.MakeGlobal(Context, CCLoc); return false; } bool Sema::ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS) { CXXRecordDecl *RD = nullptr; for (Scope *S = getCurScope(); S; S = S->getParent()) { if (S->isFunctionScope()) { if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(S->getEntity())) RD = MD->getParent(); break; } if (S->isClassScope()) { RD = cast<CXXRecordDecl>(S->getEntity()); break; } } if (!RD) { Diag(SuperLoc, diag::err_invalid_super_scope); return true; } else if (RD->isLambda()) { Diag(SuperLoc, diag::err_super_in_lambda_unsupported); return true; } else if (RD->getNumBases() == 0) { Diag(SuperLoc, diag::err_no_base_classes) << RD->getName(); return true; } SS.MakeSuper(Context, RD, SuperLoc, ColonColonLoc); return false; } /// \brief Determines whether the given declaration is a valid, acceptable /// result for name lookup of a nested-name-specifier. /// \param SD Declaration checked for nested-name-specifier. /// \param IsExtension If not null and the declaration is accepted as an /// extension, the pointed-to variable is assigned true. bool Sema::isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *IsExtension) { if (!SD) return false; // Namespace and namespace aliases are fine. if (isa<NamespaceDecl>(SD) || isa<NamespaceAliasDecl>(SD)) return true; if (!isa<TypeDecl>(SD)) return false; // Determine whether we have a class (or, in C++11, an enum) or // a typedef thereof. If so, build the nested-name-specifier.
QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD)); if (T->isDependentType()) return true; if (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) { if (TD->getUnderlyingType()->isRecordType()) return true; if (TD->getUnderlyingType()->isEnumeralType()) { if (Context.getLangOpts().CPlusPlus11) return true; if (IsExtension) *IsExtension = true; } } else if (isa<RecordDecl>(SD)) { return true; } else if (isa<EnumDecl>(SD)) { if (Context.getLangOpts().CPlusPlus11) return true; if (IsExtension) *IsExtension = true; } return false; } /// \brief If the given nested-name-specifier begins with a bare identifier /// (e.g., Base::), perform name lookup for that identifier as a /// nested-name-specifier within the given scope, and return the result of that /// name lookup. NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) { if (!S || !NNS) return nullptr; while (NNS->getPrefix()) NNS = NNS->getPrefix(); if (NNS->getKind() != NestedNameSpecifier::Identifier) return nullptr; LookupResult Found(*this, NNS->getAsIdentifier(), SourceLocation(), LookupNestedNameSpecifierName); LookupName(Found, S); assert(!Found.isAmbiguous() && "Cannot handle ambiguities here yet"); if (!Found.isSingleResult()) return nullptr; NamedDecl *Result = Found.getFoundDecl(); if (isAcceptableNestedNameSpecifier(Result)) return Result; return nullptr; } bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectTypePtr) { QualType ObjectType = GetTypeFromParser(ObjectTypePtr); LookupResult Found(*this, &II, IdLoc, LookupNestedNameSpecifierName); // Determine where to perform name lookup DeclContext *LookupCtx = nullptr; bool isDependent = false; if (!ObjectType.isNull()) { // This nested-name-specifier occurs in a member access expression, e.g., // x->B::f, and we are looking into the type of the object. assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist"); LookupCtx = computeDeclContext(ObjectType); isDependent = ObjectType->isDependentType(); } else if (SS.isSet()) { // This nested-name-specifier occurs after another nested-name-specifier, // so look into the context associated with the prior nested-name-specifier. LookupCtx = computeDeclContext(SS, false); isDependent = isDependentScopeSpecifier(SS); Found.setContextRange(SS.getRange()); } if (LookupCtx) { // Perform "qualified" name lookup into the declaration context we // computed, which is either the type of the base of a member access // expression or the declaration context associated with a prior // nested-name-specifier. // The declaration context must be complete. if (!LookupCtx->isDependentContext() && RequireCompleteDeclContext(SS, LookupCtx)) return false; LookupQualifiedName(Found, LookupCtx); } else if (isDependent) { return false; } else { LookupName(Found, S); } Found.suppressDiagnostics(); if (NamedDecl *ND = Found.getAsSingle<NamedDecl>()) return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND); return false; } namespace { // Callback to only accept typo corrections that can form a valid // nested-name-specifier, as determined by isAcceptableNestedNameSpecifier.
class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback { public: explicit NestedNameSpecifierValidatorCCC(Sema &SRef) : SRef(SRef) {} bool ValidateCandidate(const TypoCorrection &candidate) override { return SRef.isAcceptableNestedNameSpecifier(candidate.getCorrectionDecl()); } private: Sema &SRef; }; } /// \brief Build a new nested-name-specifier for "identifier::", as described /// by ActOnCXXNestedNameSpecifier. /// /// \param S Scope in which the nested-name-specifier occurs. /// \param Identifier Identifier in the sequence "identifier" "::". /// \param IdentifierLoc Location of the \p Identifier. /// \param CCLoc Location of "::" following Identifier. /// \param ObjectType Type of postfix expression if the nested-name-specifier /// occurs in a construct like: <tt>ptr->nns::f</tt>. /// \param EnteringContext If true, enter the context specified by the /// nested-name-specifier. /// \param SS Optional nested name specifier preceding the identifier. /// \param ScopeLookupResult Provides the result of name lookup within the /// scope of the nested-name-specifier that was computed at template /// definition time. /// \param ErrorRecoveryLookup Specifies if the method is called to improve /// error recovery and what kind of recovery is performed. /// \param IsCorrectedToColon If not null, suggestions to replace '::' with ':' /// are allowed. The bool value pointed to by this parameter is set to /// 'true' if the identifier is treated as if it was followed by ':', /// not '::'. /// /// This routine differs only slightly from ActOnCXXNestedNameSpecifier, in /// that it contains an extra parameter \p ScopeLookupResult, which provides /// the result of name lookup within the scope of the nested-name-specifier /// that was computed at template definition time. /// /// If ErrorRecoveryLookup is true, then this call is used to improve error /// recovery. This means that it should not emit diagnostics, it should /// just return true on failure. It also means it should only return a valid /// scope if it *knows* that the result is correct. It should not return in a /// dependent context, for example. Nor will it extend \p SS with the scope /// specifier. bool Sema::BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon) { LookupResult Found(*this, &Identifier, IdentifierLoc, LookupNestedNameSpecifierName); // Determine where to perform name lookup DeclContext *LookupCtx = nullptr; bool isDependent = false; if (IsCorrectedToColon) *IsCorrectedToColon = false; if (!ObjectType.isNull()) { // This nested-name-specifier occurs in a member access expression, e.g., // x->B::f, and we are looking into the type of the object. assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist"); LookupCtx = computeDeclContext(ObjectType); isDependent = ObjectType->isDependentType(); } else if (SS.isSet()) { // This nested-name-specifier occurs after another nested-name-specifier, // so look into the context associated with the prior nested-name-specifier.
LookupCtx = computeDeclContext(SS, EnteringContext); isDependent = isDependentScopeSpecifier(SS); Found.setContextRange(SS.getRange()); } bool ObjectTypeSearchedInScope = false; if (LookupCtx) { // Perform "qualified" name lookup into the declaration context we // computed, which is either the type of the base of a member access // expression or the declaration context associated with a prior // nested-name-specifier. // The declaration context must be complete. if (!LookupCtx->isDependentContext() && RequireCompleteDeclContext(SS, LookupCtx)) return true; LookupQualifiedName(Found, LookupCtx); if (!ObjectType.isNull() && Found.empty()) { // C++ [basic.lookup.classref]p4: // If the id-expression in a class member access is a qualified-id of // the form // // class-name-or-namespace-name::... // // the class-name-or-namespace-name following the . or -> operator is // looked up both in the context of the entire postfix-expression and in // the scope of the class of the object expression. If the name is found // only in the scope of the class of the object expression, the name // shall refer to a class-name. If the name is found only in the // context of the entire postfix-expression, the name shall refer to a // class-name or namespace-name. [...] // // Qualified name lookup into a class will not find a namespace-name, // so we do not need to diagnose that case specifically. However, // this qualified name lookup may find nothing. In that case, perform // unqualified name lookup in the given scope (if available) or // reconstruct the result from when name lookup was performed at template // definition time. if (S) LookupName(Found, S); else if (ScopeLookupResult) Found.addDecl(ScopeLookupResult); ObjectTypeSearchedInScope = true; } } else if (!isDependent) { // Perform unqualified name lookup in the current scope. LookupName(Found, S); } // If we performed lookup into a dependent context and did not find anything, // that's fine: just build a dependent nested-name-specifier. if (Found.empty() && isDependent && !(LookupCtx && LookupCtx->isRecord() && (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() || !cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases()))) { // Don't speculate if we're just trying to improve error recovery. if (ErrorRecoveryLookup) return true; // We were not able to compute the declaration context for a dependent // base object type or prior nested-name-specifier, so this // nested-name-specifier refers to an unknown specialization. Just build // a dependent nested-name-specifier. SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc); return false; } // FIXME: Deal with ambiguities cleanly. if (Found.empty() && !ErrorRecoveryLookup) { // If identifier is not found as class-name-or-namespace-name, but is found // as other entity, don't look for typos. LookupResult R(*this, Found.getLookupNameInfo(), LookupOrdinaryName); if (LookupCtx) LookupQualifiedName(R, LookupCtx); else if (S && !isDependent) LookupName(R, S); if (!R.empty()) { // The identifier is found in ordinary lookup. If correction to colon is // allowed, suggest replacement to ':'. if (IsCorrectedToColon) { *IsCorrectedToColon = true; Diag(CCLoc, diag::err_nested_name_spec_is_not_class) << &Identifier << getLangOpts().CPlusPlus << FixItHint::CreateReplacement(CCLoc, ":"); if (NamedDecl *ND = R.getAsSingle<NamedDecl>()) Diag(ND->getLocation(), diag::note_declared_at); return true; } // Replacement '::' -> ':' is not allowed, just issue respective error. 
Diag(R.getNameLoc(), diag::err_expected_class_or_namespace) << &Identifier << getLangOpts().CPlusPlus; if (NamedDecl *ND = R.getAsSingle<NamedDecl>()) Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier; return true; } } if (Found.empty() && !ErrorRecoveryLookup && !getLangOpts().MSVCCompat) { // We haven't found anything, and we're not recovering from a // different kind of error, so look for typos. DeclarationName Name = Found.getLookupName(); Found.clear(); if (TypoCorrection Corrected = CorrectTypo( Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS, llvm::make_unique<NestedNameSpecifierValidatorCCC>(*this), CTK_ErrorRecovery, LookupCtx, EnteringContext)) { if (LookupCtx) { bool DroppedSpecifier = Corrected.WillReplaceSpecifier() && Name.getAsString() == Corrected.getAsString(getLangOpts()); if (DroppedSpecifier) SS.clear(); diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest) << Name << LookupCtx << DroppedSpecifier << SS.getRange()); } else diagnoseTypo(Corrected, PDiag(diag::err_undeclared_var_use_suggest) << Name); if (NamedDecl *ND = Corrected.getCorrectionDecl()) Found.addDecl(ND); Found.setLookupName(Corrected.getCorrection()); } else { Found.setLookupName(&Identifier); } } NamedDecl *SD = Found.getAsSingle<NamedDecl>(); bool IsExtension = false; bool AcceptSpec = isAcceptableNestedNameSpecifier(SD, &IsExtension); if (!AcceptSpec && IsExtension) { AcceptSpec = true; // HLSL Change: Suppress c++11 extension warnings for nested name specifier in HLSL2017 if (getLangOpts().HLSLVersion < hlsl::LangStd::v2017) Diag(IdentifierLoc, diag::ext_nested_name_spec_is_enum); } if (AcceptSpec) { if (!ObjectType.isNull() && !ObjectTypeSearchedInScope && !getLangOpts().CPlusPlus11) { // C++03 [basic.lookup.classref]p4: // [...] If the name is found in both contexts, the // class-name-or-namespace-name shall refer to the same entity. // // We already found the name in the scope of the object. Now, look // into the current scope (the scope of the postfix-expression) to // see if we can find the same name there. As above, if there is no // scope, reconstruct the result from the template instantiation itself. // // Note that C++11 does *not* perform this redundant lookup. NamedDecl *OuterDecl; if (S) { LookupResult FoundOuter(*this, &Identifier, IdentifierLoc, LookupNestedNameSpecifierName); LookupName(FoundOuter, S); OuterDecl = FoundOuter.getAsSingle<NamedDecl>(); } else OuterDecl = ScopeLookupResult; if (isAcceptableNestedNameSpecifier(OuterDecl) && OuterDecl->getCanonicalDecl() != SD->getCanonicalDecl() && (!isa<TypeDecl>(OuterDecl) || !isa<TypeDecl>(SD) || !Context.hasSameType( Context.getTypeDeclType(cast<TypeDecl>(OuterDecl)), Context.getTypeDeclType(cast<TypeDecl>(SD))))) { if (ErrorRecoveryLookup) return true; Diag(IdentifierLoc, diag::err_nested_name_member_ref_lookup_ambiguous) << &Identifier; Diag(SD->getLocation(), diag::note_ambig_member_ref_object_type) << ObjectType; Diag(OuterDecl->getLocation(), diag::note_ambig_member_ref_scope); // Fall through so that we'll pick the name we found in the object // type, since that's probably what the user wanted anyway. } } if (auto *TD = dyn_cast_or_null<TypedefNameDecl>(SD)) MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false); // If we're just performing this lookup for error-recovery purposes, // don't extend the nested-name-specifier. Just return now. if (ErrorRecoveryLookup) return false; // The use of a nested name specifier may trigger deprecation warnings. 
DiagnoseUseOfDecl(SD, CCLoc); if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(SD)) { SS.Extend(Context, Namespace, IdentifierLoc, CCLoc); return false; } if (NamespaceAliasDecl *Alias = dyn_cast<NamespaceAliasDecl>(SD)) { SS.Extend(Context, Alias, IdentifierLoc, CCLoc); return false; } QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD)); TypeLocBuilder TLB; if (isa<InjectedClassNameType>(T)) { InjectedClassNameTypeLoc InjectedTL = TLB.push<InjectedClassNameTypeLoc>(T); InjectedTL.setNameLoc(IdentifierLoc); } else if (isa<RecordType>(T)) { RecordTypeLoc RecordTL = TLB.push<RecordTypeLoc>(T); RecordTL.setNameLoc(IdentifierLoc); } else if (isa<TypedefType>(T)) { TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(T); TypedefTL.setNameLoc(IdentifierLoc); } else if (isa<EnumType>(T)) { EnumTypeLoc EnumTL = TLB.push<EnumTypeLoc>(T); EnumTL.setNameLoc(IdentifierLoc); } else if (isa<TemplateTypeParmType>(T)) { TemplateTypeParmTypeLoc TemplateTypeTL = TLB.push<TemplateTypeParmTypeLoc>(T); TemplateTypeTL.setNameLoc(IdentifierLoc); } else if (isa<UnresolvedUsingType>(T)) { UnresolvedUsingTypeLoc UnresolvedTL = TLB.push<UnresolvedUsingTypeLoc>(T); UnresolvedTL.setNameLoc(IdentifierLoc); } else if (isa<SubstTemplateTypeParmType>(T)) { SubstTemplateTypeParmTypeLoc TL = TLB.push<SubstTemplateTypeParmTypeLoc>(T); TL.setNameLoc(IdentifierLoc); } else if (isa<SubstTemplateTypeParmPackType>(T)) { SubstTemplateTypeParmPackTypeLoc TL = TLB.push<SubstTemplateTypeParmPackTypeLoc>(T); TL.setNameLoc(IdentifierLoc); } else { llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier"); } if (T->isEnumeralType()) Diag(IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec); SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T), CCLoc); return false; } // Otherwise, we have an error case. If we don't want diagnostics, just // return an error now. if (ErrorRecoveryLookup) return true; // If we didn't find anything during our lookup, try again with // ordinary name lookup, which can help us produce better error // messages. if (Found.empty()) { Found.clear(LookupOrdinaryName); LookupName(Found, S); } // In Microsoft mode, if we are within a templated function and we can't // resolve Identifier, then extend the SS with Identifier. This will have // the effect of resolving Identifier during template instantiation. // The goal is to be able to resolve a function call whose // nested-name-specifier is located inside a dependent base class. // Example: // // class C { // public: // static void foo2() { } // }; // template <class T> class A { public: typedef C D; }; // // template <class T> class B : public A<T> { // public: // void foo() { D::foo2(); } // }; if (getLangOpts().MSVCCompat) { DeclContext *DC = LookupCtx ? 
LookupCtx : CurContext; if (DC->isDependentContext() && DC->isFunctionOrMethod()) { CXXRecordDecl *ContainingClass = dyn_cast<CXXRecordDecl>(DC->getParent()); if (ContainingClass && ContainingClass->hasAnyDependentBases()) { Diag(IdentifierLoc, diag::ext_undeclared_unqual_id_with_dependent_base) << &Identifier << ContainingClass; SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc); return false; } } } if (!Found.empty()) { if (TypeDecl *TD = Found.getAsSingle<TypeDecl>()) Diag(IdentifierLoc, diag::err_expected_class_or_namespace) << QualType(TD->getTypeForDecl(), 0) << getLangOpts().CPlusPlus; else { Diag(IdentifierLoc, diag::err_expected_class_or_namespace) << &Identifier << getLangOpts().CPlusPlus; if (NamedDecl *ND = Found.getAsSingle<NamedDecl>()) Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier; } } else if (SS.isSet()) Diag(IdentifierLoc, diag::err_no_member) << &Identifier << LookupCtx << SS.getRange(); else Diag(IdentifierLoc, diag::err_undeclared_var_use) << &Identifier; return true; } bool Sema::ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup, bool *IsCorrectedToColon) { if (SS.isInvalid()) return true; return BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, CCLoc, GetTypeFromParser(ObjectType), EnteringContext, SS, /*ScopeLookupResult=*/nullptr, false, IsCorrectedToColon); } bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc) { if (SS.isInvalid() || DS.getTypeSpecType() == DeclSpec::TST_error) return true; assert(DS.getTypeSpecType() == DeclSpec::TST_decltype); QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc()); if (!T->isDependentType() && !T->getAs<TagType>()) { Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class_or_namespace) << T << getLangOpts().CPlusPlus; return true; } TypeLocBuilder TLB; DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T); DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc()); SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T), ColonColonLoc); return false; } /// IsInvalidUnlessNestedName - This method is used for error recovery /// purposes to determine whether the specified identifier is only valid as /// a nested name specifier, for example a namespace name. It is /// conservatively correct to always return false from this method. /// /// The arguments are the same as those passed to ActOnCXXNestedNameSpecifier. bool Sema::IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext) { if (SS.isInvalid()) return false; return !BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, ColonLoc, GetTypeFromParser(ObjectType), EnteringContext, SS, /*ScopeLookupResult=*/nullptr, true); } bool Sema::ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext) { if (SS.isInvalid()) return true; // Translate the parser's template argument list in our AST format. 
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc); translateTemplateArguments(TemplateArgsIn, TemplateArgs); DependentTemplateName *DTN = Template.get().getAsDependentTemplateName(); if (DTN && DTN->isIdentifier()) { // Handle a dependent template specialization for which we cannot resolve // the template name. assert(DTN->getQualifier() == SS.getScopeRep()); QualType T = Context.getDependentTemplateSpecializationType(ETK_None, DTN->getQualifier(), DTN->getIdentifier(), TemplateArgs); // Create source-location information for this type. TypeLocBuilder Builder; DependentTemplateSpecializationTypeLoc SpecTL = Builder.push<DependentTemplateSpecializationTypeLoc>(T); SpecTL.setElaboratedKeywordLoc(SourceLocation()); SpecTL.setQualifierLoc(SS.getWithLocInContext(Context)); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateNameLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo()); SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T), CCLoc); return false; } TemplateDecl *TD = Template.get().getAsTemplateDecl(); if (Template.get().getAsOverloadedTemplate() || DTN || isa<FunctionTemplateDecl>(TD) || isa<VarTemplateDecl>(TD)) { SourceRange R(TemplateNameLoc, RAngleLoc); if (SS.getRange().isValid()) R.setBegin(SS.getRange().getBegin()); Diag(CCLoc, diag::err_non_type_template_in_nested_name_specifier) << (TD && isa<VarTemplateDecl>(TD)) << Template.get() << R; NoteAllFoundTemplates(Template.get()); return true; } // We were able to resolve the template name to an actual template. // Build an appropriate nested-name-specifier. QualType T = CheckTemplateIdType(Template.get(), TemplateNameLoc, TemplateArgs); if (T.isNull()) return true; // Alias template specializations can produce types which are not valid // nested name specifiers. if (!T->isDependentType() && !T->getAs<TagType>()) { Diag(TemplateNameLoc, diag::err_nested_name_spec_non_tag) << T; NoteAllFoundTemplates(Template.get()); return true; } // Provide source-location information for the template specialization type. 
TypeLocBuilder Builder; TemplateSpecializationTypeLoc SpecTL = Builder.push<TemplateSpecializationTypeLoc>(T); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateNameLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo()); SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T), CCLoc); return false; } namespace { /// \brief A structure that stores a nested-name-specifier annotation, /// including both the nested-name-specifier and its source-location /// information, which is laid out immediately after this struct in memory. struct NestedNameSpecifierAnnotation { NestedNameSpecifier *NNS; }; } void *Sema::SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS) { if (SS.isEmpty() || SS.isInvalid()) return nullptr; void *Mem = Context.Allocate((sizeof(NestedNameSpecifierAnnotation) + SS.location_size()), llvm::alignOf<NestedNameSpecifierAnnotation>()); NestedNameSpecifierAnnotation *Annotation = new (Mem) NestedNameSpecifierAnnotation; Annotation->NNS = SS.getScopeRep(); memcpy(Annotation + 1, SS.location_data(), SS.location_size()); return Annotation; } void Sema::RestoreNestedNameSpecifierAnnotation(void *AnnotationPtr, SourceRange AnnotationRange, CXXScopeSpec &SS) { if (!AnnotationPtr) { SS.SetInvalid(AnnotationRange); return; } NestedNameSpecifierAnnotation *Annotation = static_cast<NestedNameSpecifierAnnotation *>(AnnotationPtr); SS.Adopt(NestedNameSpecifierLoc(Annotation->NNS, Annotation + 1)); } bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) { assert(SS.isSet() && "Parser passed invalid CXXScopeSpec."); NestedNameSpecifier *Qualifier = SS.getScopeRep(); // There are only two places a well-formed program may qualify a // declarator: first, when defining a namespace or class member // out-of-line, and second, when naming an explicitly-qualified // friend function. The latter case is governed by // C++03 [basic.lookup.unqual]p10: // In a friend declaration naming a member function, a name used // in the function declarator and not part of a template-argument // in a template-id is first looked up in the scope of the member // function's class. If it is not found, or if the name is part of // a template-argument in a template-id, the look up is as // described for unqualified names in the definition of the class // granting friendship. // i.e. we don't push a scope unless it's a class member. switch (Qualifier->getKind()) { case NestedNameSpecifier::Global: case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: // These are always namespace scopes. We never want to enter a // namespace scope from anything but a file context. return CurContext->getRedeclContext()->isFileContext(); case NestedNameSpecifier::Identifier: case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: case NestedNameSpecifier::Super: // These are never namespace scopes. return true; } llvm_unreachable("Invalid NestedNameSpecifier::Kind!"); } /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec.
bool Sema::ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS) { assert(SS.isSet() && "Parser passed invalid CXXScopeSpec."); if (SS.isInvalid()) return true; DeclContext *DC = computeDeclContext(SS, true); if (!DC) return true; // Before we enter a declarator's context, we need to make sure that // it is a complete declaration context. if (!DC->isDependentContext() && RequireCompleteDeclContext(SS, DC)) return true; EnterDeclaratorContext(S, DC); // Rebuild the nested name specifier for the new scope. if (DC->isDependentContext()) RebuildNestedNameSpecifierInCurrentInstantiation(SS); return false; } /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void Sema::ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS) { assert(SS.isSet() && "Parser passed invalid CXXScopeSpec."); if (SS.isInvalid()) return; assert(!SS.isInvalid() && computeDeclContext(SS, true) && "exiting declarator scope we never really entered"); ExitDeclaratorContext(S); }
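// Editor's sketch (not from the original file): invented C++ showing the
// nested-name-specifier forms this file resolves. The names ns, Outer,
// Inner, E, and use are hypothetical.
namespace ns {
  struct Outer {
    struct Inner { static int x; };
    enum E { A };
  };
}

int use() {
  int v = ns::Outer::Inner::x; // namespace:: and class:: specifiers
  int e = ns::Outer::E::A;     // enum as a scope: C++11, or the HLSL 2017+
                               // carve-out in BuildCXXNestedNameSpecifier
  return v + e;
}

// A qualified declarator-id: the parser brackets this definition with
// ActOnCXXEnterDeclaratorScope / ActOnCXXExitDeclaratorScope so that names
// in the declarator are looked up inside ns::Outer::Inner.
int ns::Outer::Inner::x = 0;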
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaFixItUtils.cpp
//===--- SemaFixItUtils.cpp - Sema FixIts ---------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines helper classes for generation of Sema FixItHints. // //===----------------------------------------------------------------------===// #include "clang/AST/ASTContext.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Sema.h" #include "clang/Sema/SemaFixItUtils.h" using namespace clang; bool ConversionFixItGenerator::compareTypesSimple(CanQualType From, CanQualType To, Sema &S, SourceLocation Loc, ExprValueKind FromVK) { if (!To.isAtLeastAsQualifiedAs(From)) return false; From = From.getNonReferenceType(); To = To.getNonReferenceType(); // If both are pointer types, work with the pointee types. if (isa<PointerType>(From) && isa<PointerType>(To)) { From = S.Context.getCanonicalType( (cast<PointerType>(From))->getPointeeType()); To = S.Context.getCanonicalType( (cast<PointerType>(To))->getPointeeType()); } const CanQualType FromUnq = From.getUnqualifiedType(); const CanQualType ToUnq = To.getUnqualifiedType(); if ((FromUnq == ToUnq || (S.IsDerivedFrom(FromUnq, ToUnq)) ) && To.isAtLeastAsQualifiedAs(From)) return true; return false; } bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr, const QualType FromTy, const QualType ToTy, Sema &S) { if (!FullExpr) return false; const CanQualType FromQTy = S.Context.getCanonicalType(FromTy); const CanQualType ToQTy = S.Context.getCanonicalType(ToTy); const SourceLocation Begin = FullExpr->getSourceRange().getBegin(); const SourceLocation End = S.PP.getLocForEndOfToken(FullExpr->getSourceRange() .getEnd()); // Strip the implicit casts - those are implied by the compiler, not the // original source code. const Expr* Expr = FullExpr->IgnoreImpCasts(); bool NeedParen = true; if (isa<ArraySubscriptExpr>(Expr) || isa<CallExpr>(Expr) || isa<DeclRefExpr>(Expr) || isa<CastExpr>(Expr) || isa<CXXNewExpr>(Expr) || isa<CXXConstructExpr>(Expr) || isa<CXXDeleteExpr>(Expr) || isa<CXXNoexceptExpr>(Expr) || isa<CXXPseudoDestructorExpr>(Expr) || isa<CXXScalarValueInitExpr>(Expr) || isa<CXXThisExpr>(Expr) || isa<CXXTypeidExpr>(Expr) || isa<CXXUnresolvedConstructExpr>(Expr) || isa<ObjCMessageExpr>(Expr) || isa<ObjCPropertyRefExpr>(Expr) || isa<ObjCProtocolExpr>(Expr) || isa<MemberExpr>(Expr) || isa<ParenExpr>(FullExpr) || isa<ParenListExpr>(Expr) || isa<SizeOfPackExpr>(Expr) || isa<UnaryOperator>(Expr)) NeedParen = false; // Check if the argument needs to be dereferenced: // (type * -> type) or (type * -> type &). if (const PointerType *FromPtrTy = dyn_cast<PointerType>(FromQTy)) { OverloadFixItKind FixKind = OFIK_Dereference; bool CanConvert = CompareTypes( S.Context.getCanonicalType(FromPtrTy->getPointeeType()), ToQTy, S, Begin, VK_LValue); if (CanConvert) { // Do not suggest dereferencing a Null pointer. 
if (Expr->IgnoreParenCasts()-> isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) return false; if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Expr)) { if (UO->getOpcode() == UO_AddrOf) { FixKind = OFIK_RemoveTakeAddress; Hints.push_back(FixItHint::CreateRemoval( CharSourceRange::getTokenRange(Begin, Begin))); } } else if (NeedParen) { Hints.push_back(FixItHint::CreateInsertion(Begin, "*(")); Hints.push_back(FixItHint::CreateInsertion(End, ")")); } else { Hints.push_back(FixItHint::CreateInsertion(Begin, "*")); } NumConversionsFixed++; if (NumConversionsFixed == 1) Kind = FixKind; return true; } } // Check if the pointer to the argument needs to be passed: // (type -> type *) or (type & -> type *). if (isa<PointerType>(ToQTy)) { bool CanConvert = false; OverloadFixItKind FixKind = OFIK_TakeAddress; // Only suggest taking address of L-values. if (!Expr->isLValue() || Expr->getObjectKind() != OK_Ordinary) return false; CanConvert = CompareTypes(S.Context.getPointerType(FromQTy), ToQTy, S, Begin, VK_RValue); if (CanConvert) { if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Expr)) { if (UO->getOpcode() == UO_Deref) { FixKind = OFIK_RemoveDereference; Hints.push_back(FixItHint::CreateRemoval( CharSourceRange::getTokenRange(Begin, Begin))); } } else if (NeedParen) { Hints.push_back(FixItHint::CreateInsertion(Begin, "&(")); Hints.push_back(FixItHint::CreateInsertion(End, ")")); } else { Hints.push_back(FixItHint::CreateInsertion(Begin, "&")); } NumConversionsFixed++; if (NumConversionsFixed == 1) Kind = FixKind; return true; } } return false; } static bool isMacroDefined(const Sema &S, SourceLocation Loc, StringRef Name) { return (bool)S.PP.getMacroDefinitionAtLoc(&S.getASTContext().Idents.get(Name), Loc); } static std::string getScalarZeroExpressionForType( const Type &T, SourceLocation Loc, const Sema &S) { assert(T.isScalarType() && "use scalar types only"); // Suggest "0" for non-enumeration scalar types, unless we can find a // better initializer. if (T.isEnumeralType()) return std::string(); if ((T.isObjCObjectPointerType() || T.isBlockPointerType()) && isMacroDefined(S, Loc, "nil")) return "nil"; if (T.isRealFloatingType()) return "0.0"; if (T.isBooleanType() && (S.LangOpts.CPlusPlus || isMacroDefined(S, Loc, "false"))) return "false"; if (T.isPointerType() || T.isMemberPointerType()) { if (S.LangOpts.CPlusPlus11) return "nullptr"; if (isMacroDefined(S, Loc, "NULL")) return "NULL"; } if (T.isCharType()) return "'\\0'"; if (T.isWideCharType()) return "L'\\0'"; if (T.isChar16Type()) return "u'\\0'"; if (T.isChar32Type()) return "U'\\0'"; return "0"; } std::string Sema::getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const { if (T->isScalarType()) { std::string s = getScalarZeroExpressionForType(*T, Loc, *this); if (!s.empty()) s = " = " + s; return s; } const CXXRecordDecl *RD = T->getAsCXXRecordDecl(); if (!RD || !RD->hasDefinition()) return std::string(); if (LangOpts.CPlusPlus11 && !RD->hasUserProvidedDefaultConstructor()) return "{}"; if (RD->isAggregate()) return " = {}"; return std::string(); } std::string Sema::getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const { return getScalarZeroExpressionForType(*T, Loc, *this); }
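// Editor's sketch (not from the original file): invented calls showing the
// two conversion fix-its built above, OFIK_Dereference and OFIK_TakeAddress.
// takesInt, takesPtr, and caller are hypothetical names.
void takesInt(int);
void takesPtr(int *);

void caller(int v, int *p) {
  // takesInt(p);  // error: int * -> int; the fix-it inserts '*': takesInt(*p)
  // takesPtr(v);  // error: int -> int *; the fix-it inserts '&': takesPtr(&v)
  takesInt(*p);    // what the dereference fix-it produces
  takesPtr(&v);    // what the take-address fix-it produces
}
// For zero-init suggestions, getFixItZeroInitializerForType would offer
// " = 0" for an int, " = 0.0" for a double, " = nullptr" for a pointer in
// C++11, or "{}" / " = {}" for suitable class types.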
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaExceptionSpec.cpp
//===--- SemaExceptionSpec.cpp - C++ Exception Specifications ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file provides Sema routines for C++ exception specification testing. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/SourceManager.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" namespace clang { static const FunctionProtoType *GetUnderlyingFunction(QualType T) { if (const PointerType *PtrTy = T->getAs<PointerType>()) T = PtrTy->getPointeeType(); else if (const ReferenceType *RefTy = T->getAs<ReferenceType>()) T = RefTy->getPointeeType(); else if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) T = MPTy->getPointeeType(); return T->getAs<FunctionProtoType>(); } /// HACK: libstdc++ has a bug where it shadows std::swap with a member /// swap function then tries to call std::swap unqualified from the exception /// specification of that function. This function detects whether we're in /// such a case and turns off delay-parsing of exception specifications. bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) { auto *RD = dyn_cast<CXXRecordDecl>(CurContext); // All the problem cases are member functions named "swap" within class // templates declared directly within namespace std. if (!RD || RD->getEnclosingNamespaceContext() != getStdNamespace() || !RD->getIdentifier() || !RD->getDescribedClassTemplate() || !D.getIdentifier() || !D.getIdentifier()->isStr("swap")) return false; // Only apply this hack within a system header. if (!Context.getSourceManager().isInSystemHeader(D.getLocStart())) return false; return llvm::StringSwitch<bool>(RD->getIdentifier()->getName()) .Case("array", true) .Case("pair", true) .Case("priority_queue", true) .Case("stack", true) .Case("queue", true) .Default(false); } /// CheckSpecifiedExceptionType - Check if the given type is valid in an /// exception specification. Incomplete types, or pointers to incomplete types /// other than void are not allowed. /// /// \param[in,out] T The exception type. This will be decayed to a pointer type /// when the input is an array or a function type. bool Sema::CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range) { // C++11 [except.spec]p2: // A type cv T, "array of T", or "function returning T" denoted // in an exception-specification is adjusted to type T, "pointer to T", or // "pointer to function returning T", respectively. // // We also apply this rule in C++98. if (T->isArrayType()) T = Context.getArrayDecayedType(T); else if (T->isFunctionType()) T = Context.getPointerType(T); int Kind = 0; QualType PointeeT = T; if (const PointerType *PT = T->getAs<PointerType>()) { PointeeT = PT->getPointeeType(); Kind = 1; // cv void* is explicitly permitted, despite being a pointer to an // incomplete type. 
if (PointeeT->isVoidType()) return false; } else if (const ReferenceType *RT = T->getAs<ReferenceType>()) { PointeeT = RT->getPointeeType(); Kind = 2; if (RT->isRValueReferenceType()) { // C++11 [except.spec]p2: // A type denoted in an exception-specification shall not denote [...] // an rvalue reference type. Diag(Range.getBegin(), diag::err_rref_in_exception_spec) << T << Range; return true; } } // C++11 [except.spec]p2: // A type denoted in an exception-specification shall not denote an // incomplete type other than a class currently being defined [...]. // A type denoted in an exception-specification shall not denote a // pointer or reference to an incomplete type, other than (cv) void* or a // pointer or reference to a class currently being defined. if (!(PointeeT->isRecordType() && PointeeT->getAs<RecordType>()->isBeingDefined()) && RequireCompleteType(Range.getBegin(), PointeeT, diag::err_incomplete_in_exception_spec, Kind, Range)) return true; return false; } /// CheckDistantExceptionSpec - Check if the given type is a pointer or pointer /// to member to a function with an exception specification. This means that /// it is invalid to add another level of indirection. bool Sema::CheckDistantExceptionSpec(QualType T) { if (const PointerType *PT = T->getAs<PointerType>()) T = PT->getPointeeType(); else if (const MemberPointerType *PT = T->getAs<MemberPointerType>()) T = PT->getPointeeType(); else return false; const FunctionProtoType *FnT = T->getAs<FunctionProtoType>(); if (!FnT) return false; return FnT->hasExceptionSpec(); } const FunctionProtoType * Sema::ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT) { if (FPT->getExceptionSpecType() == EST_Unparsed) { Diag(Loc, diag::err_exception_spec_not_parsed); return nullptr; } if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) return FPT; FunctionDecl *SourceDecl = FPT->getExceptionSpecDecl(); const FunctionProtoType *SourceFPT = SourceDecl->getType()->castAs<FunctionProtoType>(); // If the exception specification has already been resolved, just return it. if (!isUnresolvedExceptionSpec(SourceFPT->getExceptionSpecType())) return SourceFPT; // Compute or instantiate the exception specification now. if (SourceFPT->getExceptionSpecType() == EST_Unevaluated) EvaluateImplicitExceptionSpec(Loc, cast<CXXMethodDecl>(SourceDecl)); else InstantiateExceptionSpec(Loc, SourceDecl); const FunctionProtoType *Proto = SourceDecl->getType()->castAs<FunctionProtoType>(); if (Proto->getExceptionSpecType() == clang::EST_Unparsed) { Diag(Loc, diag::err_exception_spec_not_parsed); Proto = nullptr; } return Proto; } void Sema::UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI) { // If we've fully resolved the exception specification, notify listeners. if (!isUnresolvedExceptionSpec(ESI.Type)) if (auto *Listener = getASTMutationListener()) Listener->ResolvedExceptionSpec(FD); for (auto *Redecl : FD->redecls()) Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI); } #if 0 // HLSL Change Starts /// Determine whether a function has an implicitly-generated exception /// specification. static bool hasImplicitExceptionSpec(FunctionDecl *Decl) { if (!isa<CXXDestructorDecl>(Decl) && Decl->getDeclName().getCXXOverloadedOperator() != OO_Delete && Decl->getDeclName().getCXXOverloadedOperator() != OO_Array_Delete) return false; // For a function that the user didn't declare: // - if this is a destructor, its exception specification is implicit. 
// - if this is 'operator delete' or 'operator delete[]', the exception // specification is as-if an explicit exception specification was given // (per [basic.stc.dynamic]p2). if (!Decl->getTypeSourceInfo()) return isa<CXXDestructorDecl>(Decl); const FunctionProtoType *Ty = Decl->getTypeSourceInfo()->getType()->getAs<FunctionProtoType>(); return !Ty->hasExceptionSpec(); } #endif // HLSL Change Ends bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) { // HLSL Change Starts // Rather than fixing for param modifiers, comment out - this is N/A for HLSL return false; #if 0 // HLSL Change Ends OverloadedOperatorKind OO = New->getDeclName().getCXXOverloadedOperator(); bool IsOperatorNew = OO == OO_New || OO == OO_Array_New; bool MissingExceptionSpecification = false; bool MissingEmptyExceptionSpecification = false; unsigned DiagID = diag::err_mismatched_exception_spec; bool ReturnValueOnError = true; if (getLangOpts().MicrosoftExt) { DiagID = diag::ext_mismatched_exception_spec; ReturnValueOnError = false; } // Check the types as written: they must match before any exception // specification adjustment is applied. if (!CheckEquivalentExceptionSpec( PDiag(DiagID), PDiag(diag::note_previous_declaration), Old->getType()->getAs<FunctionProtoType>(), Old->getLocation(), New->getType()->getAs<FunctionProtoType>(), New->getLocation(), &MissingExceptionSpecification, &MissingEmptyExceptionSpecification, /*AllowNoexceptAllMatchWithNoSpec=*/true, IsOperatorNew)) { // C++11 [except.spec]p4 [DR1492]: // If a declaration of a function has an implicit // exception-specification, other declarations of the function shall // not specify an exception-specification. if (getLangOpts().CPlusPlus11 && hasImplicitExceptionSpec(Old) != hasImplicitExceptionSpec(New)) { Diag(New->getLocation(), diag::ext_implicit_exception_spec_mismatch) << hasImplicitExceptionSpec(Old); if (!Old->getLocation().isInvalid()) Diag(Old->getLocation(), diag::note_previous_declaration); } return false; } // The failure was something other than an missing exception // specification; return an error, except in MS mode where this is a warning. if (!MissingExceptionSpecification) return ReturnValueOnError; const FunctionProtoType *NewProto = New->getType()->castAs<FunctionProtoType>(); // The new function declaration is only missing an empty exception // specification "throw()". If the throw() specification came from a // function in a system header that has C linkage, just add an empty // exception specification to the "new" declaration. This is an // egregious workaround for glibc, which adds throw() specifications // to many libc functions as an optimization. Unfortunately, that // optimization isn't permitted by the C++ standard, so we're forced // to work around it here. if (MissingEmptyExceptionSpecification && NewProto && (Old->getLocation().isInvalid() || Context.getSourceManager().isInSystemHeader(Old->getLocation())) && Old->isExternC()) { New->setType(Context.getFunctionType( NewProto->getReturnType(), NewProto->getParamTypes(), NewProto->getExtProtoInfo().withExceptionSpec(EST_DynamicNone))); return false; } const FunctionProtoType *OldProto = Old->getType()->castAs<FunctionProtoType>(); FunctionProtoType::ExceptionSpecInfo ESI = OldProto->getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = OldProto->exceptions(); } else if (ESI.Type == EST_ComputedNoexcept) { // FIXME: We can't just take the expression from the old prototype. 
It // likely contains references to the old prototype's parameters. } // Update the type of the function with the appropriate exception // specification. New->setType(Context.getFunctionType( NewProto->getReturnType(), NewProto->getParamTypes(), NewProto->getExtProtoInfo().withExceptionSpec(ESI))); // Warn about the lack of exception specification. SmallString<128> ExceptionSpecString; llvm::raw_svector_ostream OS(ExceptionSpecString); switch (OldProto->getExceptionSpecType()) { case EST_DynamicNone: OS << "throw()"; break; case EST_Dynamic: { OS << "throw("; bool OnFirstException = true; for (const auto &E : OldProto->exceptions()) { if (OnFirstException) OnFirstException = false; else OS << ", "; OS << E.getAsString(getPrintingPolicy()); } OS << ")"; break; } case EST_BasicNoexcept: OS << "noexcept"; break; case EST_ComputedNoexcept: OS << "noexcept("; assert(OldProto->getNoexceptExpr() != nullptr && "Expected non-null Expr"); OldProto->getNoexceptExpr()->printPretty(OS, nullptr, getPrintingPolicy()); OS << ")"; break; default: llvm_unreachable("This spec type is compatible with none."); } OS.flush(); SourceLocation FixItLoc; if (TypeSourceInfo *TSInfo = New->getTypeSourceInfo()) { TypeLoc TL = TSInfo->getTypeLoc().IgnoreParens(); if (FunctionTypeLoc FTLoc = TL.getAs<FunctionTypeLoc>()) FixItLoc = getLocForEndOfToken(FTLoc.getLocalRangeEnd()); } if (FixItLoc.isInvalid()) Diag(New->getLocation(), diag::warn_missing_exception_specification) << New << OS.str(); else { // FIXME: This will get more complicated with C++0x // late-specified return types. Diag(New->getLocation(), diag::warn_missing_exception_specification) << New << OS.str() << FixItHint::CreateInsertion(FixItLoc, " " + OS.str().str()); } if (!Old->getLocation().isInvalid()) Diag(Old->getLocation(), diag::note_previous_declaration); return false; #endif // HLSL Change } /// CheckEquivalentExceptionSpec - Check if the two types have equivalent /// exception specifications. Exception specifications are equivalent if /// they allow exactly the same set of exception types. It does not matter how /// that is achieved. See C++ [except.spec]p2. bool Sema::CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc) { unsigned DiagID = diag::err_mismatched_exception_spec; if (getLangOpts().MicrosoftExt) DiagID = diag::ext_mismatched_exception_spec; bool Result = CheckEquivalentExceptionSpec(PDiag(DiagID), PDiag(diag::note_previous_declaration), Old, OldLoc, New, NewLoc); // In Microsoft mode, mismatching exception specifications just cause a warning. if (getLangOpts().MicrosoftExt) return false; return Result; } /// CheckEquivalentExceptionSpec - Check if the two types have compatible /// exception specifications. See C++ [except.spec]p3. /// /// \return \c false if the exception specifications match, \c true if there is /// a problem. If \c true is returned, either a diagnostic has already been /// produced or \c *MissingExceptionSpecification is set to \c true. bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification, bool*MissingEmptyExceptionSpecification, bool AllowNoexceptAllMatchWithNoSpec, bool IsOperatorNew) { // Just completely ignore this under -fno-exceptions. 
if (!getLangOpts().CXXExceptions) return false; if (MissingExceptionSpecification) *MissingExceptionSpecification = false; if (MissingEmptyExceptionSpecification) *MissingEmptyExceptionSpecification = false; Old = ResolveExceptionSpec(NewLoc, Old); if (!Old) return false; New = ResolveExceptionSpec(NewLoc, New); if (!New) return false; // C++0x [except.spec]p3: Two exception-specifications are compatible if: // - both are non-throwing, regardless of their form, // - both have the form noexcept(constant-expression) and the constant- // expressions are equivalent, // - both are dynamic-exception-specifications that have the same set of // adjusted types. // // C++0x [except.spec]p12: An exception-specification is non-throwing if it is // of the form throw(), noexcept, or noexcept(constant-expression) where the // constant-expression yields true. // // C++0x [except.spec]p4: If any declaration of a function has an exception- // specifier that is not a noexcept-specification allowing all exceptions, // all declarations [...] of that function shall have a compatible // exception-specification. // // That last point basically means that noexcept(false) matches no spec. // It's considered when AllowNoexceptAllMatchWithNoSpec is true. ExceptionSpecificationType OldEST = Old->getExceptionSpecType(); ExceptionSpecificationType NewEST = New->getExceptionSpecType(); assert(!isUnresolvedExceptionSpec(OldEST) && !isUnresolvedExceptionSpec(NewEST) && "Shouldn't see unknown exception specifications here"); // Shortcut the case where both have no spec. if (OldEST == EST_None && NewEST == EST_None) return false; FunctionProtoType::NoexceptResult OldNR = Old->getNoexceptSpec(Context); FunctionProtoType::NoexceptResult NewNR = New->getNoexceptSpec(Context); if (OldNR == FunctionProtoType::NR_BadNoexcept || NewNR == FunctionProtoType::NR_BadNoexcept) return false; // Dependent noexcept specifiers are compatible with each other, but nothing // else. // One noexcept is compatible with another if the argument is the same if (OldNR == NewNR && OldNR != FunctionProtoType::NR_NoNoexcept && NewNR != FunctionProtoType::NR_NoNoexcept) return false; if (OldNR != NewNR && OldNR != FunctionProtoType::NR_NoNoexcept && NewNR != FunctionProtoType::NR_NoNoexcept) { Diag(NewLoc, DiagID); if (NoteID.getDiagID() != 0 && OldLoc.isValid()) Diag(OldLoc, NoteID); return true; } // The MS extension throw(...) is compatible with itself. if (OldEST == EST_MSAny && NewEST == EST_MSAny) return false; // It's also compatible with no spec. if ((OldEST == EST_None && NewEST == EST_MSAny) || (OldEST == EST_MSAny && NewEST == EST_None)) return false; // It's also compatible with noexcept(false). if (OldEST == EST_MSAny && NewNR == FunctionProtoType::NR_Throw) return false; if (NewEST == EST_MSAny && OldNR == FunctionProtoType::NR_Throw) return false; // As described above, noexcept(false) matches no spec only for functions. if (AllowNoexceptAllMatchWithNoSpec) { if (OldEST == EST_None && NewNR == FunctionProtoType::NR_Throw) return false; if (NewEST == EST_None && OldNR == FunctionProtoType::NR_Throw) return false; } // Any non-throwing specifications are compatible. bool OldNonThrowing = OldNR == FunctionProtoType::NR_Nothrow || OldEST == EST_DynamicNone; bool NewNonThrowing = NewNR == FunctionProtoType::NR_Nothrow || NewEST == EST_DynamicNone; if (OldNonThrowing && NewNonThrowing) return false; // As a special compatibility feature, under C++0x we accept no spec and // throw(std::bad_alloc) as equivalent for operator new and operator new[]. 
// This is because the implicit declaration changed, but old code would break. if (getLangOpts().CPlusPlus11 && IsOperatorNew) { const FunctionProtoType *WithExceptions = nullptr; if (OldEST == EST_None && NewEST == EST_Dynamic) WithExceptions = New; else if (OldEST == EST_Dynamic && NewEST == EST_None) WithExceptions = Old; if (WithExceptions && WithExceptions->getNumExceptions() == 1) { // One has no spec, the other throw(something). If that something is // std::bad_alloc, all conditions are met. QualType Exception = *WithExceptions->exception_begin(); if (CXXRecordDecl *ExRecord = Exception->getAsCXXRecordDecl()) { IdentifierInfo* Name = ExRecord->getIdentifier(); if (Name && Name->getName() == "bad_alloc") { // It's called bad_alloc, but is it in std? if (ExRecord->isInStdNamespace()) { return false; } } } } } // At this point, the only remaining valid case is two matching dynamic // specifications. We return here unless both specifications are dynamic. if (OldEST != EST_Dynamic || NewEST != EST_Dynamic) { if (MissingExceptionSpecification && Old->hasExceptionSpec() && !New->hasExceptionSpec()) { // The old type has an exception specification of some sort, but // the new type does not. *MissingExceptionSpecification = true; if (MissingEmptyExceptionSpecification && OldNonThrowing) { // The old type has a throw() or noexcept(true) exception specification // and the new type has no exception specification, and the caller asked // to handle this itself. *MissingEmptyExceptionSpecification = true; } return true; } Diag(NewLoc, DiagID); if (NoteID.getDiagID() != 0 && OldLoc.isValid()) Diag(OldLoc, NoteID); return true; } assert(OldEST == EST_Dynamic && NewEST == EST_Dynamic && "Exception compatibility logic error: non-dynamic spec slipped through."); bool Success = true; // Both have a dynamic exception spec. Collect the first set, then compare // to the second. llvm::SmallPtrSet<CanQualType, 8> OldTypes, NewTypes; for (const auto &I : Old->exceptions()) OldTypes.insert(Context.getCanonicalType(I).getUnqualifiedType()); for (const auto &I : New->exceptions()) { CanQualType TypePtr = Context.getCanonicalType(I).getUnqualifiedType(); if(OldTypes.count(TypePtr)) NewTypes.insert(TypePtr); else Success = false; } Success = Success && OldTypes.size() == NewTypes.size(); if (Success) { return false; } Diag(NewLoc, DiagID); if (NoteID.getDiagID() != 0 && OldLoc.isValid()) Diag(OldLoc, NoteID); return true; } /// CheckExceptionSpecSubset - Check whether the second function type's /// exception specification is a subset (or equivalent) of the first function /// type. This is used by override and pointer assignment checks. bool Sema::CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc) { // Just auto-succeed under -fno-exceptions. if (!getLangOpts().CXXExceptions) return false; // FIXME: As usual, we could be more specific in our error messages, but // that better waits until we've got types with source locations. if (!SubLoc.isValid()) SubLoc = SuperLoc; // Resolve the exception specifications, if needed. Superset = ResolveExceptionSpec(SuperLoc, Superset); if (!Superset) return false; Subset = ResolveExceptionSpec(SubLoc, Subset); if (!Subset) return false; ExceptionSpecificationType SuperEST = Superset->getExceptionSpecType(); // If superset contains everything, we're done. 
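// Illustrative sketch (hypothetical declarations): "contains everything"
// covers both an entirely unannotated superset and the Microsoft throw(...)
// form, e.g.
//   void (*super)();              // no exception-specification
//   void (*ms)() throw(...);      // MS extension
// Either form lets any subset through, leaving only the recursive
// parameter/return check below.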
if (SuperEST == EST_None || SuperEST == EST_MSAny) return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc); // If there are dependent noexcept specs, assume everything is fine. Unlike // with the equivalency check, this is safe in this case, because we don't // want to merge declarations. Checks after instantiation will catch any // omissions we make here. // We also shortcut checking if a noexcept expression was bad. FunctionProtoType::NoexceptResult SuperNR =Superset->getNoexceptSpec(Context); if (SuperNR == FunctionProtoType::NR_BadNoexcept || SuperNR == FunctionProtoType::NR_Dependent) return false; // Another case of the superset containing everything. if (SuperNR == FunctionProtoType::NR_Throw) return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc); ExceptionSpecificationType SubEST = Subset->getExceptionSpecType(); assert(!isUnresolvedExceptionSpec(SuperEST) && !isUnresolvedExceptionSpec(SubEST) && "Shouldn't see unknown exception specifications here"); // It does not. If the subset contains everything, we've failed. if (SubEST == EST_None || SubEST == EST_MSAny) { Diag(SubLoc, DiagID); if (NoteID.getDiagID() != 0) Diag(SuperLoc, NoteID); return true; } FunctionProtoType::NoexceptResult SubNR = Subset->getNoexceptSpec(Context); if (SubNR == FunctionProtoType::NR_BadNoexcept || SubNR == FunctionProtoType::NR_Dependent) return false; // Another case of the subset containing everything. if (SubNR == FunctionProtoType::NR_Throw) { Diag(SubLoc, DiagID); if (NoteID.getDiagID() != 0) Diag(SuperLoc, NoteID); return true; } // If the subset contains nothing, we're done. if (SubEST == EST_DynamicNone || SubNR == FunctionProtoType::NR_Nothrow) return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc); // Otherwise, if the superset contains nothing, we've failed. if (SuperEST == EST_DynamicNone || SuperNR == FunctionProtoType::NR_Nothrow) { Diag(SubLoc, DiagID); if (NoteID.getDiagID() != 0) Diag(SuperLoc, NoteID); return true; } assert(SuperEST == EST_Dynamic && SubEST == EST_Dynamic && "Exception spec subset: non-dynamic case slipped through."); // Neither contains everything or nothing. Do a proper comparison. for (const auto &SubI : Subset->exceptions()) { // Take one type from the subset. QualType CanonicalSubT = Context.getCanonicalType(SubI); // Unwrap pointers and references so that we can do checks within a class // hierarchy. Don't unwrap member pointers; they don't have hierarchy // conversions on the pointee. bool SubIsPointer = false; if (const ReferenceType *RefTy = CanonicalSubT->getAs<ReferenceType>()) CanonicalSubT = RefTy->getPointeeType(); if (const PointerType *PtrTy = CanonicalSubT->getAs<PointerType>()) { CanonicalSubT = PtrTy->getPointeeType(); SubIsPointer = true; } bool SubIsClass = CanonicalSubT->isRecordType(); CanonicalSubT = CanonicalSubT.getLocalUnqualifiedType(); CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, /*DetectVirtual=*/false); bool Contained = false; // Make sure it's in the superset. for (const auto &SuperI : Superset->exceptions()) { QualType CanonicalSuperT = Context.getCanonicalType(SuperI); // SubT must be SuperT or derived from it, or pointer or reference to // such types. 
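// For instance (illustrative, assuming the usual standard-library
// hierarchy): a superset entry of 'std::exception &' covers a subset entry
// of 'std::runtime_error &', because the latter publicly derives from the
// former and the reference is unwrapped before the base-class walk.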
if (const ReferenceType *RefTy = CanonicalSuperT->getAs<ReferenceType>()) CanonicalSuperT = RefTy->getPointeeType(); if (SubIsPointer) { if (const PointerType *PtrTy = CanonicalSuperT->getAs<PointerType>()) CanonicalSuperT = PtrTy->getPointeeType(); else { continue; } } CanonicalSuperT = CanonicalSuperT.getLocalUnqualifiedType(); // If the types are the same, move on to the next type in the subset. if (CanonicalSubT == CanonicalSuperT) { Contained = true; break; } // Otherwise we need to check the inheritance. if (!SubIsClass || !CanonicalSuperT->isRecordType()) continue; Paths.clear(); if (!IsDerivedFrom(CanonicalSubT, CanonicalSuperT, Paths)) continue; if (Paths.isAmbiguous(Context.getCanonicalType(CanonicalSuperT))) continue; // Do this check from a context without privileges. switch (CheckBaseClassAccess(SourceLocation(), CanonicalSuperT, CanonicalSubT, Paths.front(), /*Diagnostic*/ 0, /*ForceCheck*/ true, /*ForceUnprivileged*/ true)) { case AR_accessible: break; case AR_inaccessible: continue; case AR_dependent: llvm_unreachable("access check dependent for unprivileged context"); case AR_delayed: llvm_unreachable("access check delayed in non-declaration"); } Contained = true; break; } if (!Contained) { Diag(SubLoc, DiagID); if (NoteID.getDiagID() != 0) Diag(SuperLoc, NoteID); return true; } } // We've run half the gauntlet. return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc); } static bool CheckSpecForTypesEquivalent(Sema &S, const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, QualType Target, SourceLocation TargetLoc, QualType Source, SourceLocation SourceLoc) { const FunctionProtoType *TFunc = GetUnderlyingFunction(Target); if (!TFunc) return false; const FunctionProtoType *SFunc = GetUnderlyingFunction(Source); if (!SFunc) return false; return S.CheckEquivalentExceptionSpec(DiagID, NoteID, TFunc, TargetLoc, SFunc, SourceLoc); } /// CheckParamExceptionSpec - Check if the parameter and return types of the /// two functions have equivalent exception specs. This is part of the /// assignment and override compatibility check. We do not check the parameters /// of parameter function pointers recursively, as no sane programmer would /// even be able to write such a function type. bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc) { if (CheckSpecForTypesEquivalent( *this, PDiag(diag::err_deep_exception_specs_differ) << 0, PDiag(), Target->getReturnType(), TargetLoc, Source->getReturnType(), SourceLoc)) return true; // We shouldn't even be testing this unless the arguments are otherwise // compatible. assert(Target->getNumParams() == Source->getNumParams() && "Functions have different argument counts."); for (unsigned i = 0, E = Target->getNumParams(); i != E; ++i) { if (CheckSpecForTypesEquivalent( *this, PDiag(diag::err_deep_exception_specs_differ) << 1, PDiag(), Target->getParamType(i), TargetLoc, Source->getParamType(i), SourceLoc)) return true; } return false; } bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType) { // First we check for applicability. // Target type must be a function, function pointer or function reference. const FunctionProtoType *ToFunc = GetUnderlyingFunction(ToType); if (!ToFunc || ToFunc->hasDependentExceptionSpec()) return false; // SourceType must be a function or function pointer. 
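// A sketch of the conversions this function guards (identifiers are
// hypothetical):
//   void source() throw(int);
//   void (*ok)() throw(int, float) = source;  // fine: {int} is a subset
//   void (*bad)() throw(float) = source;      // rejected: 'int' could escape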
const FunctionProtoType *FromFunc = GetUnderlyingFunction(From->getType()); if (!FromFunc || FromFunc->hasDependentExceptionSpec()) return false; // Now we've got the correct types on both sides, check their compatibility. // This means that the source of the conversion can only throw a subset of // the exceptions of the target, and any exception specs on arguments or // return types must be equivalent. // // FIXME: If there is a nested dependent exception specification, we should // not be checking it here. This is fine: // template<typename T> void f() { // void (*p)(void (*) throw(T)); // void (*q)(void (*) throw(int)) = p; // } // ... because it might be instantiated with T=int. return CheckExceptionSpecSubset(PDiag(diag::err_incompatible_exception_specs), PDiag(), ToFunc, From->getSourceRange().getBegin(), FromFunc, SourceLocation()); } bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old) { // If the new exception specification hasn't been parsed yet, skip the check. // We'll get called again once it's been parsed. if (New->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() == EST_Unparsed) return false; if (getLangOpts().CPlusPlus11 && isa<CXXDestructorDecl>(New)) { // Don't check uninstantiated template destructors at all. We can only // synthesize correct specs after the template is instantiated. if (New->getParent()->isDependentType()) return false; if (New->getParent()->isBeingDefined()) { // The destructor might be updated once the definition is finished. So // remember it and check later. DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old)); return false; } } // If the old exception specification hasn't been parsed yet, remember that // we need to perform this check when we get to the end of the outermost // lexically-surrounding class. if (Old->getType()->castAs<FunctionProtoType>()->getExceptionSpecType() == EST_Unparsed) { DelayedExceptionSpecChecks.push_back(std::make_pair(New, Old)); return false; } unsigned DiagID = diag::err_override_exception_spec; if (getLangOpts().MicrosoftExt) DiagID = diag::ext_override_exception_spec; return CheckExceptionSpecSubset(PDiag(DiagID), PDiag(diag::note_overridden_virtual_function), Old->getType()->getAs<FunctionProtoType>(), Old->getLocation(), New->getType()->getAs<FunctionProtoType>(), New->getLocation()); } static CanThrowResult canSubExprsThrow(Sema &S, const Expr *E) { CanThrowResult R = CT_Cannot; for (const Stmt *SubStmt : E->children()) { R = mergeCanThrow(R, S.canThrow(cast<Expr>(SubStmt))); if (R == CT_Can) break; } return R; } static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D) { assert(D && "Expected decl"); // See if we can get a function type from the decl somehow. const ValueDecl *VD = dyn_cast<ValueDecl>(D); if (!VD) // If we have no clue what we're calling, assume the worst. return CT_Can; // As an extension, we assume that __attribute__((nothrow)) functions don't // throw. 
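// e.g. (hypothetical declaration, for illustration):
//   extern void fast_log(const char *) __attribute__((nothrow));
// A call to fast_log is classified CT_Cannot by the check below.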
if (isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>()) return CT_Cannot; QualType T = VD->getType(); const FunctionProtoType *FT; if ((FT = T->getAs<FunctionProtoType>())) { } else if (const PointerType *PT = T->getAs<PointerType>()) FT = PT->getPointeeType()->getAs<FunctionProtoType>(); else if (const ReferenceType *RT = T->getAs<ReferenceType>()) FT = RT->getPointeeType()->getAs<FunctionProtoType>(); else if (const MemberPointerType *MT = T->getAs<MemberPointerType>()) FT = MT->getPointeeType()->getAs<FunctionProtoType>(); else if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) FT = BT->getPointeeType()->getAs<FunctionProtoType>(); if (!FT) return CT_Can; FT = S.ResolveExceptionSpec(E->getLocStart(), FT); if (!FT) return CT_Can; return FT->isNothrow(S.Context) ? CT_Cannot : CT_Can; } static CanThrowResult canDynamicCastThrow(const CXXDynamicCastExpr *DC) { if (DC->isTypeDependent()) return CT_Dependent; if (!DC->getTypeAsWritten()->isReferenceType()) return CT_Cannot; if (DC->getSubExpr()->isTypeDependent()) return CT_Dependent; return DC->getCastKind() == clang::CK_Dynamic? CT_Can : CT_Cannot; } static CanThrowResult canTypeidThrow(Sema &S, const CXXTypeidExpr *DC) { if (DC->isTypeOperand()) return CT_Cannot; Expr *Op = DC->getExprOperand(); if (Op->isTypeDependent()) return CT_Dependent; const RecordType *RT = Op->getType()->getAs<RecordType>(); if (!RT) return CT_Cannot; if (!cast<CXXRecordDecl>(RT->getDecl())->isPolymorphic()) return CT_Cannot; if (Op->Classify(S.Context).isPRValue()) return CT_Cannot; return CT_Can; } CanThrowResult Sema::canThrow(const Expr *E) { // C++ [expr.unary.noexcept]p3: // [Can throw] if in a potentially-evaluated context the expression would // contain: switch (E->getStmtClass()) { case Expr::CXXThrowExprClass: // - a potentially evaluated throw-expression return CT_Can; case Expr::CXXDynamicCastExprClass: { // - a potentially evaluated dynamic_cast expression dynamic_cast<T>(v), // where T is a reference type, that requires a run-time check CanThrowResult CT = canDynamicCastThrow(cast<CXXDynamicCastExpr>(E)); if (CT == CT_Can) return CT; return mergeCanThrow(CT, canSubExprsThrow(*this, E)); } case Expr::CXXTypeidExprClass: // - a potentially evaluated typeid expression applied to a glvalue // expression whose type is a polymorphic class type return canTypeidThrow(*this, cast<CXXTypeidExpr>(E)); // - a potentially evaluated call to a function, member function, function // pointer, or member function pointer that does not have a non-throwing // exception-specification case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: case Expr::UserDefinedLiteralClass: { const CallExpr *CE = cast<CallExpr>(E); CanThrowResult CT; if (E->isTypeDependent()) CT = CT_Dependent; else if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) CT = CT_Cannot; else if (CE->getCalleeDecl()) CT = canCalleeThrow(*this, E, CE->getCalleeDecl()); else CT = CT_Can; if (CT == CT_Can) return CT; return mergeCanThrow(CT, canSubExprsThrow(*this, E)); } case Expr::CXXConstructExprClass: case Expr::CXXTemporaryObjectExprClass: { CanThrowResult CT = canCalleeThrow(*this, E, cast<CXXConstructExpr>(E)->getConstructor()); if (CT == CT_Can) return CT; return mergeCanThrow(CT, canSubExprsThrow(*this, E)); } case Expr::LambdaExprClass: { const LambdaExpr *Lambda = cast<LambdaExpr>(E); CanThrowResult CT = CT_Cannot; for (LambdaExpr::capture_init_iterator Cap = Lambda->capture_init_begin(), CapEnd = Lambda->capture_init_end(); Cap != 
CapEnd; ++Cap) CT = mergeCanThrow(CT, canThrow(*Cap)); return CT; } case Expr::CXXNewExprClass: { CanThrowResult CT; if (E->isTypeDependent()) CT = CT_Dependent; else CT = canCalleeThrow(*this, E, cast<CXXNewExpr>(E)->getOperatorNew()); if (CT == CT_Can) return CT; return mergeCanThrow(CT, canSubExprsThrow(*this, E)); } case Expr::CXXDeleteExprClass: { CanThrowResult CT; QualType DTy = cast<CXXDeleteExpr>(E)->getDestroyedType(); if (DTy.isNull() || DTy->isDependentType()) { CT = CT_Dependent; } else { CT = canCalleeThrow(*this, E, cast<CXXDeleteExpr>(E)->getOperatorDelete()); if (const RecordType *RT = DTy->getAs<RecordType>()) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); const CXXDestructorDecl *DD = RD->getDestructor(); if (DD) CT = mergeCanThrow(CT, canCalleeThrow(*this, E, DD)); } if (CT == CT_Can) return CT; } return mergeCanThrow(CT, canSubExprsThrow(*this, E)); } case Expr::CXXBindTemporaryExprClass: { // The bound temporary has to be destroyed again, which might throw. CanThrowResult CT = canCalleeThrow(*this, E, cast<CXXBindTemporaryExpr>(E)->getTemporary()->getDestructor()); if (CT == CT_Can) return CT; return mergeCanThrow(CT, canSubExprsThrow(*this, E)); } // ObjC message sends are like function calls, but never have exception // specs. case Expr::ObjCMessageExprClass: case Expr::ObjCPropertyRefExprClass: case Expr::ObjCSubscriptRefExprClass: return CT_Can; // All the ObjC literals that are implemented as calls are // potentially throwing unless we decide to close off that // possibility. case Expr::ObjCArrayLiteralClass: case Expr::ObjCDictionaryLiteralClass: case Expr::ObjCBoxedExprClass: return CT_Can; // Many other things have subexpressions, so we have to test those. // Some are simple: case Expr::ConditionalOperatorClass: case Expr::CompoundLiteralExprClass: case Expr::CXXConstCastExprClass: case Expr::CXXReinterpretCastExprClass: case Expr::CXXStdInitializerListExprClass: case Expr::DesignatedInitExprClass: case Expr::DesignatedInitUpdateExprClass: case Expr::ExprWithCleanupsClass: case Expr::ExtVectorElementExprClass: case Expr::ExtMatrixElementExprClass: // HLSL Change case Expr::HLSLVectorElementExprClass: // HLSL Change case Expr::InitListExprClass: case Expr::MemberExprClass: case Expr::ObjCIsaExprClass: case Expr::ObjCIvarRefExprClass: case Expr::ParenExprClass: case Expr::ParenListExprClass: case Expr::ShuffleVectorExprClass: case Expr::ConvertVectorExprClass: case Expr::VAArgExprClass: return canSubExprsThrow(*this, E); // Some might be dependent for other reasons. case Expr::ArraySubscriptExprClass: case Expr::BinaryOperatorClass: case Expr::CompoundAssignOperatorClass: case Expr::CStyleCastExprClass: case Expr::CXXStaticCastExprClass: case Expr::CXXFunctionalCastExprClass: case Expr::ImplicitCastExprClass: case Expr::MaterializeTemporaryExprClass: case Expr::UnaryOperatorClass: { CanThrowResult CT = E->isTypeDependent() ? CT_Dependent : CT_Cannot; return mergeCanThrow(CT, canSubExprsThrow(*this, E)); } // FIXME: We should handle StmtExpr, but that opens a MASSIVE can of worms. 
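// Illustration: a GNU statement expression such as
//   int x = ({ may_throw(); 42; });
// is conservatively classified CT_Can below rather than analyzed.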
case Expr::StmtExprClass: return CT_Can; case Expr::CXXDefaultArgExprClass: return canThrow(cast<CXXDefaultArgExpr>(E)->getExpr()); case Expr::CXXDefaultInitExprClass: return canThrow(cast<CXXDefaultInitExpr>(E)->getExpr()); case Expr::ChooseExprClass: if (E->isTypeDependent() || E->isValueDependent()) return CT_Dependent; return canThrow(cast<ChooseExpr>(E)->getChosenSubExpr()); case Expr::GenericSelectionExprClass: if (cast<GenericSelectionExpr>(E)->isResultDependent()) return CT_Dependent; return canThrow(cast<GenericSelectionExpr>(E)->getResultExpr()); // Some expressions are always dependent. case Expr::CXXDependentScopeMemberExprClass: case Expr::CXXUnresolvedConstructExprClass: case Expr::DependentScopeDeclRefExprClass: case Expr::CXXFoldExprClass: return CT_Dependent; case Expr::AsTypeExprClass: case Expr::BinaryConditionalOperatorClass: case Expr::BlockExprClass: case Expr::CUDAKernelCallExprClass: case Expr::DeclRefExprClass: case Expr::ObjCBridgedCastExprClass: case Expr::ObjCIndirectCopyRestoreExprClass: case Expr::ObjCProtocolExprClass: case Expr::ObjCSelectorExprClass: case Expr::OffsetOfExprClass: case Expr::PackExpansionExprClass: case Expr::PseudoObjectExprClass: case Expr::SubstNonTypeTemplateParmExprClass: case Expr::SubstNonTypeTemplateParmPackExprClass: case Expr::FunctionParmPackExprClass: case Expr::UnaryExprOrTypeTraitExprClass: case Expr::UnresolvedLookupExprClass: case Expr::UnresolvedMemberExprClass: case Expr::TypoExprClass: // FIXME: Can any of the above throw? If so, when? return CT_Cannot; case Expr::AddrLabelExprClass: case Expr::ArrayTypeTraitExprClass: case Expr::AtomicExprClass: case Expr::TypeTraitExprClass: case Expr::CXXBoolLiteralExprClass: case Expr::CXXNoexceptExprClass: case Expr::CXXNullPtrLiteralExprClass: case Expr::CXXPseudoDestructorExprClass: case Expr::CXXScalarValueInitExprClass: case Expr::CXXThisExprClass: case Expr::CXXUuidofExprClass: case Expr::CharacterLiteralClass: case Expr::ExpressionTraitExprClass: case Expr::FloatingLiteralClass: case Expr::GNUNullExprClass: case Expr::ImaginaryLiteralClass: case Expr::ImplicitValueInitExprClass: case Expr::IntegerLiteralClass: case Expr::NoInitExprClass: case Expr::ObjCEncodeExprClass: case Expr::ObjCStringLiteralClass: case Expr::ObjCBoolLiteralExprClass: case Expr::OpaqueValueExprClass: case Expr::PredefinedExprClass: case Expr::SizeOfPackExprClass: case Expr::StringLiteralClass: // These expressions can never throw. return CT_Cannot; case Expr::MSPropertyRefExprClass: llvm_unreachable("Invalid class for expression"); #define STMT(CLASS, PARENT) case Expr::CLASS##Class: #define STMT_RANGE(Base, First, Last) #define LAST_STMT_RANGE(BASE, FIRST, LAST) #define EXPR(CLASS, PARENT) #define ABSTRACT_STMT(STMT) #include "clang/AST/StmtNodes.inc" case Expr::NoStmtClass: llvm_unreachable("Invalid class for expression"); } llvm_unreachable("Bogus StmtClass"); } } // end namespace clang
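// Worked example (hypothetical declarations, for illustration). Under the
// [except.spec]p3 rules applied above, all non-throwing forms are mutually
// compatible regardless of spelling:
//   void f() throw();           // dynamic, empty
//   void f() noexcept;          // noexcept
//   void f() noexcept(true);    // computed noexcept yielding true
// while two dynamic specifications must denote the same adjusted set:
//   void g() throw(int, float);
//   void g() throw(float, int); // equivalent: same set, order irrelevant
//   void g() throw(int);        // not equivalent: different set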
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaType.cpp
//===--- SemaType.cpp - Semantic Analysis for Types -----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements type-related semantic analysis. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "TypeLocBuilder.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeLocVisitor.h" #include "clang/Lex/Preprocessor.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Parse/ParseDiagnostic.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" // HLSL Change #include "dxc/Support/Global.h" // HLSL Change #include "clang/Sema/SemaHLSL.h" // HLSL Change #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/Template.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/Support/ErrorHandling.h" using namespace clang; enum TypeDiagSelector { TDS_Function, TDS_Pointer, TDS_ObjCObjOrBlock }; /// isOmittedBlockReturnType - Return true if this declarator is missing a /// return type because this is a omitted return type on a block literal. static bool isOmittedBlockReturnType(const Declarator &D) { if (D.getContext() != Declarator::BlockLiteralContext || D.getDeclSpec().hasTypeSpecifier()) return false; if (D.getNumTypeObjects() == 0) return true; // ^{ ... } if (D.getNumTypeObjects() == 1 && D.getTypeObject(0).Kind == DeclaratorChunk::Function) return true; // ^(int X, float Y) { ... } return false; } /// diagnoseBadTypeAttribute - Diagnoses a type attribute which /// doesn't apply to the given type. static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr, QualType type) { TypeDiagSelector WhichType; bool useExpansionLoc = true; switch (attr.getKind()) { case AttributeList::AT_ObjCGC: WhichType = TDS_Pointer; break; case AttributeList::AT_ObjCOwnership: WhichType = TDS_ObjCObjOrBlock; break; default: // Assume everything else was a function attribute. WhichType = TDS_Function; useExpansionLoc = false; break; } SourceLocation loc = attr.getLoc(); StringRef name = attr.getName()->getName(); // The GC attributes are usually written with macros; special-case them. IdentifierInfo *II = attr.isArgIdent(0) ? attr.getArgAsIdent(0)->Ident : nullptr; if (useExpansionLoc && loc.isMacroID() && II) { if (II->isStr("strong")) { if (S.findMacroSpelling(loc, "__strong")) name = "__strong"; } else if (II->isStr("weak")) { if (S.findMacroSpelling(loc, "__weak")) name = "__weak"; } } S.Diag(loc, diag::warn_type_attribute_wrong_type) << name << WhichType << type; } // objc_gc applies to Objective-C pointers or, otherwise, to the // smallest available pointer type (i.e. 'void*' in 'void**'). #define OBJC_POINTER_TYPE_ATTRS_CASELIST \ case AttributeList::AT_ObjCGC: \ case AttributeList::AT_ObjCOwnership // Function type attributes. 
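// (Illustrative note: these are calling-convention and related attributes,
// e.g. the __stdcall in 'void (__stdcall *fp)();', which must land on a
// function chunk rather than on the pointer or the declaration as a whole.)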
#define FUNCTION_TYPE_ATTRS_CASELIST \ case AttributeList::AT_NoReturn: \ case AttributeList::AT_CDecl: \ case AttributeList::AT_FastCall: \ case AttributeList::AT_StdCall: \ case AttributeList::AT_ThisCall: \ case AttributeList::AT_Pascal: \ case AttributeList::AT_VectorCall: \ case AttributeList::AT_MSABI: \ case AttributeList::AT_SysVABI: \ case AttributeList::AT_Regparm: \ case AttributeList::AT_Pcs: \ case AttributeList::AT_IntelOclBicc // Microsoft-specific type qualifiers. #define MS_TYPE_ATTRS_CASELIST \ case AttributeList::AT_Ptr32: \ case AttributeList::AT_Ptr64: \ case AttributeList::AT_SPtr: \ case AttributeList::AT_UPtr // Nullability qualifiers. #define NULLABILITY_TYPE_ATTRS_CASELIST \ case AttributeList::AT_TypeNonNull: \ case AttributeList::AT_TypeNullable: \ case AttributeList::AT_TypeNullUnspecified namespace { /// An object which stores processing state for the entire /// GetTypeForDeclarator process. class TypeProcessingState { Sema &sema; /// The declarator being processed. Declarator &declarator; /// The index of the declarator chunk we're currently processing. /// May be the total number of valid chunks, indicating the /// DeclSpec. unsigned chunkIndex; /// Whether there are non-trivial modifications to the decl spec. bool trivial; /// Whether we saved the attributes in the decl spec. bool hasSavedAttrs; // HLSL Change: add Scope /// The scope for the declarator Scope* scope; /// The original set of attributes on the DeclSpec. SmallVector<AttributeList*, 2> savedAttrs; /// A list of attributes to diagnose the uselessness of when the /// processing is complete. SmallVector<AttributeList*, 2> ignoredTypeAttrs; public: TypeProcessingState(Sema &sema, Declarator &declarator, Scope* S = nullptr) // HLSL Change: add Scope : sema(sema), declarator(declarator), chunkIndex(declarator.getNumTypeObjects()), trivial(true), hasSavedAttrs(false), scope(S) {} // HLSL Change: add Scope // HLSL Change: add getScope Scope* getScope() const { return scope; } Sema &getSema() const { return sema; } Declarator &getDeclarator() const { return declarator; } bool isProcessingDeclSpec() const { return chunkIndex == declarator.getNumTypeObjects(); } unsigned getCurrentChunkIndex() const { return chunkIndex; } void setCurrentChunkIndex(unsigned idx) { assert(idx <= declarator.getNumTypeObjects()); chunkIndex = idx; } AttributeList *&getCurrentAttrListRef() const { if (isProcessingDeclSpec()) return getMutableDeclSpec().getAttributes().getListRef(); return declarator.getTypeObject(chunkIndex).getAttrListRef(); } /// Save the current set of attributes on the DeclSpec. void saveDeclSpecAttrs() { // Don't try to save them multiple times. if (hasSavedAttrs) return; DeclSpec &spec = getMutableDeclSpec(); for (AttributeList *attr = spec.getAttributes().getList(); attr; attr = attr->getNext()) savedAttrs.push_back(attr); trivial &= savedAttrs.empty(); hasSavedAttrs = true; } /// Record that we had nowhere to put the given type attribute. /// We will diagnose such attributes later. void addIgnoredTypeAttr(AttributeList &attr) { ignoredTypeAttrs.push_back(&attr); } /// Diagnose all the ignored type attributes, given that the /// declarator worked out to the given type. 
void diagnoseIgnoredTypeAttrs(QualType type) const { for (SmallVectorImpl<AttributeList*>::const_iterator i = ignoredTypeAttrs.begin(), e = ignoredTypeAttrs.end(); i != e; ++i) diagnoseBadTypeAttribute(getSema(), **i, type); } ~TypeProcessingState() { if (trivial) return; restoreDeclSpecAttrs(); } private: DeclSpec &getMutableDeclSpec() const { return const_cast<DeclSpec&>(declarator.getDeclSpec()); } void restoreDeclSpecAttrs() { assert(hasSavedAttrs); if (savedAttrs.empty()) { getMutableDeclSpec().getAttributes().set(nullptr); return; } getMutableDeclSpec().getAttributes().set(savedAttrs[0]); for (unsigned i = 0, e = savedAttrs.size() - 1; i != e; ++i) savedAttrs[i]->setNext(savedAttrs[i+1]); savedAttrs.back()->setNext(nullptr); } }; } static void spliceAttrIntoList(AttributeList &attr, AttributeList *&head) { attr.setNext(head); head = &attr; } static void spliceAttrOutOfList(AttributeList &attr, AttributeList *&head) { if (head == &attr) { head = attr.getNext(); return; } AttributeList *cur = head; while (true) { assert(cur && cur->getNext() && "ran out of attrs?"); if (cur->getNext() == &attr) { cur->setNext(attr.getNext()); return; } cur = cur->getNext(); } } static void moveAttrFromListToList(AttributeList &attr, AttributeList *&fromList, AttributeList *&toList) { spliceAttrOutOfList(attr, fromList); spliceAttrIntoList(attr, toList); } /// The location of a type attribute. enum TypeAttrLocation { /// The attribute is in the decl-specifier-seq. TAL_DeclSpec, /// The attribute is part of a DeclaratorChunk. TAL_DeclChunk, /// The attribute is immediately after the declaration's name. TAL_DeclName }; static void processTypeAttrs(TypeProcessingState &state, QualType &type, TypeAttrLocation TAL, AttributeList *attrs); static bool handleFunctionTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType &type); static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &state, AttributeList &attr, QualType &type); static bool handleObjCGCTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType &type); static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType &type); static bool handleObjCPointerTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType &type) { if (attr.getKind() == AttributeList::AT_ObjCGC) return handleObjCGCTypeAttr(state, attr, type); assert(attr.getKind() == AttributeList::AT_ObjCOwnership); return handleObjCOwnershipTypeAttr(state, attr, type); } /// Given the index of a declarator chunk, check whether that chunk /// directly specifies the return type of a function and, if so, find /// an appropriate place for it. /// /// \param i - a notional index which the search will start /// immediately inside /// /// \param onlyBlockPointers Whether we should only look into block /// pointer types (vs. all pointer types). static DeclaratorChunk *maybeMovePastReturnType(Declarator &declarator, unsigned i, bool onlyBlockPointers) { assert(i <= declarator.getNumTypeObjects()); DeclaratorChunk *result = nullptr; // First, look inwards past parens for a function declarator. for (; i != 0; --i) { DeclaratorChunk &fnChunk = declarator.getTypeObject(i-1); switch (fnChunk.Kind) { case DeclaratorChunk::Paren: continue; // If we find anything except a function, bail out. 
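// (Illustrative walk-through: for 'int (*fp)(void)' the search skips the
// paren chunk, finds the function chunk, then continues toward the
// identifier and returns the pointer chunk; with onlyBlockPointers set, a
// plain pointer is skipped instead and the search comes up empty.)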
case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Array: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: return result; // If we do find a function declarator, scan inwards from that, // looking for a (block-)pointer declarator. case DeclaratorChunk::Function: for (--i; i != 0; --i) { DeclaratorChunk &ptrChunk = declarator.getTypeObject(i-1); switch (ptrChunk.Kind) { case DeclaratorChunk::Paren: case DeclaratorChunk::Array: case DeclaratorChunk::Function: case DeclaratorChunk::Reference: continue; case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Pointer: if (onlyBlockPointers) continue; LLVM_FALLTHROUGH; // HLSL Change case DeclaratorChunk::BlockPointer: result = &ptrChunk; goto continue_outer; } llvm_unreachable("bad declarator chunk kind"); } // If we run out of declarators doing that, we're done. return result; } llvm_unreachable("bad declarator chunk kind"); // Okay, reconsider from our new point. continue_outer: ; } // Ran out of chunks, bail out. return result; } /// Given that an objc_gc attribute was written somewhere on a /// declaration *other* than on the declarator itself (for which, use /// distributeObjCPointerTypeAttrFromDeclarator), and given that it /// didn't apply in whatever position it was written in, try to move /// it to a more appropriate position. static void distributeObjCPointerTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType type) { Declarator &declarator = state.getDeclarator(); // Move it to the outermost normal or block pointer declarator. for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) { DeclaratorChunk &chunk = declarator.getTypeObject(i-1); switch (chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: { // But don't move an ARC ownership attribute to the return type // of a block. DeclaratorChunk *destChunk = nullptr; if (state.isProcessingDeclSpec() && attr.getKind() == AttributeList::AT_ObjCOwnership) destChunk = maybeMovePastReturnType(declarator, i - 1, /*onlyBlockPointers=*/true); if (!destChunk) destChunk = &chunk; moveAttrFromListToList(attr, state.getCurrentAttrListRef(), destChunk->getAttrListRef()); return; } case DeclaratorChunk::Paren: case DeclaratorChunk::Array: continue; // We may be starting at the return type of a block. case DeclaratorChunk::Function: if (state.isProcessingDeclSpec() && attr.getKind() == AttributeList::AT_ObjCOwnership) { if (DeclaratorChunk *dest = maybeMovePastReturnType( declarator, i, /*onlyBlockPointers=*/true)) { moveAttrFromListToList(attr, state.getCurrentAttrListRef(), dest->getAttrListRef()); return; } } goto error; // Don't walk through these. case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: goto error; } } error: diagnoseBadTypeAttribute(state.getSema(), attr, type); } /// Distribute an objc_gc type attribute that was written on the /// declarator. static void distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state, AttributeList &attr, QualType &declSpecType) { Declarator &declarator = state.getDeclarator(); // objc_gc goes on the innermost pointer to something that's not a // pointer. 
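// (Illustrative restatement of the rule above: in 'void **q' the attribute
// belongs on the chunk producing the inner 'void *', the innermost pointer
// level, not on the pointer that 'q' itself is.)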
unsigned innermost = -1U; bool considerDeclSpec = true; for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) { DeclaratorChunk &chunk = declarator.getTypeObject(i); switch (chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: innermost = i; continue; case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: case DeclaratorChunk::Paren: case DeclaratorChunk::Array: continue; case DeclaratorChunk::Function: considerDeclSpec = false; goto done; } } done: // That might actually be the decl spec if we weren't blocked by // anything in the declarator. if (considerDeclSpec) { if (handleObjCPointerTypeAttr(state, attr, declSpecType)) { // Splice the attribute into the decl spec. Prevents the // attribute from being applied multiple times and gives // the source-location-filler something to work with. state.saveDeclSpecAttrs(); moveAttrFromListToList(attr, declarator.getAttrListRef(), declarator.getMutableDeclSpec().getAttributes().getListRef()); return; } } // Otherwise, if we found an appropriate chunk, splice the attribute // into it. if (innermost != -1U) { moveAttrFromListToList(attr, declarator.getAttrListRef(), declarator.getTypeObject(innermost).getAttrListRef()); return; } // Otherwise, diagnose when we're done building the type. spliceAttrOutOfList(attr, declarator.getAttrListRef()); state.addIgnoredTypeAttr(attr); } /// A function type attribute was written somewhere in a declaration /// *other* than on the declarator itself or in the decl spec. Given /// that it didn't apply in whatever position it was written in, try /// to move it to a more appropriate position. static void distributeFunctionTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType type) { Declarator &declarator = state.getDeclarator(); // Try to push the attribute from the return type of a function to // the function itself. for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) { DeclaratorChunk &chunk = declarator.getTypeObject(i-1); switch (chunk.Kind) { case DeclaratorChunk::Function: moveAttrFromListToList(attr, state.getCurrentAttrListRef(), chunk.getAttrListRef()); return; case DeclaratorChunk::Paren: case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Array: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: continue; } } diagnoseBadTypeAttribute(state.getSema(), attr, type); } /// Try to distribute a function type attribute to the innermost /// function chunk or type. Returns true if the attribute was /// distributed, false if no location was found. static bool distributeFunctionTypeAttrToInnermost(TypeProcessingState &state, AttributeList &attr, AttributeList *&attrList, QualType &declSpecType) { Declarator &declarator = state.getDeclarator(); // Put it on the innermost function chunk, if there is one. for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) { DeclaratorChunk &chunk = declarator.getTypeObject(i); if (chunk.Kind != DeclaratorChunk::Function) continue; moveAttrFromListToList(attr, attrList, chunk.getAttrListRef()); return true; } return handleFunctionTypeAttr(state, attr, declSpecType); } /// A function type attribute was written in the decl spec. Try to /// apply it somewhere. static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state, AttributeList &attr, QualType &declSpecType) { state.saveDeclSpecAttrs(); // C++11 attributes before the decl specifiers actually appertain to // the declarators. Move them straight there. 
We don't support the // 'put them wherever you like' semantics we allow for GNU attributes. if (attr.isCXX11Attribute()) { moveAttrFromListToList(attr, state.getCurrentAttrListRef(), state.getDeclarator().getAttrListRef()); return; } // Try to distribute to the innermost. if (distributeFunctionTypeAttrToInnermost(state, attr, state.getCurrentAttrListRef(), declSpecType)) return; // If that failed, diagnose the bad attribute when the declarator is // fully built. state.addIgnoredTypeAttr(attr); } /// A function type attribute was written on the declarator. Try to /// apply it somewhere. static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state, AttributeList &attr, QualType &declSpecType) { Declarator &declarator = state.getDeclarator(); // Try to distribute to the innermost. if (distributeFunctionTypeAttrToInnermost(state, attr, declarator.getAttrListRef(), declSpecType)) return; // If that failed, diagnose the bad attribute when the declarator is // fully built. spliceAttrOutOfList(attr, declarator.getAttrListRef()); state.addIgnoredTypeAttr(attr); } /// \brief Given that there are attributes written on the declarator /// itself, try to distribute any type attributes to the appropriate /// declarator chunk. /// /// These are attributes like the following: /// int f ATTR; /// int (f ATTR)(); /// but not necessarily this: /// int f() ATTR; static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state, QualType &declSpecType) { // Collect all the type attributes from the declarator itself. assert(state.getDeclarator().getAttributes() && "declarator has no attrs!"); AttributeList *attr = state.getDeclarator().getAttributes(); AttributeList *next; do { next = attr->getNext(); // Do not distribute C++11 attributes. They have strict rules for what // they appertain to. if (attr->isCXX11Attribute()) continue; switch (attr->getKind()) { OBJC_POINTER_TYPE_ATTRS_CASELIST: distributeObjCPointerTypeAttrFromDeclarator(state, *attr, declSpecType); break; case AttributeList::AT_NSReturnsRetained: if (!state.getSema().getLangOpts().ObjCAutoRefCount) break; distributeFunctionTypeAttrFromDeclarator(state, *attr, declSpecType); // HLSL Change break; // HLSL Change - avoid fallthrough FUNCTION_TYPE_ATTRS_CASELIST: distributeFunctionTypeAttrFromDeclarator(state, *attr, declSpecType); break; MS_TYPE_ATTRS_CASELIST: // Microsoft type attributes cannot go after the declarator-id. continue; NULLABILITY_TYPE_ATTRS_CASELIST: // Nullability specifiers cannot go after the declarator-id. // Objective-C __kindof does not get distributed. case AttributeList::AT_ObjCKindOf: continue; default: break; } } while ((attr = next)); } /// Add a synthetic '()' to a block-literal declarator if it is /// required, given the return type. static void maybeSynthesizeBlockSignature(TypeProcessingState &state, QualType declSpecType) { Declarator &declarator = state.getDeclarator(); // First, check whether the declarator would produce a function, // i.e. whether the innermost semantic chunk is a function. if (declarator.isFunctionDeclarator()) { // If so, make that declarator a prototyped declarator. declarator.getFunctionTypeInfo().hasPrototype = true; return; } // If there are any type objects, the type as written won't name a // function, regardless of the decl spec type. This is because a // block signature declarator is always an abstract-declarator, and // abstract-declarators can't just be parentheses chunks. 
Therefore // we need to build a function chunk unless there are no type // objects and the decl spec type is a function. if (!declarator.getNumTypeObjects() && declSpecType->isFunctionType()) return; // Note that there *are* cases with invalid declarators where // declarators consist solely of parentheses. In general, these // occur only in failed efforts to make function declarators, so // faking up the function chunk is still the right thing to do. // Otherwise, we need to fake up a function declarator. SourceLocation loc = declarator.getLocStart(); // ...and *prepend* it to the declarator. SourceLocation NoLoc; declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction( /*HasProto=*/true, /*IsAmbiguous=*/false, /*LParenLoc=*/NoLoc, /*ArgInfo=*/nullptr, /*NumArgs=*/0, /*EllipsisLoc=*/NoLoc, /*RParenLoc=*/NoLoc, /*TypeQuals=*/0, /*RefQualifierIsLvalueRef=*/true, /*RefQualifierLoc=*/NoLoc, /*ConstQualifierLoc=*/NoLoc, /*VolatileQualifierLoc=*/NoLoc, /*RestrictQualifierLoc=*/NoLoc, /*MutableLoc=*/NoLoc, EST_None, /*ESpecLoc=*/NoLoc, /*Exceptions=*/nullptr, /*ExceptionRanges=*/nullptr, /*NumExceptions=*/0, /*NoexceptExpr=*/nullptr, /*ExceptionSpecTokens=*/nullptr, loc, loc, declarator)); // For consistency, make sure the state still has us as processing // the decl spec. assert(state.getCurrentChunkIndex() == declarator.getNumTypeObjects() - 1); state.setCurrentChunkIndex(declarator.getNumTypeObjects()); } static void diagnoseAndRemoveTypeQualifiers(Sema &S, const DeclSpec &DS, unsigned &TypeQuals, QualType TypeSoFar, unsigned RemoveTQs, unsigned DiagID) { // If this occurs outside a template instantiation, warn the user about // it; they probably didn't mean to specify a redundant qualifier. typedef std::pair<DeclSpec::TQ, SourceLocation> QualLoc; for (QualLoc Qual : {QualLoc(DeclSpec::TQ_const, DS.getConstSpecLoc()), QualLoc(DeclSpec::TQ_volatile, DS.getVolatileSpecLoc()), QualLoc(DeclSpec::TQ_atomic, DS.getAtomicSpecLoc())}) { if (!(RemoveTQs & Qual.first)) continue; if (S.ActiveTemplateInstantiations.empty()) { if (TypeQuals & Qual.first) S.Diag(Qual.second, DiagID) << DeclSpec::getSpecifierName(Qual.first) << TypeSoFar << FixItHint::CreateRemoval(Qual.second); } TypeQuals &= ~Qual.first; } } /// Apply Objective-C type arguments to the given type. static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type, ArrayRef<TypeSourceInfo *> typeArgs, SourceRange typeArgsRange, bool failOnError = false) { // We can only apply type arguments to an Objective-C class type. const auto *objcObjectType = type->getAs<ObjCObjectType>(); if (!objcObjectType || !objcObjectType->getInterface()) { S.Diag(loc, diag::err_objc_type_args_non_class) << type << typeArgsRange; if (failOnError) return QualType(); return type; } // The class type must be parameterized. ObjCInterfaceDecl *objcClass = objcObjectType->getInterface(); ObjCTypeParamList *typeParams = objcClass->getTypeParamList(); if (!typeParams) { S.Diag(loc, diag::err_objc_type_args_non_parameterized_class) << objcClass->getDeclName() << FixItHint::CreateRemoval(typeArgsRange); if (failOnError) return QualType(); return type; } // The type must not already be specialized. if (objcObjectType->isSpecialized()) { S.Diag(loc, diag::err_objc_type_args_specialized_class) << type << FixItHint::CreateRemoval(typeArgsRange); if (failOnError) return QualType(); return type; } // Check the type arguments. 
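// (Illustration: for a specialization written as 'NSArray<NSString *> *',
// each argument below is matched positionally against the class's type
// parameters and validated against that parameter's bound.)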
  SmallVector<QualType, 4> finalTypeArgs;
  unsigned numTypeParams = typeParams->size();
  bool anyPackExpansions = false;
  for (unsigned i = 0, n = typeArgs.size(); i != n; ++i) {
    TypeSourceInfo *typeArgInfo = typeArgs[i];
    QualType typeArg = typeArgInfo->getType();

    // Type arguments cannot explicitly specify nullability.
    if (auto nullability = AttributedType::stripOuterNullability(typeArg)) {
      SourceLocation nullabilityLoc
        = typeArgInfo->getTypeLoc().findNullabilityLoc();
      SourceLocation diagLoc = nullabilityLoc.isValid()? nullabilityLoc
        : typeArgInfo->getTypeLoc().getLocStart();
      S.Diag(diagLoc, diag::err_type_arg_explicit_nullability)
        << typeArg
        << FixItHint::CreateRemoval(nullabilityLoc);
    }

    finalTypeArgs.push_back(typeArg);

    if (typeArg->getAs<PackExpansionType>())
      anyPackExpansions = true;

    // Find the corresponding type parameter, if there is one.
    ObjCTypeParamDecl *typeParam = nullptr;
    if (!anyPackExpansions) {
      if (i < numTypeParams) {
        typeParam = typeParams->begin()[i];
      } else {
        // Too many arguments.
        S.Diag(loc, diag::err_objc_type_args_wrong_arity)
          << false
          << objcClass->getDeclName()
          << (unsigned)typeArgs.size()
          << numTypeParams;
        S.Diag(objcClass->getLocation(), diag::note_previous_decl)
          << objcClass;

        if (failOnError)
          return QualType();

        return type;
      }
    }

    // Objective-C object pointer types must be substitutable for the bounds.
    if (const auto *typeArgObjC = typeArg->getAs<ObjCObjectPointerType>()) {
      // If we don't have a type parameter to match against, assume
      // everything is fine. There was a prior pack expansion that
      // means we won't be able to match anything.
      if (!typeParam) {
        assert(anyPackExpansions && "Too many arguments?");
        continue;
      }

      // Retrieve the bound.
      QualType bound = typeParam->getUnderlyingType();
      const auto *boundObjC = bound->getAs<ObjCObjectPointerType>();

      // Determine whether the type argument is substitutable for the bound.
      if (typeArgObjC->isObjCIdType()) {
        // When the type argument is 'id', the only acceptable type
        // parameter bound is 'id'.
        if (boundObjC->isObjCIdType())
          continue;
      } else if (S.Context.canAssignObjCInterfaces(boundObjC, typeArgObjC)) {
        // Otherwise, we follow the assignability rules.
        continue;
      }

      // Diagnose the mismatch.
      S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
             diag::err_objc_type_arg_does_not_match_bound)
        << typeArg << bound << typeParam->getDeclName();
      S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
        << typeParam->getDeclName();

      if (failOnError)
        return QualType();

      return type;
    }

    // Block pointer types are permitted for unqualified 'id' bounds.
    if (typeArg->isBlockPointerType()) {
      // If we don't have a type parameter to match against, assume
      // everything is fine. There was a prior pack expansion that
      // means we won't be able to match anything.
      if (!typeParam) {
        assert(anyPackExpansions && "Too many arguments?");
        continue;
      }

      // Retrieve the bound.
      QualType bound = typeParam->getUnderlyingType();
      if (bound->isBlockCompatibleObjCPointerType(S.Context))
        continue;

      // Diagnose the mismatch.
      S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
             diag::err_objc_type_arg_does_not_match_bound)
        << typeArg << bound << typeParam->getDeclName();
      S.Diag(typeParam->getLocation(), diag::note_objc_type_param_here)
        << typeParam->getDeclName();

      if (failOnError)
        return QualType();

      return type;
    }

    // Dependent types will be checked at instantiation time.
    if (typeArg->isDependentType()) {
      continue;
    }

    // Diagnose non-id-compatible type arguments.
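    // For example, 'NSArray<int>' reaches this point: 'int' is neither an
    // Objective-C object pointer, nor a block pointer, nor dependent, so it
    // cannot satisfy any type bound. (Illustrative comment, not from the
    // original source.)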
    S.Diag(typeArgInfo->getTypeLoc().getLocStart(),
           diag::err_objc_type_arg_not_id_compatible)
      << typeArg
      << typeArgInfo->getTypeLoc().getSourceRange();

    if (failOnError)
      return QualType();

    return type;
  }

  // Make sure we didn't have the wrong number of arguments.
  if (!anyPackExpansions && finalTypeArgs.size() != numTypeParams) {
    S.Diag(loc, diag::err_objc_type_args_wrong_arity)
      << (typeArgs.size() < typeParams->size())
      << objcClass->getDeclName()
      << (unsigned)finalTypeArgs.size()
      << (unsigned)numTypeParams;
    S.Diag(objcClass->getLocation(), diag::note_previous_decl)
      << objcClass;

    if (failOnError)
      return QualType();

    return type;
  }

  // Success. Form the specialized type.
  return S.Context.getObjCObjectType(type, finalTypeArgs, { }, false);
}

/// Apply Objective-C protocol qualifiers to the given type.
static QualType applyObjCProtocolQualifiers(
                  Sema &S, SourceLocation loc, SourceRange range, QualType type,
                  ArrayRef<ObjCProtocolDecl *> protocols,
                  const SourceLocation *protocolLocs,
                  bool failOnError = false) {
  ASTContext &ctx = S.Context;
  if (const ObjCObjectType *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return ctx.getObjCObjectType(objT->getBaseType(),
                                 objT->getTypeArgsAsWritten(),
                                 protocols,
                                 objT->isKindOfTypeAsWritten());
  }

  if (type->isObjCObjectType()) {
    // Silently overwrite any existing protocol qualifiers.
    // TODO: determine whether that's the right thing to do.

    // FIXME: Check for protocols to which the class type is already
    // known to conform.
    return ctx.getObjCObjectType(type, { }, protocols, false);
  }

  // id<protocol-list>
  if (type->isObjCIdType()) {
    const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
    type = ctx.getObjCObjectType(ctx.ObjCBuiltinIdTy, { }, protocols,
                                 objPtr->isKindOfType());
    return ctx.getObjCObjectPointerType(type);
  }

  // Class<protocol-list>
  if (type->isObjCClassType()) {
    const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
    type = ctx.getObjCObjectType(ctx.ObjCBuiltinClassTy, { }, protocols,
                                 objPtr->isKindOfType());
    return ctx.getObjCObjectPointerType(type);
  }

  S.Diag(loc, diag::err_invalid_protocol_qualifiers)
    << range;

  if (failOnError)
    return QualType();

  return type;
}

QualType Sema::BuildObjCObjectType(QualType BaseType,
                                   SourceLocation Loc,
                                   SourceLocation TypeArgsLAngleLoc,
                                   ArrayRef<TypeSourceInfo *> TypeArgs,
                                   SourceLocation TypeArgsRAngleLoc,
                                   SourceLocation ProtocolLAngleLoc,
                                   ArrayRef<ObjCProtocolDecl *> Protocols,
                                   ArrayRef<SourceLocation> ProtocolLocs,
                                   SourceLocation ProtocolRAngleLoc,
                                   bool FailOnError) {
  QualType Result = BaseType;
  if (!TypeArgs.empty()) {
    Result = applyObjCTypeArgs(*this, Loc, Result, TypeArgs,
                               SourceRange(TypeArgsLAngleLoc,
                                           TypeArgsRAngleLoc),
                               FailOnError);
    if (FailOnError && Result.isNull())
      return QualType();
  }

  if (!Protocols.empty()) {
    Result = applyObjCProtocolQualifiers(*this, Loc,
                                         SourceRange(ProtocolLAngleLoc,
                                                     ProtocolRAngleLoc),
                                         Result, Protocols,
                                         ProtocolLocs.data(),
                                         FailOnError);
    if (FailOnError && Result.isNull())
      return QualType();
  }

  return Result;
}

TypeResult Sema::actOnObjCProtocolQualifierType(
             SourceLocation lAngleLoc,
             ArrayRef<Decl *> protocols,
             ArrayRef<SourceLocation> protocolLocs,
             SourceLocation rAngleLoc) {
  // Form id<protocol-list>.
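  // For example, 'id<NSCopying, NSCoding>' becomes a pointer to an
  // ObjCObjectType whose base is the built-in 'id' and whose protocol list
  // holds NSCopying and NSCoding. (Illustrative comment, not from the
  // original source.)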
  QualType Result = Context.getObjCObjectType(
                      Context.ObjCBuiltinIdTy, { },
                      llvm::makeArrayRef(
                        (ObjCProtocolDecl * const *)protocols.data(),
                        protocols.size()),
                      false);
  Result = Context.getObjCObjectPointerType(Result);

  TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
  TypeLoc ResultTL = ResultTInfo->getTypeLoc();

  auto ObjCObjectPointerTL = ResultTL.castAs<ObjCObjectPointerTypeLoc>();
  ObjCObjectPointerTL.setStarLoc(SourceLocation()); // implicit

  auto ObjCObjectTL = ObjCObjectPointerTL.getPointeeLoc()
                        .castAs<ObjCObjectTypeLoc>();
  ObjCObjectTL.setHasBaseTypeAsWritten(false);
  ObjCObjectTL.getBaseLoc().initialize(Context, SourceLocation());

  // No type arguments.
  ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
  ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());

  // Fill in protocol qualifiers.
  ObjCObjectTL.setProtocolLAngleLoc(lAngleLoc);
  ObjCObjectTL.setProtocolRAngleLoc(rAngleLoc);
  for (unsigned i = 0, n = protocols.size(); i != n; ++i)
    ObjCObjectTL.setProtocolLoc(i, protocolLocs[i]);

  // We're done. Return the completed type to the parser.
  return CreateParsedType(Result, ResultTInfo);
}

TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
             Scope *S,
             SourceLocation Loc,
             ParsedType BaseType,
             SourceLocation TypeArgsLAngleLoc,
             ArrayRef<ParsedType> TypeArgs,
             SourceLocation TypeArgsRAngleLoc,
             SourceLocation ProtocolLAngleLoc,
             ArrayRef<Decl *> Protocols,
             ArrayRef<SourceLocation> ProtocolLocs,
             SourceLocation ProtocolRAngleLoc) {
  TypeSourceInfo *BaseTypeInfo = nullptr;
  QualType T = GetTypeFromParser(BaseType, &BaseTypeInfo);
  if (T.isNull())
    return true;

  // Handle missing type-source info.
  if (!BaseTypeInfo)
    BaseTypeInfo = Context.getTrivialTypeSourceInfo(T, Loc);

  // Extract type arguments.
  SmallVector<TypeSourceInfo *, 4> ActualTypeArgInfos;
  for (unsigned i = 0, n = TypeArgs.size(); i != n; ++i) {
    TypeSourceInfo *TypeArgInfo = nullptr;
    QualType TypeArg = GetTypeFromParser(TypeArgs[i], &TypeArgInfo);
    if (TypeArg.isNull()) {
      ActualTypeArgInfos.clear();
      break;
    }

    assert(TypeArgInfo && "No type source info?");
    ActualTypeArgInfos.push_back(TypeArgInfo);
  }

  // Build the object type.
  QualType Result = BuildObjCObjectType(
      T, BaseTypeInfo->getTypeLoc().getSourceRange().getBegin(),
      TypeArgsLAngleLoc, ActualTypeArgInfos, TypeArgsRAngleLoc,
      ProtocolLAngleLoc,
      llvm::makeArrayRef((ObjCProtocolDecl * const *)Protocols.data(),
                         Protocols.size()),
      ProtocolLocs, ProtocolRAngleLoc,
      /*FailOnError=*/false);

  if (Result == T)
    return BaseType;

  // Create source information for this type.
  TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
  TypeLoc ResultTL = ResultTInfo->getTypeLoc();

  // For id<Proto1, Proto2> or Class<Proto1, Proto2>, we'll have an
  // object pointer type. Fill in source information for it.
  if (auto ObjCObjectPointerTL = ResultTL.getAs<ObjCObjectPointerTypeLoc>()) {
    // The '*' is implicit.
    ObjCObjectPointerTL.setStarLoc(SourceLocation());
    ResultTL = ObjCObjectPointerTL.getPointeeLoc();
  }

  auto ObjCObjectTL = ResultTL.castAs<ObjCObjectTypeLoc>();

  // Type argument information.
  if (ObjCObjectTL.getNumTypeArgs() > 0) {
    assert(ObjCObjectTL.getNumTypeArgs() == ActualTypeArgInfos.size());
    ObjCObjectTL.setTypeArgsLAngleLoc(TypeArgsLAngleLoc);
    ObjCObjectTL.setTypeArgsRAngleLoc(TypeArgsRAngleLoc);
    for (unsigned i = 0, n = ActualTypeArgInfos.size(); i != n; ++i)
      ObjCObjectTL.setTypeArgTInfo(i, ActualTypeArgInfos[i]);
  } else {
    ObjCObjectTL.setTypeArgsLAngleLoc(SourceLocation());
    ObjCObjectTL.setTypeArgsRAngleLoc(SourceLocation());
  }

  // Protocol qualifier information.
  if (ObjCObjectTL.getNumProtocols() > 0) {
    assert(ObjCObjectTL.getNumProtocols() == Protocols.size());
    ObjCObjectTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
    ObjCObjectTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
    for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
      ObjCObjectTL.setProtocolLoc(i, ProtocolLocs[i]);
  } else {
    ObjCObjectTL.setProtocolLAngleLoc(SourceLocation());
    ObjCObjectTL.setProtocolRAngleLoc(SourceLocation());
  }

  // Base type.
  ObjCObjectTL.setHasBaseTypeAsWritten(true);
  if (ObjCObjectTL.getType() == T)
    ObjCObjectTL.getBaseLoc().initializeFullCopy(BaseTypeInfo->getTypeLoc());
  else
    ObjCObjectTL.getBaseLoc().initialize(Context, Loc);

  // We're done. Return the completed type to the parser.
  return CreateParsedType(Result, ResultTInfo);
}

/// \brief Convert the specified declspec to the appropriate type
/// object.
/// \param state Specifies the declarator containing the declaration specifier
/// to be converted, along with other associated processing state.
/// \returns The type described by the declaration specifiers.  This function
/// never returns null.
static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
  // FIXME: Should move the logic from DeclSpec::Finish to here for validity
  // checking.
  Sema &S = state.getSema();
  Declarator &declarator = state.getDeclarator();
  const DeclSpec &DS = declarator.getDeclSpec();
  SourceLocation DeclLoc = declarator.getIdentifierLoc();
  if (DeclLoc.isInvalid())
    DeclLoc = DS.getLocStart();

  ASTContext &Context = S.Context;

  QualType Result;
  switch (DS.getTypeSpecType()) {
  case DeclSpec::TST_void:
    Result = Context.VoidTy;
    break;
  case DeclSpec::TST_char:
    if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
      Result = Context.CharTy;
    else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed)
      Result = Context.SignedCharTy;
    else {
      assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
             "Unknown TSS value");
      Result = Context.UnsignedCharTy;
    }
    break;
  case DeclSpec::TST_wchar:
    if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
      Result = Context.WCharTy;
    else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
      S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
        << DS.getSpecifierName(DS.getTypeSpecType(),
                               Context.getPrintingPolicy());
      Result = Context.getSignedWCharType();
    } else {
      assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
             "Unknown TSS value");
      S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
        << DS.getSpecifierName(DS.getTypeSpecType(),
                               Context.getPrintingPolicy());
      Result = Context.getUnsignedWCharType();
    }
    break;
  case DeclSpec::TST_char16:
    assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
           "Unknown TSS value");
    Result = Context.Char16Ty;
    break;
  case DeclSpec::TST_char32:
    assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
           "Unknown TSS value");
    Result = Context.Char32Ty;
    break;
  case DeclSpec::TST_unspecified:
    // If this is a missing declspec in a block literal return context, then it
    // is inferred from the return statements inside the block.
    // The declspec is always missing in a lambda expr context; it is either
    // specified with a trailing return type or inferred.
    if (S.getLangOpts().CPlusPlus14 &&
        declarator.getContext() == Declarator::LambdaExprContext) {
      // In C++1y, a lambda's implicit return type is 'auto'.
      Result = Context.getAutoDeductType();
      break;
    } else if (declarator.getContext() == Declarator::LambdaExprContext ||
               isOmittedBlockReturnType(declarator)) {
      Result = Context.DependentTy;
      break;
    }

    // Unspecified typespec defaults to int in C90.
    // However, the C90 grammar [C90 6.5] only allows a decl-spec if there
    // was *some* type-specifier, type-qualifier, or storage-class-specifier.
    // If not, emit an extwarn.
    // Note that the one exception to this is function definitions, which are
    // allowed to be completely missing a declspec.  This is handled in the
    // parser already though by it pretending to have seen an 'int' in this
    // case.
    if (S.getLangOpts().ImplicitInt) {
      // In C89 mode, we only warn if there is a completely missing declspec
      // when one is not allowed.
      if (DS.isEmpty()) {
        S.Diag(DeclLoc, diag::ext_missing_declspec)
          << DS.getSourceRange()
          << FixItHint::CreateInsertion(DS.getLocStart(), "int");
      }
    } else if (!DS.hasTypeSpecifier()) {
      // C99 and C++ require a type specifier.  For example, C99 6.7.2p2 says:
      // "At least one type specifier shall be given in the declaration
      // specifiers in each declaration, and in the specifier-qualifier list in
      // each struct declaration and type name."
      if (S.getLangOpts().CPlusPlus) {
        S.Diag(DeclLoc, S.getLangOpts().HLSL
                            ? diag::err_hlsl_missing_type_specifier
                            : diag::err_missing_type_specifier) // HLSL Change
          << DS.getSourceRange();

        // When this occurs in C++ code, often something is very broken with the
        // value being declared, poison it as invalid so we don't get chains of
        // errors.
        declarator.setInvalidType(true);
      } else {
        S.Diag(DeclLoc, diag::ext_missing_type_specifier)
          << DS.getSourceRange();
      }
    }

    LLVM_FALLTHROUGH; // HLSL CHANGE
  case DeclSpec::TST_int: {
    if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
      switch (DS.getTypeSpecWidth()) {
      case DeclSpec::TSW_unspecified:
        Result = Context.IntTy;
        break;
      case DeclSpec::TSW_short:
        Result = Context.ShortTy;
        break;
      case DeclSpec::TSW_long:
        Result = Context.LongTy;
        break;
      case DeclSpec::TSW_longlong:
        Result = Context.LongLongTy;

        // 'long long' is a C99 or C++11 feature.
        if (!S.getLangOpts().C99) {
          if (S.getLangOpts().CPlusPlus)
            S.Diag(DS.getTypeSpecWidthLoc(),
                   S.getLangOpts().CPlusPlus11 ?
                   diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
          else
            S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
        }
        break;
      }
    } else {
      switch (DS.getTypeSpecWidth()) {
      case DeclSpec::TSW_unspecified:
        Result = Context.UnsignedIntTy;
        break;
      case DeclSpec::TSW_short:
        Result = Context.UnsignedShortTy;
        break;
      case DeclSpec::TSW_long:
        Result = Context.UnsignedLongTy;
        break;
      case DeclSpec::TSW_longlong:
        Result = Context.UnsignedLongLongTy;

        // 'long long' is a C99 or C++11 feature.
        if (!S.getLangOpts().C99) {
          if (S.getLangOpts().CPlusPlus)
            S.Diag(DS.getTypeSpecWidthLoc(),
                   S.getLangOpts().CPlusPlus11 ?
                   diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
          else
            S.Diag(DS.getTypeSpecWidthLoc(), diag::ext_c99_longlong);
        }
        break;
      }
    }
    break;
  }
  case DeclSpec::TST_int128:
    if (!S.Context.getTargetInfo().hasInt128Type())
      S.Diag(DS.getTypeSpecTypeLoc(), diag::err_int128_unsupported);
    if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
      Result = Context.UnsignedInt128Ty;
    else
      Result = Context.Int128Ty;
    break;
  case DeclSpec::TST_half: Result = Context.HalfTy; break;
  case DeclSpec::TST_float: Result = Context.FloatTy; break;
  // HLSL Change Starts
  case DeclSpec::TST_halffloat: Result = Context.HalfFloatTy; break;
  case DeclSpec::TST_min16float: Result = Context.Min16FloatTy; break;
  case DeclSpec::TST_min16int: Result = Context.Min16IntTy; break;
  case DeclSpec::TST_min16uint: Result = Context.Min16UIntTy; break;
  case DeclSpec::TST_min10float: Result = Context.Min10FloatTy; break;
  case DeclSpec::TST_min12int: Result = Context.Min12IntTy; break;
  case DeclSpec::TST_int8_4packed: Result = Context.Int8_4PackedTy; break;
  case DeclSpec::TST_uint8_4packed: Result = Context.UInt8_4PackedTy; break;
  // HLSL Change Ends
  case DeclSpec::TST_double:
    if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
      Result = Context.LongDoubleTy;
    else
      Result = Context.DoubleTy;

    if (S.getLangOpts().OpenCL &&
        !((S.getLangOpts().OpenCLVersion >= 120) ||
          S.getOpenCLOptions().cl_khr_fp64)) {
      S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
        << Result << "cl_khr_fp64";
      declarator.setInvalidType(true);
    }
    break;
  case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool
  case DeclSpec::TST_decimal32:    // _Decimal32
  case DeclSpec::TST_decimal64:    // _Decimal64
  case DeclSpec::TST_decimal128:   // _Decimal128
    S.Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported);
    Result = Context.IntTy;
    declarator.setInvalidType(true);
    break;
  case DeclSpec::TST_class:
  case DeclSpec::TST_enum:
  case DeclSpec::TST_union:
  case DeclSpec::TST_struct:
  case DeclSpec::TST_interface: {
    TypeDecl *D = dyn_cast_or_null<TypeDecl>(DS.getRepAsDecl());
    if (!D) {
      // This can happen in C++ with ambiguous lookups.
      Result = Context.IntTy;
      declarator.setInvalidType(true);
      break;
    }

    // If the type is deprecated or unavailable, diagnose it.
    S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeNameLoc());

    assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
           DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!");

    // TypeQuals handled by caller.
    Result = Context.getTypeDeclType(D);

    // In both C and C++, make an ElaboratedType.
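    // For example, 'struct S x;' yields the ElaboratedType 'struct S'
    // wrapping the underlying RecordType for S, preserving the tag keyword
    // as written. (Illustrative comment, not from the original source.)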
    ElaboratedTypeKeyword Keyword
      = ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
    Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result);
    break;
  }
  case DeclSpec::TST_typename: {
    assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
           "Can't handle qualifiers on typedef names yet!");
    Result = S.GetTypeFromParser(DS.getRepAsType());
    if (Result.isNull()) {
      declarator.setInvalidType(true);
    } else if (S.getLangOpts().OpenCL) {
      if (const AtomicType *AT = Result->getAs<AtomicType>()) {
        const BuiltinType *BT = AT->getValueType()->getAs<BuiltinType>();
        bool NoExtTypes = BT && (BT->getKind() == BuiltinType::Int ||
                                 BT->getKind() == BuiltinType::UInt ||
                                 BT->getKind() == BuiltinType::Float);
        if (!S.getOpenCLOptions().cl_khr_int64_base_atomics && !NoExtTypes) {
          S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
            << Result << "cl_khr_int64_base_atomics";
          declarator.setInvalidType(true);
        }
        if (!S.getOpenCLOptions().cl_khr_int64_extended_atomics &&
            !NoExtTypes) {
          S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
            << Result << "cl_khr_int64_extended_atomics";
          declarator.setInvalidType(true);
        }
        if (!S.getOpenCLOptions().cl_khr_fp64 && BT &&
            BT->getKind() == BuiltinType::Double) {
          S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
            << Result << "cl_khr_fp64";
          declarator.setInvalidType(true);
        }
      }
    }

    // TypeQuals handled by caller.
    break;
  }
  case DeclSpec::TST_typeofType:
    // FIXME: Preserve type source info.
    Result = S.GetTypeFromParser(DS.getRepAsType());
    assert(!Result.isNull() && "Didn't get a type for typeof?");
    if (!Result->isDependentType())
      if (const TagType *TT = Result->getAs<TagType>())
        S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
    // TypeQuals handled by caller.
    Result = Context.getTypeOfType(Result);
    break;
  case DeclSpec::TST_typeofExpr: {
    Expr *E = DS.getRepAsExpr();
    assert(E && "Didn't get an expression for typeof?");
    // TypeQuals handled by caller.
    Result = S.BuildTypeofExprType(E, DS.getTypeSpecTypeLoc());
    if (Result.isNull()) {
      Result = Context.IntTy;
      declarator.setInvalidType(true);
    }
    break;
  }
  case DeclSpec::TST_decltype: {
    Expr *E = DS.getRepAsExpr();
    assert(E && "Didn't get an expression for decltype?");
    // TypeQuals handled by caller.
    Result = S.BuildDecltypeType(E, DS.getTypeSpecTypeLoc());
    if (Result.isNull()) {
      Result = Context.IntTy;
      declarator.setInvalidType(true);
    }
    break;
  }
  case DeclSpec::TST_underlyingType:
    Result = S.GetTypeFromParser(DS.getRepAsType());
    assert(!Result.isNull() && "Didn't get a type for __underlying_type?");
    Result = S.BuildUnaryTransformType(Result,
                                       UnaryTransformType::EnumUnderlyingType,
                                       DS.getTypeSpecTypeLoc());
    if (Result.isNull()) {
      Result = Context.IntTy;
      declarator.setInvalidType(true);
    }
    break;

  case DeclSpec::TST_auto:
    // TypeQuals handled by caller.
    // If auto is mentioned in a lambda parameter context, convert it to a
    // template parameter type immediately, with the appropriate depth and
    // index, and update sema's state (LambdaScopeInfo) for the current lambda
    // being analyzed (which tracks the invented type template parameter).
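    // For example, in C++14 the generic lambda
    //   auto L = [](auto x, auto y) { return x + y; };
    // gets one invented template parameter per 'auto' parameter, as if its
    // call operator were declared
    //   template <class T0, class T1> auto operator()(T0 x, T1 y) const;
    // (Illustrative comment, not from the original source.)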
    if (declarator.getContext() == Declarator::LambdaExprParameterContext) {
      sema::LambdaScopeInfo *LSI = S.getCurLambda();
      assert(LSI && "No LambdaScopeInfo on the stack!");
      const unsigned TemplateParameterDepth = LSI->AutoTemplateParameterDepth;
      const unsigned AutoParameterPosition = LSI->AutoTemplateParams.size();
      const bool IsParameterPack = declarator.hasEllipsis();

      // Turns out we must create the TemplateTypeParmDecl here to
      // retrieve the corresponding template parameter type.
      TemplateTypeParmDecl *CorrespondingTemplateParam =
        TemplateTypeParmDecl::Create(Context,
        // Temporarily add to the TranslationUnit DeclContext.  When the
        // associated TemplateParameterList is attached to a template
        // declaration (such as FunctionTemplateDecl), the DeclContext
        // for each template parameter gets updated appropriately via
        // a call to AdoptTemplateParameterList.
        Context.getTranslationUnitDecl(),
        /*KeyLoc*/ SourceLocation(),
        /*NameLoc*/ declarator.getLocStart(),
        TemplateParameterDepth,
        AutoParameterPosition,  // our template param index
        /* Identifier*/ nullptr, false, IsParameterPack);
      LSI->AutoTemplateParams.push_back(CorrespondingTemplateParam);
      // Replace the 'auto' in the function parameter with this invented
      // template type parameter.
      Result = QualType(CorrespondingTemplateParam->getTypeForDecl(), 0);
    } else {
      Result = Context.getAutoType(QualType(), /*decltype(auto)*/false, false);
    }
    break;

  case DeclSpec::TST_decltype_auto:
    Result = Context.getAutoType(QualType(), /*decltype(auto)*/true,
                                 /*IsDependent*/ false);
    break;

  case DeclSpec::TST_unknown_anytype:
    Result = Context.UnknownAnyTy;
    break;

  case DeclSpec::TST_atomic:
    Result = S.GetTypeFromParser(DS.getRepAsType());
    assert(!Result.isNull() && "Didn't get a type for _Atomic?");
    Result = S.BuildAtomicType(Result, DS.getTypeSpecTypeLoc());
    if (Result.isNull()) {
      Result = Context.IntTy;
      declarator.setInvalidType(true);
    }
    break;

  case DeclSpec::TST_error:
    Result = Context.IntTy;
    declarator.setInvalidType(true);
    break;
  }

  // Handle complex types.
  if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
    if (S.getLangOpts().Freestanding)
      S.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
    Result = Context.getComplexType(Result);
  } else if (DS.isTypeAltiVecVector()) {
    unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result));
    assert(typeSize > 0 && "type size for vector must be greater than 0 bits");
    VectorType::VectorKind VecKind = VectorType::AltiVecVector;
    if (DS.isTypeAltiVecPixel())
      VecKind = VectorType::AltiVecPixel;
    else if (DS.isTypeAltiVecBool())
      VecKind = VectorType::AltiVecBool;
    Result = Context.getVectorType(Result, 128/typeSize, VecKind);
  }

  // FIXME: Imaginary.
  if (DS.getTypeSpecComplex() == DeclSpec::TSC_imaginary)
    S.Diag(DS.getTypeSpecComplexLoc(), diag::err_imaginary_not_supported);

  // Before we process any type attributes, synthesize a block literal
  // function declarator if necessary.
  if (declarator.getContext() == Declarator::BlockLiteralContext)
    maybeSynthesizeBlockSignature(state, Result);

  // Apply any type attributes from the decl spec.  This may cause the
  // list of type attributes to be temporarily saved while the type
  // attributes are pushed around.
  if (AttributeList *attrs = DS.getAttributes().getList())
    processTypeAttrs(state, Result, TAL_DeclSpec, attrs);

  // Apply const/volatile/restrict qualifiers to T.
  if (unsigned TypeQuals = DS.getTypeQualifiers()) {
    // Warn about CV qualifiers on function types.
    // C99 6.7.3p8:
    //   If the specification of a function type includes any type qualifiers,
    //   the behavior is undefined.
    // C++11 [dcl.fct]p7:
    //   The effect of a cv-qualifier-seq in a function declarator is not the
    //   same as adding cv-qualification on top of the function type.  In the
    //   latter case, the cv-qualifiers are ignored.
    if (TypeQuals && Result->isFunctionType()) {
      diagnoseAndRemoveTypeQualifiers(
          S, DS, TypeQuals, Result, DeclSpec::TQ_const | DeclSpec::TQ_volatile,
          S.getLangOpts().CPlusPlus
              ? diag::warn_typecheck_function_qualifiers_ignored
              : diag::warn_typecheck_function_qualifiers_unspecified);
      // No diagnostic for 'restrict' or '_Atomic' applied to a
      // function type; we'll diagnose those later, in BuildQualifiedType.
    }

    // C++11 [dcl.ref]p1:
    //   Cv-qualified references are ill-formed except when the
    //   cv-qualifiers are introduced through the use of a typedef-name
    //   or decltype-specifier, in which case the cv-qualifiers are ignored.
    //
    // There don't appear to be any other contexts in which a cv-qualified
    // reference type could be formed, so the 'ill-formed' clause here appears
    // to never happen.
    if (TypeQuals && Result->isReferenceType()) {
      diagnoseAndRemoveTypeQualifiers(
          S, DS, TypeQuals, Result,
          DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic,
          diag::warn_typecheck_reference_qualifiers);
    }

    // C90 6.5.3 constraints: "The same type qualifier shall not appear more
    // than once in the same specifier-list or qualifier-list, either directly
    // or via one or more typedefs."
    if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus
        && TypeQuals & Result.getCVRQualifiers()) {
      if (TypeQuals & DeclSpec::TQ_const && Result.isConstQualified()) {
        S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec)
          << "const";
      }

      if (TypeQuals & DeclSpec::TQ_volatile && Result.isVolatileQualified()) {
        S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec)
          << "volatile";
      }

      // C90 doesn't have restrict or _Atomic, so it doesn't force us to
      // produce a warning in this case.
    }

    QualType Qualified = S.BuildQualifiedType(Result, DeclLoc, TypeQuals, &DS);

    // If adding qualifiers fails, just use the unqualified type.
    if (Qualified.isNull())
      declarator.setInvalidType(true);
    else
      Result = Qualified;
  }

  assert(!Result.isNull() && "This function should not return a null type");
  return Result;
}

static std::string getPrintableNameForEntity(DeclarationName Entity) {
  if (Entity)
    return Entity.getAsString();

  return "type name";
}

QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
                                  Qualifiers Qs, const DeclSpec *DS) {
  if (T.isNull())
    return QualType();

  // Enforce C99 6.7.3p2: "Types other than pointer types derived from
  // object or incomplete types shall not be restrict-qualified."
  if (Qs.hasRestrict()) {
    unsigned DiagID = 0;
    QualType ProblemTy;

    if (T->isAnyPointerType() || T->isReferenceType() ||
        T->isMemberPointerType()) {
      QualType EltTy;
      if (T->isObjCObjectPointerType())
        EltTy = T;
      else if (const MemberPointerType *PTy = T->getAs<MemberPointerType>())
        EltTy = PTy->getPointeeType();
      else
        EltTy = T->getPointeeType();

      // If we have a pointer or reference, the pointee must have an object
      // or incomplete type.
      if (!EltTy->isIncompleteOrObjectType()) {
        DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
        ProblemTy = EltTy;
      }
    } else if (!T->isDependentType()) {
      DiagID = diag::err_typecheck_invalid_restrict_not_pointer;
      ProblemTy = T;
    }

    if (DiagID) {
      Diag(DS ?
             DS->getRestrictSpecLoc() : Loc, DiagID) << ProblemTy;
      Qs.removeRestrict();
    }
  }

  return Context.getQualifiedType(T, Qs);
}

QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
                                  unsigned CVRA, const DeclSpec *DS) {
  if (T.isNull())
    return QualType();

  // Convert from DeclSpec::TQ to Qualifiers::TQ by just dropping TQ_atomic.
  unsigned CVR = CVRA & ~DeclSpec::TQ_atomic;

  // C11 6.7.3/5:
  //   If the same qualifier appears more than once in the same
  //   specifier-qualifier-list, either directly or via one or more typedefs,
  //   the behavior is the same as if it appeared only once.
  //
  // It's not specified what happens when the _Atomic qualifier is applied to
  // a type specified with the _Atomic specifier, but we assume that this
  // should be treated as if the _Atomic qualifier appeared multiple times.
  if (CVRA & DeclSpec::TQ_atomic && !T->isAtomicType()) {
    // C11 6.7.3/5:
    //   If other qualifiers appear along with the _Atomic qualifier in a
    //   specifier-qualifier-list, the resulting type is the so-qualified
    //   atomic type.
    //
    // Don't need to worry about array types here, since _Atomic can't be
    // applied to such types.
    SplitQualType Split = T.getSplitUnqualifiedType();
    T = BuildAtomicType(QualType(Split.Ty, 0),
                        DS ? DS->getAtomicSpecLoc() : Loc);
    if (T.isNull())
      return T;
    Split.Quals.addCVRQualifiers(CVR);
    return BuildQualifiedType(T, Loc, Split.Quals);
  }

  return BuildQualifiedType(T, Loc, Qualifiers::fromCVRMask(CVR), DS);
}

/// \brief Build a paren type including \p T.
QualType Sema::BuildParenType(QualType T) {
  return Context.getParenType(T);
}

/// Given that we're building a pointer or reference to the given type,
/// infer the implicit ARC lifetime qualifier to use for the pointee.
static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
                                           SourceLocation loc,
                                           bool isReference) {
  // Bail out if retention is unrequired or already specified.
  if (!type->isObjCLifetimeType() ||
      type.getObjCLifetime() != Qualifiers::OCL_None)
    return type;

  Qualifiers::ObjCLifetime implicitLifetime = Qualifiers::OCL_None;

  // If the object type is const-qualified, we can safely use
  // __unsafe_unretained.  This is safe (because there are no read
  // barriers), and it'll be safe to coerce anything but __weak* to
  // the resulting type.
  if (type.isConstQualified()) {
    implicitLifetime = Qualifiers::OCL_ExplicitNone;

  // Otherwise, check whether the static type does not require
  // retaining.  This currently only triggers for Class (possibly
  // protocol-qualified, and arrays thereof).
  } else if (type->isObjCARCImplicitlyUnretainedType()) {
    implicitLifetime = Qualifiers::OCL_ExplicitNone;

  // If we are in an unevaluated context, like sizeof, skip adding a
  // qualification.
  } else if (S.isUnevaluatedContext()) {
    return type;

  // If that failed, give an error and recover using __strong.  __strong
  // is the option most likely to prevent spurious second-order diagnostics,
  // like when binding a reference to a field.
  } else {
    // These types can show up in private ivars in system headers, so
    // we need this to not be an error in those cases.  Instead we
    // want to delay.
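    // For example, under ARC a declaration such as 'id *p;' has no ownership
    // for the pointee and is diagnosed here; writing '__strong id *p;' (or
    // accepting the __strong recovery below) resolves it. (Illustrative
    // comment, not from the original source.)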
    if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
      S.DelayedDiagnostics.add(
          sema::DelayedDiagnostic::makeForbiddenType(loc,
              diag::err_arc_indirect_no_ownership, type, isReference));
    } else {
      S.Diag(loc, diag::err_arc_indirect_no_ownership) << type << isReference;
    }
    implicitLifetime = Qualifiers::OCL_Strong;
  }
  assert(implicitLifetime && "didn't infer any lifetime!");

  Qualifiers qs;
  qs.addObjCLifetime(implicitLifetime);
  return S.Context.getQualifiedType(type, qs);
}

static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
  std::string Quals =
    Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();

  switch (FnTy->getRefQualifier()) {
  case RQ_None:
    break;

  case RQ_LValue:
    if (!Quals.empty())
      Quals += ' ';
    Quals += '&';
    break;

  case RQ_RValue:
    if (!Quals.empty())
      Quals += ' ';
    Quals += "&&";
    break;
  }

  return Quals;
}

namespace {
/// Kinds of declarator that cannot contain a qualified function type.
///
/// C++98 [dcl.fct]p4 / C++11 [dcl.fct]p6:
///   a function type with a cv-qualifier or a ref-qualifier can only appear
///   at the topmost level of a type.
///
/// Parens and member pointers are permitted. We don't diagnose array and
/// function declarators, because they don't allow function types at all.
///
/// The values of this enum are used in diagnostics.
enum QualifiedFunctionKind { QFK_BlockPointer, QFK_Pointer, QFK_Reference };
}

/// Check whether the type T is a qualified function type, and if it is,
/// diagnose that it cannot be contained within the given kind of declarator.
static bool checkQualifiedFunction(Sema &S, QualType T, SourceLocation Loc,
                                   QualifiedFunctionKind QFK) {
  // Does T refer to a function type with a cv-qualifier or a ref-qualifier?
  const FunctionProtoType *FPT = T->getAs<FunctionProtoType>();
  if (!FPT || (FPT->getTypeQuals() == 0 && FPT->getRefQualifier() == RQ_None))
    return false;

  S.Diag(Loc, diag::err_compound_qualified_function_type)
    << QFK << isa<FunctionType>(T.IgnoreParens()) << T
    << getFunctionQualifiersAsString(FPT);
  return true;
}

/// \brief Build a pointer type.
///
/// \param T The type to which we'll be building a pointer.
///
/// \param Loc The location of the entity whose type involves this
/// pointer type or, if there is no such entity, the location of the
/// type that will have pointer type.
///
/// \param Entity The name of the entity that involves the pointer
/// type, if known.
///
/// \returns A suitable pointer type, if there are no
/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildPointerType(QualType T,
                                SourceLocation Loc, DeclarationName Entity) {
  if (T->isReferenceType()) {
    // C++ 8.3.2p4: There shall be no ... pointers to references ...
    Diag(Loc, diag::err_illegal_decl_pointer_to_reference)
      << getPrintableNameForEntity(Entity) << T;
    return QualType();
  }

  if (checkQualifiedFunction(*this, T, Loc, QFK_Pointer))
    return QualType();

  assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");

  // In ARC, it is forbidden to build pointers to unqualified pointers.
  if (getLangOpts().ObjCAutoRefCount)
    T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ false);

  // Build the pointer type.
  return Context.getPointerType(T);
}

/// \brief Build a reference type.
///
/// \param T The type to which we'll be building a reference.
///
/// \param Loc The location of the entity whose type involves this
/// reference type or, if there is no such entity, the location of the
/// type that will have reference type.
///
/// \param Entity The name of the entity that involves the reference
/// type, if known.
///
/// \returns A suitable reference type, if there are no
/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
                                  SourceLocation Loc,
                                  DeclarationName Entity) {
  assert(Context.getCanonicalType(T) != Context.OverloadTy &&
         "Unresolved overloaded function type");

  // C++0x [dcl.ref]p6:
  //   If a typedef (7.1.3), a type template-parameter (14.3.1), or a
  //   decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a
  //   type T, an attempt to create the type "lvalue reference to cv TR" creates
  //   the type "lvalue reference to T", while an attempt to create the type
  //   "rvalue reference to cv TR" creates the type TR.
  bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>();

  // C++ [dcl.ref]p4: There shall be no references to references.
  //
  // According to C++ DR 106, references to references are only
  // diagnosed when they are written directly (e.g., "int & &"),
  // but not when they happen via a typedef:
  //
  //   typedef int& intref;
  //   typedef intref& intref2;
  //
  // Parser::ParseDeclaratorInternal diagnoses the case where
  // references are written directly; here, we handle the
  // collapsing of references-to-references as described in C++0x.
  // DR 106 and 540 introduce reference-collapsing into C++98/03.

  // C++ [dcl.ref]p1:
  //   A declarator that specifies the type "reference to cv void"
  //   is ill-formed.
  if (T->isVoidType()) {
    Diag(Loc, diag::err_reference_to_void);
    return QualType();
  }

  if (checkQualifiedFunction(*this, T, Loc, QFK_Reference))
    return QualType();

  // In ARC, it is forbidden to build references to unqualified pointers.
  if (getLangOpts().ObjCAutoRefCount)
    T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true);

  // Handle restrict on references.
  if (LValueRef)
    return Context.getLValueReferenceType(T, SpelledAsLValue);
  return Context.getRValueReferenceType(T);
}

/// Check whether the specified array size makes the array type a VLA.  If so,
/// return true, if not, return the size of the array in SizeVal.
static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
  // If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode
  // (like gnu99, but not c99) accept any evaluatable value as an extension.
  class VLADiagnoser : public Sema::VerifyICEDiagnoser {
  public:
    VLADiagnoser() : Sema::VerifyICEDiagnoser(true) {}

    void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override {
    }

    void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) override {
      S.Diag(Loc, diag::ext_vla_folded_to_constant) << SR;
    }
  } Diagnoser;

  return S.VerifyIntegerConstantExpression(ArraySize, &SizeVal, Diagnoser,
                                           S.LangOpts.GNUMode).isInvalid();
}

/// \brief Build an array type.
///
/// \param T The type of each element in the array.
///
/// \param ASM C99 array size modifier (e.g., '*', 'static').
///
/// \param ArraySize Expression describing the size of the array.
///
/// \param Brackets The range from the opening '[' to the closing ']'.
///
/// \param Entity The name of the entity that involves the array
/// type, if known.
///
/// \returns A suitable array type, if there are no errors. Otherwise,
/// returns a NULL type.
QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                              Expr *ArraySize, unsigned Quals,
                              SourceRange Brackets, DeclarationName Entity) {

  SourceLocation Loc = Brackets.getBegin();
  if (getLangOpts().CPlusPlus) {
    // C++ [dcl.array]p1:
    //   T is called the array element type; this type shall not be a reference
    //   type, the (possibly cv-qualified) type void, a function type or an
    //   abstract class type.
    //
    // C++ [dcl.array]p3:
    //   When several "array of" specifications are adjacent, [...] only the
    //   first of the constant expressions that specify the bounds of the
    //   arrays may be omitted.
    //
    // Note: function types are handled in the common path with C.
    if (T->isReferenceType()) {
      Diag(Loc, diag::err_illegal_decl_array_of_references)
        << getPrintableNameForEntity(Entity) << T;
      return QualType();
    }

    if (T->isVoidType() || T->isIncompleteArrayType()) {
      Diag(Loc, diag::err_illegal_decl_array_incomplete_type) << T;
      return QualType();
    }

    // HLSL Change Start
    // no arrays of strings in HLSL
    if (getLangOpts().HLSL && hlsl::IsStringType(T)) {
      static const unsigned selectArrayIdx = 0;
      Diag(Loc, diag::err_hlsl_unsupported_string_decl) << selectArrayIdx;
      return QualType();
    }
    // HLSL Change End

    if (RequireNonAbstractType(Brackets.getBegin(), T,
                               diag::err_array_of_abstract_type))
      return QualType();

    // Mentioning a member pointer type for an array type causes us to lock in
    // an inheritance model, even if it's inside an unused typedef.
    if (Context.getTargetInfo().getCXXABI().isMicrosoft())
      if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
        if (!MPTy->getClass()->isDependentType())
          RequireCompleteType(Loc, T, 0);

  } else {
    // C99 6.7.5.2p1: If the element type is an incomplete or function type,
    // reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
    if (RequireCompleteType(Loc, T,
                            diag::err_illegal_decl_array_incomplete_type))
      return QualType();
  }

  if (T->isFunctionType()) {
    Diag(Loc, diag::err_illegal_decl_array_of_functions)
      << getPrintableNameForEntity(Entity) << T;
    return QualType();
  }

  if (const RecordType *EltTy = T->getAs<RecordType>()) {
    // If the element type is a struct or union that contains a variadic
    // array, accept it as a GNU extension: C99 6.7.2.1p2.
    if (EltTy->getDecl()->hasFlexibleArrayMember())
      Diag(Loc, diag::ext_flexible_array_in_array) << T;
  } else if (T->isObjCObjectType()) {
    Diag(Loc, diag::err_objc_array_of_interfaces) << T;
    return QualType();
  }

  // Do placeholder conversions on the array size expression.
  if (ArraySize && ArraySize->hasPlaceholderType()) {
    ExprResult Result = CheckPlaceholderExpr(ArraySize);
    if (Result.isInvalid())
      return QualType();
    ArraySize = Result.get();
  }

  // Do lvalue-to-rvalue conversions on the array size expression.
  if (ArraySize && !ArraySize->isRValue()) {
    ExprResult Result = DefaultLvalueConversion(ArraySize);
    if (Result.isInvalid())
      return QualType();

    ArraySize = Result.get();
  }

  // C99 6.7.5.2p1: The size expression shall have integer type.
  // C++11 allows contextual conversions to such types.
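  // For example, given 'struct N { constexpr operator int() const { return 4; } };',
  // a bound such as 'int a[N()];' is rejected pre-C++11 as a non-integer
  // size, but accepted in C++11 through the contextual conversion to int.
  // (Illustrative comment, not from the original source.)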
  if (!getLangOpts().CPlusPlus11 &&
      ArraySize && !ArraySize->isTypeDependent() &&
      !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
    Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
      << ArraySize->getType() << ArraySize->getSourceRange();
    return QualType();
  }

  llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType()));
  if (!ArraySize) {
    if (ASM == ArrayType::Star)
      T = Context.getVariableArrayType(T, nullptr, ASM, Quals, Brackets);
    else
      T = Context.getIncompleteArrayType(T, ASM, Quals);
  } else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) {
    T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets);
  } else if ((!T->isDependentType() && !T->isIncompleteType() &&
              !T->isConstantSizeType()) ||
             isArraySizeVLA(*this, ArraySize, ConstVal)) {
    // Even in C++11, don't allow contextual conversions in the array bound
    // of a VLA.
    if (getLangOpts().CPlusPlus11 &&
        !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
      Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
        << ArraySize->getType() << ArraySize->getSourceRange();
      return QualType();
    }

    // C99: an array with an element type that has a non-constant-size is a
    // VLA.
    // C99: an array with a non-ICE size is a VLA.  We accept any expression
    // that we can fold to a non-zero positive value as an extension.
    T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
  } else {
    // C99 6.7.5.2p1: If the expression is a constant expression, it shall
    // have a value greater than zero.
    if (ConstVal.isSigned() && ConstVal.isNegative()) {
      if (Entity)
        Diag(ArraySize->getLocStart(), diag::err_decl_negative_array_size)
          << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
      else
        Diag(ArraySize->getLocStart(), diag::err_typecheck_negative_array_size)
          << ArraySize->getSourceRange();
      return QualType();
    }
    if (ConstVal == 0) {
      // GCC accepts zero sized static arrays. We allow them when
      // we're not in a SFINAE context.
      Diag(ArraySize->getLocStart(),
           isSFINAEContext()? diag::err_typecheck_zero_array_size
                            : diag::ext_typecheck_zero_array_size)
        << ArraySize->getSourceRange();

      if (ASM == ArrayType::Static) {
        Diag(ArraySize->getLocStart(),
             diag::warn_typecheck_zero_static_array_size)
          << ArraySize->getSourceRange();
        ASM = ArrayType::Normal;
      }
    } else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
               !T->isIncompleteType() && !T->isUndeducedType()) {
      // Is the array too large?
      unsigned ActiveSizeBits
        = ConstantArrayType::getNumAddressingBits(Context, T, ConstVal);
      if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
        Diag(ArraySize->getLocStart(), diag::err_array_too_large)
          << ConstVal.toString(10)
          << ArraySize->getSourceRange();
        return QualType();
      }
    }

    T = Context.getConstantArrayType(T, ConstVal, ASM, Quals);
  }

  // HLSL Change Starts
  if (getLangOpts().HLSL && T->isVariableArrayType()) {
    Diag(Loc, diag::err_hlsl_vla);
    return QualType();
  }
  // HLSL Change Stops

  // OpenCL v1.2 s6.9.d: variable length arrays are not supported.
  if (getLangOpts().OpenCL && T->isVariableArrayType()) {
    Diag(Loc, diag::err_opencl_vla);
    return QualType();
  }

  // If this is not C99, extwarn about VLA's and C99 array size modifiers.
  if (!getLangOpts().C99) {
    if (T->isVariableArrayType()) {
      // Prohibit the use of non-POD types in VLAs.
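      // For example, with a runtime bound 'n', 'int arr[n];' only draws the
      // VLA extension warning below, but an element type with a destructor,
      // as in 'S arr[n];', is rejected here outright. (Illustrative comment,
      // not from the original source.)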
      QualType BaseT = Context.getBaseElementType(T);
      if (!T->isDependentType() &&
          !RequireCompleteType(Loc, BaseT, 0) &&
          !BaseT.isPODType(Context) &&
          !BaseT->isObjCLifetimeType()) {
        Diag(Loc, diag::err_vla_non_pod)
          << BaseT;
        return QualType();
      }
      // Prohibit the use of VLAs during template argument deduction.
      else if (isSFINAEContext()) {
        Diag(Loc, diag::err_vla_in_sfinae);
        return QualType();
      }
      // Just extwarn about VLAs.
      else
        Diag(Loc, diag::ext_vla);
    } else if ((ASM != ArrayType::Normal || Quals != 0) &&
               !getLangOpts().HLSL) // HLSL Change: already fails in HLSL
      Diag(Loc,
           getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx
                                  : diag::ext_c99_array_usage) << ASM;
  }

  if (T->isVariableArrayType()) {
    // Warn about VLAs for -Wvla.
    Diag(Loc, diag::warn_vla_used);
  }

  return T;
}

/// \brief Build an ext-vector type.
///
/// Run the required checks for the extended vector type.
QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
                                  SourceLocation AttrLoc) {
  // unlike gcc's vector_size attribute, we do not allow vectors to be defined
  // in conjunction with complex types (pointers, arrays, functions, etc.).
  if (!T->isDependentType() &&
      !T->isIntegerType() && !T->isRealFloatingType()) {
    Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << T;
    return QualType();
  }

  if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) {
    llvm::APSInt vecSize(32);
    if (!ArraySize->isIntegerConstantExpr(vecSize, Context)) {
      Diag(AttrLoc, diag::err_attribute_argument_type)
        << "ext_vector_type" << AANT_ArgumentIntegerConstant
        << ArraySize->getSourceRange();
      return QualType();
    }

    // unlike gcc's vector_size attribute, the size is specified as the
    // number of elements, not the number of bytes.
    unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());

    if (vectorSize == 0) {
      Diag(AttrLoc, diag::err_attribute_zero_size)
        << ArraySize->getSourceRange();
      return QualType();
    }

    if (VectorType::isVectorSizeTooLarge(vectorSize)) {
      Diag(AttrLoc, diag::err_attribute_size_too_large)
        << ArraySize->getSourceRange();
      return QualType();
    }

    return Context.getExtVectorType(T, vectorSize);
  }

  return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc);
}

bool Sema::CheckFunctionReturnType(QualType T, SourceLocation Loc) {
  if (T->isArrayType() || T->isFunctionType()) {
    Diag(Loc, diag::err_func_returning_array_function)
      << T->isFunctionType() << T;
    return true;
  }

  // Functions cannot return half FP.
  if (T->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
    Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 1 <<
      FixItHint::CreateInsertion(Loc, "*");
    return true;
  }

  // Methods cannot return interface types. All ObjC objects are
  // passed by reference.
  if (T->isObjCObjectType()) {
    Diag(Loc, diag::err_object_cannot_be_passed_returned_by_value) << 0 << T;
    return 0;
  }

  return false;
}

// HLSL Change - FIX - We should move param mods to parameter QualTypes
QualType Sema::BuildFunctionType(QualType T,
                                 MutableArrayRef<QualType> ParamTypes,
                                 SourceLocation Loc, DeclarationName Entity,
                                 const FunctionProtoType::ExtProtoInfo &EPI,
                                 ArrayRef<hlsl::ParameterModifier> ParamMods) {
  // HLSL Change - End
  bool Invalid = false;

  Invalid |= CheckFunctionReturnType(T, Loc);

  for (unsigned Idx = 0, Cnt = ParamTypes.size(); Idx < Cnt; ++Idx) {
    // FIXME: Loc is too imprecise here, should use proper locations for args.
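    // For example, both diagnostics below point at the declaration location
    // rather than at the offending parameter, so 'void f(void v, half h);'
    // reports every problem at 'f'. (Illustrative comment, not from the
    // original source.)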
    QualType ParamType = Context.getAdjustedParameterType(ParamTypes[Idx]);
    if (ParamType->isVoidType()) {
      Diag(Loc, diag::err_param_with_void_type);
      Invalid = true;
    } else if (ParamType->isHalfType() && !getLangOpts().HalfArgsAndReturns) {
      // Disallow half FP arguments.
      Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 <<
        FixItHint::CreateInsertion(Loc, "*");
      Invalid = true;
    }

    ParamTypes[Idx] = ParamType;
  }

  if (Invalid)
    return QualType();

  // HLSL Change - FIX - We should move param mods to parameter QualTypes
  return Context.getFunctionType(T, ParamTypes, EPI, ParamMods);
  // HLSL Change End
}

/// \brief Build a member pointer type \c T Class::*.
///
/// \param T the type to which the member pointer refers.
/// \param Class the class type into which the member pointer points.
/// \param Loc the location where this type begins
/// \param Entity the name of the entity that will have this member pointer type
///
/// \returns a member pointer type, if successful, or a NULL type if there was
/// an error.
QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
                                      SourceLocation Loc,
                                      DeclarationName Entity) {
  // Verify that we're not building a pointer to pointer to function with
  // exception specification.
  if (CheckDistantExceptionSpec(T)) {
    Diag(Loc, diag::err_distant_exception_spec);
    return QualType();
  }

  // C++ 8.3.3p3: A pointer to member shall not point to ... a member
  //   with reference type, or "cv void."
  if (T->isReferenceType()) {
    Diag(Loc, diag::err_illegal_decl_mempointer_to_reference)
      << getPrintableNameForEntity(Entity) << T;
    return QualType();
  }

  if (T->isVoidType()) {
    Diag(Loc, diag::err_illegal_decl_mempointer_to_void)
      << getPrintableNameForEntity(Entity);
    return QualType();
  }

  if (!Class->isDependentType() && !Class->isRecordType()) {
    Diag(Loc, diag::err_mempointer_in_nonclass_type) << Class;
    return QualType();
  }

  // Adjust the default free function calling convention to the default method
  // calling convention.
  if (T->isFunctionType())
    adjustMemberFunctionCC(T, /*IsStatic=*/false);

  return Context.getMemberPointerType(T, Class.getTypePtr());
}

/// \brief Build a block pointer type.
///
/// \param T The type to which we'll be building a block pointer.
///
/// \param Loc The source location, used for diagnostics.
///
/// \param Entity The name of the entity that involves the block pointer
/// type, if known.
///
/// \returns A suitable block pointer type, if there are no
/// errors. Otherwise, returns a NULL type.
QualType Sema::BuildBlockPointerType(QualType T,
                                     SourceLocation Loc,
                                     DeclarationName Entity) {
  if (!T->isFunctionType()) {
    Diag(Loc, diag::err_nonfunction_block_type);
    return QualType();
  }

  if (checkQualifiedFunction(*this, T, Loc, QFK_BlockPointer))
    return QualType();

  return Context.getBlockPointerType(T);
}

QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) {
  QualType QT = Ty.get();
  if (QT.isNull()) {
    if (TInfo) *TInfo = nullptr;
    return QualType();
  }

  TypeSourceInfo *DI = nullptr;
  if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) {
    QT = LIT->getType();
    DI = LIT->getTypeSourceInfo();
  }

  if (TInfo) *TInfo = DI;
  return QT;
}

static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
                                            Qualifiers::ObjCLifetime ownership,
                                            unsigned chunkIndex);

/// Given that this is the declaration of a parameter under ARC,
/// attempt to infer attributes and such for pointer-to-whatever
/// types.
static void inferARCWriteback(TypeProcessingState &state,
                              QualType &declSpecType) {
  Sema &S = state.getSema();
  Declarator &declarator = state.getDeclarator();

  // TODO: should we care about decl qualifiers?

  // Check whether the declarator has the expected form.  We walk
  // from the inside out in order to make the block logic work.
  unsigned outermostPointerIndex = 0;
  bool isBlockPointer = false;
  unsigned numPointers = 0;
  for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
    unsigned chunkIndex = i;
    DeclaratorChunk &chunk = declarator.getTypeObject(chunkIndex);
    switch (chunk.Kind) {
    case DeclaratorChunk::Paren:
      // Ignore parens.
      break;

    case DeclaratorChunk::Reference:
    case DeclaratorChunk::Pointer:
      // Count the number of pointers.  Treat references
      // interchangeably as pointers; if they're mis-ordered, normal
      // type building will discover that.
      outermostPointerIndex = chunkIndex;
      numPointers++;
      break;

    case DeclaratorChunk::BlockPointer:
      // If we have a pointer to block pointer, that's an acceptable
      // indirect reference; anything else is not an application of
      // the rules.
      if (numPointers != 1) return;
      numPointers++;
      outermostPointerIndex = chunkIndex;
      isBlockPointer = true;

      // We don't care about pointer structure in return values here.
      goto done;

    case DeclaratorChunk::Array: // suppress if written (id[])?
    case DeclaratorChunk::Function:
    case DeclaratorChunk::MemberPointer:
      return;
    }
  }
 done:

  // If we have *one* pointer, then we want to throw the qualifier on
  // the declaration-specifiers, which means that it needs to be a
  // retainable object type.
  if (numPointers == 1) {
    // If it's not a retainable object type, the rule doesn't apply.
    if (!declSpecType->isObjCRetainableType()) return;

    // If it already has lifetime, don't do anything.
    if (declSpecType.getObjCLifetime()) return;

    // Otherwise, modify the type in-place.
    Qualifiers qs;

    if (declSpecType->isObjCARCImplicitlyUnretainedType())
      qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone);
    else
      qs.addObjCLifetime(Qualifiers::OCL_Autoreleasing);
    declSpecType = S.Context.getQualifiedType(declSpecType, qs);

  // If we have *two* pointers, then we want to throw the qualifier on
  // the outermost pointer.
  } else if (numPointers == 2) {
    // If we don't have a block pointer, we need to check whether the
    // declaration-specifiers gave us something that will turn into a
    // retainable object pointer after we slap the first pointer on it.
    if (!isBlockPointer && !declSpecType->isObjCObjectType())
      return;

    // Look for an explicit lifetime attribute there.
    DeclaratorChunk &chunk = declarator.getTypeObject(outermostPointerIndex);
    if (chunk.Kind != DeclaratorChunk::Pointer &&
        chunk.Kind != DeclaratorChunk::BlockPointer)
      return;
    for (const AttributeList *attr = chunk.getAttrs(); attr;
           attr = attr->getNext())
      if (attr->getKind() == AttributeList::AT_ObjCOwnership)
        return;

    transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing,
                                          outermostPointerIndex);

  // Any other number of pointers/references does not trigger the rule.
  } else return;

  // TODO: mark whether we did this inference?
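  // For example, an out-parameter declared 'NSError **error' is treated as
  // 'NSError * __autoreleasing *error' by the transfer above, matching the
  // ARC writeback convention. (Illustrative comment, not from the original
  // source.)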
}

void Sema::diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                                     SourceLocation FallbackLoc,
                                     SourceLocation ConstQualLoc,
                                     SourceLocation VolatileQualLoc,
                                     SourceLocation RestrictQualLoc,
                                     SourceLocation AtomicQualLoc) {
  if (!Quals)
    return;

  struct Qual {
    unsigned Mask;
    const char *Name;
    SourceLocation Loc;
  } const QualKinds[4] = {
    { DeclSpec::TQ_const, "const", ConstQualLoc },
    { DeclSpec::TQ_volatile, "volatile", VolatileQualLoc },
    { DeclSpec::TQ_restrict, "restrict", RestrictQualLoc },
    { DeclSpec::TQ_atomic, "_Atomic", AtomicQualLoc }
  };

  SmallString<32> QualStr;
  unsigned NumQuals = 0;
  SourceLocation Loc;
  FixItHint FixIts[4];

  // Build a string naming the redundant qualifiers.
  for (unsigned I = 0; I != 4; ++I) {
    if (Quals & QualKinds[I].Mask) {
      if (!QualStr.empty()) QualStr += ' ';
      QualStr += QualKinds[I].Name;

      // If we have a location for the qualifier, offer a fixit.
      SourceLocation QualLoc = QualKinds[I].Loc;
      if (!QualLoc.isInvalid()) {
        FixIts[NumQuals] = FixItHint::CreateRemoval(QualLoc);
        if (Loc.isInvalid() ||
            getSourceManager().isBeforeInTranslationUnit(QualLoc, Loc))
          Loc = QualLoc;
      }

      ++NumQuals;
    }
  }

  Diag(Loc.isInvalid() ? FallbackLoc : Loc, DiagID)
    << QualStr << NumQuals << FixIts[0] << FixIts[1] << FixIts[2] << FixIts[3];
}

// Diagnose pointless type qualifiers on the return type of a function.
static void diagnoseRedundantReturnTypeQualifiers(Sema &S, QualType RetTy,
                                                  Declarator &D,
                                                  unsigned FunctionChunkIndex) {
  if (D.getTypeObject(FunctionChunkIndex).Fun.hasTrailingReturnType()) {
    // FIXME: TypeSourceInfo doesn't preserve location information for
    // qualifiers.
    S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
                                RetTy.getLocalCVRQualifiers(),
                                D.getIdentifierLoc());
    return;
  }

  for (unsigned OuterChunkIndex = FunctionChunkIndex + 1,
                End = D.getNumTypeObjects();
       OuterChunkIndex != End; ++OuterChunkIndex) {
    DeclaratorChunk &OuterChunk = D.getTypeObject(OuterChunkIndex);
    switch (OuterChunk.Kind) {
    case DeclaratorChunk::Paren:
      continue;

    case DeclaratorChunk::Pointer: {
      DeclaratorChunk::PointerTypeInfo &PTI = OuterChunk.Ptr;
      S.diagnoseIgnoredQualifiers(
          diag::warn_qual_return_type,
          PTI.TypeQuals,
          SourceLocation(),
          SourceLocation::getFromRawEncoding(PTI.ConstQualLoc),
          SourceLocation::getFromRawEncoding(PTI.VolatileQualLoc),
          SourceLocation::getFromRawEncoding(PTI.RestrictQualLoc),
          SourceLocation::getFromRawEncoding(PTI.AtomicQualLoc));
      return;
    }

    case DeclaratorChunk::Function:
    case DeclaratorChunk::BlockPointer:
    case DeclaratorChunk::Reference:
    case DeclaratorChunk::Array:
    case DeclaratorChunk::MemberPointer:
      // FIXME: We can't currently provide an accurate source location and a
      // fix-it hint for these.
      unsigned AtomicQual = RetTy->isAtomicType() ? DeclSpec::TQ_atomic : 0;
      S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type,
                                  RetTy.getCVRQualifiers() | AtomicQual,
                                  D.getIdentifierLoc());
      return;
    }

    llvm_unreachable("unknown declarator chunk kind");
  }

  // If the qualifiers come from a conversion function type, don't diagnose
  // them -- they're not necessarily redundant, since such a conversion
  // operator can be explicitly called as "x.operator const int()".
  if (D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId)
    return;

  // Just parens all the way out to the decl specifiers. Diagnose any qualifiers
  // which are present there.
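  // For example, 'const int f();' reaches this point: the 'const' on the
  // return type has no effect and is reported with a removal fix-it.
  // (Illustrative comment, not from the original source.)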
S.diagnoseIgnoredQualifiers(diag::warn_qual_return_type, D.getDeclSpec().getTypeQualifiers(), D.getIdentifierLoc(), D.getDeclSpec().getConstSpecLoc(), D.getDeclSpec().getVolatileSpecLoc(), D.getDeclSpec().getRestrictSpecLoc(), D.getDeclSpec().getAtomicSpecLoc()); } static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state, TypeSourceInfo *&ReturnTypeInfo) { Sema &SemaRef = state.getSema(); Declarator &D = state.getDeclarator(); QualType T; ReturnTypeInfo = nullptr; // The TagDecl owned by the DeclSpec. TagDecl *OwnedTagDecl = nullptr; bool ContainsPlaceholderType = false; switch (D.getName().getKind()) { case UnqualifiedId::IK_ImplicitSelfParam: case UnqualifiedId::IK_OperatorFunctionId: case UnqualifiedId::IK_Identifier: case UnqualifiedId::IK_LiteralOperatorId: case UnqualifiedId::IK_TemplateId: T = ConvertDeclSpecToType(state); ContainsPlaceholderType = D.getDeclSpec().containsPlaceholderType(); if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) { OwnedTagDecl = cast<TagDecl>(D.getDeclSpec().getRepAsDecl()); // Owned declaration is embedded in declarator. OwnedTagDecl->setEmbeddedInDeclarator(true); } break; case UnqualifiedId::IK_ConstructorName: case UnqualifiedId::IK_ConstructorTemplateId: case UnqualifiedId::IK_DestructorName: // Constructors and destructors don't have return types. Use // "void" instead. T = SemaRef.Context.VoidTy; if (AttributeList *attrs = D.getDeclSpec().getAttributes().getList()) processTypeAttrs(state, T, TAL_DeclSpec, attrs); break; case UnqualifiedId::IK_ConversionFunctionId: // The result type of a conversion function is the type that it // converts to. T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId, &ReturnTypeInfo); ContainsPlaceholderType = T->getContainedAutoType(); break; } if (D.getAttributes()) distributeTypeAttrsFromDeclarator(state, T); // C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context. // In C++11, a function declarator using 'auto' must have a trailing return // type (this is checked later) and we can skip this. In other languages // using auto, we need to check regardless. // C++14 In generic lambdas allow 'auto' in their parameters. 
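  // (Assumed example: in C++14 a generic lambda such as
  //   [](auto x) { return x; }
  // reaches this check with a placeholder parameter type and is accepted
  // through the LambdaExprParameterContext case below.)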
if (ContainsPlaceholderType && (!SemaRef.getLangOpts().CPlusPlus11 || !D.isFunctionDeclarator())) { int Error = -1; switch (D.getContext()) { case Declarator::KNRTypeListContext: llvm_unreachable("K&R type lists aren't allowed in C++"); case Declarator::LambdaExprContext: llvm_unreachable("Can't specify a type specifier in lambda grammar"); case Declarator::ObjCParameterContext: case Declarator::ObjCResultContext: case Declarator::PrototypeContext: Error = 0; break; case Declarator::LambdaExprParameterContext: if (!(SemaRef.getLangOpts().CPlusPlus14 && D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto)) Error = 14; break; case Declarator::MemberContext: if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static) break; switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) { case TTK_Enum: llvm_unreachable("unhandled tag kind"); case TTK_Struct: Error = 1; /* Struct member */ break; case TTK_Union: Error = 2; /* Union member */ break; case TTK_Class: Error = 3; /* Class member */ break; case TTK_Interface: Error = 4; /* Interface member */ break; } break; case Declarator::CXXCatchContext: case Declarator::ObjCCatchContext: Error = 5; // Exception declaration break; case Declarator::TemplateParamContext: Error = 6; // Template parameter break; case Declarator::BlockLiteralContext: Error = 7; // Block literal break; case Declarator::TemplateTypeArgContext: Error = 8; // Template type argument break; case Declarator::AliasDeclContext: case Declarator::AliasTemplateContext: Error = 10; // Type alias break; case Declarator::TrailingReturnContext: if (!SemaRef.getLangOpts().CPlusPlus14) Error = 11; // Function return type break; case Declarator::ConversionIdContext: if (!SemaRef.getLangOpts().CPlusPlus14) Error = 12; // conversion-type-id break; case Declarator::TypeNameContext: Error = 13; // Generic break; case Declarator::FileContext: case Declarator::BlockContext: case Declarator::ForContext: case Declarator::ConditionContext: case Declarator::CXXNewContext: break; } if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) Error = 9; // In Objective-C it is an error to use 'auto' on a function declarator. if (D.isFunctionDeclarator()) Error = 11; // C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator // contains a trailing return type. That is only legal at the outermost // level. Check all declarator chunks (outermost first) anyway, to give // better diagnostics. 
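  // (Assumed example: a parameter declared 'auto (*fp)() -> int' would set
  // Error above, but the scan below finds the function chunk's trailing
  // return type and rescues it.)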
if (SemaRef.getLangOpts().CPlusPlus11 && Error != -1) { for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { unsigned chunkIndex = e - i - 1; state.setCurrentChunkIndex(chunkIndex); DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex); if (DeclType.Kind == DeclaratorChunk::Function) { const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun; if (FTI.hasTrailingReturnType()) { Error = -1; break; } } } } SourceRange AutoRange = D.getDeclSpec().getTypeSpecTypeLoc(); if (D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId) AutoRange = D.getName().getSourceRange(); if (Error != -1) { const bool IsDeclTypeAuto = D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_decltype_auto; SemaRef.Diag(AutoRange.getBegin(), diag::err_auto_not_allowed) << IsDeclTypeAuto << Error << AutoRange; T = SemaRef.Context.IntTy; D.setInvalidType(true); } else SemaRef.Diag(AutoRange.getBegin(), diag::warn_cxx98_compat_auto_type_specifier) << AutoRange; } if (SemaRef.getLangOpts().CPlusPlus && OwnedTagDecl && OwnedTagDecl->isCompleteDefinition()) { // Check the contexts where C++ forbids the declaration of a new class // or enumeration in a type-specifier-seq. switch (D.getContext()) { case Declarator::TrailingReturnContext: // Class and enumeration definitions are syntactically not allowed in // trailing return types. llvm_unreachable("parser should not have allowed this"); break; case Declarator::FileContext: case Declarator::MemberContext: case Declarator::BlockContext: case Declarator::ForContext: case Declarator::BlockLiteralContext: case Declarator::LambdaExprContext: // C++11 [dcl.type]p3: // A type-specifier-seq shall not define a class or enumeration unless // it appears in the type-id of an alias-declaration (7.1.3) that is not // the declaration of a template-declaration. case Declarator::AliasDeclContext: break; case Declarator::AliasTemplateContext: SemaRef.Diag(OwnedTagDecl->getLocation(), diag::err_type_defined_in_alias_template) << SemaRef.Context.getTypeDeclType(OwnedTagDecl); D.setInvalidType(true); break; case Declarator::TypeNameContext: case Declarator::ConversionIdContext: case Declarator::TemplateParamContext: case Declarator::CXXNewContext: case Declarator::CXXCatchContext: case Declarator::ObjCCatchContext: case Declarator::TemplateTypeArgContext: SemaRef.Diag(OwnedTagDecl->getLocation(), diag::err_type_defined_in_type_specifier) << SemaRef.Context.getTypeDeclType(OwnedTagDecl); D.setInvalidType(true); break; case Declarator::PrototypeContext: case Declarator::LambdaExprParameterContext: case Declarator::ObjCParameterContext: case Declarator::ObjCResultContext: case Declarator::KNRTypeListContext: // C++ [dcl.fct]p6: // Types shall not be defined in return or parameter types. SemaRef.Diag(OwnedTagDecl->getLocation(), diag::err_type_defined_in_param_type) << SemaRef.Context.getTypeDeclType(OwnedTagDecl); D.setInvalidType(true); break; case Declarator::ConditionContext: // C++ 6.4p2: // The type-specifier-seq shall not contain typedef and shall not declare // a new class or enumeration. SemaRef.Diag(OwnedTagDecl->getLocation(), diag::err_type_defined_in_condition); D.setInvalidType(true); break; } } assert(!T.isNull() && "This function should not return a null type"); return T; } /// Produce an appropriate diagnostic for an ambiguity between a function /// declarator and a C++ direct-initializer. 
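/// A sketch of the classic case (an assumed example, not from the original
/// source):
/// \code
///   struct T {};
///   void g() {
///     T var(T());  // declares a function taking an unnamed 'T (*)()',
///                  // though a variable initialization was likely intended
///   }
/// \endcode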
static void warnAboutAmbiguousFunction(Sema &S, Declarator &D, DeclaratorChunk &DeclType, QualType RT) { const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun; assert(FTI.isAmbiguous && "no direct-initializer / function ambiguity"); // If the return type is void there is no ambiguity. if (RT->isVoidType()) return; // An initializer for a non-class type can have at most one argument. if (!RT->isRecordType() && FTI.NumParams > 1) return; // An initializer for a reference must have exactly one argument. if (RT->isReferenceType() && FTI.NumParams != 1) return; // Only warn if this declarator is declaring a function at block scope, and // doesn't have a storage class (such as 'extern') specified. if (!D.isFunctionDeclarator() || D.getFunctionDefinitionKind() != FDK_Declaration || !S.CurContext->isFunctionOrMethod() || D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_unspecified) return; // Inside a condition, a direct initializer is not permitted. We allow one to // be parsed in order to give better diagnostics in condition parsing. if (D.getContext() == Declarator::ConditionContext) return; SourceRange ParenRange(DeclType.Loc, DeclType.EndLoc); S.Diag(DeclType.Loc, FTI.NumParams ? diag::warn_parens_disambiguated_as_function_declaration : diag::warn_empty_parens_are_function_decl) << ParenRange; // If the declaration looks like: // T var1, // f(); // and name lookup finds a function named 'f', then the ',' was // probably intended to be a ';'. if (!D.isFirstDeclarator() && D.getIdentifier()) { FullSourceLoc Comma(D.getCommaLoc(), S.SourceMgr); FullSourceLoc Name(D.getIdentifierLoc(), S.SourceMgr); if (Comma.getFileID() != Name.getFileID() || Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) { LookupResult Result(S, D.getIdentifier(), SourceLocation(), Sema::LookupOrdinaryName); if (S.LookupName(Result, S.getCurScope())) S.Diag(D.getCommaLoc(), diag::note_empty_parens_function_call) << FixItHint::CreateReplacement(D.getCommaLoc(), ";") << D.getIdentifier(); } } if (FTI.NumParams > 0) { // For a declaration with parameters, eg. "T var(T());", suggest adding // parens around the first parameter to turn the declaration into a // variable declaration. SourceRange Range = FTI.Params[0].Param->getSourceRange(); SourceLocation B = Range.getBegin(); SourceLocation E = S.getLocForEndOfToken(Range.getEnd()); // FIXME: Maybe we should suggest adding braces instead of parens // in C++11 for classes that don't have an initializer_list constructor. S.Diag(B, diag::note_additional_parens_for_variable_declaration) << FixItHint::CreateInsertion(B, "(") << FixItHint::CreateInsertion(E, ")"); } else { // For a declaration without parameters, eg. "T var();", suggest replacing // the parens with an initializer to turn the declaration into a variable // declaration. const CXXRecordDecl *RD = RT->getAsCXXRecordDecl(); // Empty parens mean value-initialization, and no parens mean // default initialization. These are equivalent if the default // constructor is user-provided or if zero-initialization is a // no-op. 
if (RD && RD->hasDefinition() && (RD->isEmpty() || RD->hasUserProvidedDefaultConstructor())) S.Diag(DeclType.Loc, diag::note_empty_parens_default_ctor) << FixItHint::CreateRemoval(ParenRange); else { std::string Init = S.getFixItZeroInitializerForType(RT, ParenRange.getBegin()); if (Init.empty() && S.LangOpts.CPlusPlus11) Init = "{}"; if (!Init.empty()) S.Diag(DeclType.Loc, diag::note_empty_parens_zero_initialize) << FixItHint::CreateReplacement(ParenRange, Init); } } } /// Helper for figuring out the default CC for a function declarator type. If /// this is the outermost chunk, then we can determine the CC from the /// declarator context. If not, then this could be either a member function /// type or normal function type. static CallingConv getCCForDeclaratorChunk(Sema &S, Declarator &D, const DeclaratorChunk::FunctionTypeInfo &FTI, unsigned ChunkIndex) { assert(D.getTypeObject(ChunkIndex).Kind == DeclaratorChunk::Function); bool IsCXXInstanceMethod = false; if (S.getLangOpts().CPlusPlus) { // Look inwards through parentheses to see if this chunk will form a // member pointer type or if we're the declarator. Any type attributes // between here and there will override the CC we choose here. unsigned I = ChunkIndex; bool FoundNonParen = false; while (I && !FoundNonParen) { --I; if (D.getTypeObject(I).Kind != DeclaratorChunk::Paren) FoundNonParen = true; } if (FoundNonParen) { // If we're not the declarator, we're a regular function type unless we're // in a member pointer. IsCXXInstanceMethod = D.getTypeObject(I).Kind == DeclaratorChunk::MemberPointer; } else if (D.getContext() == Declarator::LambdaExprContext) { // This can only be a call operator for a lambda, which is an instance // method. IsCXXInstanceMethod = true; } else { // We're the innermost decl chunk, so must be a function declarator. assert(D.isFunctionDeclarator()); // If we're inside a record, we're declaring a method, but it could be // explicitly or implicitly static. IsCXXInstanceMethod = D.isFirstDeclarationOfMember() && D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef && !D.isStaticMember(); } } CallingConv CC = S.Context.getDefaultCallingConvention(FTI.isVariadic, IsCXXInstanceMethod); // Attribute AT_OpenCLKernel affects the calling convention only on // the SPIR target, hence it cannot be treated as a calling // convention attribute. This is the simplest place to infer // "spir_kernel" for OpenCL kernels on SPIR. if (CC == CC_SpirFunction) { for (const AttributeList *Attr = D.getDeclSpec().getAttributes().getList(); Attr; Attr = Attr->getNext()) { if (Attr->getKind() == AttributeList::AT_OpenCLKernel) { CC = CC_SpirKernel; break; } } } return CC; } namespace { /// A simple notion of pointer kinds, which matches up with the various /// pointer declarators. enum class SimplePointerKind { Pointer, BlockPointer, MemberPointer, }; } IdentifierInfo *Sema::getNullabilityKeyword(NullabilityKind nullability) { switch (nullability) { case NullabilityKind::NonNull: if (!Ident__Nonnull) Ident__Nonnull = PP.getIdentifierInfo("_Nonnull"); return Ident__Nonnull; case NullabilityKind::Nullable: if (!Ident__Nullable) Ident__Nullable = PP.getIdentifierInfo("_Nullable"); return Ident__Nullable; case NullabilityKind::Unspecified: if (!Ident__Null_unspecified) Ident__Null_unspecified = PP.getIdentifierInfo("_Null_unspecified"); return Ident__Null_unspecified; } llvm_unreachable("Unknown nullability kind."); } /// Retrieve the identifier "NSError". 
IdentifierInfo *Sema::getNSErrorIdent() {
  if (!Ident_NSError)
    Ident_NSError = PP.getIdentifierInfo("NSError");

  return Ident_NSError;
}

/// Check whether there is a nullability attribute of any kind in the given
/// attribute list.
static bool hasNullabilityAttr(const AttributeList *attrs) {
  for (const AttributeList *attr = attrs; attr;
       attr = attr->getNext()) {
    if (attr->getKind() == AttributeList::AT_TypeNonNull ||
        attr->getKind() == AttributeList::AT_TypeNullable ||
        attr->getKind() == AttributeList::AT_TypeNullUnspecified)
      return true;
  }

  return false;
}

namespace {
  /// Describes the kind of a pointer a declarator describes.
  enum class PointerDeclaratorKind {
    // Not a pointer.
    NonPointer,
    // Single-level pointer.
    SingleLevelPointer,
    // Multi-level pointer (of any pointer kind).
    MultiLevelPointer,
    // CFFooRef*
    MaybePointerToCFRef,
    // CFErrorRef*
    CFErrorRefPointer,
    // NSError**
    NSErrorPointerPointer,
  };
}

/// Classify the given declarator, whose specified type is \c type, based on
/// what kind of pointer it refers to.
///
/// This is used to determine the default nullability.
static PointerDeclaratorKind classifyPointerDeclarator(Sema &S,
                                                       QualType type,
                                                       Declarator &declarator) {
  unsigned numNormalPointers = 0;

  // For any dependent type, we consider it a non-pointer.
  if (type->isDependentType())
    return PointerDeclaratorKind::NonPointer;

  // Look through the declarator chunks to identify pointers.
  for (unsigned i = 0, n = declarator.getNumTypeObjects(); i != n; ++i) {
    DeclaratorChunk &chunk = declarator.getTypeObject(i);
    switch (chunk.Kind) {
    case DeclaratorChunk::Array:
    case DeclaratorChunk::Function:
      break;

    case DeclaratorChunk::BlockPointer:
    case DeclaratorChunk::MemberPointer:
      return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
                                   : PointerDeclaratorKind::SingleLevelPointer;

    case DeclaratorChunk::Paren:
    case DeclaratorChunk::Reference:
      continue;

    case DeclaratorChunk::Pointer:
      ++numNormalPointers;
      if (numNormalPointers > 2)
        return PointerDeclaratorKind::MultiLevelPointer;
      continue;
    }
  }

  // Then, dig into the type specifier itself.
  unsigned numTypeSpecifierPointers = 0;
  do {
    // Decompose normal pointers.
    if (auto ptrType = type->getAs<PointerType>()) {
      ++numNormalPointers;

      if (numNormalPointers > 2)
        return PointerDeclaratorKind::MultiLevelPointer;

      type = ptrType->getPointeeType();
      ++numTypeSpecifierPointers;
      continue;
    }

    // Decompose block pointers.
    if (type->getAs<BlockPointerType>()) {
      return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
                                   : PointerDeclaratorKind::SingleLevelPointer;
    }

    // Decompose member pointers.
    if (type->getAs<MemberPointerType>()) {
      return numNormalPointers > 0 ? PointerDeclaratorKind::MultiLevelPointer
                                   : PointerDeclaratorKind::SingleLevelPointer;
    }

    // Look at Objective-C object pointers.
    if (auto objcObjectPtr = type->getAs<ObjCObjectPointerType>()) {
      ++numNormalPointers;
      ++numTypeSpecifierPointers;

      // If this is NSError**, report that.
      if (auto objcClassDecl = objcObjectPtr->getInterfaceDecl()) {
        if (objcClassDecl->getIdentifier() == S.getNSErrorIdent() &&
            numNormalPointers == 2 && numTypeSpecifierPointers < 2) {
          return PointerDeclaratorKind::NSErrorPointerPointer;
        }
      }

      break;
    }

    // Look at Objective-C class types.
    if (auto objcClass = type->getAs<ObjCInterfaceType>()) {
      if (objcClass->getInterface()->getIdentifier() == S.getNSErrorIdent()) {
        if (numNormalPointers == 2 && numTypeSpecifierPointers < 2)
          return PointerDeclaratorKind::NSErrorPointerPointer;
      }

      break;
    }

    // If at this point we haven't seen a pointer, we won't see one.
if (numNormalPointers == 0) return PointerDeclaratorKind::NonPointer; if (auto recordType = type->getAs<RecordType>()) { RecordDecl *recordDecl = recordType->getDecl(); bool isCFError = false; if (S.CFError) { // If we already know about CFError, test it directly. isCFError = (S.CFError == recordDecl); } else { // Check whether this is CFError, which we identify based on its bridge // to NSError. if (recordDecl->getTagKind() == TTK_Struct && numNormalPointers > 0) { if (auto bridgeAttr = recordDecl->getAttr<ObjCBridgeAttr>()) { if (bridgeAttr->getBridgedType() == S.getNSErrorIdent()) { S.CFError = recordDecl; isCFError = true; } } } } // If this is CFErrorRef*, report it as such. if (isCFError && numNormalPointers == 2 && numTypeSpecifierPointers < 2) { return PointerDeclaratorKind::CFErrorRefPointer; } break; } break; } while (true); switch (numNormalPointers) { case 0: return PointerDeclaratorKind::NonPointer; case 1: return PointerDeclaratorKind::SingleLevelPointer; case 2: return PointerDeclaratorKind::MaybePointerToCFRef; default: return PointerDeclaratorKind::MultiLevelPointer; } } static FileID getNullabilityCompletenessCheckFileID(Sema &S, SourceLocation loc) { // If we're anywhere in a function, method, or closure context, don't perform // completeness checks. for (DeclContext *ctx = S.CurContext; ctx; ctx = ctx->getParent()) { if (ctx->isFunctionOrMethod()) return FileID(); if (ctx->isFileContext()) break; } // We only care about the expansion location. loc = S.SourceMgr.getExpansionLoc(loc); FileID file = S.SourceMgr.getFileID(loc); if (file.isInvalid()) return FileID(); // Retrieve file information. bool invalid = false; const SrcMgr::SLocEntry &sloc = S.SourceMgr.getSLocEntry(file, &invalid); if (invalid || !sloc.isFile()) return FileID(); // We don't want to perform completeness checks on the main file or in // system headers. const SrcMgr::FileInfo &fileInfo = sloc.getFile(); if (fileInfo.getIncludeLoc().isInvalid()) return FileID(); if (fileInfo.getFileCharacteristic() != SrcMgr::C_User && S.Diags.getSuppressSystemWarnings()) { return FileID(); } return file; } /// Check for consistent use of nullability. static void checkNullabilityConsistency(TypeProcessingState &state, SimplePointerKind pointerKind, SourceLocation pointerLoc) { Sema &S = state.getSema(); // Determine which file we're performing consistency checking for. FileID file = getNullabilityCompletenessCheckFileID(S, pointerLoc); if (file.isInvalid()) return; // If we haven't seen any type nullability in this file, we won't warn now // about anything. FileNullability &fileNullability = S.NullabilityMap[file]; if (!fileNullability.SawTypeNullability) { // If this is the first pointer declarator in the file, record it. if (fileNullability.PointerLoc.isInvalid() && !S.Context.getDiagnostics().isIgnored(diag::warn_nullability_missing, pointerLoc)) { fileNullability.PointerLoc = pointerLoc; fileNullability.PointerKind = static_cast<unsigned>(pointerKind); } return; } // Complain about missing nullability. S.Diag(pointerLoc, diag::warn_nullability_missing) << static_cast<unsigned>(pointerKind); } static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state, QualType declSpecType, TypeSourceInfo *TInfo) { // The TypeSourceInfo that this function returns will not be a null type. // If there is an error, this function will fill in a dummy type as fallback. 
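  // Assumed illustration (not from the original source): for a declarator
  // such as 'int (*fp)(float)', declSpecType arrives here as 'int'; the
  // chunk walk below applies the function chunk first (yielding
  // 'int (float)') and the pointer chunk last, producing 'int (*)(float)'.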
QualType T = declSpecType; Declarator &D = state.getDeclarator(); Sema &S = state.getSema(); ASTContext &Context = S.Context; const LangOptions &LangOpts = S.getLangOpts(); // The name we're declaring, if any. DeclarationName Name; if (D.getIdentifier()) Name = D.getIdentifier(); // Does this declaration declare a typedef-name? bool IsTypedefName = D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef || D.getContext() == Declarator::AliasDeclContext || D.getContext() == Declarator::AliasTemplateContext; // Does T refer to a function type with a cv-qualifier or a ref-qualifier? bool IsQualifiedFunction = T->isFunctionProtoType() && (T->castAs<FunctionProtoType>()->getTypeQuals() != 0 || T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None); // If T is 'decltype(auto)', the only declarators we can have are parens // and at most one function declarator if this is a function declaration. if (const AutoType *AT = T->getAs<AutoType>()) { if (AT->isDecltypeAuto()) { for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) { unsigned Index = E - I - 1; DeclaratorChunk &DeclChunk = D.getTypeObject(Index); unsigned DiagId = diag::err_decltype_auto_compound_type; unsigned DiagKind = 0; switch (DeclChunk.Kind) { case DeclaratorChunk::Paren: continue; case DeclaratorChunk::Function: { unsigned FnIndex; if (D.isFunctionDeclarationContext() && D.isFunctionDeclarator(FnIndex) && FnIndex == Index) continue; DiagId = diag::err_decltype_auto_function_declarator_not_declaration; break; } case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: DiagKind = 0; break; case DeclaratorChunk::Reference: DiagKind = 1; break; case DeclaratorChunk::Array: DiagKind = 2; break; } S.Diag(DeclChunk.Loc, DiagId) << DiagKind; D.setInvalidType(true); break; } } } // Determine whether we should infer _Nonnull on pointer types. Optional<NullabilityKind> inferNullability; bool inferNullabilityCS = false; bool inferNullabilityInnerOnly = false; bool inferNullabilityInnerOnlyComplete = false; // Are we in an assume-nonnull region? bool inAssumeNonNullRegion = false; if (S.PP.getPragmaAssumeNonNullLoc().isValid() && !state.getDeclarator().isObjCWeakProperty() && !S.deduceWeakPropertyFromType(T)) { inAssumeNonNullRegion = true; // Determine which file we saw the assume-nonnull region in. FileID file = getNullabilityCompletenessCheckFileID( S, S.PP.getPragmaAssumeNonNullLoc()); if (!file.isInvalid()) { FileNullability &fileNullability = S.NullabilityMap[file]; // If we haven't seen any type nullability before, now we have. if (!fileNullability.SawTypeNullability) { if (fileNullability.PointerLoc.isValid()) { S.Diag(fileNullability.PointerLoc, diag::warn_nullability_missing) << static_cast<unsigned>(fileNullability.PointerKind); } fileNullability.SawTypeNullability = true; } } } // Whether to complain about missing nullability specifiers or not. enum { /// Never complain. CAMN_No, /// Complain on the inner pointers (but not the outermost /// pointer). CAMN_InnerPointers, /// Complain about any pointers that don't have nullability /// specified or inferred. CAMN_Yes } complainAboutMissingNullability = CAMN_No; unsigned NumPointersRemaining = 0; if (IsTypedefName) { // For typedefs, we do not infer any nullability (the default), // and we only complain about missing nullability specifiers on // inner pointers. 
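  // (Assumed example: 'typedef int *T;' may leave its outermost pointer
  // unannotated, but the inner pointer of 'typedef int **T;' is expected to
  // spell out _Nonnull/_Nullable and is otherwise diagnosed.)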
complainAboutMissingNullability = CAMN_InnerPointers; if (T->canHaveNullability() && !T->getNullability(S.Context)) { ++NumPointersRemaining; } for (unsigned i = 0, n = D.getNumTypeObjects(); i != n; ++i) { DeclaratorChunk &chunk = D.getTypeObject(i); switch (chunk.Kind) { case DeclaratorChunk::Array: case DeclaratorChunk::Function: break; case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: ++NumPointersRemaining; break; case DeclaratorChunk::Paren: case DeclaratorChunk::Reference: continue; case DeclaratorChunk::Pointer: ++NumPointersRemaining; continue; } } } else { bool isFunctionOrMethod = false; switch (auto context = state.getDeclarator().getContext()) { case Declarator::ObjCParameterContext: case Declarator::ObjCResultContext: case Declarator::PrototypeContext: case Declarator::TrailingReturnContext: isFunctionOrMethod = true; LLVM_FALLTHROUGH; // HLSL Change case Declarator::MemberContext: if (state.getDeclarator().isObjCIvar() && !isFunctionOrMethod) { complainAboutMissingNullability = CAMN_No; break; } LLVM_FALLTHROUGH; // HLSL Change case Declarator::FileContext: case Declarator::KNRTypeListContext: complainAboutMissingNullability = CAMN_Yes; // Nullability inference depends on the type and declarator. switch (classifyPointerDeclarator(S, T, D)) { case PointerDeclaratorKind::NonPointer: case PointerDeclaratorKind::MultiLevelPointer: // Cannot infer nullability. break; case PointerDeclaratorKind::SingleLevelPointer: // Infer _Nonnull if we are in an assumes-nonnull region. if (inAssumeNonNullRegion) { inferNullability = NullabilityKind::NonNull; inferNullabilityCS = (context == Declarator::ObjCParameterContext || context == Declarator::ObjCResultContext); } break; case PointerDeclaratorKind::CFErrorRefPointer: case PointerDeclaratorKind::NSErrorPointerPointer: // Within a function or method signature, infer _Nullable at both // levels. if (isFunctionOrMethod && inAssumeNonNullRegion) inferNullability = NullabilityKind::Nullable; break; case PointerDeclaratorKind::MaybePointerToCFRef: if (isFunctionOrMethod) { // On pointer-to-pointer parameters marked cf_returns_retained or // cf_returns_not_retained, if the outer pointer is explicit then // infer the inner pointer as _Nullable. auto hasCFReturnsAttr = [](const AttributeList *NextAttr) -> bool { while (NextAttr) { if (NextAttr->getKind() == AttributeList::AT_CFReturnsRetained || NextAttr->getKind() == AttributeList::AT_CFReturnsNotRetained) return true; NextAttr = NextAttr->getNext(); } return false; }; if (const auto *InnermostChunk = D.getInnermostNonParenChunk()) { if (hasCFReturnsAttr(D.getAttributes()) || hasCFReturnsAttr(InnermostChunk->getAttrs()) || hasCFReturnsAttr(D.getDeclSpec().getAttributes().getList())) { inferNullability = NullabilityKind::Nullable; inferNullabilityInnerOnly = true; } } } break; } break; case Declarator::ConversionIdContext: complainAboutMissingNullability = CAMN_Yes; break; case Declarator::AliasDeclContext: case Declarator::AliasTemplateContext: case Declarator::BlockContext: case Declarator::BlockLiteralContext: case Declarator::ConditionContext: case Declarator::CXXCatchContext: case Declarator::CXXNewContext: case Declarator::ForContext: case Declarator::LambdaExprContext: case Declarator::LambdaExprParameterContext: case Declarator::ObjCCatchContext: case Declarator::TemplateParamContext: case Declarator::TemplateTypeArgContext: case Declarator::TypeNameContext: // Don't infer in these contexts. 
break; } } // Local function that checks the nullability for a given pointer declarator. // Returns true if _Nonnull was inferred. auto inferPointerNullability = [&](SimplePointerKind pointerKind, SourceLocation pointerLoc, AttributeList *&attrs) -> AttributeList * { // We've seen a pointer. if (NumPointersRemaining > 0) --NumPointersRemaining; // If a nullability attribute is present, there's nothing to do. if (hasNullabilityAttr(attrs)) return nullptr; // If we're supposed to infer nullability, do so now. if (inferNullability && !inferNullabilityInnerOnlyComplete) { AttributeList::Syntax syntax = inferNullabilityCS ? AttributeList::AS_ContextSensitiveKeyword : AttributeList::AS_Keyword; AttributeList *nullabilityAttr = state.getDeclarator().getAttributePool() .create( S.getNullabilityKeyword( *inferNullability), SourceRange(pointerLoc), nullptr, SourceLocation(), nullptr, 0, syntax); spliceAttrIntoList(*nullabilityAttr, attrs); if (inferNullabilityCS) { state.getDeclarator().getMutableDeclSpec().getObjCQualifiers() ->setObjCDeclQualifier(ObjCDeclSpec::DQ_CSNullability); } if (inferNullabilityInnerOnly) inferNullabilityInnerOnlyComplete = true; return nullabilityAttr; } // If we're supposed to complain about missing nullability, do so // now if it's truly missing. switch (complainAboutMissingNullability) { case CAMN_No: break; case CAMN_InnerPointers: if (NumPointersRemaining == 0) break; LLVM_FALLTHROUGH; // HLSL Change case CAMN_Yes: checkNullabilityConsistency(state, pointerKind, pointerLoc); } return nullptr; }; // If the type itself could have nullability but does not, infer pointer // nullability and perform consistency checking. if (T->canHaveNullability() && S.ActiveTemplateInstantiations.empty() && !T->getNullability(S.Context)) { SimplePointerKind pointerKind = SimplePointerKind::Pointer; if (T->isBlockPointerType()) pointerKind = SimplePointerKind::BlockPointer; else if (T->isMemberPointerType()) pointerKind = SimplePointerKind::MemberPointer; if (auto *attr = inferPointerNullability( pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(), D.getMutableDeclSpec().getAttributes().getListRef())) { T = Context.getAttributedType( AttributedType::getNullabilityAttrKind(*inferNullability), T, T); attr->setUsedAsTypeAttr(); } } // Walk the DeclTypeInfo, building the recursive type as we go. // DeclTypeInfos are ordered from the identifier out, which is // opposite of what we want :). for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { unsigned chunkIndex = e - i - 1; state.setCurrentChunkIndex(chunkIndex); DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex); IsQualifiedFunction &= DeclType.Kind == DeclaratorChunk::Paren; switch (DeclType.Kind) { case DeclaratorChunk::Paren: T = S.BuildParenType(T); break; case DeclaratorChunk::BlockPointer: // If blocks are disabled, emit an error. if (!LangOpts.Blocks) S.Diag(DeclType.Loc, diag::err_blocks_disable); // Handle pointer nullability. inferPointerNullability(SimplePointerKind::BlockPointer, DeclType.Loc, DeclType.getAttrListRef()); T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name); if (DeclType.Cls.TypeQuals) T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals); break; case DeclaratorChunk::Pointer: // Verify that we're not building a pointer to pointer to function with // exception specification. if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) { S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec); D.setInvalidType(true); // Build the type anyway. 
} // Handle pointer nullability inferPointerNullability(SimplePointerKind::Pointer, DeclType.Loc, DeclType.getAttrListRef()); if (LangOpts.ObjC1 && T->getAs<ObjCObjectType>()) { T = Context.getObjCObjectPointerType(T); if (DeclType.Ptr.TypeQuals) T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals); break; } T = S.BuildPointerType(T, DeclType.Loc, Name); if (DeclType.Ptr.TypeQuals) T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals); break; case DeclaratorChunk::Reference: { // Verify that we're not building a reference to pointer to function with // exception specification. if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) { S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec); D.setInvalidType(true); // Build the type anyway. } T = S.BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name); if (DeclType.Ref.HasRestrict) T = S.BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict); break; } case DeclaratorChunk::Array: { // Verify that we're not building an array of pointers to function with // exception specification. if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) { S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec); D.setInvalidType(true); // Build the type anyway. } DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr; Expr *ArraySize = static_cast<Expr*>(ATI.NumElts); ArrayType::ArraySizeModifier ASM; if (ATI.isStar) ASM = ArrayType::Star; else if (ATI.hasStatic) ASM = ArrayType::Static; else ASM = ArrayType::Normal; if (ASM == ArrayType::Star && !D.isPrototypeContext()) { // FIXME: This check isn't quite right: it allows star in prototypes // for function definitions, and disallows some edge cases detailed // in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html if (!S.getLangOpts().HLSL) // HLSL Change: this already errors at parse time S.Diag(DeclType.Loc, diag::err_array_star_outside_prototype); ASM = ArrayType::Normal; D.setInvalidType(true); } // C99 6.7.5.2p1: The optional type qualifiers and the keyword static // shall appear only in a declaration of a function parameter with an // array type, ... if (!S.getLangOpts().HLSL) // HLSL Change - these already error at parse time if (ASM == ArrayType::Static || ATI.TypeQuals) { if (!(D.isPrototypeContext() || D.getContext() == Declarator::KNRTypeListContext)) { S.Diag(DeclType.Loc, diag::err_array_static_outside_prototype) << (ASM == ArrayType::Static ? "'static'" : "type qualifier"); // Remove the 'static' and the type qualifiers. if (ASM == ArrayType::Static) ASM = ArrayType::Normal; ATI.TypeQuals = 0; D.setInvalidType(true); } // C99 6.7.5.2p1: ... and then only in the outermost array type // derivation. unsigned x = chunkIndex; while (x != 0) { // Walk outwards along the declarator chunks. x--; const DeclaratorChunk &DC = D.getTypeObject(x); switch (DC.Kind) { case DeclaratorChunk::Paren: continue; case DeclaratorChunk::Array: case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) << (ASM == ArrayType::Static ? "'static'" : "type qualifier"); if (ASM == ArrayType::Static) ASM = ArrayType::Normal; ATI.TypeQuals = 0; D.setInvalidType(true); break; case DeclaratorChunk::Function: case DeclaratorChunk::BlockPointer: // These are invalid anyway, so just ignore. break; } } } const AutoType *AT = T->getContainedAutoType(); // Allow arrays of auto if we are a generic lambda parameter. // i.e. 
[](auto (&array)[5]) { return array[0]; }; OK if (AT && D.getContext() != Declarator::LambdaExprParameterContext) { // We've already diagnosed this for decltype(auto). if (!AT->isDecltypeAuto()) S.Diag(DeclType.Loc, diag::err_illegal_decl_array_of_auto) << getPrintableNameForEntity(Name) << T; T = QualType(); break; } T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals, SourceRange(DeclType.Loc, DeclType.EndLoc), Name); break; } case DeclaratorChunk::Function: { // If the function declarator has a prototype (i.e. it is not () and // does not have a K&R-style identifier list), then the arguments are part // of the type, otherwise the argument list is (). const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun; IsQualifiedFunction = FTI.TypeQuals || FTI.hasRefQualifier(); // Check for auto functions and trailing return type and adjust the // return type accordingly. if (!D.isInvalidType()) { // trailing-return-type is only required if we're declaring a function, // and not, for instance, a pointer to a function. if (D.getDeclSpec().containsPlaceholderType() && !FTI.hasTrailingReturnType() && chunkIndex == 0 && !S.getLangOpts().CPlusPlus14) { S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(), D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto ? diag::err_auto_missing_trailing_return : diag::err_deduced_return_type); T = Context.IntTy; D.setInvalidType(true); } else if (FTI.hasTrailingReturnType()) { // T must be exactly 'auto' at this point. See CWG issue 681. if (isa<ParenType>(T)) { S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(), diag::err_trailing_return_in_parens) << T << D.getDeclSpec().getSourceRange(); D.setInvalidType(true); } else if (D.getContext() != Declarator::LambdaExprContext && (T.hasQualifiers() || !isa<AutoType>(T) || cast<AutoType>(T)->isDecltypeAuto())) { S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(), diag::err_trailing_return_without_auto) << T << D.getDeclSpec().getSourceRange(); D.setInvalidType(true); } T = S.GetTypeFromParser(FTI.getTrailingReturnType(), &TInfo); if (T.isNull()) { // An error occurred parsing the trailing return type. T = Context.IntTy; D.setInvalidType(true); } } } // C99 6.7.5.3p1: The return type may not be a function or array type. // For conversion functions, we'll diagnose this particular error later. if (((T->isArrayType() && !S.getLangOpts().HLSL) || T->isFunctionType()) && // HLSL Change - Allow array return types (D.getName().getKind() != UnqualifiedId::IK_ConversionFunctionId)) { unsigned diagID = diag::err_func_returning_array_function; // Last processing chunk in block context means this function chunk // represents the block. if (chunkIndex == 0 && D.getContext() == Declarator::BlockLiteralContext) diagID = diag::err_block_returning_array_function; S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T; T = Context.IntTy; D.setInvalidType(true); } // Do not allow returning half FP value. // FIXME: This really should be in BuildFunctionType. if (T->isHalfType()) { if (S.getLangOpts().OpenCL) { if (!S.getOpenCLOptions().cl_khr_fp16) { S.Diag(D.getIdentifierLoc(), diag::err_opencl_half_return) << T; D.setInvalidType(true); } } else if (!S.getLangOpts().HalfArgsAndReturns) { S.Diag(D.getIdentifierLoc(), diag::err_parameters_retval_cannot_have_fp16_type) << 1; D.setInvalidType(true); } } // Methods cannot return interface types. All ObjC objects are // passed by reference. 
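  // (Assumed example: '- (NSObject)method;' is diagnosed below and
  // recovered as if it returned 'NSObject *'.)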
if (T->isObjCObjectType()) { SourceLocation DiagLoc, FixitLoc; if (TInfo) { DiagLoc = TInfo->getTypeLoc().getLocStart(); FixitLoc = S.getLocForEndOfToken(TInfo->getTypeLoc().getLocEnd()); } else { DiagLoc = D.getDeclSpec().getTypeSpecTypeLoc(); FixitLoc = S.getLocForEndOfToken(D.getDeclSpec().getLocEnd()); } S.Diag(DiagLoc, diag::err_object_cannot_be_passed_returned_by_value) << 0 << T << FixItHint::CreateInsertion(FixitLoc, "*"); T = Context.getObjCObjectPointerType(T); if (TInfo) { TypeLocBuilder TLB; TLB.pushFullCopy(TInfo->getTypeLoc()); ObjCObjectPointerTypeLoc TLoc = TLB.push<ObjCObjectPointerTypeLoc>(T); TLoc.setStarLoc(FixitLoc); TInfo = TLB.getTypeSourceInfo(Context, T); } D.setInvalidType(true); } // cv-qualifiers on return types are pointless except when the type is a // class type in C++. if ((T.getCVRQualifiers() || T->isAtomicType()) && !(S.getLangOpts().CPlusPlus && (T->isDependentType() || T->isRecordType()))) { if (T->isVoidType() && !S.getLangOpts().CPlusPlus && D.getFunctionDefinitionKind() == FDK_Definition) { // [6.9.1/3] qualified void return is invalid on a C // function definition. Apparently ok on declarations and // in C++ though (!) S.Diag(DeclType.Loc, diag::err_func_returning_qualified_void) << T; } else diagnoseRedundantReturnTypeQualifiers(S, T, D, chunkIndex); } // Objective-C ARC ownership qualifiers are ignored on the function // return type (by type canonicalization). Complain if this attribute // was written here. if (!LangOpts.HLSL && T.getQualifiers().hasObjCLifetime()) { // HLSL Change - no checks needed SourceLocation AttrLoc; if (chunkIndex + 1 < D.getNumTypeObjects()) { DeclaratorChunk ReturnTypeChunk = D.getTypeObject(chunkIndex + 1); for (const AttributeList *Attr = ReturnTypeChunk.getAttrs(); Attr; Attr = Attr->getNext()) { if (Attr->getKind() == AttributeList::AT_ObjCOwnership) { AttrLoc = Attr->getLoc(); break; } } } if (AttrLoc.isInvalid()) { for (const AttributeList *Attr = D.getDeclSpec().getAttributes().getList(); Attr; Attr = Attr->getNext()) { if (Attr->getKind() == AttributeList::AT_ObjCOwnership) { AttrLoc = Attr->getLoc(); break; } } } if (AttrLoc.isValid()) { // The ownership attributes are almost always written via // the predefined // __strong/__weak/__autoreleasing/__unsafe_unretained. if (AttrLoc.isMacroID()) AttrLoc = S.SourceMgr.getImmediateExpansionRange(AttrLoc).first; S.Diag(AttrLoc, diag::warn_arc_lifetime_result_type) << T.getQualifiers().getObjCLifetime(); } } if (LangOpts.CPlusPlus && D.getDeclSpec().hasTagDefinition()) { // C++ [dcl.fct]p6: // Types shall not be defined in return or parameter types. TagDecl *Tag = cast<TagDecl>(D.getDeclSpec().getRepAsDecl()); S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type) << Context.getTypeDeclType(Tag); } // Exception specs are not allowed in typedefs. Complain, but add it // anyway. if (IsTypedefName && FTI.getExceptionSpecType()) S.Diag(FTI.getExceptionSpecLoc(), diag::err_exception_spec_in_typedef) << (D.getContext() == Declarator::AliasDeclContext || D.getContext() == Declarator::AliasTemplateContext); // If we see "T var();" or "T var(T());" at block scope, it is probably // an attempt to initialize a variable, not a function declaration. if (FTI.isAmbiguous) warnAboutAmbiguousFunction(S, D, DeclType, T); FunctionType::ExtInfo EI(getCCForDeclaratorChunk(S, D, FTI, chunkIndex)); if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus) { // Simple void foo(), where the incoming T is the result type. 
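  // (Assumed illustration: in C, 'int f();' has no prototype and takes this
  // FunctionNoProtoType path; 'int f(void)' goes through the prototype
  // branch below instead.)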
T = Context.getFunctionNoProtoType(T, EI); } else { // We allow a zero-parameter variadic function in C if the // function is marked with the "overloadable" attribute. Scan // for this attribute now. if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus) { bool Overloadable = false; for (const AttributeList *Attrs = D.getAttributes(); Attrs; Attrs = Attrs->getNext()) { if (Attrs->getKind() == AttributeList::AT_Overloadable) { Overloadable = true; break; } } if (!Overloadable) S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_param); } if (FTI.NumParams && FTI.Params[0].Param == nullptr) { // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function // definition. S.Diag(FTI.Params[0].IdentLoc, diag::err_ident_list_in_fn_declaration); D.setInvalidType(true); // Recover by creating a K&R-style function type. T = Context.getFunctionNoProtoType(T, EI); break; } FunctionProtoType::ExtProtoInfo EPI; EPI.ExtInfo = EI; EPI.Variadic = FTI.isVariadic; EPI.HasTrailingReturn = FTI.hasTrailingReturnType(); EPI.TypeQuals = FTI.TypeQuals; EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None : FTI.RefQualifierIsLValueRef? RQ_LValue : RQ_RValue; // Otherwise, we have a function with a parameter list that is // potentially variadic. SmallVector<QualType, 16> ParamTys; ParamTys.reserve(FTI.NumParams); // HLSL Change Starts SmallVector<hlsl::ParameterModifier, 16> ParamMods; ParamMods.reserve(FTI.NumParams); // HLSL Change Ends SmallVector<bool, 16> ConsumedParameters; ConsumedParameters.reserve(FTI.NumParams); bool HasAnyConsumedParameters = false; for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) { ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param); QualType ParamTy = Param->getType(); assert(!ParamTy.isNull() && "Couldn't parse type?"); // Look for 'void'. void is allowed only as a single parameter to a // function with no other parameters (C99 6.7.5.3p10). We record // int(void) as a FunctionProtoType with an empty parameter list. if (ParamTy->isVoidType()) { // If this is something like 'float(int, void)', reject it. 'void' // is an incomplete type (C99 6.2.5p19) and function decls cannot // have parameters of incomplete type. if (FTI.NumParams != 1 || FTI.isVariadic) { S.Diag(DeclType.Loc, diag::err_void_only_param); ParamTy = Context.IntTy; Param->setType(ParamTy); } else if (FTI.Params[i].Ident) { // Reject, but continue to parse 'int(void abc)'. S.Diag(FTI.Params[i].IdentLoc, diag::err_param_with_void_type); ParamTy = Context.IntTy; Param->setType(ParamTy); } else { // Reject, but continue to parse 'float(const void)'. if (ParamTy.hasQualifiers()) S.Diag(DeclType.Loc, diag::err_void_param_qualified); // Do not add 'void' to the list. break; } } else if (ParamTy->isHalfType()) { // Disallow half FP parameters. // FIXME: This really should be in BuildFunctionType. 
if (S.getLangOpts().OpenCL) { if (!S.getOpenCLOptions().cl_khr_fp16) { S.Diag(Param->getLocation(), diag::err_opencl_half_param) << ParamTy; D.setInvalidType(); Param->setInvalidDecl(); } } else if (!S.getLangOpts().HalfArgsAndReturns) { S.Diag(Param->getLocation(), diag::err_parameters_retval_cannot_have_fp16_type) << 0; D.setInvalidType(); } } else if (!FTI.hasPrototype) { if (ParamTy->isPromotableIntegerType()) { ParamTy = Context.getPromotedIntegerType(ParamTy); Param->setKNRPromoted(true); } else if (const BuiltinType* BTy = ParamTy->getAs<BuiltinType>()) { if (BTy->getKind() == BuiltinType::Float) { ParamTy = Context.DoubleTy; Param->setKNRPromoted(true); } } } if (LangOpts.ObjCAutoRefCount) { bool Consumed = Param->hasAttr<NSConsumedAttr>(); ConsumedParameters.push_back(Consumed); HasAnyConsumedParameters |= Consumed; } ParamTys.push_back(ParamTy); ParamMods.push_back(Param->getParamModifiers()); } if (HasAnyConsumedParameters) EPI.ConsumedParameters = ConsumedParameters.data(); SmallVector<QualType, 4> Exceptions; SmallVector<ParsedType, 2> DynamicExceptions; SmallVector<SourceRange, 2> DynamicExceptionRanges; Expr *NoexceptExpr = nullptr; if (FTI.getExceptionSpecType() == EST_Dynamic) { // FIXME: It's rather inefficient to have to split into two vectors // here. unsigned N = FTI.NumExceptions; DynamicExceptions.reserve(N); DynamicExceptionRanges.reserve(N); for (unsigned I = 0; I != N; ++I) { DynamicExceptions.push_back(FTI.Exceptions[I].Ty); DynamicExceptionRanges.push_back(FTI.Exceptions[I].Range); } } else if (FTI.getExceptionSpecType() == EST_ComputedNoexcept) { NoexceptExpr = FTI.NoexceptExpr; } S.checkExceptionSpecification(D.isFunctionDeclarationContext(), FTI.getExceptionSpecType(), DynamicExceptions, DynamicExceptionRanges, NoexceptExpr, Exceptions, EPI.ExceptionSpec); T = Context.getFunctionType(T, ParamTys, EPI, ParamMods); } break; } case DeclaratorChunk::MemberPointer: // The scope spec must refer to a class, or be dependent. CXXScopeSpec &SS = DeclType.Mem.Scope(); QualType ClsType; // Handle pointer nullability. inferPointerNullability(SimplePointerKind::MemberPointer, DeclType.Loc, DeclType.getAttrListRef()); if (SS.isInvalid()) { // Avoid emitting extra errors if we already errored on the scope. D.setInvalidType(true); } else if (S.isDependentScopeSpecifier(SS) || dyn_cast_or_null<CXXRecordDecl>(S.computeDeclContext(SS))) { NestedNameSpecifier *NNS = SS.getScopeRep(); NestedNameSpecifier *NNSPrefix = NNS->getPrefix(); switch (NNS->getKind()) { case NestedNameSpecifier::Identifier: ClsType = Context.getDependentNameType(ETK_None, NNSPrefix, NNS->getAsIdentifier()); break; case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: llvm_unreachable("Nested-name-specifier must name a type"); case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: ClsType = QualType(NNS->getAsType(), 0); // Note: if the NNS has a prefix and ClsType is a nondependent // TemplateSpecializationType, then the NNS prefix is NOT included // in ClsType; hence we wrap ClsType into an ElaboratedType. // NOTE: in particular, no wrap occurs if ClsType already is an // Elaborated, DependentName, or DependentTemplateSpecialization. 
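  // (Assumed example: for 'int A::B<int>::*pm;', ClsType is the
  // TemplateSpecializationType 'B<int>' and the 'A::' prefix is re-attached
  // by the ElaboratedType wrap below.)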
if (NNSPrefix && isa<TemplateSpecializationType>(NNS->getAsType())) ClsType = Context.getElaboratedType(ETK_None, NNSPrefix, ClsType); break; } } else { S.Diag(DeclType.Mem.Scope().getBeginLoc(), diag::err_illegal_decl_mempointer_in_nonclass) << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name") << DeclType.Mem.Scope().getRange(); D.setInvalidType(true); } if (!ClsType.isNull()) T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier()); if (T.isNull()) { T = Context.IntTy; D.setInvalidType(true); } else if (DeclType.Mem.TypeQuals) { T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals); } break; } if (T.isNull()) { D.setInvalidType(true); T = Context.IntTy; } // See if there are any attributes on this declarator chunk. if (AttributeList *attrs = const_cast<AttributeList*>(DeclType.getAttrs())) processTypeAttrs(state, T, TAL_DeclChunk, attrs); } assert(!T.isNull() && "T must not be null after this point"); if (LangOpts.CPlusPlus && T->isFunctionType()) { const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>(); assert(FnTy && "Why oh why is there not a FunctionProtoType here?"); // C++ 8.3.5p4: // A cv-qualifier-seq shall only be part of the function type // for a nonstatic member function, the function type to which a pointer // to member refers, or the top-level function type of a function typedef // declaration. // // Core issue 547 also allows cv-qualifiers on function types that are // top-level template type arguments. bool FreeFunction; if (!D.getCXXScopeSpec().isSet()) { FreeFunction = ((D.getContext() != Declarator::MemberContext && D.getContext() != Declarator::LambdaExprContext) || D.getDeclSpec().isFriendSpecified()); } else { DeclContext *DC = S.computeDeclContext(D.getCXXScopeSpec()); FreeFunction = (DC && !DC->isRecord()); } // C++11 [dcl.fct]p6 (w/DR1417): // An attempt to specify a function type with a cv-qualifier-seq or a // ref-qualifier (including by typedef-name) is ill-formed unless it is: // - the function type for a non-static member function, // - the function type to which a pointer to member refers, // - the top-level function type of a function typedef declaration or // alias-declaration, // - the type-id in the default argument of a type-parameter, or // - the type-id of a template-argument for a type-parameter // // FIXME: Checking this here is insufficient. We accept-invalid on: // // template<typename T> struct S { void f(T); }; // S<int() const> s; // // ... for instance. 
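  // (Assumed example of the rule being enforced: 'typedef int F() const;'
  // is a valid typedef of a qualified function type, while a free function
  // 'int f() const;' is diagnosed just below.)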
if (IsQualifiedFunction && !(!FreeFunction && D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) && !IsTypedefName && D.getContext() != Declarator::TemplateTypeArgContext) { SourceLocation Loc = D.getLocStart(); SourceRange RemovalRange; unsigned I; if (D.isFunctionDeclarator(I)) { SmallVector<SourceLocation, 4> RemovalLocs; const DeclaratorChunk &Chunk = D.getTypeObject(I); assert(Chunk.Kind == DeclaratorChunk::Function); if (Chunk.Fun.hasRefQualifier()) RemovalLocs.push_back(Chunk.Fun.getRefQualifierLoc()); if (Chunk.Fun.TypeQuals & Qualifiers::Const) RemovalLocs.push_back(Chunk.Fun.getConstQualifierLoc()); if (Chunk.Fun.TypeQuals & Qualifiers::Volatile) RemovalLocs.push_back(Chunk.Fun.getVolatileQualifierLoc()); if (Chunk.Fun.TypeQuals & Qualifiers::Restrict) RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc()); if (!RemovalLocs.empty()) { std::sort(RemovalLocs.begin(), RemovalLocs.end(), BeforeThanCompare<SourceLocation>(S.getSourceManager())); RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back()); Loc = RemovalLocs.front(); } } S.Diag(Loc, diag::err_invalid_qualified_function_type) << FreeFunction << D.isFunctionDeclarator() << T << getFunctionQualifiersAsString(FnTy) << FixItHint::CreateRemoval(RemovalRange); // Strip the cv-qualifiers and ref-qualifiers from the type. FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo(); EPI.TypeQuals = 0; EPI.RefQualifier = RQ_None; T = Context.getFunctionType(FnTy->getReturnType(), FnTy->getParamTypes(), EPI, FnTy->getParamMods()); // Rebuild any parens around the identifier in the function type. for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { if (D.getTypeObject(i).Kind != DeclaratorChunk::Paren) break; T = S.BuildParenType(T); } } } // Apply any undistributed attributes from the declarator. if (AttributeList *attrs = D.getAttributes()) processTypeAttrs(state, T, TAL_DeclName, attrs); // Diagnose any ignored type attributes. state.diagnoseIgnoredTypeAttrs(T); // C++0x [dcl.constexpr]p9: // A constexpr specifier used in an object declaration declares the object // as const. if (D.getDeclSpec().isConstexprSpecified() && T->isObjectType()) { T.addConst(); } // If there was an ellipsis in the declarator, the declaration declares a // parameter pack whose type may be a pack expansion type. if (D.hasEllipsis()) { // C++0x [dcl.fct]p13: // A declarator-id or abstract-declarator containing an ellipsis shall // only be used in a parameter-declaration. Such a parameter-declaration // is a parameter pack (14.5.3). [...] switch (D.getContext()) { case Declarator::PrototypeContext: case Declarator::LambdaExprParameterContext: // C++0x [dcl.fct]p13: // [...] When it is part of a parameter-declaration-clause, the // parameter pack is a function parameter pack (14.5.3). The type T // of the declarator-id of the function parameter pack shall contain // a template parameter pack; each template parameter pack in T is // expanded by the function parameter pack. // // We represent function parameter packs as function parameters whose // type is a pack expansion. if (!T->containsUnexpandedParameterPack()) { S.Diag(D.getEllipsisLoc(), diag::err_function_parameter_pack_without_parameter_packs) << T << D.getSourceRange(); D.setEllipsisLoc(SourceLocation()); } else { T = Context.getPackExpansionType(T, None); } break; case Declarator::TemplateParamContext: // C++0x [temp.param]p15: // If a template-parameter is a [...] 
is a parameter-declaration that // declares a parameter pack (8.3.5), then the template-parameter is a // template parameter pack (14.5.3). // // Note: core issue 778 clarifies that, if there are any unexpanded // parameter packs in the type of the non-type template parameter, then // it expands those parameter packs. // HLSL Change Starts if (LangOpts.HLSL) { S.Diag(D.getEllipsisLoc(), diag::err_hlsl_variadic_templates); break; } // HLSL Change Ends if (T->containsUnexpandedParameterPack()) T = Context.getPackExpansionType(T, None); else S.Diag(D.getEllipsisLoc(), LangOpts.CPlusPlus11 ? diag::warn_cxx98_compat_variadic_templates : diag::ext_variadic_templates); break; case Declarator::FileContext: case Declarator::KNRTypeListContext: case Declarator::ObjCParameterContext: // FIXME: special diagnostic here? case Declarator::ObjCResultContext: // FIXME: special diagnostic here? case Declarator::TypeNameContext: case Declarator::CXXNewContext: case Declarator::AliasDeclContext: case Declarator::AliasTemplateContext: case Declarator::MemberContext: case Declarator::BlockContext: case Declarator::ForContext: case Declarator::ConditionContext: case Declarator::CXXCatchContext: case Declarator::ObjCCatchContext: case Declarator::BlockLiteralContext: case Declarator::LambdaExprContext: case Declarator::ConversionIdContext: case Declarator::TrailingReturnContext: case Declarator::TemplateTypeArgContext: // FIXME: We may want to allow parameter packs in block-literal contexts // in the future. S.Diag(D.getEllipsisLoc(), diag::err_ellipsis_in_declarator_not_parameter); D.setEllipsisLoc(SourceLocation()); break; } } assert(!T.isNull() && "T must not be null at the end of this function"); if (D.isInvalidType()) return Context.getTrivialTypeSourceInfo(T); return S.GetTypeSourceInfoForDeclarator(D, T, TInfo); } /// GetTypeForDeclarator - Convert the type for the specified /// declarator to Type instances. /// /// The result of this call will never be null, but the associated /// type may be a null type if there's an unrecoverable error. TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) { // Determine the type of the declarator. Not all forms of declarator // have a type. TypeProcessingState state(*this, D, S); // MS Change - add scope TypeSourceInfo *ReturnTypeInfo = nullptr; QualType T = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo); if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount) inferARCWriteback(state, T); // HLSL changes begin // If there is no explicit pack orientation on matrix types, but there is file-level // default orientation set by #pragma pack_matrix, apply it here. // There is no default if rewriting (in the absence of #pragma pack_matrix), since // it is agnostic to default orientation and we want to preserve the lack of annotation. // For codegen, it'd be nice to annotate everything here, but it causes error // messages to have pack orientation added to types, so we handle it through // the codegen option's default packing orientation flag. bool defaultRowMajor; if (getLangOpts().HLSL && hlsl::IsHLSLMatType(T) && !hlsl::HasHLSLMatOrientation(T) && D.getDeclSpec().TryGetDefaultMatrixPackRowMajor(defaultRowMajor)) { AttributedType::Kind AttributeKind = defaultRowMajor ? 
AttributedType::attr_hlsl_row_major : AttributedType::attr_hlsl_column_major; T = Context.getAttributedType(AttributeKind, T, T); } // HLSL changes end return GetFullTypeForDeclarator(state, T, ReturnTypeInfo); } static void transferARCOwnershipToDeclSpec(Sema &S, QualType &declSpecTy, Qualifiers::ObjCLifetime ownership) { if (declSpecTy->isObjCRetainableType() && declSpecTy.getObjCLifetime() == Qualifiers::OCL_None) { Qualifiers qs; qs.addObjCLifetime(ownership); declSpecTy = S.Context.getQualifiedType(declSpecTy, qs); } } static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state, Qualifiers::ObjCLifetime ownership, unsigned chunkIndex) { Sema &S = state.getSema(); Declarator &D = state.getDeclarator(); // Look for an explicit lifetime attribute. DeclaratorChunk &chunk = D.getTypeObject(chunkIndex); for (const AttributeList *attr = chunk.getAttrs(); attr; attr = attr->getNext()) if (attr->getKind() == AttributeList::AT_ObjCOwnership) return; const char *attrStr = nullptr; switch (ownership) { case Qualifiers::OCL_None: llvm_unreachable("no ownership!"); case Qualifiers::OCL_ExplicitNone: attrStr = "none"; break; case Qualifiers::OCL_Strong: attrStr = "strong"; break; case Qualifiers::OCL_Weak: attrStr = "weak"; break; case Qualifiers::OCL_Autoreleasing: attrStr = "autoreleasing"; break; } IdentifierLoc *Arg = new (S.Context) IdentifierLoc; Arg->Ident = &S.Context.Idents.get(attrStr); Arg->Loc = SourceLocation(); ArgsUnion Args(Arg); // If there wasn't one, add one (with an invalid source location // so that we don't make an AttributedType for it). AttributeList *attr = D.getAttributePool() .create(&S.Context.Idents.get("objc_ownership"), SourceLocation(), /*scope*/ nullptr, SourceLocation(), /*args*/ &Args, 1, AttributeList::AS_GNU); spliceAttrIntoList(*attr, chunk.getAttrListRef()); // TODO: mark whether we did this inference? } /// \brief Used for transferring ownership in casts resulting in l-values. static void transferARCOwnership(TypeProcessingState &state, QualType &declSpecTy, Qualifiers::ObjCLifetime ownership) { Sema &S = state.getSema(); Declarator &D = state.getDeclarator(); int inner = -1; bool hasIndirection = false; for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { DeclaratorChunk &chunk = D.getTypeObject(i); switch (chunk.Kind) { case DeclaratorChunk::Paren: // Ignore parens. 
break; case DeclaratorChunk::Array: case DeclaratorChunk::Reference: case DeclaratorChunk::Pointer: if (inner != -1) hasIndirection = true; inner = i; break; case DeclaratorChunk::BlockPointer: if (inner != -1) transferARCOwnershipToDeclaratorChunk(state, ownership, i); return; case DeclaratorChunk::Function: case DeclaratorChunk::MemberPointer: return; } } if (inner == -1) return; DeclaratorChunk &chunk = D.getTypeObject(inner); if (chunk.Kind == DeclaratorChunk::Pointer) { if (declSpecTy->isObjCRetainableType()) return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership); if (declSpecTy->isObjCObjectType() && hasIndirection) return transferARCOwnershipToDeclaratorChunk(state, ownership, inner); } else { assert(chunk.Kind == DeclaratorChunk::Array || chunk.Kind == DeclaratorChunk::Reference); return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership); } } TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) { TypeProcessingState state(*this, D); TypeSourceInfo *ReturnTypeInfo = nullptr; QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo); if (getLangOpts().ObjCAutoRefCount) { Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy); if (ownership != Qualifiers::OCL_None) transferARCOwnership(state, declSpecTy, ownership); } return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo); } /// Map an AttributedType::Kind to an AttributeList::Kind. static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) { switch (kind) { case AttributedType::attr_address_space: return AttributeList::AT_AddressSpace; case AttributedType::attr_regparm: return AttributeList::AT_Regparm; case AttributedType::attr_vector_size: return AttributeList::AT_VectorSize; case AttributedType::attr_neon_vector_type: return AttributeList::AT_NeonVectorType; case AttributedType::attr_neon_polyvector_type: return AttributeList::AT_NeonPolyVectorType; case AttributedType::attr_objc_gc: return AttributeList::AT_ObjCGC; case AttributedType::attr_objc_ownership: return AttributeList::AT_ObjCOwnership; case AttributedType::attr_noreturn: return AttributeList::AT_NoReturn; case AttributedType::attr_cdecl: return AttributeList::AT_CDecl; case AttributedType::attr_fastcall: return AttributeList::AT_FastCall; case AttributedType::attr_stdcall: return AttributeList::AT_StdCall; case AttributedType::attr_thiscall: return AttributeList::AT_ThisCall; case AttributedType::attr_pascal: return AttributeList::AT_Pascal; case AttributedType::attr_vectorcall: return AttributeList::AT_VectorCall; case AttributedType::attr_pcs: case AttributedType::attr_pcs_vfp: return AttributeList::AT_Pcs; case AttributedType::attr_inteloclbicc: return AttributeList::AT_IntelOclBicc; case AttributedType::attr_ms_abi: return AttributeList::AT_MSABI; case AttributedType::attr_sysv_abi: return AttributeList::AT_SysVABI; case AttributedType::attr_ptr32: return AttributeList::AT_Ptr32; case AttributedType::attr_ptr64: return AttributeList::AT_Ptr64; case AttributedType::attr_sptr: return AttributeList::AT_SPtr; case AttributedType::attr_uptr: return AttributeList::AT_UPtr; case AttributedType::attr_nonnull: return AttributeList::AT_TypeNonNull; case AttributedType::attr_nullable: return AttributeList::AT_TypeNullable; case AttributedType::attr_null_unspecified: return AttributeList::AT_TypeNullUnspecified; case AttributedType::attr_objc_kindof: return AttributeList::AT_ObjCKindOf; // HLSL Change Begins case AttributedType::attr_hlsl_unorm: return 
AttributeList::AT_HLSLUnorm; case AttributedType::attr_hlsl_snorm: return AttributeList::AT_HLSLSnorm; case AttributedType::attr_hlsl_row_major: return AttributeList::AT_HLSLRowMajor; case AttributedType::attr_hlsl_column_major: return AttributeList::AT_HLSLColumnMajor; case AttributedType::attr_hlsl_globallycoherent: return AttributeList::AT_HLSLGloballyCoherent; // HLSL Change Ends } llvm_unreachable("unexpected attribute kind!"); } static void fillAttributedTypeLoc(AttributedTypeLoc TL, const AttributeList *attrs, const AttributeList *DeclAttrs = nullptr) { // HLSL changes begin // Don't fill the location info for matrix orientation attributes if (TL.getAttrKind() == AttributedType::attr_hlsl_row_major || TL.getAttrKind() == AttributedType::attr_hlsl_column_major) return; // HLSL changes end // DeclAttrs and attrs cannot both be empty. assert((attrs || DeclAttrs) && "no type attributes in the expected location!"); AttributeList::Kind parsedKind = getAttrListKind(TL.getAttrKind()); // Try to search for an attribute of matching kind in attrs list. while (attrs && attrs->getKind() != parsedKind) attrs = attrs->getNext(); if (!attrs) { // No matching type attribute in attrs list found. // Try searching through C++11 attributes in the declarator attribute list. while (DeclAttrs && (!DeclAttrs->isCXX11Attribute() || DeclAttrs->getKind() != parsedKind)) DeclAttrs = DeclAttrs->getNext(); attrs = DeclAttrs; } assert(attrs && "no matching type attribute in expected location!"); TL.setAttrNameLoc(attrs->getLoc()); if (TL.hasAttrExprOperand()) { assert(attrs->isArgExpr(0) && "mismatched attribute operand kind"); TL.setAttrExprOperand(attrs->getArgAsExpr(0)); } else if (TL.hasAttrEnumOperand()) { assert((attrs->isArgIdent(0) || attrs->isArgExpr(0)) && "unexpected attribute operand kind"); if (attrs->isArgIdent(0)) TL.setAttrEnumOperandLoc(attrs->getArgAsIdent(0)->Loc); else TL.setAttrEnumOperandLoc(attrs->getArgAsExpr(0)->getExprLoc()); } // FIXME: preserve this information to here. if (TL.hasAttrOperand()) TL.setAttrOperandParensRange(SourceRange()); } namespace { class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> { ASTContext &Context; const DeclSpec &DS; public: TypeSpecLocFiller(ASTContext &Context, const DeclSpec &DS) : Context(Context), DS(DS) {} void VisitAttributedTypeLoc(AttributedTypeLoc TL) { fillAttributedTypeLoc(TL, DS.getAttributes().getList()); Visit(TL.getModifiedLoc()); } void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) { Visit(TL.getUnqualifiedLoc()); } void VisitTypedefTypeLoc(TypedefTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeLoc()); } void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeLoc()); // FIXME. We should have DS.getTypeSpecTypeEndLoc(). But it requires an // additional field. What we have is good enough for display of the location // of the 'fixit' on the interface name. TL.setNameEndLoc(DS.getLocEnd()); } void VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) { TypeSourceInfo *RepTInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo); TL.copy(RepTInfo->getTypeLoc()); } void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) { TypeSourceInfo *RepTInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &RepTInfo); TL.copy(RepTInfo->getTypeLoc()); } void VisitTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) { TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); // If we got no declarator info from previous Sema routines, // just fill with the typespec loc.
if (!TInfo) { TL.initialize(Context, DS.getTypeSpecTypeNameLoc()); return; } TypeLoc OldTL = TInfo->getTypeLoc(); if (TInfo->getType()->getAs<ElaboratedType>()) { ElaboratedTypeLoc ElabTL = OldTL.castAs<ElaboratedTypeLoc>(); TemplateSpecializationTypeLoc NamedTL = ElabTL.getNamedTypeLoc() .castAs<TemplateSpecializationTypeLoc>(); TL.copy(NamedTL); } else { TL.copy(OldTL.castAs<TemplateSpecializationTypeLoc>()); assert(TL.getRAngleLoc() == OldTL.castAs<TemplateSpecializationTypeLoc>().getRAngleLoc()); } } void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) { assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr); TL.setTypeofLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); } void VisitTypeOfTypeLoc(TypeOfTypeLoc TL) { assert(DS.getTypeSpecType() == DeclSpec::TST_typeofType); TL.setTypeofLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); assert(DS.getRepAsType()); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); TL.setUnderlyingTInfo(TInfo); } void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) { // FIXME: This holds only because we only have one unary transform. assert(DS.getTypeSpecType() == DeclSpec::TST_underlyingType); TL.setKWLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); assert(DS.getRepAsType()); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); TL.setUnderlyingTInfo(TInfo); } void VisitBuiltinTypeLoc(BuiltinTypeLoc TL) { // By default, use the source location of the type specifier. TL.setBuiltinLoc(DS.getTypeSpecTypeLoc()); if (TL.needsExtraLocalData()) { // Set info for the written builtin specifiers. TL.getWrittenBuiltinSpecs() = DS.getWrittenBuiltinSpecs(); // Try to have a meaningful source location. if (TL.getWrittenSignSpec() != TSS_unspecified) // Sign spec loc overrides the others (e.g., 'unsigned long'). TL.setBuiltinLoc(DS.getTypeSpecSignLoc()); else if (TL.getWrittenWidthSpec() != TSW_unspecified) // Width spec loc overrides type spec loc (e.g., 'short int'). TL.setBuiltinLoc(DS.getTypeSpecWidthLoc()); } } void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) { ElaboratedTypeKeyword Keyword = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType()); if (DS.getTypeSpecType() == TST_typename) { TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); if (TInfo) { TL.copy(TInfo->getTypeLoc().castAs<ElaboratedTypeLoc>()); return; } } TL.setElaboratedKeywordLoc(Keyword != ETK_None ? DS.getTypeSpecTypeLoc() : SourceLocation()); const CXXScopeSpec& SS = DS.getTypeSpecScope(); TL.setQualifierLoc(SS.getWithLocInContext(Context)); Visit(TL.getNextTypeLoc().getUnqualifiedLoc()); } void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) { assert(DS.getTypeSpecType() == TST_typename); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); assert(TInfo); TL.copy(TInfo->getTypeLoc().castAs<DependentNameTypeLoc>()); } void VisitDependentTemplateSpecializationTypeLoc( DependentTemplateSpecializationTypeLoc TL) { assert(DS.getTypeSpecType() == TST_typename); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); assert(TInfo); TL.copy( TInfo->getTypeLoc().castAs<DependentTemplateSpecializationTypeLoc>()); } void VisitTagTypeLoc(TagTypeLoc TL) { TL.setNameLoc(DS.getTypeSpecTypeNameLoc()); } void VisitAtomicTypeLoc(AtomicTypeLoc TL) { // An AtomicTypeLoc can come from either an _Atomic(...) type specifier // or an _Atomic qualifier. 
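// For example (illustrative, standard C11 syntax): '_Atomic(int) x;' uses the // specifier form (TST_atomic) handled first, while '_Atomic int x;' uses the // qualifier form handled in the else branch below.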
if (DS.getTypeSpecType() == DeclSpec::TST_atomic) { TL.setKWLoc(DS.getTypeSpecTypeLoc()); TL.setParensRange(DS.getTypeofParensRange()); TypeSourceInfo *TInfo = nullptr; Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo); assert(TInfo); TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc()); } else { TL.setKWLoc(DS.getAtomicSpecLoc()); // No parens, to indicate this was spelled as an _Atomic qualifier. TL.setParensRange(SourceRange()); Visit(TL.getValueLoc()); } } void VisitTypeLoc(TypeLoc TL) { // FIXME: add other typespec types and change this to an assert. TL.initialize(Context, DS.getTypeSpecTypeLoc()); } }; class DeclaratorLocFiller : public TypeLocVisitor<DeclaratorLocFiller> { ASTContext &Context; const DeclaratorChunk &Chunk; public: DeclaratorLocFiller(ASTContext &Context, const DeclaratorChunk &Chunk) : Context(Context), Chunk(Chunk) {} void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) { llvm_unreachable("qualified type locs not expected here!"); } void VisitDecayedTypeLoc(DecayedTypeLoc TL) { llvm_unreachable("decayed type locs not expected here!"); } void VisitAttributedTypeLoc(AttributedTypeLoc TL) { fillAttributedTypeLoc(TL, Chunk.getAttrs()); } void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) { // nothing } void VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::BlockPointer); TL.setCaretLoc(Chunk.Loc); } void VisitPointerTypeLoc(PointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Pointer); TL.setStarLoc(Chunk.Loc); } void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Pointer); TL.setStarLoc(Chunk.Loc); } void VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::MemberPointer); const CXXScopeSpec& SS = Chunk.Mem.Scope(); NestedNameSpecifierLoc NNSLoc = SS.getWithLocInContext(Context); const Type* ClsTy = TL.getClass(); QualType ClsQT = QualType(ClsTy, 0); TypeSourceInfo *ClsTInfo = Context.CreateTypeSourceInfo(ClsQT, 0); // Now copy source location info into the type loc component. TypeLoc ClsTL = ClsTInfo->getTypeLoc(); switch (NNSLoc.getNestedNameSpecifier()->getKind()) { case NestedNameSpecifier::Identifier: assert(isa<DependentNameType>(ClsTy) && "Unexpected TypeLoc"); { DependentNameTypeLoc DNTLoc = ClsTL.castAs<DependentNameTypeLoc>(); DNTLoc.setElaboratedKeywordLoc(SourceLocation()); DNTLoc.setQualifierLoc(NNSLoc.getPrefix()); DNTLoc.setNameLoc(NNSLoc.getLocalBeginLoc()); } break; case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: if (isa<ElaboratedType>(ClsTy)) { ElaboratedTypeLoc ETLoc = ClsTL.castAs<ElaboratedTypeLoc>(); ETLoc.setElaboratedKeywordLoc(SourceLocation()); ETLoc.setQualifierLoc(NNSLoc.getPrefix()); TypeLoc NamedTL = ETLoc.getNamedTypeLoc(); NamedTL.initializeFullCopy(NNSLoc.getTypeLoc()); } else { ClsTL.initializeFullCopy(NNSLoc.getTypeLoc()); } break; case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: llvm_unreachable("Nested-name-specifier must name a type"); } // Finally fill in MemberPointerLocInfo fields. TL.setStarLoc(Chunk.Loc); TL.setClassTInfo(ClsTInfo); } void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Reference); // 'Amp' is misleading: this might have been originally // spelled with AmpAmp.
TL.setAmpLoc(Chunk.Loc); } void VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Reference); assert(!Chunk.Ref.LValueRef); TL.setAmpAmpLoc(Chunk.Loc); } void VisitArrayTypeLoc(ArrayTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Array); TL.setLBracketLoc(Chunk.Loc); TL.setRBracketLoc(Chunk.EndLoc); TL.setSizeExpr(static_cast<Expr*>(Chunk.Arr.NumElts)); } void VisitFunctionTypeLoc(FunctionTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Function); TL.setLocalRangeBegin(Chunk.Loc); TL.setLocalRangeEnd(Chunk.EndLoc); const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun; TL.setLParenLoc(FTI.getLParenLoc()); TL.setRParenLoc(FTI.getRParenLoc()); for (unsigned i = 0, e = TL.getNumParams(), tpi = 0; i != e; ++i) { ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param); TL.setParam(tpi++, Param); } // FIXME: exception specs } void VisitParenTypeLoc(ParenTypeLoc TL) { assert(Chunk.Kind == DeclaratorChunk::Paren); TL.setLParenLoc(Chunk.Loc); TL.setRParenLoc(Chunk.EndLoc); } void VisitTypeLoc(TypeLoc TL) { llvm_unreachable("unsupported TypeLoc kind in declarator!"); } }; } static void fillAtomicQualLoc(AtomicTypeLoc ATL, const DeclaratorChunk &Chunk) { SourceLocation Loc; switch (Chunk.Kind) { case DeclaratorChunk::Function: case DeclaratorChunk::Array: case DeclaratorChunk::Paren: llvm_unreachable("cannot be _Atomic qualified"); case DeclaratorChunk::Pointer: Loc = SourceLocation::getFromRawEncoding(Chunk.Ptr.AtomicQualLoc); break; case DeclaratorChunk::BlockPointer: case DeclaratorChunk::Reference: case DeclaratorChunk::MemberPointer: // FIXME: Provide a source location for the _Atomic keyword. break; } ATL.setKWLoc(Loc); ATL.setParensRange(SourceRange()); } /// \brief Create and instantiate a TypeSourceInfo with type source information. /// /// \param T QualType referring to the type as written in source code. /// /// \param ReturnTypeInfo For declarators whose return type does not show /// up in the normal place in the declaration specifiers (such as a C++ /// conversion function), this pointer will refer to a type source information /// for that return type. TypeSourceInfo * Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo) { TypeSourceInfo *TInfo = Context.CreateTypeSourceInfo(T); UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc(); const AttributeList *DeclAttrs = D.getAttributes(); // Handle parameter packs whose type is a pack expansion. if (isa<PackExpansionType>(T)) { CurrTL.castAs<PackExpansionTypeLoc>().setEllipsisLoc(D.getEllipsisLoc()); CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc(); } for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) { // An AtomicTypeLoc might be produced by an atomic qualifier in this // declarator chunk. if (AtomicTypeLoc ATL = CurrTL.getAs<AtomicTypeLoc>()) { fillAtomicQualLoc(ATL, D.getTypeObject(i)); CurrTL = ATL.getValueLoc().getUnqualifiedLoc(); } while (AttributedTypeLoc TL = CurrTL.getAs<AttributedTypeLoc>()) { fillAttributedTypeLoc(TL, D.getTypeObject(i).getAttrs(), DeclAttrs); CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc(); } // FIXME: Ordering here? while (AdjustedTypeLoc TL = CurrTL.getAs<AdjustedTypeLoc>()) CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc(); DeclaratorLocFiller(Context, D.getTypeObject(i)).Visit(CurrTL); CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc(); } // If we have different source information for the return type, use // that. This really only applies to C++ conversion functions. 
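// For example, in 'operator int()' the 'int' is written as part of the // conversion-function-id rather than in the decl-spec, so its source location // information arrives here as ReturnTypeInfo.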
if (ReturnTypeInfo) { TypeLoc TL = ReturnTypeInfo->getTypeLoc(); assert(TL.getFullDataSize() == CurrTL.getFullDataSize()); memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize()); } else { TypeSpecLocFiller(Context, D.getDeclSpec()).Visit(CurrTL); } return TInfo; } /// \brief Create a LocInfoType to hold the given QualType and TypeSourceInfo. ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) { // FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser // and Sema during declaration parsing. Try deallocating/caching them when // it's appropriate, instead of allocating them and keeping them around. LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType), TypeAlignment); new (LocT) LocInfoType(T, TInfo); assert(LocT->getTypeClass() != T->getTypeClass() && "LocInfoType's TypeClass conflicts with an existing Type class"); return ParsedType::make(QualType(LocT, 0)); } void LocInfoType::getAsStringInternal(std::string &Str, const PrintingPolicy &Policy) const { llvm_unreachable("LocInfoType leaked into the type system; an opaque TypeTy*" " was used directly instead of getting the QualType through" " GetTypeFromParser"); } TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) { // C99 6.7.6: Type names have no identifier. This is already validated by // the parser. assert(D.getIdentifier() == nullptr && "Type name should have no identifier!"); TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); QualType T = TInfo->getType(); if (D.isInvalidType()) return true; // Make sure there are no unused decl attributes on the declarator. // We don't want to do this for ObjC parameters because we're going // to apply them to the actual parameter declaration. // Likewise, we don't want to do this for alias declarations, because // we are actually going to build a declaration from this eventually. if (D.getContext() != Declarator::ObjCParameterContext && D.getContext() != Declarator::AliasDeclContext && D.getContext() != Declarator::AliasTemplateContext) checkUnusedDeclAttributes(D); if (getLangOpts().CPlusPlus) { // Check that there are no default arguments (C++ only). CheckExtraCXXDefaultArguments(D); } return CreateParsedType(T, TInfo); } ParsedType Sema::ActOnObjCInstanceType(SourceLocation Loc) { QualType T = Context.getObjCInstanceType(); TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(T, Loc); return CreateParsedType(T, TInfo); } //===----------------------------------------------------------------------===// // Type Attribute Processing //===----------------------------------------------------------------------===// /// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the /// specified type. The attribute contains 1 argument, the id of the address /// space for the type. static void HandleAddressSpaceTypeAttribute(QualType &Type, const AttributeList &Attr, Sema &S){ // If this type is already address space qualified, reject it. // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified by // qualifiers for two or more different address spaces." if (Type.getAddressSpace()) { S.Diag(Attr.getLoc(), diag::err_attribute_address_multiple_qualifiers); Attr.setInvalid(); return; } // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "A function type shall not be // qualified by an address-space qualifier." 
if (Type->isFunctionType()) { S.Diag(Attr.getLoc(), diag::err_attribute_address_function_type); Attr.setInvalid(); return; } unsigned ASIdx; if (Attr.getKind() == AttributeList::AT_AddressSpace) { // Check the attribute arguments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr.getName() << 1; Attr.setInvalid(); return; } Expr *ASArgExpr = static_cast<Expr *>(Attr.getArgAsExpr(0)); llvm::APSInt addrSpace(32); if (ASArgExpr->isTypeDependent() || ASArgExpr->isValueDependent() || !ASArgExpr->isIntegerConstantExpr(addrSpace, S.Context)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIntegerConstant << ASArgExpr->getSourceRange(); Attr.setInvalid(); return; } // Bounds checking. if (addrSpace.isSigned()) { if (addrSpace.isNegative()) { S.Diag(Attr.getLoc(), diag::err_attribute_address_space_negative) << ASArgExpr->getSourceRange(); Attr.setInvalid(); return; } addrSpace.setIsSigned(false); } llvm::APSInt max(addrSpace.getBitWidth()); max = Qualifiers::MaxAddressSpace; if (addrSpace > max) { S.Diag(Attr.getLoc(), diag::err_attribute_address_space_too_high) << int(Qualifiers::MaxAddressSpace) << ASArgExpr->getSourceRange(); Attr.setInvalid(); return; } ASIdx = static_cast<unsigned>(addrSpace.getZExtValue()); } else { // The keyword-based type attributes imply which address space to use. switch (Attr.getKind()) { case AttributeList::AT_OpenCLGlobalAddressSpace: ASIdx = LangAS::opencl_global; break; case AttributeList::AT_OpenCLLocalAddressSpace: ASIdx = LangAS::opencl_local; break; case AttributeList::AT_OpenCLConstantAddressSpace: ASIdx = LangAS::opencl_constant; break; case AttributeList::AT_OpenCLGenericAddressSpace: ASIdx = LangAS::opencl_generic; break; default: assert(Attr.getKind() == AttributeList::AT_OpenCLPrivateAddressSpace); ASIdx = 0; break; } } Type = S.Context.getAddrSpaceQualType(Type, ASIdx); } /// Does this type have a "direct" ownership qualifier? That is, /// is it written like "__strong id", as opposed to something like /// "typeof(foo)", where that happens to be strong? static bool hasDirectOwnershipQualifier(QualType type) { // Fast path: no qualifier at all. assert(type.getQualifiers().hasObjCLifetime()); while (true) { // __strong id if (const AttributedType *attr = dyn_cast<AttributedType>(type)) { if (attr->getAttrKind() == AttributedType::attr_objc_ownership) return true; type = attr->getModifiedType(); // X *__strong (...) } else if (const ParenType *paren = dyn_cast<ParenType>(type)) { type = paren->getInnerType(); // That's it for things we want to complain about. In particular, // we do not want to look through typedefs, typeof(expr), // typeof(type), or any other way that the type is somehow // abstracted. } else { return false; } } } /// handleObjCOwnershipTypeAttr - Process an objc_ownership /// attribute on the specified type. /// /// Returns 'true' if the attribute was handled. static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType &type) { bool NonObjCPointer = false; if (!type->isDependentType() && !type->isUndeducedType()) { if (const PointerType *ptr = type->getAs<PointerType>()) { QualType pointee = ptr->getPointeeType(); if (pointee->isObjCRetainableType() || pointee->isPointerType()) return false; // It is important not to lose the source info that there was an attribute // applied to a non-ObjC pointer. We will create an attributed type but // its type will be the same as the original type.
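// (e.g. an ownership qualifier such as '__strong' written on a plain 'int *')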
NonObjCPointer = true; } else if (!type->isObjCRetainableType()) { return false; } // Don't accept an ownership attribute in the declspec if it would // just be the return type of a block pointer. if (state.isProcessingDeclSpec()) { Declarator &D = state.getDeclarator(); if (maybeMovePastReturnType(D, D.getNumTypeObjects(), /*onlyBlockPointers=*/true)) return false; } } Sema &S = state.getSema(); SourceLocation AttrLoc = attr.getLoc(); if (AttrLoc.isMacroID()) AttrLoc = S.getSourceManager().getImmediateExpansionRange(AttrLoc).first; if (!attr.isArgIdent(0)) { S.Diag(AttrLoc, diag::err_attribute_argument_type) << attr.getName() << AANT_ArgumentString; attr.setInvalid(); return true; } // Consume lifetime attributes without further comment outside of // ARC mode. if (!S.getLangOpts().ObjCAutoRefCount) return true; IdentifierInfo *II = attr.getArgAsIdent(0)->Ident; Qualifiers::ObjCLifetime lifetime; if (II->isStr("none")) lifetime = Qualifiers::OCL_ExplicitNone; else if (II->isStr("strong")) lifetime = Qualifiers::OCL_Strong; else if (II->isStr("weak")) lifetime = Qualifiers::OCL_Weak; else if (II->isStr("autoreleasing")) lifetime = Qualifiers::OCL_Autoreleasing; else { S.Diag(AttrLoc, diag::warn_attribute_type_not_supported) << attr.getName() << II; attr.setInvalid(); return true; } SplitQualType underlyingType = type.split(); // Check for redundant/conflicting ownership qualifiers. if (Qualifiers::ObjCLifetime previousLifetime = type.getQualifiers().getObjCLifetime()) { // If it's written directly, that's an error. if (hasDirectOwnershipQualifier(type)) { S.Diag(AttrLoc, diag::err_attr_objc_ownership_redundant) << type; return true; } // Otherwise, if the qualifiers actually conflict, pull sugar off // until we reach a type that is directly qualified. if (previousLifetime != lifetime) { // This should always terminate: the canonical type is // qualified, so some bit of sugar must be hiding it. while (!underlyingType.Quals.hasObjCLifetime()) { underlyingType = underlyingType.getSingleStepDesugaredType(); } underlyingType.Quals.removeObjCLifetime(); } } underlyingType.Quals.addObjCLifetime(lifetime); if (NonObjCPointer) { StringRef name = attr.getName()->getName(); switch (lifetime) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: break; case Qualifiers::OCL_Strong: name = "__strong"; break; case Qualifiers::OCL_Weak: name = "__weak"; break; case Qualifiers::OCL_Autoreleasing: name = "__autoreleasing"; break; } S.Diag(AttrLoc, diag::warn_type_attribute_wrong_type) << name << TDS_ObjCObjOrBlock << type; } QualType origType = type; if (!NonObjCPointer) type = S.Context.getQualifiedType(underlyingType); // If we have a valid source location for the attribute, use an // AttributedType instead. if (AttrLoc.isValid()) type = S.Context.getAttributedType(AttributedType::attr_objc_ownership, origType, type); // Forbid __weak if the runtime doesn't support it. if (lifetime == Qualifiers::OCL_Weak && !S.getLangOpts().ObjCARCWeak && !NonObjCPointer) { // Actually, delay this until we know what we're parsing. 
if (S.DelayedDiagnostics.shouldDelayDiagnostics()) { S.DelayedDiagnostics.add( sema::DelayedDiagnostic::makeForbiddenType( S.getSourceManager().getExpansionLoc(AttrLoc), diag::err_arc_weak_no_runtime, type, /*ignored*/ 0)); } else { S.Diag(AttrLoc, diag::err_arc_weak_no_runtime); } attr.setInvalid(); return true; } // Forbid __weak for class objects marked as // objc_arc_weak_reference_unavailable if (lifetime == Qualifiers::OCL_Weak) { if (const ObjCObjectPointerType *ObjT = type->getAs<ObjCObjectPointerType>()) { if (ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl()) { if (Class->isArcWeakrefUnavailable()) { S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class); S.Diag(ObjT->getInterfaceDecl()->getLocation(), diag::note_class_declared); } } } } return true; } /// handleObjCGCTypeAttr - Process the __attribute__((objc_gc)) type /// attribute on the specified type. Returns true to indicate that /// the attribute was handled, false to indicate that the type does /// not permit the attribute. static bool handleObjCGCTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType &type) { Sema &S = state.getSema(); // Delay if this isn't some kind of pointer. if (!type->isPointerType() && !type->isObjCObjectPointerType() && !type->isBlockPointerType()) return false; if (type.getObjCGCAttr() != Qualifiers::GCNone) { S.Diag(attr.getLoc(), diag::err_attribute_multiple_objc_gc); attr.setInvalid(); return true; } // Check the attribute arguments. if (!attr.isArgIdent(0)) { S.Diag(attr.getLoc(), diag::err_attribute_argument_type) << attr.getName() << AANT_ArgumentString; attr.setInvalid(); return true; } Qualifiers::GC GCAttr; if (attr.getNumArgs() > 1) { S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << attr.getName() << 1; attr.setInvalid(); return true; } IdentifierInfo *II = attr.getArgAsIdent(0)->Ident; if (II->isStr("weak")) GCAttr = Qualifiers::Weak; else if (II->isStr("strong")) GCAttr = Qualifiers::Strong; else { S.Diag(attr.getLoc(), diag::warn_attribute_type_not_supported) << attr.getName() << II; attr.setInvalid(); return true; } QualType origType = type; type = S.Context.getObjCGCQualType(origType, GCAttr); // Make an attributed type to preserve the source information. if (attr.getLoc().isValid()) type = S.Context.getAttributedType(AttributedType::attr_objc_gc, origType, type); return true; } namespace { /// A helper class to unwrap a type down to a function for the /// purposes of applying attributes there. 
/// /// Use: /// FunctionTypeUnwrapper unwrapped(SemaRef, T); /// if (unwrapped.isFunctionType()) { /// const FunctionType *fn = unwrapped.get(); /// // change fn somehow /// T = unwrapped.wrap(fn); /// } struct FunctionTypeUnwrapper { enum WrapKind { Desugar, Parens, Pointer, BlockPointer, Reference, MemberPointer }; QualType Original; const FunctionType *Fn; SmallVector<unsigned char /*WrapKind*/, 8> Stack; FunctionTypeUnwrapper(Sema &S, QualType T) : Original(T) { while (true) { const Type *Ty = T.getTypePtr(); if (isa<FunctionType>(Ty)) { Fn = cast<FunctionType>(Ty); return; } else if (isa<ParenType>(Ty)) { T = cast<ParenType>(Ty)->getInnerType(); Stack.push_back(Parens); } else if (isa<PointerType>(Ty)) { T = cast<PointerType>(Ty)->getPointeeType(); Stack.push_back(Pointer); } else if (isa<BlockPointerType>(Ty)) { T = cast<BlockPointerType>(Ty)->getPointeeType(); Stack.push_back(BlockPointer); } else if (isa<MemberPointerType>(Ty)) { T = cast<MemberPointerType>(Ty)->getPointeeType(); Stack.push_back(MemberPointer); } else if (isa<ReferenceType>(Ty)) { T = cast<ReferenceType>(Ty)->getPointeeType(); Stack.push_back(Reference); } else { const Type *DTy = Ty->getUnqualifiedDesugaredType(); if (Ty == DTy) { Fn = nullptr; return; } T = QualType(DTy, 0); Stack.push_back(Desugar); } } } bool isFunctionType() const { return (Fn != nullptr); } const FunctionType *get() const { return Fn; } QualType wrap(Sema &S, const FunctionType *New) { // If T wasn't modified from the unwrapped type, do nothing. if (New == get()) return Original; Fn = New; return wrap(S.Context, Original, 0); } private: QualType wrap(ASTContext &C, QualType Old, unsigned I) { if (I == Stack.size()) return C.getQualifiedType(Fn, Old.getQualifiers()); // Build up the inner type, applying the qualifiers from the old // type to the new type. SplitQualType SplitOld = Old.split(); // As a special case, tail-recurse if there are no qualifiers. if (SplitOld.Quals.empty()) return wrap(C, SplitOld.Ty, I); return C.getQualifiedType(wrap(C, SplitOld.Ty, I), SplitOld.Quals); } QualType wrap(ASTContext &C, const Type *Old, unsigned I) { if (I == Stack.size()) return QualType(Fn, 0); switch (static_cast<WrapKind>(Stack[I++])) { case Desugar: // This is the point at which we potentially lose source // information. 
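// (e.g. the spelling of a typedef is not preserved once we rebuild through it)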
return wrap(C, Old->getUnqualifiedDesugaredType(), I); case Parens: { QualType New = wrap(C, cast<ParenType>(Old)->getInnerType(), I); return C.getParenType(New); } case Pointer: { QualType New = wrap(C, cast<PointerType>(Old)->getPointeeType(), I); return C.getPointerType(New); } case BlockPointer: { QualType New = wrap(C, cast<BlockPointerType>(Old)->getPointeeType(),I); return C.getBlockPointerType(New); } case MemberPointer: { const MemberPointerType *OldMPT = cast<MemberPointerType>(Old); QualType New = wrap(C, OldMPT->getPointeeType(), I); return C.getMemberPointerType(New, OldMPT->getClass()); } case Reference: { const ReferenceType *OldRef = cast<ReferenceType>(Old); QualType New = wrap(C, OldRef->getPointeeType(), I); if (isa<LValueReferenceType>(OldRef)) return C.getLValueReferenceType(New, OldRef->isSpelledAsLValue()); else return C.getRValueReferenceType(New); } } llvm_unreachable("unknown wrapping kind"); } }; } static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State, AttributeList &Attr, QualType &Type) { Sema &S = State.getSema(); AttributeList::Kind Kind = Attr.getKind(); QualType Desugared = Type; const AttributedType *AT = dyn_cast<AttributedType>(Type); while (AT) { AttributedType::Kind CurAttrKind = AT->getAttrKind(); // You cannot specify duplicate type attributes, so if the attribute has // already been applied, flag it. if (getAttrListKind(CurAttrKind) == Kind) { S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute_exact) << Attr.getName(); return true; } // You cannot have both __sptr and __uptr on the same type, nor can you // have __ptr32 and __ptr64. if ((CurAttrKind == AttributedType::attr_ptr32 && Kind == AttributeList::AT_Ptr64) || (CurAttrKind == AttributedType::attr_ptr64 && Kind == AttributeList::AT_Ptr32)) { S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible) << "'__ptr32'" << "'__ptr64'"; return true; } else if ((CurAttrKind == AttributedType::attr_sptr && Kind == AttributeList::AT_UPtr) || (CurAttrKind == AttributedType::attr_uptr && Kind == AttributeList::AT_SPtr)) { S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible) << "'__sptr'" << "'__uptr'"; return true; } Desugared = AT->getEquivalentType(); AT = dyn_cast<AttributedType>(Desugared); } // Pointer type qualifiers can only operate on pointer types, but not // pointer-to-member types. if (!isa<PointerType>(Desugared)) { S.Diag(Attr.getLoc(), Type->isMemberPointerType() ? diag::err_attribute_no_member_pointers : diag::err_attribute_pointers_only) << Attr.getName(); return true; } AttributedType::Kind TAK; switch (Kind) { default: llvm_unreachable("Unknown attribute kind"); case AttributeList::AT_Ptr32: TAK = AttributedType::attr_ptr32; break; case AttributeList::AT_Ptr64: TAK = AttributedType::attr_ptr64; break; case AttributeList::AT_SPtr: TAK = AttributedType::attr_sptr; break; case AttributeList::AT_UPtr: TAK = AttributedType::attr_uptr; break; } Type = S.Context.getAttributedType(TAK, Type, Type); return false; } bool Sema::checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive) { // We saw a nullability type specifier. If this is the first one for // this file, note that. FileID file = getNullabilityCompletenessCheckFileID(*this, nullabilityLoc); if (!file.isInvalid()) { FileNullability &fileNullability = NullabilityMap[file]; if (!fileNullability.SawTypeNullability) { // If we have already seen a pointer declarator without a nullability // annotation, complain about it. 
if (fileNullability.PointerLoc.isValid()) { Diag(fileNullability.PointerLoc, diag::warn_nullability_missing) << static_cast<unsigned>(fileNullability.PointerKind); } fileNullability.SawTypeNullability = true; } } // Check for existing nullability attributes on the type. QualType desugared = type; while (auto attributed = dyn_cast<AttributedType>(desugared.getTypePtr())) { // Check whether there is already a nullability attribute here. if (auto existingNullability = attributed->getImmediateNullability()) { // Duplicated nullability. if (nullability == *existingNullability) { Diag(nullabilityLoc, diag::warn_nullability_duplicate) << DiagNullabilityKind(nullability, isContextSensitive) << FixItHint::CreateRemoval(nullabilityLoc); break; } // Conflicting nullability. Diag(nullabilityLoc, diag::err_nullability_conflicting) << DiagNullabilityKind(nullability, isContextSensitive) << DiagNullabilityKind(*existingNullability, false); return true; } desugared = attributed->getModifiedType(); } // If there is already a different nullability specifier, complain. // This (unlike the code above) looks through typedefs that might // have nullability specifiers on them, which means we cannot // provide a useful Fix-It. if (auto existingNullability = desugared->getNullability(Context)) { if (nullability != *existingNullability) { Diag(nullabilityLoc, diag::err_nullability_conflicting) << DiagNullabilityKind(nullability, isContextSensitive) << DiagNullabilityKind(*existingNullability, false); // Try to find the typedef with the existing nullability specifier. if (auto typedefType = desugared->getAs<TypedefType>()) { TypedefNameDecl *typedefDecl = typedefType->getDecl(); QualType underlyingType = typedefDecl->getUnderlyingType(); if (auto typedefNullability = AttributedType::stripOuterNullability(underlyingType)) { if (*typedefNullability == *existingNullability) { Diag(typedefDecl->getLocation(), diag::note_nullability_here) << DiagNullabilityKind(*existingNullability, false); } } } return true; } } // If this definitely isn't a pointer type, reject the specifier. if (!desugared->canHaveNullability()) { Diag(nullabilityLoc, diag::err_nullability_nonpointer) << DiagNullabilityKind(nullability, isContextSensitive) << type; return true; } // For the context-sensitive keywords/Objective-C property // attributes, require that the type be a single-level pointer. if (isContextSensitive) { // Make sure that the pointee isn't itself a pointer type. QualType pointeeType = desugared->getPointeeType(); if (pointeeType->isAnyPointerType() || pointeeType->isObjCObjectPointerType() || pointeeType->isMemberPointerType()) { Diag(nullabilityLoc, diag::err_nullability_cs_multilevel) << DiagNullabilityKind(nullability, true) << type; Diag(nullabilityLoc, diag::note_nullability_type_specifier) << DiagNullabilityKind(nullability, false) << type << FixItHint::CreateReplacement(nullabilityLoc, getNullabilitySpelling(nullability)); return true; } } // Form the attributed type. type = Context.getAttributedType( AttributedType::getNullabilityAttrKind(nullability), type, type); return false; } bool Sema::checkObjCKindOfType(QualType &type, SourceLocation loc) { // Find out if it's an Objective-C object or object pointer type. const ObjCObjectPointerType *ptrType = type->getAs<ObjCObjectPointerType>(); const ObjCObjectType *objType = ptrType ? ptrType->getObjectType() : type->getAs<ObjCObjectType>(); // If not, we can't apply __kindof. if (!objType) { // FIXME: Handle dependent types that aren't yet object types.
Diag(loc, diag::err_objc_kindof_nonobject) << type; return true; } // Rebuild the "equivalent" type, which pushes __kindof down into // the object type. QualType equivType = Context.getObjCObjectType(objType->getBaseType(), objType->getTypeArgsAsWritten(), objType->getProtocols(), /*isKindOf=*/true); // If we started with an object pointer type, rebuild it. if (ptrType) { equivType = Context.getObjCObjectPointerType(equivType); if (auto nullability = type->getNullability(Context)) { auto attrKind = AttributedType::getNullabilityAttrKind(*nullability); equivType = Context.getAttributedType(attrKind, equivType, equivType); } } // Build the attributed type to record where __kindof occurred. type = Context.getAttributedType(AttributedType::attr_objc_kindof, type, equivType); return false; } /// Map a nullability attribute kind to a nullability kind. static NullabilityKind mapNullabilityAttrKind(AttributeList::Kind kind) { switch (kind) { case AttributeList::AT_TypeNonNull: return NullabilityKind::NonNull; case AttributeList::AT_TypeNullable: return NullabilityKind::Nullable; case AttributeList::AT_TypeNullUnspecified: return NullabilityKind::Unspecified; default: llvm_unreachable("not a nullability attribute kind"); } } /// Distribute a nullability type attribute that cannot be applied to /// the type specifier to a pointer, block pointer, or member pointer /// declarator, complaining if necessary. /// /// \returns true if the nullability annotation was distributed, false /// otherwise. static bool distributeNullabilityTypeAttr(TypeProcessingState &state, QualType type, AttributeList &attr) { Declarator &declarator = state.getDeclarator(); /// Attempt to move the attribute to the specified chunk. auto moveToChunk = [&](DeclaratorChunk &chunk, bool inFunction) -> bool { // If there is already a nullability attribute there, don't add // one. if (hasNullabilityAttr(chunk.getAttrListRef())) return false; // Complain about the nullability qualifier being in the wrong // place. enum { PK_Pointer, PK_BlockPointer, PK_MemberPointer, PK_FunctionPointer, PK_MemberFunctionPointer, } pointerKind = chunk.Kind == DeclaratorChunk::Pointer ? (inFunction ? PK_FunctionPointer : PK_Pointer) : chunk.Kind == DeclaratorChunk::BlockPointer ? PK_BlockPointer : inFunction? PK_MemberFunctionPointer : PK_MemberPointer; auto diag = state.getSema().Diag(attr.getLoc(), diag::warn_nullability_declspec) << DiagNullabilityKind(mapNullabilityAttrKind(attr.getKind()), attr.isContextSensitiveKeywordAttribute()) << type << static_cast<unsigned>(pointerKind); // FIXME: MemberPointer chunks don't carry the location of the *. if (chunk.Kind != DeclaratorChunk::MemberPointer) { diag << FixItHint::CreateRemoval(attr.getLoc()) << FixItHint::CreateInsertion( state.getSema().getPreprocessor() .getLocForEndOfToken(chunk.Loc), " " + attr.getName()->getName().str() + " "); } moveAttrFromListToList(attr, state.getCurrentAttrListRef(), chunk.getAttrListRef()); return true; }; // Move it to the outermost pointer, member pointer, or block // pointer declarator. for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) { DeclaratorChunk &chunk = declarator.getTypeObject(i-1); switch (chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: return moveToChunk(chunk, false); case DeclaratorChunk::Paren: case DeclaratorChunk::Array: continue; case DeclaratorChunk::Function: // Try to move past the return type to a function/block/member // function pointer. 
if (DeclaratorChunk *dest = maybeMovePastReturnType( declarator, i, /*onlyBlockPointers=*/false)) { return moveToChunk(*dest, true); } return false; // Don't walk through these. case DeclaratorChunk::Reference: return false; } } return false; } // HLSL Change Starts static bool isHLSLTypeAttr(AttributeList::Kind Kind) { switch (Kind) { case AttributeList::AT_HLSLRowMajor: case AttributeList::AT_HLSLColumnMajor: case AttributeList::AT_HLSLSnorm: case AttributeList::AT_HLSLUnorm: case AttributeList::AT_HLSLGloballyCoherent: return true; default: // Only meant to catch attr handled by handleHLSLTypeAttr, ignore the rest return false; } } static bool handleHLSLTypeAttr(TypeProcessingState &State, AttributeList &Attr, QualType &Type) { // Return true on error Sema &S = State.getSema(); AttributeList::Kind Kind = Attr.getKind(); // Check for attributes on incorrect types if ((Kind == AttributeList::AT_HLSLColumnMajor || Kind == AttributeList::AT_HLSLRowMajor) && !hlsl::IsMatrixType(&S, Type)) { S.Diag(Attr.getLoc(), diag::err_hlsl_matrix_layout_wrong_type) << Attr.getName() << Attr.getRange(); return true; } else if ((Kind == AttributeList::AT_HLSLSnorm || Kind == AttributeList::AT_HLSLUnorm) && !hlsl::GetOriginalElementType(&S, Type)->isFloatingType()) { S.Diag(Attr.getLoc(), diag::err_hlsl_norm_float_only) << Attr.getRange(); return true; } const AttributedType *pMatrixOrientation = nullptr; const AttributedType *pNorm = nullptr; const AttributedType *pGLC = nullptr; hlsl::GetHLSLAttributedTypes(&S, Type, &pMatrixOrientation, &pNorm, &pGLC); if (pMatrixOrientation && (Kind == AttributeList::AT_HLSLColumnMajor || Kind == AttributeList::AT_HLSLRowMajor)) { AttributedType::Kind CurAttrKind = pMatrixOrientation->getAttrKind(); if (Kind == getAttrListKind(CurAttrKind)) { S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute_exact) << Attr.getName() << Attr.getRange(); } else { S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible) << "'row_major'" << "'column_major'" << Attr.getRange(); } return true; } if (pNorm && (Kind == AttributeList::AT_HLSLSnorm || Kind == AttributeList::AT_HLSLUnorm)) { AttributedType::Kind CurAttrKind = pNorm->getAttrKind(); if (Kind == getAttrListKind(CurAttrKind)) { S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute_exact) << Attr.getName() << Attr.getRange(); } else { S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible) << "'unorm'" << "'snorm'" << Attr.getRange(); } return true; } if (pGLC && Kind == AttributeList::AT_HLSLGloballyCoherent) { AttributedType::Kind CurAttrKind = pGLC->getAttrKind(); if (Kind == getAttrListKind(CurAttrKind)) { S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute_exact) << Attr.getName() << Attr.getRange(); } } AttributedType::Kind TAK; switch (Kind) { default: llvm_unreachable("Unknown attribute kind"); case AttributeList::AT_HLSLRowMajor: TAK = AttributedType::attr_hlsl_row_major; break; case AttributeList::AT_HLSLColumnMajor: TAK = AttributedType::attr_hlsl_column_major; break; case AttributeList::AT_HLSLUnorm: TAK = AttributedType::attr_hlsl_unorm; break; case AttributeList::AT_HLSLSnorm: TAK = AttributedType::attr_hlsl_snorm; break; case AttributeList::AT_HLSLGloballyCoherent: TAK = AttributedType::attr_hlsl_globallycoherent; break; } Type = S.Context.getAttributedType(TAK, Type, Type); return false; } // HLSL Change Ends static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) { assert(!Attr.isInvalid()); switch (Attr.getKind()) { default: llvm_unreachable("not a calling convention attribute"); case 
AttributeList::AT_CDecl: return AttributedType::attr_cdecl; case AttributeList::AT_FastCall: return AttributedType::attr_fastcall; case AttributeList::AT_StdCall: return AttributedType::attr_stdcall; case AttributeList::AT_ThisCall: return AttributedType::attr_thiscall; case AttributeList::AT_Pascal: return AttributedType::attr_pascal; case AttributeList::AT_VectorCall: return AttributedType::attr_vectorcall; case AttributeList::AT_Pcs: { // The attribute may have had a fixit applied where we treated an // identifier as a string literal. The contents of the string are valid, // but the form may not be. StringRef Str; if (Attr.isArgExpr(0)) Str = cast<StringLiteral>(Attr.getArgAsExpr(0))->getString(); else Str = Attr.getArgAsIdent(0)->Ident->getName(); return llvm::StringSwitch<AttributedType::Kind>(Str) .Case("aapcs", AttributedType::attr_pcs) .Case("aapcs-vfp", AttributedType::attr_pcs_vfp); } case AttributeList::AT_IntelOclBicc: return AttributedType::attr_inteloclbicc; case AttributeList::AT_MSABI: return AttributedType::attr_ms_abi; case AttributeList::AT_SysVABI: return AttributedType::attr_sysv_abi; } llvm_unreachable("unexpected attribute kind!"); } /// Process an individual function attribute. Returns true to /// indicate that the attribute was handled, false if it wasn't. static bool handleFunctionTypeAttr(TypeProcessingState &state, AttributeList &attr, QualType &type) { Sema &S = state.getSema(); FunctionTypeUnwrapper unwrapped(S, type); if (attr.getKind() == AttributeList::AT_NoReturn) { if (S.CheckNoReturnAttr(attr)) return true; // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; // Otherwise we can process right away. FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoReturn(true); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } // ns_returns_retained is not always a type attribute, but if we got // here, we're treating it as one right now. if (attr.getKind() == AttributeList::AT_NSReturnsRetained) { assert(S.getLangOpts().ObjCAutoRefCount && "ns_returns_retained treated as type attribute in non-ARC"); if (attr.getNumArgs()) return true; // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withProducesResult(true); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } if (attr.getKind() == AttributeList::AT_Regparm) { unsigned value; if (S.CheckRegparmAttr(attr, value)) return true; // Delay if this is not a function type. if (!unwrapped.isFunctionType()) return false; // Diagnose regparm with fastcall. const FunctionType *fn = unwrapped.get(); CallingConv CC = fn->getCallConv(); if (CC == CC_X86FastCall) { S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible) << FunctionType::getNameForCallConv(CC) << "regparm"; attr.setInvalid(); return true; } FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withRegParm(value); type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); return true; } // Delay if the type didn't work out to a function. if (!unwrapped.isFunctionType()) return false; // Otherwise, a calling convention. 
CallingConv CC; if (S.CheckCallingConvAttr(attr, CC)) return true; const FunctionType *fn = unwrapped.get(); CallingConv CCOld = fn->getCallConv(); AttributedType::Kind CCAttrKind = getCCTypeAttrKind(attr); if (CCOld != CC) { // Error out when there's already an attribute on the type // and the CCs don't match. const AttributedType *AT = S.getCallingConvAttributedType(type); if (AT && AT->getAttrKind() != CCAttrKind) { S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible) << FunctionType::getNameForCallConv(CC) << FunctionType::getNameForCallConv(CCOld); attr.setInvalid(); return true; } } // Diagnose use of callee-cleanup calling convention on variadic functions. if (!supportsVariadicCall(CC)) { const FunctionProtoType *FnP = dyn_cast<FunctionProtoType>(fn); if (FnP && FnP->isVariadic()) { unsigned DiagID = diag::err_cconv_varargs; // stdcall and fastcall are ignored with a warning for GCC and MS // compatibility. if (CC == CC_X86StdCall || CC == CC_X86FastCall) DiagID = diag::warn_cconv_varargs; S.Diag(attr.getLoc(), DiagID) << FunctionType::getNameForCallConv(CC); attr.setInvalid(); return true; } } // Also diagnose fastcall with regparm. if (CC == CC_X86FastCall && fn->getHasRegParm()) { S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible) << "regparm" << FunctionType::getNameForCallConv(CC_X86FastCall); attr.setInvalid(); return true; } // Modify the CC from the wrapped function type, wrap it all back, and then // wrap the whole thing in an AttributedType as written. The modified type // might have a different CC if we ignored the attribute. FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withCallingConv(CC); QualType Equivalent = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI)); type = S.Context.getAttributedType(CCAttrKind, type, Equivalent); return true; } bool Sema::hasExplicitCallingConv(QualType &T) { QualType R = T.IgnoreParens(); while (const AttributedType *AT = dyn_cast<AttributedType>(R)) { if (AT->isCallingConv()) return true; R = AT->getModifiedType().IgnoreParens(); } return false; } void Sema::adjustMemberFunctionCC(QualType &T, bool IsStatic) { FunctionTypeUnwrapper Unwrapped(*this, T); const FunctionType *FT = Unwrapped.get(); bool IsVariadic = (isa<FunctionProtoType>(FT) && cast<FunctionProtoType>(FT)->isVariadic()); // Only adjust types with the default convention. For example, on Windows we // should adjust a __cdecl type to __thiscall for instance methods, and a // __thiscall type to __cdecl for static methods. CallingConv CurCC = FT->getCallConv(); CallingConv FromCC = Context.getDefaultCallingConvention(IsVariadic, IsStatic); CallingConv ToCC = Context.getDefaultCallingConvention(IsVariadic, !IsStatic); if (CurCC != FromCC || FromCC == ToCC) return; if (hasExplicitCallingConv(T)) return; FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(ToCC)); QualType Wrapped = Unwrapped.wrap(*this, FT); T = Context.getAdjustedType(T, Wrapped); } /// HandleVectorSizeAttribute - this attribute is only applicable to integral /// and float scalars, although arrays, pointers, and function return values are /// allowed in conjunction with this construct. Aggregates with this attribute /// are invalid, even if they are of the same size as a corresponding scalar. /// The raw attribute should contain precisely 1 argument, the vector size for /// the variable, measured in bytes. If curType and rawAttr are well formed, /// this routine will return a new vector type.
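/// For example, 'typedef int v4si __attribute__((vector_size(16)));' declares /// a vector of four ints, assuming a 32-bit 'int'.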
static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr, Sema &S) { // Check the attribute arguments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr.getName() << 1; Attr.setInvalid(); return; } Expr *sizeExpr = static_cast<Expr *>(Attr.getArgAsExpr(0)); llvm::APSInt vecSize(32); if (sizeExpr->isTypeDependent() || sizeExpr->isValueDependent() || !sizeExpr->isIntegerConstantExpr(vecSize, S.Context)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIntegerConstant << sizeExpr->getSourceRange(); Attr.setInvalid(); return; } // The base type must be integer (not Boolean or enumeration) or float, and // can't already be a vector. if (!CurType->isBuiltinType() || CurType->isBooleanType() || (!CurType->isIntegerType() && !CurType->isRealFloatingType())) { S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType; Attr.setInvalid(); return; } unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType)); // vecSize is specified in bytes - convert to bits. unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue() * 8); // the vector size needs to be an integral multiple of the type size. if (vectorSize % typeSize) { S.Diag(Attr.getLoc(), diag::err_attribute_invalid_size) << sizeExpr->getSourceRange(); Attr.setInvalid(); return; } if (VectorType::isVectorSizeTooLarge(vectorSize / typeSize)) { S.Diag(Attr.getLoc(), diag::err_attribute_size_too_large) << sizeExpr->getSourceRange(); Attr.setInvalid(); return; } if (vectorSize == 0) { S.Diag(Attr.getLoc(), diag::err_attribute_zero_size) << sizeExpr->getSourceRange(); Attr.setInvalid(); return; } // Success! Instantiate the vector type, the number of elements is > 0, and // not required to be a power of 2, unlike GCC. CurType = S.Context.getVectorType(CurType, vectorSize/typeSize, VectorType::GenericVector); } /// \brief Process the OpenCL-like ext_vector_type attribute when it occurs on /// a type. static void HandleExtVectorTypeAttr(QualType &CurType, const AttributeList &Attr, Sema &S) { // check the attribute arguments. if (Attr.getNumArgs() != 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr.getName() << 1; return; } Expr *sizeExpr; // Special case where the argument is a template id. if (Attr.isArgIdent(0)) { CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId id; id.setIdentifier(Attr.getArgAsIdent(0)->Ident, Attr.getLoc()); ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc, id, false, false); if (Size.isInvalid()) return; sizeExpr = Size.get(); } else { sizeExpr = Attr.getArgAsExpr(0); } // Create the vector type. QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc()); if (!T.isNull()) CurType = T; } static bool isPermittedNeonBaseType(QualType &Ty, VectorType::VectorKind VecKind, Sema &S) { const BuiltinType *BTy = Ty->getAs<BuiltinType>(); if (!BTy) return false; llvm::Triple Triple = S.Context.getTargetInfo().getTriple(); // Signed poly is mathematically wrong, but has been baked into some ABIs by // now. bool IsPolyUnsigned = Triple.getArch() == llvm::Triple::aarch64 || Triple.getArch() == llvm::Triple::aarch64_be; if (VecKind == VectorType::NeonPolyVector) { if (IsPolyUnsigned) { // AArch64 polynomial vectors are unsigned and support poly64. 
      return BTy->getKind() == BuiltinType::UChar ||
             BTy->getKind() == BuiltinType::UShort ||
             BTy->getKind() == BuiltinType::ULong ||
             BTy->getKind() == BuiltinType::ULongLong;
    } else {
      // AArch32 polynomial vectors are signed.
      return BTy->getKind() == BuiltinType::SChar ||
             BTy->getKind() == BuiltinType::Short;
    }
  }

  // Non-polynomial vector types: the usual suspects are allowed, as well as
  // float64_t on AArch64.
  bool Is64Bit = Triple.getArch() == llvm::Triple::aarch64 ||
                 Triple.getArch() == llvm::Triple::aarch64_be;

  if (Is64Bit && BTy->getKind() == BuiltinType::Double)
    return true;

  return BTy->getKind() == BuiltinType::SChar ||
         BTy->getKind() == BuiltinType::UChar ||
         BTy->getKind() == BuiltinType::Short ||
         BTy->getKind() == BuiltinType::UShort ||
         BTy->getKind() == BuiltinType::Int ||
         BTy->getKind() == BuiltinType::UInt ||
         BTy->getKind() == BuiltinType::Long ||
         BTy->getKind() == BuiltinType::ULong ||
         BTy->getKind() == BuiltinType::LongLong ||
         BTy->getKind() == BuiltinType::ULongLong ||
         BTy->getKind() == BuiltinType::Float ||
         BTy->getKind() == BuiltinType::Half;
}

/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
/// "neon_polyvector_type" attributes are used to create vector types that
/// are mangled according to ARM's ABI. Otherwise, these types are identical
/// to those created with the "vector_size" attribute. Unlike "vector_size"
/// the argument to these Neon attributes is the number of vector elements,
/// not the vector size in bytes. The vector width and element type must
/// match one of the standard Neon vector types.
static void HandleNeonVectorTypeAttr(QualType& CurType,
                                     const AttributeList &Attr, Sema &S,
                                     VectorType::VectorKind VecKind) {
  // Target must have NEON.
  if (!S.Context.getTargetInfo().hasFeature("neon")) {
    S.Diag(Attr.getLoc(), diag::err_attribute_unsupported) << Attr.getName();
    Attr.setInvalid();
    return;
  }
  // Check the attribute arguments.
  if (Attr.getNumArgs() != 1) {
    S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
      << Attr.getName() << 1;
    Attr.setInvalid();
    return;
  }
  // The number of elements must be an ICE.
  Expr *numEltsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
  llvm::APSInt numEltsInt(32);
  if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() ||
      !numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) {
    S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
      << Attr.getName() << AANT_ArgumentIntegerConstant
      << numEltsExpr->getSourceRange();
    Attr.setInvalid();
    return;
  }
  // Only certain element types are supported for Neon vectors.
  if (!isPermittedNeonBaseType(CurType, VecKind, S)) {
    S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
    Attr.setInvalid();
    return;
  }

  // The total size of the vector must be 64 or 128 bits.
  unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
  unsigned numElts = static_cast<unsigned>(numEltsInt.getZExtValue());
  unsigned vecSize = typeSize * numElts;
  if (vecSize != 64 && vecSize != 128) {
    S.Diag(Attr.getLoc(), diag::err_attribute_bad_neon_vector_size) << CurType;
    Attr.setInvalid();
    return;
  }

  CurType = S.Context.getVectorType(CurType, numElts, VecKind);
}

static void processTypeAttrs(TypeProcessingState &state, QualType &type,
                             TypeAttrLocation TAL, AttributeList *attrs) {
  // Scan through and apply attributes to this type where it makes sense.
  // Some attributes (such as __address_space__, __vector_size__, etc) apply
  // to the type, but others can be present in the type specifiers even
  // though they apply to the decl.
Here we apply type attributes and ignore the rest. AttributeList *next; do { AttributeList &attr = *attrs; next = attr.getNext(); // Skip attributes that were marked to be invalid. if (attr.isInvalid()) continue; if (attr.isCXX11Attribute()) { // [[gnu::...]] attributes are treated as declaration attributes, so may // not appertain to a DeclaratorChunk, even if we handle them as type // attributes. if (attr.getScopeName() && attr.getScopeName()->isStr("gnu")) { if (TAL == TAL_DeclChunk) { state.getSema().Diag(attr.getLoc(), diag::warn_cxx11_gnu_attribute_on_type) << attr.getName(); continue; } // HLSL Change Starts } else if (isHLSLTypeAttr(attr.getKind())) { if (!handleHLSLTypeAttr(state, attr, type)) attr.setUsedAsTypeAttr(); else attr.setInvalid(true); continue; // HLSL Change Ends } else if (TAL != TAL_DeclChunk) { // Otherwise, only consider type processing for a C++11 attribute if // it's actually been applied to a type. continue; } } // If this is an attribute we can handle, do so now, // otherwise, add it to the FnAttrs list for rechaining. switch (attr.getKind()) { default: // A C++11 attribute on a declarator chunk must appertain to a type. if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) { state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr) << attr.getName(); attr.setUsedAsTypeAttr(); } break; case AttributeList::UnknownAttribute: if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) state.getSema().Diag(attr.getLoc(), diag::warn_unknown_attribute_ignored) << attr.getName(); break; case AttributeList::IgnoredAttribute: break; case AttributeList::AT_MayAlias: // FIXME: This attribute needs to actually be handled, but if we ignore // it it breaks large amounts of Linux software. attr.setUsedAsTypeAttr(); break; case AttributeList::AT_OpenCLPrivateAddressSpace: case AttributeList::AT_OpenCLGlobalAddressSpace: case AttributeList::AT_OpenCLLocalAddressSpace: case AttributeList::AT_OpenCLConstantAddressSpace: case AttributeList::AT_OpenCLGenericAddressSpace: case AttributeList::AT_AddressSpace: HandleAddressSpaceTypeAttribute(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; OBJC_POINTER_TYPE_ATTRS_CASELIST: if (!handleObjCPointerTypeAttr(state, attr, type)) distributeObjCPointerTypeAttr(state, attr, type); attr.setUsedAsTypeAttr(); break; case AttributeList::AT_VectorSize: HandleVectorSizeAttr(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; case AttributeList::AT_ExtVectorType: HandleExtVectorTypeAttr(type, attr, state.getSema()); attr.setUsedAsTypeAttr(); break; case AttributeList::AT_NeonVectorType: HandleNeonVectorTypeAttr(type, attr, state.getSema(), VectorType::NeonVector); attr.setUsedAsTypeAttr(); break; case AttributeList::AT_NeonPolyVectorType: HandleNeonVectorTypeAttr(type, attr, state.getSema(), VectorType::NeonPolyVector); attr.setUsedAsTypeAttr(); break; case AttributeList::AT_OpenCLImageAccess: // FIXME: there should be some type checking happening here, I would // imagine, but the original handler's checking was entirely superfluous. attr.setUsedAsTypeAttr(); break; MS_TYPE_ATTRS_CASELIST: if (!handleMSPointerTypeQualifierAttr(state, attr, type)) attr.setUsedAsTypeAttr(); break; NULLABILITY_TYPE_ATTRS_CASELIST: // Either add nullability here or try to distribute it. We // don't want to distribute the nullability specifier past any // dependent type, because that complicates the user model. 
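    // Editor's illustration (a sketch, not part of the original source): in
    // 'int * _Nonnull p' the type already carries the pointer, so the
    // specifier is checked in place here; distribution is attempted only for
    // non-dependent types that cannot carry nullability themselves.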
if (type->canHaveNullability() || type->isDependentType() || !distributeNullabilityTypeAttr(state, type, attr)) { if (state.getSema().checkNullabilityTypeSpecifier( type, mapNullabilityAttrKind(attr.getKind()), attr.getLoc(), attr.isContextSensitiveKeywordAttribute())) { attr.setInvalid(); } attr.setUsedAsTypeAttr(); } break; case AttributeList::AT_ObjCKindOf: // '__kindof' must be part of the decl-specifiers. switch (TAL) { case TAL_DeclSpec: break; case TAL_DeclChunk: case TAL_DeclName: state.getSema().Diag(attr.getLoc(), diag::err_objc_kindof_wrong_position) << FixItHint::CreateRemoval(attr.getLoc()) << FixItHint::CreateInsertion( state.getDeclarator().getDeclSpec().getLocStart(), "__kindof "); break; } // Apply it regardless. if (state.getSema().checkObjCKindOfType(type, attr.getLoc())) attr.setInvalid(); attr.setUsedAsTypeAttr(); break; case AttributeList::AT_NSReturnsRetained: // Begin HLSL Change - avoid dead-code fallthrough if (state.getSema().getLangOpts().ObjCAutoRefCount) { attr.setUsedAsTypeAttr(); // Never process function type attributes as part of the // declaration-specifiers. if (TAL == TAL_DeclSpec) distributeFunctionTypeAttrFromDeclSpec(state, attr, type); // Otherwise, handle the possible delays. else if (!handleFunctionTypeAttr(state, attr, type)) distributeFunctionTypeAttr(state, attr, type); } break; // End HLSL Change - avoid dead-code fallthrough FUNCTION_TYPE_ATTRS_CASELIST: attr.setUsedAsTypeAttr(); // Never process function type attributes as part of the // declaration-specifiers. if (TAL == TAL_DeclSpec) distributeFunctionTypeAttrFromDeclSpec(state, attr, type); // Otherwise, handle the possible delays. else if (!handleFunctionTypeAttr(state, attr, type)) distributeFunctionTypeAttr(state, attr, type); break; } } while ((attrs = next)); } /// \brief Ensure that the type of the given expression is complete. /// /// This routine checks whether the expression \p E has a complete type. If the /// expression refers to an instantiable construct, that instantiation is /// performed as needed to complete its type. Furthermore /// Sema::RequireCompleteType is called for the expression's type (or in the /// case of a reference type, the referred-to type). /// /// \param E The expression whose type is required to be complete. /// \param Diagnoser The object that will emit a diagnostic if the type is /// incomplete. /// /// \returns \c true if the type of \p E is incomplete and diagnosed, \c false /// otherwise. bool Sema::RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser){ QualType T = E->getType(); // Fast path the case where the type is already complete. if (!T->isIncompleteType()) // FIXME: The definition might not be visible. return false; // Incomplete array types may be completed by the initializer attached to // their definitions. For static data members of class templates and for // variable templates, we need to instantiate the definition to get this // initializer and complete the type. if (T->isIncompleteArrayType()) { if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) { if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) { if (isTemplateInstantiation(Var->getTemplateSpecializationKind())) { SourceLocation PointOfInstantiation = E->getExprLoc(); if (MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo()) { // If we don't already have a point of instantiation, this is it. 
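            // Editor's illustration (a sketch, not part of the original
            // source): given
            //   template <typename T> struct S { static const int arr[]; };
            //   template <typename T> const int S<T>::arr[] = { 1, 2, 3 };
            // evaluating sizeof(S<int>::arr) reaches this point and
            // instantiates the definition so the array bound becomes known.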
if (MSInfo->getPointOfInstantiation().isInvalid()) { MSInfo->setPointOfInstantiation(PointOfInstantiation); // This is a modification of an existing AST node. Notify // listeners. if (ASTMutationListener *L = getASTMutationListener()) L->StaticDataMemberInstantiated(Var); } } else { VarTemplateSpecializationDecl *VarSpec = cast<VarTemplateSpecializationDecl>(Var); if (VarSpec->getPointOfInstantiation().isInvalid()) VarSpec->setPointOfInstantiation(PointOfInstantiation); } InstantiateVariableDefinition(PointOfInstantiation, Var); // Update the type to the newly instantiated definition's type both // here and within the expression. if (VarDecl *Def = Var->getDefinition()) { DRE->setDecl(Def); T = Def->getType(); DRE->setType(T); E->setType(T); } // We still go on to try to complete the type independently, as it // may also require instantiations or diagnostics if it remains // incomplete. } } } } // FIXME: Are there other cases which require instantiating something other // than the type to complete the type of an expression? // Look through reference types and complete the referred type. if (const ReferenceType *Ref = T->getAs<ReferenceType>()) T = Ref->getPointeeType(); return RequireCompleteType(E->getExprLoc(), T, Diagnoser); } namespace { struct TypeDiagnoserDiag : Sema::TypeDiagnoser { unsigned DiagID; TypeDiagnoserDiag(unsigned DiagID) : Sema::TypeDiagnoser(DiagID == 0), DiagID(DiagID) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; S.Diag(Loc, DiagID) << T; } }; } bool Sema::RequireCompleteExprType(Expr *E, unsigned DiagID) { TypeDiagnoserDiag Diagnoser(DiagID); return RequireCompleteExprType(E, Diagnoser); } /// @brief Ensure that the type T is a complete type. /// /// This routine checks whether the type @p T is complete in any /// context where a complete type is required. If @p T is a complete /// type, returns false. If @p T is a class template specialization, /// this routine then attempts to perform class template /// instantiation. If instantiation fails, or if @p T is incomplete /// and cannot be completed, issues the diagnostic @p diag (giving it /// the type @p T) and returns true. /// /// @param Loc The location in the source that the incomplete type /// diagnostic should refer to. /// /// @param T The type that this routine is examining for completeness. /// /// @returns @c true if @p T is incomplete and a diagnostic was emitted, /// @c false otherwise. bool Sema::RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { if (RequireCompleteTypeImpl(Loc, T, Diagnoser)) return true; if (const TagType *Tag = T->getAs<TagType>()) { if (!Tag->getDecl()->isCompleteDefinitionRequired()) { Tag->getDecl()->setCompleteDefinitionRequired(); Consumer.HandleTagDeclRequiredDefinition(Tag->getDecl()); } } return false; } /// \brief Determine whether there is any declaration of \p D that was ever a /// definition (perhaps before module merging) and is currently visible. /// \param D The definition of the entity. /// \param Suggested Filled in with the declaration that should be made visible /// in order to provide a definition of this entity. /// \param OnlyNeedComplete If \c true, we only need the type to be complete, /// not defined. This only matters for enums with a fixed underlying /// type, since in all other cases, a type is complete if and only if it /// is defined. 
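/// For illustration only (editor's sketch, not part of the original source):
/// given an opaque declaration 'enum E : int;', E is already complete, so
/// when OnlyNeedComplete is true any visible redeclaration of E is accepted
/// even while the defining module remains unimported.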
bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete) { // Easy case: if we don't have modules, all declarations are visible. if (!getLangOpts().Modules && !getLangOpts().ModulesLocalVisibility) return true; // If this definition was instantiated from a template, map back to the // pattern from which it was instantiated. if (isa<TagDecl>(D) && cast<TagDecl>(D)->isBeingDefined()) { // We're in the middle of defining it; this definition should be treated // as visible. return true; } else if (auto *RD = dyn_cast<CXXRecordDecl>(D)) { if (auto *Pattern = RD->getTemplateInstantiationPattern()) RD = Pattern; D = RD->getDefinition(); } else if (auto *ED = dyn_cast<EnumDecl>(D)) { while (auto *NewED = ED->getInstantiatedFromMemberEnum()) ED = NewED; if (OnlyNeedComplete && ED->isFixed()) { // If the enum has a fixed underlying type, and we're only looking for a // complete type (not a definition), any visible declaration of it will // do. *Suggested = nullptr; for (auto *Redecl : ED->redecls()) { if (isVisible(Redecl)) return true; if (Redecl->isThisDeclarationADefinition() || (Redecl->isCanonicalDecl() && !*Suggested)) *Suggested = Redecl; } return false; } D = ED->getDefinition(); } assert(D && "missing definition for pattern of instantiated definition"); *Suggested = D; if (isVisible(D)) return true; // The external source may have additional definitions of this type that are // visible, so complete the redeclaration chain now and ask again. if (auto *Source = Context.getExternalSource()) { Source->CompleteRedeclChain(D); return isVisible(D); } return false; } /// Locks in the inheritance model for the given class and all of its bases. static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) { RD = RD->getMostRecentDecl(); if (!RD->hasAttr<MSInheritanceAttr>()) { MSInheritanceAttr::Spelling IM; switch (S.MSPointerToMemberRepresentationMethod) { case LangOptions::PPTMK_BestCase: IM = RD->calculateInheritanceModel(); break; case LangOptions::PPTMK_FullGeneralitySingleInheritance: IM = MSInheritanceAttr::Keyword_single_inheritance; break; case LangOptions::PPTMK_FullGeneralityMultipleInheritance: IM = MSInheritanceAttr::Keyword_multiple_inheritance; break; case LangOptions::PPTMK_FullGeneralityVirtualInheritance: IM = MSInheritanceAttr::Keyword_unspecified_inheritance; break; } RD->addAttr(MSInheritanceAttr::CreateImplicit( S.getASTContext(), IM, /*BestCase=*/S.MSPointerToMemberRepresentationMethod == LangOptions::PPTMK_BestCase, S.ImplicitMSInheritanceAttrLoc.isValid() ? S.ImplicitMSInheritanceAttrLoc : RD->getSourceRange())); } } /// \brief The implementation of RequireCompleteType bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { // FIXME: Add this assertion to make sure we always get instantiation points. // assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType"); // FIXME: Add this assertion to help us flush out problems with // checking for dependent types and type-dependent expressions. // // assert(!T->isDependentType() && // "Can't ask whether a dependent type is complete"); // If we have a complete type, we're done. NamedDecl *Def = nullptr; if (!T->isIncompleteType(&Def)) { // If we know about the definition but it is not visible, complain. 
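    // Editor's illustration (a sketch, not part of the original source): if
    // the definition of 'struct S' was merged in from a module that has not
    // been imported here, T is complete but the definition may be hidden;
    // Def then names it and diagnoseMissingImport suggests the import to add.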
NamedDecl *SuggestedDef = nullptr; if (!Diagnoser.Suppressed && Def && !hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true)) diagnoseMissingImport(Loc, SuggestedDef, /*NeedDefinition*/true); // We lock in the inheritance model once somebody has asked us to ensure // that a pointer-to-member type is complete. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>()) { if (!MPTy->getClass()->isDependentType()) { RequireCompleteType(Loc, QualType(MPTy->getClass(), 0), 0); assignInheritanceModel(*this, MPTy->getMostRecentCXXRecordDecl()); } } } return false; } const TagType *Tag = T->getAs<TagType>(); const ObjCInterfaceType *IFace = T->getAs<ObjCInterfaceType>(); // If there's an unimported definition of this type in a module (for // instance, because we forward declared it, then imported the definition), // import that definition now. // // FIXME: What about other cases where an import extends a redeclaration // chain for a declaration that can be accessed through a mechanism other // than name lookup (eg, referenced in a template, or a variable whose type // could be completed by the module)? if (Tag || IFace) { NamedDecl *D = Tag ? static_cast<NamedDecl *>(Tag->getDecl()) : IFace->getDecl(); // Avoid diagnosing invalid decls as incomplete. if (D->isInvalidDecl()) return true; // Give the external AST source a chance to complete the type. if (auto *Source = Context.getExternalSource()) { if (Tag) Source->CompleteType(Tag->getDecl()); else Source->CompleteType(IFace->getDecl()); // If the external source completed the type, go through the motions // again to ensure we're allowed to use the completed type. if (!T->isIncompleteType()) return RequireCompleteTypeImpl(Loc, T, Diagnoser); } } // If we have a class template specialization or a class member of a // class template specialization, or an array with known size of such, // try to instantiate it. QualType MaybeTemplate = T; while (const ConstantArrayType *Array = Context.getAsConstantArrayType(MaybeTemplate)) MaybeTemplate = Array->getElementType(); // HLSL Change - Begin // In HLSL we don't decay arrays to pointers, so we need to require complete // types for array elements. if (LangOpts.HLSL) { if (const TagType *Tag = MaybeTemplate->getAs<TagType>()) { if (auto *Source = Context.getExternalSource()) Source->CompleteType(Tag->getDecl()); } } // HLSL Change - End if (const RecordType *Record = MaybeTemplate->getAs<RecordType>()) { if (ClassTemplateSpecializationDecl *ClassTemplateSpec = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) { if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared) return InstantiateClassTemplateSpecialization(Loc, ClassTemplateSpec, TSK_ImplicitInstantiation, /*Complain=*/!Diagnoser.Suppressed); } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Record->getDecl())) { CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass(); if (!Rec->isBeingDefined() && Pattern) { MemberSpecializationInfo *MSI = Rec->getMemberSpecializationInfo(); assert(MSI && "Missing member specialization information?"); // This record was instantiated from a class within a template. if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) return InstantiateClass(Loc, Rec, Pattern, getTemplateInstantiationArgs(Rec), TSK_ImplicitInstantiation, /*Complain=*/!Diagnoser.Suppressed); } } } if (Diagnoser.Suppressed) return true; // We have an incomplete type. Produce a diagnostic. 
if (Ident___float128 && T == Context.getTypeDeclType(Context.getFloat128StubType())) { Diag(Loc, diag::err_typecheck_decl_incomplete_type___float128); return true; } Diagnoser.diagnose(*this, Loc, T); // If the type was a forward declaration of a class/struct/union // type, produce a note. if (Tag && !Tag->getDecl()->isInvalidDecl()) Diag(Tag->getDecl()->getLocation(), Tag->isBeingDefined() ? diag::note_type_being_defined : diag::note_forward_declaration) << QualType(Tag, 0); // If the Objective-C class was a forward declaration, produce a note. if (IFace && !IFace->getDecl()->isInvalidDecl()) Diag(IFace->getDecl()->getLocation(), diag::note_forward_class); // If we have external information that we can use to suggest a fix, // produce a note. if (ExternalSource) ExternalSource->MaybeDiagnoseMissingCompleteType(Loc, T); return true; } bool Sema::RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { TypeDiagnoserDiag Diagnoser(DiagID); return RequireCompleteType(Loc, T, Diagnoser); } /// \brief Get diagnostic %select index for tag kind for /// literal type diagnostic message. /// WARNING: Indexes apply to particular diagnostics only! /// /// \returns diagnostic %select index. static unsigned getLiteralDiagFromTagKind(TagTypeKind Tag) { switch (Tag) { case TTK_Struct: return 0; case TTK_Interface: return 1; case TTK_Class: return 2; default: llvm_unreachable("Invalid tag kind for literal type diagnostic!"); } } /// @brief Ensure that the type T is a literal type. /// /// This routine checks whether the type @p T is a literal type. If @p T is an /// incomplete type, an attempt is made to complete it. If @p T is a literal /// type, or @p AllowIncompleteType is true and @p T is an incomplete type, /// returns false. Otherwise, this routine issues the diagnostic @p PD (giving /// it the type @p T), along with notes explaining why the type is not a /// literal type, and returns true. /// /// @param Loc The location in the source that the non-literal type /// diagnostic should refer to. /// /// @param T The type that this routine is examining for literalness. /// /// @param Diagnoser Emits a diagnostic if T is not a literal type. /// /// @returns @c true if @p T is not a literal type and a diagnostic was emitted, /// @c false otherwise. bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { assert(!T->isDependentType() && "type should not be dependent"); QualType ElemType = Context.getBaseElementType(T); RequireCompleteType(Loc, ElemType, 0); if (T->isLiteralType(Context)) return false; if (Diagnoser.Suppressed) return true; Diagnoser.diagnose(*this, Loc, T); if (T->isVariableArrayType()) return true; const RecordType *RT = ElemType->getAs<RecordType>(); if (!RT) return true; const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); // A partially-defined class type can't be a literal type, because a literal // class type must have a trivial destructor (which can't be checked until // the class definition is complete). if (!RD->isCompleteDefinition()) { RequireCompleteType(Loc, ElemType, diag::note_non_literal_incomplete, T); return true; } // If the class has virtual base classes, then it's not an aggregate, and // cannot have any constexpr constructors or a trivial default constructor, // so is non-literal. This is better to diagnose than the resulting absence // of constexpr constructors. 
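  // Editor's illustration (a sketch, not part of the original source): for
  //   struct B {}; struct D : virtual B {};
  // D can never be literal, because no constexpr constructor can initialize
  // a virtual base, so the note below points at the base itself rather than
  // at the missing constexpr constructors.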
if (RD->getNumVBases()) { Diag(RD->getLocation(), diag::note_non_literal_virtual_base) << getLiteralDiagFromTagKind(RD->getTagKind()) << RD->getNumVBases(); for (const auto &I : RD->vbases()) Diag(I.getLocStart(), diag::note_constexpr_virtual_base_here) << I.getSourceRange(); } else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() && !RD->hasTrivialDefaultConstructor()) { Diag(RD->getLocation(), diag::note_non_literal_no_constexpr_ctors) << RD; } else if (RD->hasNonLiteralTypeFieldsOrBases()) { for (const auto &I : RD->bases()) { if (!I.getType()->isLiteralType(Context)) { Diag(I.getLocStart(), diag::note_non_literal_base_class) << RD << I.getType() << I.getSourceRange(); return true; } } for (const auto *I : RD->fields()) { if (!I->getType()->isLiteralType(Context) || I->getType().isVolatileQualified()) { Diag(I->getLocation(), diag::note_non_literal_field) << RD << I << I->getType() << I->getType().isVolatileQualified(); return true; } } } else if (!RD->hasTrivialDestructor()) { // All fields and bases are of literal types, so have trivial destructors. // If this class's destructor is non-trivial it must be user-declared. CXXDestructorDecl *Dtor = RD->getDestructor(); assert(Dtor && "class has literal fields and bases but no dtor?"); if (!Dtor) return true; Diag(Dtor->getLocation(), Dtor->isUserProvided() ? diag::note_non_literal_user_provided_dtor : diag::note_non_literal_nontrivial_dtor) << RD; if (!Dtor->isUserProvided()) SpecialMemberIsTrivial(Dtor, CXXDestructor, /*Diagnose*/true); } return true; } bool Sema::RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID) { TypeDiagnoserDiag Diagnoser(DiagID); return RequireLiteralType(Loc, T, Diagnoser); } /// \brief Retrieve a version of the type 'T' that is elaborated by Keyword /// and qualified by the nested-name-specifier contained in SS. QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T) { if (T.isNull()) return T; NestedNameSpecifier *NNS; if (SS.isValid()) NNS = SS.getScopeRep(); else { if (Keyword == ETK_None) return T; NNS = nullptr; } return Context.getElaboratedType(Keyword, NNS, T); } QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) { ExprResult ER = CheckPlaceholderExpr(E); if (ER.isInvalid()) return QualType(); E = ER.get(); if (!E->isTypeDependent()) { QualType T = E->getType(); if (const TagType *TT = T->getAs<TagType>()) DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc()); } return Context.getTypeOfExprType(E); } /// getDecltypeForExpr - Given an expr, will return the decltype for /// that expression, according to the rules in C++11 /// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18. static QualType getDecltypeForExpr(Sema &S, Expr *E) { if (E->isTypeDependent()) return S.Context.DependentTy; // C++11 [dcl.type.simple]p4: // The type denoted by decltype(e) is defined as follows: // // - if e is an unparenthesized id-expression or an unparenthesized class // member access (5.2.5), decltype(e) is the type of the entity named // by e. If there is no such entity, or if e names a set of overloaded // functions, the program is ill-formed; // // We apply the same rules for Objective-C ivar and property references. 
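  // Editor's illustration (a sketch, not part of the original source):
  //   int x;
  //   decltype(x)            // int   - unparenthesized id-expression
  //   decltype((x))          // int&  - parenthesized lvalue, handled below
  //   decltype(std::move(x)) // int&& - xvalue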
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl())) return VD->getType(); } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) return FD->getType(); } else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(E)) { return IR->getDecl()->getType(); } else if (const ObjCPropertyRefExpr *PR = dyn_cast<ObjCPropertyRefExpr>(E)) { if (PR->isExplicitProperty()) return PR->getExplicitProperty()->getType(); } else if (auto *PE = dyn_cast<PredefinedExpr>(E)) { return PE->getType(); } // C++11 [expr.lambda.prim]p18: // Every occurrence of decltype((x)) where x is a possibly // parenthesized id-expression that names an entity of automatic // storage duration is treated as if x were transformed into an // access to a corresponding data member of the closure type that // would have been declared if x were an odr-use of the denoted // entity. using namespace sema; if (S.getCurLambda()) { if (isa<ParenExpr>(E)) { if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) { if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) { QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation()); if (!T.isNull()) return S.Context.getLValueReferenceType(T); } } } } // C++11 [dcl.type.simple]p4: // [...] QualType T = E->getType(); switch (E->getValueKind()) { // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the // type of e; case VK_XValue: T = S.Context.getRValueReferenceType(T); break; // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the // type of e; case VK_LValue: T = S.Context.getLValueReferenceType(T); break; // - otherwise, decltype(e) is the type of e. case VK_RValue: break; } return T; } QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated) { ExprResult ER = CheckPlaceholderExpr(E); if (ER.isInvalid()) return QualType(); E = ER.get(); if (AsUnevaluated && ActiveTemplateInstantiations.empty() && E->HasSideEffects(Context, false)) { // The expression operand for decltype is in an unevaluated expression // context, so side effects could result in unintended consequences. Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context); } return Context.getDecltypeType(E, getDecltypeForExpr(*this, E)); } QualType Sema::BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc) { switch (UKind) { case UnaryTransformType::EnumUnderlyingType: if (!BaseType->isDependentType() && !BaseType->isEnumeralType()) { Diag(Loc, diag::err_only_enums_have_underlying_types); return QualType(); } else { QualType Underlying = BaseType; if (!BaseType->isDependentType()) { // The enum could be incomplete if we're parsing its definition or // recovering from an error. 
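      // Editor's illustration (a sketch, not part of the original source):
      // for 'enum E : unsigned short { A };', __underlying_type(E) yields
      // 'unsigned short'; an enum that is still incomplete (e.g. while its
      // definition is being parsed) is diagnosed just below instead.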
NamedDecl *FwdDecl = nullptr; if (BaseType->isIncompleteType(&FwdDecl)) { Diag(Loc, diag::err_underlying_type_of_incomplete_enum) << BaseType; Diag(FwdDecl->getLocation(), diag::note_forward_declaration) << FwdDecl; return QualType(); } EnumDecl *ED = BaseType->getAs<EnumType>()->getDecl(); assert(ED && "EnumType has no EnumDecl"); DiagnoseUseOfDecl(ED, Loc); Underlying = ED->getIntegerType(); assert(!Underlying.isNull()); } return Context.getUnaryTransformType(BaseType, Underlying, UnaryTransformType::EnumUnderlyingType); } } llvm_unreachable("unknown unary transform type"); } QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) { if (!T->isDependentType()) { // FIXME: It isn't entirely clear whether incomplete atomic types // are allowed or not; for simplicity, ban them for the moment. if (RequireCompleteType(Loc, T, diag::err_atomic_specifier_bad_type, 0)) return QualType(); int DisallowedKind = -1; if (T->isArrayType()) DisallowedKind = 1; else if (T->isFunctionType()) DisallowedKind = 2; else if (T->isReferenceType()) DisallowedKind = 3; else if (T->isAtomicType()) DisallowedKind = 4; else if (T.hasQualifiers()) DisallowedKind = 5; else if (!T.isTriviallyCopyableType(Context)) // Some other non-trivially-copyable type (probably a C++ class) DisallowedKind = 6; if (DisallowedKind != -1) { Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T; return QualType(); } // FIXME: Do we need any handling for ARC here? } // Build the pointer type. return Context.getAtomicType(T); }
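// Editor's illustration (a sketch, not part of the original source):
// BuildAtomicType above accepts only complete, unqualified, trivially
// copyable object types, e.g.
//   _Atomic(int) a;        // OK
//   _Atomic(int[4]) b;     // error: array      (DisallowedKind == 1)
//   _Atomic(const int) c;  // error: qualifiers (DisallowedKind == 5)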
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/ScopeInfo.cpp
//===--- ScopeInfo.cpp - Information about a semantic context -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements FunctionScopeInfo and its subclasses, which contain // information about a single function, block, lambda, or method body. // //===----------------------------------------------------------------------===// #include "clang/Sema/ScopeInfo.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" using namespace clang; using namespace sema; void FunctionScopeInfo::Clear() { HasBranchProtectedScope = false; HasBranchIntoScope = false; HasIndirectGoto = false; HasDroppedStmt = false; ObjCShouldCallSuper = false; ObjCIsDesignatedInit = false; ObjCWarnForNoDesignatedInitChain = false; ObjCIsSecondaryInit = false; ObjCWarnForNoInitDelegation = false; FirstCXXTryLoc = SourceLocation(); FirstSEHTryLoc = SourceLocation(); SwitchStack.clear(); Returns.clear(); ErrorTrap.reset(); PossiblyUnreachableDiags.clear(); WeakObjectUses.clear(); ModifiedNonNullParams.clear(); } static const NamedDecl *getBestPropertyDecl(const ObjCPropertyRefExpr *PropE) { if (PropE->isExplicitProperty()) return PropE->getExplicitProperty(); return PropE->getImplicitPropertyGetter(); } FunctionScopeInfo::WeakObjectProfileTy::BaseInfoTy FunctionScopeInfo::WeakObjectProfileTy::getBaseInfo(const Expr *E) { E = E->IgnoreParenCasts(); const NamedDecl *D = nullptr; bool IsExact = false; switch (E->getStmtClass()) { case Stmt::DeclRefExprClass: D = cast<DeclRefExpr>(E)->getDecl(); IsExact = isa<VarDecl>(D); break; case Stmt::MemberExprClass: { const MemberExpr *ME = cast<MemberExpr>(E); D = ME->getMemberDecl(); IsExact = isa<CXXThisExpr>(ME->getBase()->IgnoreParenImpCasts()); break; } case Stmt::ObjCIvarRefExprClass: { const ObjCIvarRefExpr *IE = cast<ObjCIvarRefExpr>(E); D = IE->getDecl(); IsExact = IE->getBase()->isObjCSelfExpr(); break; } case Stmt::PseudoObjectExprClass: { const PseudoObjectExpr *POE = cast<PseudoObjectExpr>(E); const ObjCPropertyRefExpr *BaseProp = dyn_cast<ObjCPropertyRefExpr>(POE->getSyntacticForm()); if (BaseProp) { D = getBestPropertyDecl(BaseProp); const Expr *DoubleBase = BaseProp->getBase(); if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(DoubleBase)) DoubleBase = OVE->getSourceExpr(); IsExact = DoubleBase->isObjCSelfExpr(); } break; } default: break; } return BaseInfoTy(D, IsExact); } bool CapturingScopeInfo::isVLATypeCaptured(const VariableArrayType *VAT) const { RecordDecl *RD = nullptr; if (auto *LSI = dyn_cast<LambdaScopeInfo>(this)) RD = LSI->Lambda; else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(this)) RD = CRSI->TheRecordDecl; if (RD) for (auto *FD : RD->fields()) { if (FD->hasCapturedVLAType() && FD->getCapturedVLAType() == VAT) return true; } return false; } FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy( const ObjCPropertyRefExpr *PropE) : Base(nullptr, true), Property(getBestPropertyDecl(PropE)) { if (PropE->isObjectReceiver()) { const OpaqueValueExpr *OVE = cast<OpaqueValueExpr>(PropE->getBase()); const Expr *E = OVE->getSourceExpr(); Base = getBaseInfo(E); } else if (PropE->isClassReceiver()) { Base.setPointer(PropE->getClassReceiver()); } else { assert(PropE->isSuperReceiver()); } } 
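// Editor's illustration (a sketch, not part of the original source): for
// 'self.prop' the receiver is statically known, so the profile is exact;
// for 'getFoo().prop' the receiver expression is opaque, IsExact stays
// false, and distinct call sites conservatively share one profile key.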
FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy(const Expr *BaseE, const ObjCPropertyDecl *Prop) : Base(nullptr, true), Property(Prop) { if (BaseE) Base = getBaseInfo(BaseE); // else, this is a message accessing a property on super. } FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy( const DeclRefExpr *DRE) : Base(nullptr, true), Property(DRE->getDecl()) { assert(isa<VarDecl>(Property)); } FunctionScopeInfo::WeakObjectProfileTy::WeakObjectProfileTy( const ObjCIvarRefExpr *IvarE) : Base(getBaseInfo(IvarE->getBase())), Property(IvarE->getDecl()) { } void FunctionScopeInfo::recordUseOfWeak(const ObjCMessageExpr *Msg, const ObjCPropertyDecl *Prop) { assert(Msg && Prop); WeakUseVector &Uses = WeakObjectUses[WeakObjectProfileTy(Msg->getInstanceReceiver(), Prop)]; Uses.push_back(WeakUseTy(Msg, Msg->getNumArgs() == 0)); } void FunctionScopeInfo::markSafeWeakUse(const Expr *E) { E = E->IgnoreParenCasts(); if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) { markSafeWeakUse(POE->getSyntacticForm()); return; } if (const ConditionalOperator *Cond = dyn_cast<ConditionalOperator>(E)) { markSafeWeakUse(Cond->getTrueExpr()); markSafeWeakUse(Cond->getFalseExpr()); return; } if (const BinaryConditionalOperator *Cond = dyn_cast<BinaryConditionalOperator>(E)) { markSafeWeakUse(Cond->getCommon()); markSafeWeakUse(Cond->getFalseExpr()); return; } // Has this weak object been seen before? FunctionScopeInfo::WeakObjectUseMap::iterator Uses; if (const ObjCPropertyRefExpr *RefExpr = dyn_cast<ObjCPropertyRefExpr>(E)) { if (!RefExpr->isObjectReceiver()) return; if (isa<OpaqueValueExpr>(RefExpr->getBase())) Uses = WeakObjectUses.find(WeakObjectProfileTy(RefExpr)); else { markSafeWeakUse(RefExpr->getBase()); return; } } else if (const ObjCIvarRefExpr *IvarE = dyn_cast<ObjCIvarRefExpr>(E)) Uses = WeakObjectUses.find(WeakObjectProfileTy(IvarE)); else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) Uses = WeakObjectUses.find(WeakObjectProfileTy(DRE)); else if (const ObjCMessageExpr *MsgE = dyn_cast<ObjCMessageExpr>(E)) { Uses = WeakObjectUses.end(); if (const ObjCMethodDecl *MD = MsgE->getMethodDecl()) { if (const ObjCPropertyDecl *Prop = MD->findPropertyDecl()) { Uses = WeakObjectUses.find(WeakObjectProfileTy(MsgE->getInstanceReceiver(), Prop)); } } } else return; if (Uses == WeakObjectUses.end()) return; // Has there been a read from the object using this Expr? FunctionScopeInfo::WeakUseVector::reverse_iterator ThisUse = std::find(Uses->second.rbegin(), Uses->second.rend(), WeakUseTy(E, true)); if (ThisUse == Uses->second.rend()) return; ThisUse->markSafe(); } void LambdaScopeInfo::getPotentialVariableCapture(unsigned Idx, VarDecl *&VD, Expr *&E) const { assert(Idx < getNumPotentialVariableCaptures() && "Index of potential capture must be within 0 to less than the " "number of captures!"); E = PotentiallyCapturingExprs[Idx]; if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) VD = dyn_cast<VarDecl>(DRE->getFoundDecl()); else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) VD = dyn_cast<VarDecl>(ME->getMemberDecl()); else llvm_unreachable("Only DeclRefExprs or MemberExprs should be added for " "potential captures"); assert(VD); } FunctionScopeInfo::~FunctionScopeInfo() { } BlockScopeInfo::~BlockScopeInfo() { } LambdaScopeInfo::~LambdaScopeInfo() { } CapturedRegionScopeInfo::~CapturedRegionScopeInfo() { }
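// Editor's note (a sketch of the intent, not part of the original source):
// in code like
//   if (self.weakProp) { [self.weakProp doSomething]; }
// the read that was just tested can be marked safe via markSafeWeakUse so
// that -Warc-repeated-use-of-weak does not fire on it.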
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaDeclAttr.cpp
//===--- SemaDeclAttr.cpp - Declaration Attribute Handling ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements decl-related attribute processing.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
#include "clang/Sema/SemaHLSL.h" // HLSL Change

using namespace clang;
using namespace sema;

namespace AttributeLangSupport {
  enum LANG {
    C,
    Cpp,
    ObjC
  };
}

//===----------------------------------------------------------------------===//
//  Helper functions
//===----------------------------------------------------------------------===//

/// isFunctionOrMethod - Return true if the given decl has function
/// type (function or function-typed variable) or an Objective-C
/// method.
static bool isFunctionOrMethod(const Decl *D) {
  return (D->getFunctionType() != nullptr) || isa<ObjCMethodDecl>(D);
}

/// \brief Return true if the given decl has function type (function or
/// function-typed variable) or an Objective-C method or a block.
static bool isFunctionOrMethodOrBlock(const Decl *D) {
  return isFunctionOrMethod(D) || isa<BlockDecl>(D);
}

/// Return true if the given decl has a declarator that should have
/// been processed by Sema::GetTypeForDeclarator.
static bool hasDeclarator(const Decl *D) {
  // In some sense, TypedefDecl really *ought* to be a DeclaratorDecl.
  return isa<DeclaratorDecl>(D) || isa<BlockDecl>(D) ||
         isa<TypedefNameDecl>(D) || isa<ObjCPropertyDecl>(D);
}

/// hasFunctionProto - Return true if the given decl has argument
/// information. This decl should have already passed
/// isFunctionOrMethod or isFunctionOrMethodOrBlock.
static bool hasFunctionProto(const Decl *D) {
  if (const FunctionType *FnTy = D->getFunctionType())
    return isa<FunctionProtoType>(FnTy);
  return isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D);
}

/// getFunctionOrMethodNumParams - Return number of function or method
/// parameters. It is an error to call this on a K&R function (use
/// hasFunctionProto first).
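/// For illustration only (editor's sketch, not part of the original source):
/// in C, 'int f();' is a K&R declaration with no prototype, so
/// hasFunctionProto is false and this helper must not be called on it,
/// whereas 'int f(void);' carries a FunctionProtoType with zero parameters.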
static unsigned getFunctionOrMethodNumParams(const Decl *D) { if (const FunctionType *FnTy = D->getFunctionType()) return cast<FunctionProtoType>(FnTy)->getNumParams(); if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) return BD->getNumParams(); return cast<ObjCMethodDecl>(D)->param_size(); } static QualType getFunctionOrMethodParamType(const Decl *D, unsigned Idx) { if (const FunctionType *FnTy = D->getFunctionType()) return cast<FunctionProtoType>(FnTy)->getParamType(Idx); if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) return BD->getParamDecl(Idx)->getType(); return cast<ObjCMethodDecl>(D)->parameters()[Idx]->getType(); } static SourceRange getFunctionOrMethodParamRange(const Decl *D, unsigned Idx) { if (const auto *FD = dyn_cast<FunctionDecl>(D)) return FD->getParamDecl(Idx)->getSourceRange(); if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) return MD->parameters()[Idx]->getSourceRange(); if (const auto *BD = dyn_cast<BlockDecl>(D)) return BD->getParamDecl(Idx)->getSourceRange(); return SourceRange(); } static QualType getFunctionOrMethodResultType(const Decl *D) { if (const FunctionType *FnTy = D->getFunctionType()) return cast<FunctionType>(FnTy)->getReturnType(); return cast<ObjCMethodDecl>(D)->getReturnType(); } static SourceRange getFunctionOrMethodResultSourceRange(const Decl *D) { if (const auto *FD = dyn_cast<FunctionDecl>(D)) return FD->getReturnTypeSourceRange(); if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) return MD->getReturnTypeSourceRange(); return SourceRange(); } static bool isFunctionOrMethodVariadic(const Decl *D) { if (const FunctionType *FnTy = D->getFunctionType()) { const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy); return proto->isVariadic(); } if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) return BD->isVariadic(); return cast<ObjCMethodDecl>(D)->isVariadic(); } static bool isInstanceMethod(const Decl *D) { if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D)) return MethodDecl->isInstance(); return false; } static inline bool isNSStringType(QualType T, ASTContext &Ctx) { const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>(); if (!PT) return false; ObjCInterfaceDecl *Cls = PT->getObjectType()->getInterface(); if (!Cls) return false; IdentifierInfo* ClsName = Cls->getIdentifier(); // FIXME: Should we walk the chain of classes? return ClsName == &Ctx.Idents.get("NSString") || ClsName == &Ctx.Idents.get("NSMutableString"); } static inline bool isCFStringType(QualType T, ASTContext &Ctx) { const PointerType *PT = T->getAs<PointerType>(); if (!PT) return false; const RecordType *RT = PT->getPointeeType()->getAs<RecordType>(); if (!RT) return false; const RecordDecl *RD = RT->getDecl(); if (RD->getTagKind() != TTK_Struct) return false; return RD->getIdentifier() == &Ctx.Idents.get("__CFString"); } static unsigned getNumAttributeArgs(const AttributeList &Attr) { // FIXME: Include the type in the argument list. return Attr.getNumArgs() + Attr.hasParsedType(); } template <typename Compare> static bool checkAttributeNumArgsImpl(Sema &S, const AttributeList &Attr, unsigned Num, unsigned Diag, Compare Comp) { if (Comp(getNumAttributeArgs(Attr), Num)) { S.Diag(Attr.getLoc(), Diag) << Attr.getName() << Num; return false; } return true; } /// \brief Check if the attribute has exactly as many args as Num. May /// output an error. 
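/// For illustration only (editor's note, not part of the original source):
/// the three wrappers below feed std::not_equal_to, std::less, and
/// std::greater into checkAttributeNumArgsImpl, so "exactly", "at least",
/// and "at most" share one implementation; e.g. the "at least" form fails
/// precisely when getNumAttributeArgs(Attr) < Num.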
static bool checkAttributeNumArgs(Sema &S, const AttributeList &Attr, unsigned Num) { return checkAttributeNumArgsImpl(S, Attr, Num, diag::err_attribute_wrong_number_arguments, std::not_equal_to<unsigned>()); } /// \brief Check if the attribute has at least as many args as Num. May /// output an error. static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr, unsigned Num) { return checkAttributeNumArgsImpl(S, Attr, Num, diag::err_attribute_too_few_arguments, std::less<unsigned>()); } /// \brief Check if the attribute has at most as many args as Num. May /// output an error. static bool checkAttributeAtMostNumArgs(Sema &S, const AttributeList &Attr, unsigned Num) { return checkAttributeNumArgsImpl(S, Attr, Num, diag::err_attribute_too_many_arguments, std::greater<unsigned>()); } /// \brief If Expr is a valid integer constant, get the value of the integer /// expression and return success or failure. May output an error. static bool checkUInt32Argument(Sema &S, const AttributeList &Attr, const Expr *Expr, uint32_t &Val, unsigned Idx = UINT_MAX) { llvm::APSInt I(32); if (Expr->isTypeDependent() || Expr->isValueDependent() || !Expr->isIntegerConstantExpr(I, S.Context)) { if (Idx != UINT_MAX) S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << Idx << AANT_ArgumentIntegerConstant << Expr->getSourceRange(); else S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIntegerConstant << Expr->getSourceRange(); return false; } if (!I.isIntN(32)) { S.Diag(Expr->getExprLoc(), diag::err_ice_too_large) << I.toString(10, false) << 32 << /* Unsigned */ 1; return false; } Val = (uint32_t)I.getZExtValue(); return true; } /// \brief Diagnose mutually exclusive attributes when present on a given /// declaration. Returns true if diagnosed. template <typename AttrTy> static bool checkAttrMutualExclusion(Sema &S, Decl *D, const AttributeList &Attr) { if (AttrTy *A = D->getAttr<AttrTy>()) { S.Diag(Attr.getLoc(), diag::err_attributes_are_not_compatible) << Attr.getName() << A; return true; } return false; } /// \brief Check if IdxExpr is a valid parameter index for a function or /// instance method D. May output an error. /// /// \returns true if IdxExpr is a valid index. static bool checkFunctionOrMethodParameterIndex(Sema &S, const Decl *D, const AttributeList &Attr, unsigned AttrArgNum, const Expr *IdxExpr, uint64_t &Idx) { assert(isFunctionOrMethodOrBlock(D)); // In C++ the implicit 'this' function parameter also counts. // Parameters are counted from one. bool HP = hasFunctionProto(D); bool HasImplicitThisParam = isInstanceMethod(D); bool IV = HP && isFunctionOrMethodVariadic(D); unsigned NumParams = (HP ? getFunctionOrMethodNumParams(D) : 0) + HasImplicitThisParam; llvm::APSInt IdxInt; if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() || !IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << AttrArgNum << AANT_ArgumentIntegerConstant << IdxExpr->getSourceRange(); return false; } Idx = IdxInt.getLimitedValue(); if (Idx < 1 || (!IV && Idx > NumParams)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds) << Attr.getName() << AttrArgNum << IdxExpr->getSourceRange(); return false; } Idx--; // Convert to zero-based. 
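  // Editor's illustration (a sketch, not part of the original source): for a
  // member function 'void C::f(int a)', attribute indices are 1-based and
  // count 'this', so index 1 names 'this' (rejected just below) and index 2
  // selects 'a' (zero-based Idx == 0 after the adjustment).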
  if (HasImplicitThisParam) {
    if (Idx == 0) {
      S.Diag(Attr.getLoc(),
             diag::err_attribute_invalid_implicit_this_argument)
        << Attr.getName() << IdxExpr->getSourceRange();
      return false;
    }
    --Idx;
  }
  return true;
}

/// \brief Check if the argument \p ArgNum of \p Attr is an ASCII string
/// literal. If not, emit an error and return false. If the argument is an
/// identifier, it will emit an error with a fixit hint and treat it as if it
/// were a string literal.
bool Sema::checkStringLiteralArgumentAttr(const AttributeList &Attr,
                                          unsigned ArgNum, StringRef &Str,
                                          SourceLocation *ArgLocation) {
  // Look for identifiers. If we have one, emit a hint to fix it to a literal.
  if (Attr.isArgIdent(ArgNum)) {
    IdentifierLoc *Loc = Attr.getArgAsIdent(ArgNum);
    Diag(Loc->Loc, diag::err_attribute_argument_type)
        << Attr.getName() << AANT_ArgumentString
        << FixItHint::CreateInsertion(Loc->Loc, "\"")
        << FixItHint::CreateInsertion(PP.getLocForEndOfToken(Loc->Loc), "\"");
    Str = Loc->Ident->getName();
    if (ArgLocation)
      *ArgLocation = Loc->Loc;
    return true;
  }

  // Now check for an actual string literal.
  Expr *ArgExpr = Attr.getArgAsExpr(ArgNum);
  StringLiteral *Literal = dyn_cast<StringLiteral>(ArgExpr->IgnoreParenCasts());
  if (ArgLocation)
    *ArgLocation = ArgExpr->getLocStart();

  if (!Literal || !Literal->isAscii()) {
    Diag(ArgExpr->getLocStart(), diag::err_attribute_argument_type)
        << Attr.getName() << AANT_ArgumentString;
    return false;
  }
  Str = Literal->getString();
  return true;
}

/// \brief Applies the given attribute to the Decl without performing any
/// additional semantic checking.
template <typename AttrType>
static void handleSimpleAttribute(Sema &S, Decl *D,
                                  const AttributeList &Attr) {
  D->addAttr(::new (S.Context) AttrType(Attr.getRange(), S.Context,
                                        Attr.getAttributeSpellingListIndex()));
}

/// \brief Check if the passed-in expression is of type int or bool.
static bool isIntOrBool(Expr *Exp) {
  QualType QT = Exp->getType();
  return QT->isBooleanType() || QT->isIntegerType();
}

// Check to see if the type is a smart pointer of some kind.  We assume
// it's a smart pointer if it defines both operator-> and operator*.
static bool threadSafetyCheckIsSmartPointer(Sema &S, const RecordType* RT) {
  DeclContextLookupResult Res1 = RT->getDecl()->lookup(
      S.Context.DeclarationNames.getCXXOperatorName(OO_Star));
  if (Res1.empty())
    return false;

  DeclContextLookupResult Res2 = RT->getDecl()->lookup(
      S.Context.DeclarationNames.getCXXOperatorName(OO_Arrow));
  if (Res2.empty())
    return false;

  return true;
}

/// \brief Check if passed in Decl is a pointer type.
/// Note that this function may produce an error message.
/// \return true if the Decl is a pointer type; false otherwise
static bool threadSafetyCheckIsPointer(Sema &S, const Decl *D,
                                       const AttributeList &Attr) {
  const ValueDecl *vd = cast<ValueDecl>(D);
  QualType QT = vd->getType();
  if (QT->isAnyPointerType())
    return true;

  if (const RecordType *RT = QT->getAs<RecordType>()) {
    // If it's an incomplete type, it could be a smart pointer; skip it.
    // (We don't want to force template instantiation if we can avoid it,
    // since that would alter the order in which templates are instantiated.)
    if (RT->isIncompleteType())
      return true;

    if (threadSafetyCheckIsSmartPointer(S, RT))
      return true;
  }

  S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_pointer)
    << Attr.getName() << QT;
  return false;
}

/// \brief Checks that the passed in QualType either is of RecordType or points
/// to RecordType. Returns the relevant RecordType, null if it does not exist.
static const RecordType *getRecordType(QualType QT) { if (const RecordType *RT = QT->getAs<RecordType>()) return RT; // Now check if we point to record type. if (const PointerType *PT = QT->getAs<PointerType>()) return PT->getPointeeType()->getAs<RecordType>(); return nullptr; } static bool checkRecordTypeForCapability(Sema &S, QualType Ty) { const RecordType *RT = getRecordType(Ty); if (!RT) return false; // Don't check for the capability if the class hasn't been defined yet. if (RT->isIncompleteType()) return true; // Allow smart pointers to be used as capability objects. // FIXME -- Check the type that the smart pointer points to. if (threadSafetyCheckIsSmartPointer(S, RT)) return true; // Check if the record itself has a capability. RecordDecl *RD = RT->getDecl(); if (RD->hasAttr<CapabilityAttr>()) return true; // Else check if any base classes have a capability. if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { CXXBasePaths BPaths(false, false); if (CRD->lookupInBases([](const CXXBaseSpecifier *BS, CXXBasePath &P, void *) { return BS->getType()->getAs<RecordType>() ->getDecl()->hasAttr<CapabilityAttr>(); }, nullptr, BPaths)) return true; } return false; } static bool checkTypedefTypeForCapability(QualType Ty) { const auto *TD = Ty->getAs<TypedefType>(); if (!TD) return false; TypedefNameDecl *TN = TD->getDecl(); if (!TN) return false; return TN->hasAttr<CapabilityAttr>(); } static bool typeHasCapability(Sema &S, QualType Ty) { if (checkTypedefTypeForCapability(Ty)) return true; if (checkRecordTypeForCapability(S, Ty)) return true; return false; } static bool isCapabilityExpr(Sema &S, const Expr *Ex) { // Capability expressions are simple expressions involving the boolean logic // operators &&, || or !, a simple DeclRefExpr, CastExpr or a ParenExpr. Once // a DeclRefExpr is found, its type should be checked to determine whether it // is a capability or not. if (const auto *E = dyn_cast<DeclRefExpr>(Ex)) return typeHasCapability(S, E->getType()); else if (const auto *E = dyn_cast<CastExpr>(Ex)) return isCapabilityExpr(S, E->getSubExpr()); else if (const auto *E = dyn_cast<ParenExpr>(Ex)) return isCapabilityExpr(S, E->getSubExpr()); else if (const auto *E = dyn_cast<UnaryOperator>(Ex)) { if (E->getOpcode() == UO_LNot) return isCapabilityExpr(S, E->getSubExpr()); return false; } else if (const auto *E = dyn_cast<BinaryOperator>(Ex)) { if (E->getOpcode() == BO_LAnd || E->getOpcode() == BO_LOr) return isCapabilityExpr(S, E->getLHS()) && isCapabilityExpr(S, E->getRHS()); return false; } return false; } /// \brief Checks that all attribute arguments, starting from Sidx, resolve to /// a capability object. /// \param Sidx The attribute argument index to start checking with. /// \param ParamIdxOk Whether an argument can be indexing into a function /// parameter list. static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D, const AttributeList &Attr, SmallVectorImpl<Expr *> &Args, int Sidx = 0, bool ParamIdxOk = false) { for (unsigned Idx = Sidx; Idx < Attr.getNumArgs(); ++Idx) { Expr *ArgExp = Attr.getArgAsExpr(Idx); if (ArgExp->isTypeDependent()) { // FIXME -- need to check this again on template instantiation Args.push_back(ArgExp); continue; } if (StringLiteral *StrLit = dyn_cast<StringLiteral>(ArgExp)) { if (StrLit->getLength() == 0 || (StrLit->isAscii() && StrLit->getString() == StringRef("*"))) { // Pass empty strings to the analyzer without warnings. // Treat "*" as the universal lock. 
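        // Editor's illustration (a sketch, not part of the original source,
        // using the conventional GUARDED_BY macro): GUARDED_BY("*") and
        // GUARDED_BY("") reach this push_back silently; any other literal,
        // such as GUARDED_BY("mu"), falls through to the "ignored" warning
        // below.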
Args.push_back(ArgExp); continue; } // We allow constant strings to be used as a placeholder for expressions // that are not valid C++ syntax, but warn that they are ignored. S.Diag(Attr.getLoc(), diag::warn_thread_attribute_ignored) << Attr.getName(); Args.push_back(ArgExp); continue; } QualType ArgTy = ArgExp->getType(); // A pointer to member expression of the form &MyClass::mu is treated // specially -- we need to look at the type of the member. if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(ArgExp)) if (UOp->getOpcode() == UO_AddrOf) if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(UOp->getSubExpr())) if (DRE->getDecl()->isCXXInstanceMember()) ArgTy = DRE->getDecl()->getType(); // First see if we can just cast to record type, or pointer to record type. const RecordType *RT = getRecordType(ArgTy); // Now check if we index into a record type function param. if(!RT && ParamIdxOk) { FunctionDecl *FD = dyn_cast<FunctionDecl>(D); IntegerLiteral *IL = dyn_cast<IntegerLiteral>(ArgExp); if(FD && IL) { unsigned int NumParams = FD->getNumParams(); llvm::APInt ArgValue = IL->getValue(); uint64_t ParamIdxFromOne = ArgValue.getZExtValue(); uint64_t ParamIdxFromZero = ParamIdxFromOne - 1; if(!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_range) << Attr.getName() << Idx + 1 << NumParams; continue; } ArgTy = FD->getParamDecl(ParamIdxFromZero)->getType(); } } // If the type does not have a capability, see if the components of the // expression have capabilities. This allows for writing C code where the // capability may be on the type, and the expression is a capability // boolean logic expression. Eg) requires_capability(A || B && !C) if (!typeHasCapability(S, ArgTy) && !isCapabilityExpr(S, ArgExp)) S.Diag(Attr.getLoc(), diag::warn_thread_attribute_argument_not_lockable) << Attr.getName() << ArgTy; Args.push_back(ArgExp); } } //===----------------------------------------------------------------------===// // Attribute Implementations //===----------------------------------------------------------------------===// static void handlePtGuardedVarAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!threadSafetyCheckIsPointer(S, D, Attr)) return; D->addAttr(::new (S.Context) PtGuardedVarAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static bool checkGuardedByAttrCommon(Sema &S, Decl *D, const AttributeList &Attr, Expr* &Arg) { SmallVector<Expr*, 1> Args; // check that all arguments are lockable objects checkAttrArgsAreCapabilityObjs(S, D, Attr, Args); unsigned Size = Args.size(); if (Size != 1) return false; Arg = Args[0]; return true; } static void handleGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr) { Expr *Arg = nullptr; if (!checkGuardedByAttrCommon(S, D, Attr, Arg)) return; D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg, Attr.getAttributeSpellingListIndex())); } static void handlePtGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr) { Expr *Arg = nullptr; if (!checkGuardedByAttrCommon(S, D, Attr, Arg)) return; if (!threadSafetyCheckIsPointer(S, D, Attr)) return; D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(), S.Context, Arg, Attr.getAttributeSpellingListIndex())); } static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D, const AttributeList &Attr, SmallVectorImpl<Expr *> &Args) { if (!checkAttributeAtLeastNumArgs(S, Attr, 1)) return false; // Check that this attribute only applies to lockable types. 
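  // Editor's illustration (a sketch, not part of the original source, using
  // the conventional thread-safety macros): these checks support members like
  //   Mutex mu1;
  //   Mutex mu2 ACQUIRED_AFTER(mu1);
  // where Mutex is a type marked with the 'capability' (lockable) attribute.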
QualType QT = cast<ValueDecl>(D)->getType(); if (!QT->isDependentType()) { const RecordType *RT = getRecordType(QT); if (!RT || !RT->getDecl()->hasAttr<CapabilityAttr>()) { S.Diag(Attr.getLoc(), diag::warn_thread_attribute_decl_not_lockable) << Attr.getName(); return false; } } // Check that all arguments are lockable objects. checkAttrArgsAreCapabilityObjs(S, D, Attr, Args); if (Args.empty()) return false; return true; } static void handleAcquiredAfterAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 1> Args; if (!checkAcquireOrderAttrCommon(S, D, Attr, Args)) return; Expr **StartArg = &Args[0]; D->addAttr(::new (S.Context) AcquiredAfterAttr(Attr.getRange(), S.Context, StartArg, Args.size(), Attr.getAttributeSpellingListIndex())); } static void handleAcquiredBeforeAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 1> Args; if (!checkAcquireOrderAttrCommon(S, D, Attr, Args)) return; Expr **StartArg = &Args[0]; D->addAttr(::new (S.Context) AcquiredBeforeAttr(Attr.getRange(), S.Context, StartArg, Args.size(), Attr.getAttributeSpellingListIndex())); } static bool checkLockFunAttrCommon(Sema &S, Decl *D, const AttributeList &Attr, SmallVectorImpl<Expr *> &Args) { // zero or more arguments ok // check that all arguments are lockable objects checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true); return true; } static void handleAssertSharedLockAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 1> Args; if (!checkLockFunAttrCommon(S, D, Attr, Args)) return; unsigned Size = Args.size(); Expr **StartArg = Size == 0 ? nullptr : &Args[0]; D->addAttr(::new (S.Context) AssertSharedLockAttr(Attr.getRange(), S.Context, StartArg, Size, Attr.getAttributeSpellingListIndex())); } static void handleAssertExclusiveLockAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 1> Args; if (!checkLockFunAttrCommon(S, D, Attr, Args)) return; unsigned Size = Args.size(); Expr **StartArg = Size == 0 ? 
nullptr : &Args[0]; D->addAttr(::new (S.Context) AssertExclusiveLockAttr(Attr.getRange(), S.Context, StartArg, Size, Attr.getAttributeSpellingListIndex())); } static bool checkTryLockFunAttrCommon(Sema &S, Decl *D, const AttributeList &Attr, SmallVectorImpl<Expr *> &Args) { if (!checkAttributeAtLeastNumArgs(S, Attr, 1)) return false; if (!isIntOrBool(Attr.getArgAsExpr(0))) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << 1 << AANT_ArgumentIntOrBool; return false; } // check that all arguments are lockable objects checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 1); return true; } static void handleSharedTrylockFunctionAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 2> Args; if (!checkTryLockFunAttrCommon(S, D, Attr, Args)) return; D->addAttr(::new (S.Context) SharedTrylockFunctionAttr(Attr.getRange(), S.Context, Attr.getArgAsExpr(0), Args.data(), Args.size(), Attr.getAttributeSpellingListIndex())); } static void handleExclusiveTrylockFunctionAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 2> Args; if (!checkTryLockFunAttrCommon(S, D, Attr, Args)) return; D->addAttr(::new (S.Context) ExclusiveTrylockFunctionAttr( Attr.getRange(), S.Context, Attr.getArgAsExpr(0), Args.data(), Args.size(), Attr.getAttributeSpellingListIndex())); } static void handleLockReturnedAttr(Sema &S, Decl *D, const AttributeList &Attr) { // check that the argument is lockable object SmallVector<Expr*, 1> Args; checkAttrArgsAreCapabilityObjs(S, D, Attr, Args); unsigned Size = Args.size(); if (Size == 0) return; D->addAttr(::new (S.Context) LockReturnedAttr(Attr.getRange(), S.Context, Args[0], Attr.getAttributeSpellingListIndex())); } static void handleLocksExcludedAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkAttributeAtLeastNumArgs(S, Attr, 1)) return; // check that all arguments are lockable objects SmallVector<Expr*, 1> Args; checkAttrArgsAreCapabilityObjs(S, D, Attr, Args); unsigned Size = Args.size(); if (Size == 0) return; Expr **StartArg = &Args[0]; D->addAttr(::new (S.Context) LocksExcludedAttr(Attr.getRange(), S.Context, StartArg, Size, Attr.getAttributeSpellingListIndex())); } static void handleEnableIfAttr(Sema &S, Decl *D, const AttributeList &Attr) { Expr *Cond = Attr.getArgAsExpr(0); if (!Cond->isTypeDependent()) { ExprResult Converted = S.PerformContextuallyConvertToBool(Cond); if (Converted.isInvalid()) return; Cond = Converted.get(); } StringRef Msg; if (!S.checkStringLiteralArgumentAttr(Attr, 1, Msg)) return; SmallVector<PartialDiagnosticAt, 8> Diags; if (!Cond->isValueDependent() && !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(D), Diags)) { S.Diag(Attr.getLoc(), diag::err_enable_if_never_constant_expr); for (int I = 0, N = Diags.size(); I != N; ++I) S.Diag(Diags[I].first, Diags[I].second); return; } D->addAttr(::new (S.Context) EnableIfAttr(Attr.getRange(), S.Context, Cond, Msg, Attr.getAttributeSpellingListIndex())); } static void handleConsumableAttr(Sema &S, Decl *D, const AttributeList &Attr) { ConsumableAttr::ConsumedState DefaultState; if (Attr.isArgIdent(0)) { IdentifierLoc *IL = Attr.getArgAsIdent(0); if (!ConsumableAttr::ConvertStrToConsumedState(IL->Ident->getName(), DefaultState)) { S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << Attr.getName() << IL->Ident; return; } } else { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIdentifier; return; } D->addAttr(::new (S.Context) ConsumableAttr(Attr.getRange(), S.Context, 
DefaultState, Attr.getAttributeSpellingListIndex())); } static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD, const AttributeList &Attr) { ASTContext &CurrContext = S.getASTContext(); QualType ThisType = MD->getThisType(CurrContext)->getPointeeType(); if (const CXXRecordDecl *RD = ThisType->getAsCXXRecordDecl()) { if (!RD->hasAttr<ConsumableAttr>()) { S.Diag(Attr.getLoc(), diag::warn_attr_on_unconsumable_class) << RD->getNameAsString(); return false; } } return true; } static void handleCallableWhenAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkAttributeAtLeastNumArgs(S, Attr, 1)) return; if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr)) return; SmallVector<CallableWhenAttr::ConsumedState, 3> States; for (unsigned ArgIndex = 0; ArgIndex < Attr.getNumArgs(); ++ArgIndex) { CallableWhenAttr::ConsumedState CallableState; StringRef StateString; SourceLocation Loc; if (Attr.isArgIdent(ArgIndex)) { IdentifierLoc *Ident = Attr.getArgAsIdent(ArgIndex); StateString = Ident->Ident->getName(); Loc = Ident->Loc; } else { if (!S.checkStringLiteralArgumentAttr(Attr, ArgIndex, StateString, &Loc)) return; } if (!CallableWhenAttr::ConvertStrToConsumedState(StateString, CallableState)) { S.Diag(Loc, diag::warn_attribute_type_not_supported) << Attr.getName() << StateString; return; } States.push_back(CallableState); } D->addAttr(::new (S.Context) CallableWhenAttr(Attr.getRange(), S.Context, States.data(), States.size(), Attr.getAttributeSpellingListIndex())); } static void handleParamTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) { ParamTypestateAttr::ConsumedState ParamState; if (Attr.isArgIdent(0)) { IdentifierLoc *Ident = Attr.getArgAsIdent(0); StringRef StateString = Ident->Ident->getName(); if (!ParamTypestateAttr::ConvertStrToConsumedState(StateString, ParamState)) { S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported) << Attr.getName() << StateString; return; } } else { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIdentifier; return; } // FIXME: This check is currently being done in the analysis. It can be // enabled here only after the parser propagates attributes at // template specialization definition, not declaration. //QualType ReturnType = cast<ParmVarDecl>(D)->getType(); //const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl(); // //if (!RD || !RD->hasAttr<ConsumableAttr>()) { // S.Diag(Attr.getLoc(), diag::warn_return_state_for_unconsumable_type) << // ReturnType.getAsString(); // return; //} D->addAttr(::new (S.Context) ParamTypestateAttr(Attr.getRange(), S.Context, ParamState, Attr.getAttributeSpellingListIndex())); } static void handleReturnTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) { ReturnTypestateAttr::ConsumedState ReturnState; if (Attr.isArgIdent(0)) { IdentifierLoc *IL = Attr.getArgAsIdent(0); if (!ReturnTypestateAttr::ConvertStrToConsumedState(IL->Ident->getName(), ReturnState)) { S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << Attr.getName() << IL->Ident; return; } } else { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIdentifier; return; } // FIXME: This check is currently being done in the analysis. It can be // enabled here only after the parser propagates attributes at // template specialization definition, not declaration. 
//QualType ReturnType; // //if (const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(D)) { // ReturnType = Param->getType(); // //} else if (const CXXConstructorDecl *Constructor = // dyn_cast<CXXConstructorDecl>(D)) { // ReturnType = Constructor->getThisType(S.getASTContext())->getPointeeType(); // //} else { // // ReturnType = cast<FunctionDecl>(D)->getCallResultType(); //} // //const CXXRecordDecl *RD = ReturnType->getAsCXXRecordDecl(); // //if (!RD || !RD->hasAttr<ConsumableAttr>()) { // S.Diag(Attr.getLoc(), diag::warn_return_state_for_unconsumable_type) << // ReturnType.getAsString(); // return; //} D->addAttr(::new (S.Context) ReturnTypestateAttr(Attr.getRange(), S.Context, ReturnState, Attr.getAttributeSpellingListIndex())); } static void handleSetTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr)) return; SetTypestateAttr::ConsumedState NewState; if (Attr.isArgIdent(0)) { IdentifierLoc *Ident = Attr.getArgAsIdent(0); StringRef Param = Ident->Ident->getName(); if (!SetTypestateAttr::ConvertStrToConsumedState(Param, NewState)) { S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported) << Attr.getName() << Param; return; } } else { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIdentifier; return; } D->addAttr(::new (S.Context) SetTypestateAttr(Attr.getRange(), S.Context, NewState, Attr.getAttributeSpellingListIndex())); } static void handleTestTypestateAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), Attr)) return; TestTypestateAttr::ConsumedState TestState; if (Attr.isArgIdent(0)) { IdentifierLoc *Ident = Attr.getArgAsIdent(0); StringRef Param = Ident->Ident->getName(); if (!TestTypestateAttr::ConvertStrToConsumedState(Param, TestState)) { S.Diag(Ident->Loc, diag::warn_attribute_type_not_supported) << Attr.getName() << Param; return; } } else { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIdentifier; return; } D->addAttr(::new (S.Context) TestTypestateAttr(Attr.getRange(), S.Context, TestState, Attr.getAttributeSpellingListIndex())); } static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D, const AttributeList &Attr) { // Remember this typedef decl, we will need it later for diagnostics. S.ExtVectorDecls.push_back(cast<TypedefNameDecl>(D)); } static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (TagDecl *TD = dyn_cast<TagDecl>(D)) TD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) { // If the alignment is less than or equal to 8 bits, the packed attribute // has no effect. if (!FD->getType()->isDependentType() && !FD->getType()->isIncompleteType() && S.Context.getTypeAlign(FD->getType()) <= 8) S.Diag(Attr.getLoc(), diag::warn_attribute_ignored_for_field_of_type) << Attr.getName() << FD->getType(); else FD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } else S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); } static bool checkIBOutletCommon(Sema &S, Decl *D, const AttributeList &Attr) { // The IBOutlet/IBOutletCollection attributes only apply to instance // variables or properties of Objective-C classes. The outlet must also // have an object reference type. 
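// For illustration only (hypothetical Objective-C, not part of the
// original source):
//   @interface MyController : NSObject {
//     IBOutlet NSButton *okButton; // accepted: object reference type
//     IBOutlet int badOutlet;      // warns: not an object reference
//   }
//   @end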
if (const ObjCIvarDecl *VD = dyn_cast<ObjCIvarDecl>(D)) { if (!VD->getType()->getAs<ObjCObjectPointerType>()) { S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type) << Attr.getName() << VD->getType() << 0; return false; } } else if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) { if (!PD->getType()->getAs<ObjCObjectPointerType>()) { S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type) << Attr.getName() << PD->getType() << 1; return false; } } else { S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName(); return false; } return true; } static void handleIBOutlet(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkIBOutletCommon(S, D, Attr)) return; D->addAttr(::new (S.Context) IBOutletAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleIBOutletCollection(Sema &S, Decl *D, const AttributeList &Attr) { // The iboutletcollection attribute can have zero or one arguments. if (Attr.getNumArgs() > 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr.getName() << 1; return; } if (!checkIBOutletCommon(S, D, Attr)) return; ParsedType PT; if (Attr.hasParsedType()) PT = Attr.getTypeArg(); else { PT = S.getTypeName(S.Context.Idents.get("NSObject"), Attr.getLoc(), S.getScopeForContext(D->getDeclContext()->getParent())); if (!PT) { S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << "NSObject"; return; } } TypeSourceInfo *QTLoc = nullptr; QualType QT = S.GetTypeFromParser(PT, &QTLoc); if (!QTLoc) QTLoc = S.Context.getTrivialTypeSourceInfo(QT, Attr.getLoc()); // Diagnose use of non-object type in iboutletcollection attribute. // FIXME. Gnu attribute extension ignores use of builtin types in // attributes. So, __attribute__((iboutletcollection(char))) will be // treated as __attribute__((iboutletcollection())). if (!QT->isObjCIdType() && !QT->isObjCObjectType()) { S.Diag(Attr.getLoc(), QT->isBuiltinType() ? diag::err_iboutletcollection_builtintype : diag::err_iboutletcollection_type) << QT; return; } D->addAttr(::new (S.Context) IBOutletCollectionAttr(Attr.getRange(), S.Context, QTLoc, Attr.getAttributeSpellingListIndex())); } bool Sema::isValidPointerAttrType(QualType T, bool RefOkay) { if (RefOkay) { if (T->isReferenceType()) return true; } else { T = T.getNonReferenceType(); } // The nonnull attribute, and other similar attributes, can be applied to a // transparent union that contains a pointer type. if (const RecordType *UT = T->getAsUnionType()) { if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) { RecordDecl *UD = UT->getDecl(); for (const auto *I : UD->fields()) { QualType QT = I->getType(); if (QT->isAnyPointerType() || QT->isBlockPointerType()) return true; } } } return T->isAnyPointerType() || T->isBlockPointerType(); } static bool attrNonNullArgCheck(Sema &S, QualType T, const AttributeList &Attr, SourceRange AttrParmRange, SourceRange TypeRange, bool isReturnValue = false) { if (!S.isValidPointerAttrType(T)) { S.Diag(Attr.getLoc(), isReturnValue ? diag::warn_attribute_return_pointers_only : diag::warn_attribute_pointers_only) << Attr.getName() << AttrParmRange << TypeRange; return false; } return true; } static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<unsigned, 8> NonNullArgs; for (unsigned I = 0; I < Attr.getNumArgs(); ++I) { Expr *Ex = Attr.getArgAsExpr(I); uint64_t Idx; if (!checkFunctionOrMethodParameterIndex(S, D, Attr, I + 1, Ex, Idx)) return; // Is the function argument a pointer type? 
if (Idx < getFunctionOrMethodNumParams(D) && !attrNonNullArgCheck(S, getFunctionOrMethodParamType(D, Idx), Attr, Ex->getSourceRange(), getFunctionOrMethodParamRange(D, Idx))) continue; NonNullArgs.push_back(Idx); } // If no arguments were specified to __attribute__((nonnull)) then all pointer // arguments have a nonnull attribute; warn if there aren't any. Skip this // check if the attribute came from a macro expansion or a template // instantiation. if (NonNullArgs.empty() && Attr.getLoc().isFileID() && S.ActiveTemplateInstantiations.empty()) { bool AnyPointers = isFunctionOrMethodVariadic(D); for (unsigned I = 0, E = getFunctionOrMethodNumParams(D); I != E && !AnyPointers; ++I) { QualType T = getFunctionOrMethodParamType(D, I); if (T->isDependentType() || S.isValidPointerAttrType(T)) AnyPointers = true; } if (!AnyPointers) S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers); } unsigned *Start = NonNullArgs.data(); unsigned Size = NonNullArgs.size(); llvm::array_pod_sort(Start, Start + Size); D->addAttr(::new (S.Context) NonNullAttr(Attr.getRange(), S.Context, Start, Size, Attr.getAttributeSpellingListIndex())); } static void handleNonNullAttrParameter(Sema &S, ParmVarDecl *D, const AttributeList &Attr) { if (Attr.getNumArgs() > 0) { if (D->getFunctionType()) { handleNonNullAttr(S, D, Attr); } else { S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_parm_no_args) << D->getSourceRange(); } return; } // Is the argument a pointer type? if (!attrNonNullArgCheck(S, D->getType(), Attr, SourceRange(), D->getSourceRange())) return; D->addAttr(::new (S.Context) NonNullAttr(Attr.getRange(), S.Context, nullptr, 0, Attr.getAttributeSpellingListIndex())); } static void handleReturnsNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) { QualType ResultType = getFunctionOrMethodResultType(D); SourceRange SR = getFunctionOrMethodResultSourceRange(D); if (!attrNonNullArgCheck(S, ResultType, Attr, SourceRange(), SR, /* isReturnValue */ true)) return; D->addAttr(::new (S.Context) ReturnsNonNullAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleAssumeAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) { Expr *E = Attr.getArgAsExpr(0), *OE = Attr.getNumArgs() > 1 ? 
Attr.getArgAsExpr(1) : nullptr; S.AddAssumeAlignedAttr(Attr.getRange(), D, E, OE, Attr.getAttributeSpellingListIndex()); } void Sema::AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex) { QualType ResultType = getFunctionOrMethodResultType(D); SourceRange SR = getFunctionOrMethodResultSourceRange(D); AssumeAlignedAttr TmpAttr(AttrRange, Context, E, OE, SpellingListIndex); SourceLocation AttrLoc = AttrRange.getBegin(); if (!isValidPointerAttrType(ResultType, /* RefOkay */ true)) { Diag(AttrLoc, diag::warn_attribute_return_pointers_refs_only) << &TmpAttr << AttrRange << SR; return; } if (!E->isValueDependent()) { llvm::APSInt I(64); if (!E->isIntegerConstantExpr(I, Context)) { if (OE) Diag(AttrLoc, diag::err_attribute_argument_n_type) << &TmpAttr << 1 << AANT_ArgumentIntegerConstant << E->getSourceRange(); else Diag(AttrLoc, diag::err_attribute_argument_type) << &TmpAttr << AANT_ArgumentIntegerConstant << E->getSourceRange(); return; } if (!I.isPowerOf2()) { Diag(AttrLoc, diag::err_alignment_not_power_of_two) << E->getSourceRange(); return; } } if (OE) { if (!OE->isValueDependent()) { llvm::APSInt I(64); if (!OE->isIntegerConstantExpr(I, Context)) { Diag(AttrLoc, diag::err_attribute_argument_n_type) << &TmpAttr << 2 << AANT_ArgumentIntegerConstant << OE->getSourceRange(); return; } } } D->addAttr(::new (Context) AssumeAlignedAttr(AttrRange, Context, E, OE, SpellingListIndex)); } static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) { // This attribute must be applied to a function declaration. The first // argument to the attribute must be an identifier, the name of the resource, // for example: malloc. The following arguments must be argument indexes, the // arguments must be of integer type for Returns, otherwise of pointer type. // The difference between Holds and Takes is that a pointer may still be used // after being held. free() should be __attribute((ownership_takes)), whereas // a list append function may well be __attribute((ownership_holds)). if (!AL.isArgIdent(0)) { S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type) << AL.getName() << 1 << AANT_ArgumentIdentifier; return; } // Figure out our Kind. OwnershipAttr::OwnershipKind K = OwnershipAttr(AL.getLoc(), S.Context, nullptr, nullptr, 0, AL.getAttributeSpellingListIndex()).getOwnKind(); // Check arguments. switch (K) { case OwnershipAttr::Takes: case OwnershipAttr::Holds: if (AL.getNumArgs() < 2) { S.Diag(AL.getLoc(), diag::err_attribute_too_few_arguments) << AL.getName() << 2; return; } break; case OwnershipAttr::Returns: if (AL.getNumArgs() > 2) { S.Diag(AL.getLoc(), diag::err_attribute_too_many_arguments) << AL.getName() << 1; return; } break; } IdentifierInfo *Module = AL.getArgAsIdent(0)->Ident; // Normalize the argument, __foo__ becomes foo. StringRef ModuleName = Module->getName(); if (ModuleName.startswith("__") && ModuleName.endswith("__") && ModuleName.size() > 4) { ModuleName = ModuleName.drop_front(2).drop_back(2); Module = &S.PP.getIdentifierTable().get(ModuleName); } SmallVector<unsigned, 8> OwnershipArgs; for (unsigned i = 1; i < AL.getNumArgs(); ++i) { Expr *Ex = AL.getArgAsExpr(i); uint64_t Idx; if (!checkFunctionOrMethodParameterIndex(S, D, AL, i, Ex, Idx)) return; // Is the function argument a pointer type? 
QualType T = getFunctionOrMethodParamType(D, Idx); int Err = -1; // No error switch (K) { case OwnershipAttr::Takes: case OwnershipAttr::Holds: if (!T->isAnyPointerType() && !T->isBlockPointerType()) Err = 0; break; case OwnershipAttr::Returns: if (!T->isIntegerType()) Err = 1; break; } if (-1 != Err) { S.Diag(AL.getLoc(), diag::err_ownership_type) << AL.getName() << Err << Ex->getSourceRange(); return; } // Check we don't have a conflict with another ownership attribute. for (const auto *I : D->specific_attrs<OwnershipAttr>()) { // Cannot have two ownership attributes of different kinds for the same // index. if (I->getOwnKind() != K && I->args_end() != std::find(I->args_begin(), I->args_end(), Idx)) { S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible) << AL.getName() << I; return; } else if (K == OwnershipAttr::Returns && I->getOwnKind() == OwnershipAttr::Returns) { // A returns attribute conflicts with any other returns attribute using // a different index. Note, diagnostic reporting is 1-based, but stored // argument indexes are 0-based. if (std::find(I->args_begin(), I->args_end(), Idx) == I->args_end()) { S.Diag(I->getLocation(), diag::err_ownership_returns_index_mismatch) << *(I->args_begin()) + 1; if (I->args_size()) S.Diag(AL.getLoc(), diag::note_ownership_returns_index_mismatch) << (unsigned)Idx + 1 << Ex->getSourceRange(); return; } } } OwnershipArgs.push_back(Idx); } unsigned* start = OwnershipArgs.data(); unsigned size = OwnershipArgs.size(); llvm::array_pod_sort(start, start + size); D->addAttr(::new (S.Context) OwnershipAttr(AL.getLoc(), S.Context, Module, start, size, AL.getAttributeSpellingListIndex())); } static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Check the attribute arguments. if (Attr.getNumArgs() > 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr.getName() << 1; return; } NamedDecl *nd = cast<NamedDecl>(D); // gcc rejects // class c { // static int a __attribute__((weakref ("v2"))); // static int b() __attribute__((weakref ("f3"))); // }; // and ignores the attributes of // void f(void) { // static int a __attribute__((weakref ("v2"))); // } // we reject them const DeclContext *Ctx = D->getDeclContext()->getRedeclContext(); if (!Ctx->isFileContext()) { S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context) << nd; return; } // The GCC manual says // // At present, a declaration to which `weakref' is attached can only // be `static'. // // It also says // // Without a TARGET, // given as an argument to `weakref' or to `alias', `weakref' is // equivalent to `weak'. // // gcc 4.4.1 will accept // int a7 __attribute__((weakref)); // as // int a7 __attribute__((weak)); // This looks like a bug in gcc. We reject that for now. We should revisit // it if this behaviour is actually used. // GCC rejects // static ((alias ("y"), weakref)). // Should we? How to check that weakref is before or after alias? // FIXME: it would be good for us to keep the WeakRefAttr as-written instead // of transforming it into an AliasAttr. The WeakRefAttr never uses the // StringRef parameter it was given anyway. StringRef Str; if (Attr.getNumArgs() && S.checkStringLiteralArgumentAttr(Attr, 0, Str)) // GCC will accept anything as the argument of weakref. Should we // check for an existing decl? 
D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context, Str, Attr.getAttributeSpellingListIndex())); D->addAttr(::new (S.Context) WeakRefAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) { StringRef Str; if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str)) return; if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { S.Diag(Attr.getLoc(), diag::err_alias_not_supported_on_darwin); return; } // Aliases should be on declarations, not definitions. if (const auto *FD = dyn_cast<FunctionDecl>(D)) { if (FD->isThisDeclarationADefinition()) { S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << FD; return; } } else { const auto *VD = cast<VarDecl>(D); if (VD->isThisDeclarationADefinition() && VD->isExternallyVisible()) { S.Diag(Attr.getLoc(), diag::err_alias_is_definition) << VD; return; } } // FIXME: check if target symbol exists in current file D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context, Str, Attr.getAttributeSpellingListIndex())); } static void handleColdAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (checkAttrMutualExclusion<HotAttr>(S, D, Attr)) return; D->addAttr(::new (S.Context) ColdAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleHotAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (checkAttrMutualExclusion<ColdAttr>(S, D, Attr)) return; D->addAttr(::new (S.Context) HotAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleTLSModelAttr(Sema &S, Decl *D, const AttributeList &Attr) { StringRef Model; SourceLocation LiteralLoc; // Check that it is a string. if (!S.checkStringLiteralArgumentAttr(Attr, 0, Model, &LiteralLoc)) return; // Check that the value is one of the supported TLS models.
if (Model != "global-dynamic" && Model != "local-dynamic" && Model != "initial-exec" && Model != "local-exec") { S.Diag(LiteralLoc, diag::err_attr_tlsmodel_arg); return; } D->addAttr(::new (S.Context) TLSModelAttr(Attr.getRange(), S.Context, Model, Attr.getAttributeSpellingListIndex())); } static void handleRestrictAttr(Sema &S, Decl *D, const AttributeList &Attr) { QualType ResultType = getFunctionOrMethodResultType(D); if (ResultType->isAnyPointerType() || ResultType->isBlockPointerType()) { D->addAttr(::new (S.Context) RestrictAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; } S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only) << Attr.getName() << getFunctionOrMethodResultSourceRange(D); } static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (S.LangOpts.CPlusPlus) { S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang) << Attr.getName() << AttributeLangSupport::Cpp; return; } D->addAttr(::new (S.Context) CommonAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &attr) { if (hasDeclarator(D)) return; if (S.CheckNoReturnAttr(attr)) return; if (!isa<ObjCMethodDecl>(D)) { S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type) << attr.getName() << ExpectedFunctionOrMethod; return; } D->addAttr(::new (S.Context) NoReturnAttr(attr.getRange(), S.Context, attr.getAttributeSpellingListIndex())); } bool Sema::CheckNoReturnAttr(const AttributeList &attr) { if (!checkAttributeNumArgs(*this, attr, 0)) { attr.setInvalid(); return true; } return false; } static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const AttributeList &Attr) { // The checking path for 'noreturn' and 'analyzer_noreturn' are different // because 'analyzer_noreturn' does not impact the type. if (!isFunctionOrMethodOrBlock(D)) { ValueDecl *VD = dyn_cast<ValueDecl>(D); if (!VD || (!VD->getType()->isBlockPointerType() && !VD->getType()->isFunctionPointerType())) { S.Diag(Attr.getLoc(), Attr.isCXX11Attribute() ? diag::err_attribute_wrong_decl_type : diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedFunctionMethodOrBlock; return; } } D->addAttr(::new (S.Context) AnalyzerNoReturnAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } // PS3 PPU-specific. static void handleVecReturnAttr(Sema &S, Decl *D, const AttributeList &Attr) { /* Returning a Vector Class in Registers According to the PPU ABI specifications, a class with a single member of vector type is returned in memory when used as the return value of a function. This results in inefficient code when implementing vector classes. To return the value in a single vector register, add the vecreturn attribute to the class definition. This attribute is also applicable to struct types. 
Example: struct Vector { __vector float xyzw; } __attribute__((vecreturn)); Vector Add(Vector lhs, Vector rhs) { Vector result; result.xyzw = vec_add(lhs.xyzw, rhs.xyzw); return result; // This will be returned in a register } */ if (VecReturnAttr *A = D->getAttr<VecReturnAttr>()) { S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << A; return; } RecordDecl *record = cast<RecordDecl>(D); int count = 0; if (!isa<CXXRecordDecl>(record)) { S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member); return; } if (!cast<CXXRecordDecl>(record)->isPOD()) { S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_pod_record); return; } for (const auto *I : record->fields()) { if ((count == 1) || !I->getType()->isVectorType()) { S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member); return; } count++; } D->addAttr(::new (S.Context) VecReturnAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleDependencyAttr(Sema &S, Scope *Scope, Decl *D, const AttributeList &Attr) { if (isa<ParmVarDecl>(D)) { // [[carries_dependency]] can only be applied to a parameter if it is a // parameter of a function declaration or lambda. if (!(Scope->getFlags() & clang::Scope::FunctionDeclarationScope)) { S.Diag(Attr.getLoc(), diag::err_carries_dependency_param_not_function_decl); return; } } D->addAttr(::new (S.Context) CarriesDependencyAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { if (VD->hasLocalStorage()) { S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); return; } } else if (!isFunctionOrMethod(D)) { S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedVariableOrFunction; return; } D->addAttr(::new (S.Context) UsedAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) { uint32_t priority = ConstructorAttr::DefaultPriority; if (Attr.getNumArgs() && !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority)) return; D->addAttr(::new (S.Context) ConstructorAttr(Attr.getRange(), S.Context, priority, Attr.getAttributeSpellingListIndex())); } static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) { uint32_t priority = DestructorAttr::DefaultPriority; if (Attr.getNumArgs() && !checkUInt32Argument(S, Attr, Attr.getArgAsExpr(0), priority)) return; D->addAttr(::new (S.Context) DestructorAttr(Attr.getRange(), S.Context, priority, Attr.getAttributeSpellingListIndex())); } template <typename AttrTy> static void handleAttrWithMessage(Sema &S, Decl *D, const AttributeList &Attr) { // Handle the case where the attribute has a text message. 
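// For illustration only (hypothetical use, not part of the original
// source), this template serves attributes that carry an optional
// message string, e.g.:
//   void f() __attribute__((deprecated("use g() instead")));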
StringRef Str; if (Attr.getNumArgs() == 1 && !S.checkStringLiteralArgumentAttr(Attr, 0, Str)) return; D->addAttr(::new (S.Context) AttrTy(Attr.getRange(), S.Context, Str, Attr.getAttributeSpellingListIndex())); } static void handleObjCSuppresProtocolAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!cast<ObjCProtocolDecl>(D)->isThisDeclarationADefinition()) { S.Diag(Attr.getLoc(), diag::err_objc_attr_protocol_requires_definition) << Attr.getName() << Attr.getRange(); return; } D->addAttr(::new (S.Context) ObjCExplicitProtocolImplAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static bool checkAvailabilityAttr(Sema &S, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted) { StringRef PlatformName = AvailabilityAttr::getPrettyPlatformName(Platform->getName()); if (PlatformName.empty()) PlatformName = Platform->getName(); // Ensure that Introduced <= Deprecated <= Obsoleted (although not all // of these steps are needed). if (!Introduced.empty() && !Deprecated.empty() && !(Introduced <= Deprecated)) { S.Diag(Range.getBegin(), diag::warn_availability_version_ordering) << 1 << PlatformName << Deprecated.getAsString() << 0 << Introduced.getAsString(); return true; } if (!Introduced.empty() && !Obsoleted.empty() && !(Introduced <= Obsoleted)) { S.Diag(Range.getBegin(), diag::warn_availability_version_ordering) << 2 << PlatformName << Obsoleted.getAsString() << 0 << Introduced.getAsString(); return true; } if (!Deprecated.empty() && !Obsoleted.empty() && !(Deprecated <= Obsoleted)) { S.Diag(Range.getBegin(), diag::warn_availability_version_ordering) << 2 << PlatformName << Obsoleted.getAsString() << 1 << Deprecated.getAsString(); return true; } return false; } /// \brief Check whether the two versions match. /// /// If either version tuple is empty, then they are assumed to match. If /// \p BeforeIsOkay is true, then \p X can be less than or equal to \p Y. 
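///
/// For illustration only (hypothetical version tuples, not part of the
/// original source):
///   versionsMatch(10.1, 10.1, false) -> true  (equal tuples match)
///   versionsMatch(10.1, 10.2, true)  -> true  (X < Y allowed by BeforeIsOkay)
///   versionsMatch(10.2, 10.1, true)  -> false (X > Y never matches)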
static bool versionsMatch(const VersionTuple &X, const VersionTuple &Y, bool BeforeIsOkay) { if (X.empty() || Y.empty()) return true; if (X == Y) return true; if (BeforeIsOkay && X < Y) return true; return false; } AvailabilityAttr *Sema::mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex) { VersionTuple MergedIntroduced = Introduced; VersionTuple MergedDeprecated = Deprecated; VersionTuple MergedObsoleted = Obsoleted; bool FoundAny = false; if (D->hasAttrs()) { AttrVec &Attrs = D->getAttrs(); for (unsigned i = 0, e = Attrs.size(); i != e;) { const AvailabilityAttr *OldAA = dyn_cast<AvailabilityAttr>(Attrs[i]); if (!OldAA) { ++i; continue; } IdentifierInfo *OldPlatform = OldAA->getPlatform(); if (OldPlatform != Platform) { ++i; continue; } FoundAny = true; VersionTuple OldIntroduced = OldAA->getIntroduced(); VersionTuple OldDeprecated = OldAA->getDeprecated(); VersionTuple OldObsoleted = OldAA->getObsoleted(); bool OldIsUnavailable = OldAA->getUnavailable(); if (!versionsMatch(OldIntroduced, Introduced, Override) || !versionsMatch(Deprecated, OldDeprecated, Override) || !versionsMatch(Obsoleted, OldObsoleted, Override) || !(OldIsUnavailable == IsUnavailable || (Override && !OldIsUnavailable && IsUnavailable))) { if (Override) { int Which = -1; VersionTuple FirstVersion; VersionTuple SecondVersion; if (!versionsMatch(OldIntroduced, Introduced, Override)) { Which = 0; FirstVersion = OldIntroduced; SecondVersion = Introduced; } else if (!versionsMatch(Deprecated, OldDeprecated, Override)) { Which = 1; FirstVersion = Deprecated; SecondVersion = OldDeprecated; } else if (!versionsMatch(Obsoleted, OldObsoleted, Override)) { Which = 2; FirstVersion = Obsoleted; SecondVersion = OldObsoleted; } if (Which == -1) { Diag(OldAA->getLocation(), diag::warn_mismatched_availability_override_unavail) << AvailabilityAttr::getPrettyPlatformName(Platform->getName()); } else { Diag(OldAA->getLocation(), diag::warn_mismatched_availability_override) << Which << AvailabilityAttr::getPrettyPlatformName(Platform->getName()) << FirstVersion.getAsString() << SecondVersion.getAsString(); } Diag(Range.getBegin(), diag::note_overridden_method); } else { Diag(OldAA->getLocation(), diag::warn_mismatched_availability); Diag(Range.getBegin(), diag::note_previous_attribute); } Attrs.erase(Attrs.begin() + i); --e; continue; } VersionTuple MergedIntroduced2 = MergedIntroduced; VersionTuple MergedDeprecated2 = MergedDeprecated; VersionTuple MergedObsoleted2 = MergedObsoleted; if (MergedIntroduced2.empty()) MergedIntroduced2 = OldIntroduced; if (MergedDeprecated2.empty()) MergedDeprecated2 = OldDeprecated; if (MergedObsoleted2.empty()) MergedObsoleted2 = OldObsoleted; if (checkAvailabilityAttr(*this, OldAA->getRange(), Platform, MergedIntroduced2, MergedDeprecated2, MergedObsoleted2)) { Attrs.erase(Attrs.begin() + i); --e; continue; } MergedIntroduced = MergedIntroduced2; MergedDeprecated = MergedDeprecated2; MergedObsoleted = MergedObsoleted2; ++i; } } if (FoundAny && MergedIntroduced == Introduced && MergedDeprecated == Deprecated && MergedObsoleted == Obsoleted) return nullptr; // Only create a new attribute if !Override, but we want to do // the checking. 
if (!checkAvailabilityAttr(*this, Range, Platform, MergedIntroduced, MergedDeprecated, MergedObsoleted) && !Override) { return ::new (Context) AvailabilityAttr(Range, Context, Platform, Introduced, Deprecated, Obsoleted, IsUnavailable, Message, AttrSpellingListIndex); } return nullptr; } static void handleAvailabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkAttributeNumArgs(S, Attr, 1)) return; IdentifierLoc *Platform = Attr.getArgAsIdent(0); unsigned Index = Attr.getAttributeSpellingListIndex(); IdentifierInfo *II = Platform->Ident; if (AvailabilityAttr::getPrettyPlatformName(II->getName()).empty()) S.Diag(Platform->Loc, diag::warn_availability_unknown_platform) << Platform->Ident; NamedDecl *ND = dyn_cast<NamedDecl>(D); if (!ND) { S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); return; } AvailabilityChange Introduced = Attr.getAvailabilityIntroduced(); AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated(); AvailabilityChange Obsoleted = Attr.getAvailabilityObsoleted(); bool IsUnavailable = Attr.getUnavailableLoc().isValid(); StringRef Str; if (const StringLiteral *SE = dyn_cast_or_null<StringLiteral>(Attr.getMessageExpr())) Str = SE->getString(); AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(ND, Attr.getRange(), II, Introduced.Version, Deprecated.Version, Obsoleted.Version, IsUnavailable, Str, /*Override=*/false, Index); if (NewAttr) D->addAttr(NewAttr); } template <class T> static T *mergeVisibilityAttr(Sema &S, Decl *D, SourceRange range, typename T::VisibilityType value, unsigned attrSpellingListIndex) { T *existingAttr = D->getAttr<T>(); if (existingAttr) { typename T::VisibilityType existingValue = existingAttr->getVisibility(); if (existingValue == value) return nullptr; S.Diag(existingAttr->getLocation(), diag::err_mismatched_visibility); S.Diag(range.getBegin(), diag::note_previous_attribute); D->dropAttr<T>(); } return ::new (S.Context) T(range, S.Context, value, attrSpellingListIndex); } VisibilityAttr *Sema::mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex) { return ::mergeVisibilityAttr<VisibilityAttr>(*this, D, Range, Vis, AttrSpellingListIndex); } TypeVisibilityAttr *Sema::mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex) { return ::mergeVisibilityAttr<TypeVisibilityAttr>(*this, D, Range, Vis, AttrSpellingListIndex); } static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr, bool isTypeVisibility) { // Visibility attributes don't mean anything on a typedef. if (isa<TypedefNameDecl>(D)) { S.Diag(Attr.getRange().getBegin(), diag::warn_attribute_ignored) << Attr.getName(); return; } // 'type_visibility' can only go on a type or namespace. if (isTypeVisibility && !(isa<TagDecl>(D) || isa<ObjCInterfaceDecl>(D) || isa<NamespaceDecl>(D))) { S.Diag(Attr.getRange().getBegin(), diag::err_attribute_wrong_decl_type) << Attr.getName() << ExpectedTypeOrNamespace; return; } // Check that the argument is a string literal. StringRef TypeStr; SourceLocation LiteralLoc; if (!S.checkStringLiteralArgumentAttr(Attr, 0, TypeStr, &LiteralLoc)) return; VisibilityAttr::VisibilityType type; if (!VisibilityAttr::ConvertStrToVisibilityType(TypeStr, type)) { S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported) << Attr.getName() << TypeStr; return; } // Complain about attempts to use protected visibility on targets // (like Darwin) that don't support it. 
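// For illustration only (hypothetical declaration, not part of the
// original source):
//   int x __attribute__((visibility("protected")));
// On a target without protected visibility this warns and the attribute
// is demoted to default visibility, as below.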
if (type == VisibilityAttr::Protected && !S.Context.getTargetInfo().hasProtectedVisibility()) { S.Diag(Attr.getLoc(), diag::warn_attribute_protected_visibility); type = VisibilityAttr::Default; } unsigned Index = Attr.getAttributeSpellingListIndex(); clang::Attr *newAttr; if (isTypeVisibility) { newAttr = S.mergeTypeVisibilityAttr(D, Attr.getRange(), (TypeVisibilityAttr::VisibilityType) type, Index); } else { newAttr = S.mergeVisibilityAttr(D, Attr.getRange(), type, Index); } if (newAttr) D->addAttr(newAttr); } static void handleObjCMethodFamilyAttr(Sema &S, Decl *decl, const AttributeList &Attr) { ObjCMethodDecl *method = cast<ObjCMethodDecl>(decl); if (!Attr.isArgIdent(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << 1 << AANT_ArgumentIdentifier; return; } IdentifierLoc *IL = Attr.getArgAsIdent(0); ObjCMethodFamilyAttr::FamilyKind F; if (!ObjCMethodFamilyAttr::ConvertStrToFamilyKind(IL->Ident->getName(), F)) { S.Diag(IL->Loc, diag::warn_attribute_type_not_supported) << Attr.getName() << IL->Ident; return; } if (F == ObjCMethodFamilyAttr::OMF_init && !method->getReturnType()->isObjCObjectPointerType()) { S.Diag(method->getLocation(), diag::err_init_method_bad_return_type) << method->getReturnType(); // Ignore the attribute. return; } method->addAttr(new (S.Context) ObjCMethodFamilyAttr(Attr.getRange(), S.Context, F, Attr.getAttributeSpellingListIndex())); } static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) { if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) { QualType T = TD->getUnderlyingType(); if (!T->isCARCBridgableType()) { S.Diag(TD->getLocation(), diag::err_nsobject_attribute); return; } } else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) { QualType T = PD->getType(); if (!T->isCARCBridgableType()) { S.Diag(PD->getLocation(), diag::err_nsobject_attribute); return; } } else { // It is okay to include this attribute on properties, e.g.: // // @property (retain, nonatomic) struct Bork *Q __attribute__((NSObject)); // // In this case it follows tradition and suppresses an error in the above // case. 
S.Diag(D->getLocation(), diag::warn_nsobject_attribute); } D->addAttr(::new (S.Context) ObjCNSObjectAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleObjCIndependentClass(Sema &S, Decl *D, const AttributeList &Attr) { if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) { QualType T = TD->getUnderlyingType(); if (!T->isObjCObjectPointerType()) { S.Diag(TD->getLocation(), diag::warn_ptr_independentclass_attribute); return; } } else { S.Diag(D->getLocation(), diag::warn_independentclass_attribute); return; } D->addAttr(::new (S.Context) ObjCIndependentClassAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!Attr.isArgIdent(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << 1 << AANT_ArgumentIdentifier; return; } IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident; BlocksAttr::BlockType type; if (!BlocksAttr::ConvertStrToBlockType(II->getName(), type)) { S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported) << Attr.getName() << II; return; } D->addAttr(::new (S.Context) BlocksAttr(Attr.getRange(), S.Context, type, Attr.getAttributeSpellingListIndex())); } static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) { unsigned sentinel = (unsigned)SentinelAttr::DefaultSentinel; if (Attr.getNumArgs() > 0) { Expr *E = Attr.getArgAsExpr(0); llvm::APSInt Idx(32); if (E->isTypeDependent() || E->isValueDependent() || !E->isIntegerConstantExpr(Idx, S.Context)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << 1 << AANT_ArgumentIntegerConstant << E->getSourceRange(); return; } if (Idx.isSigned() && Idx.isNegative()) { S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_less_than_zero) << E->getSourceRange(); return; } sentinel = Idx.getZExtValue(); } unsigned nullPos = (unsigned)SentinelAttr::DefaultNullPos; if (Attr.getNumArgs() > 1) { Expr *E = Attr.getArgAsExpr(1); llvm::APSInt Idx(32); if (E->isTypeDependent() || E->isValueDependent() || !E->isIntegerConstantExpr(Idx, S.Context)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << 2 << AANT_ArgumentIntegerConstant << E->getSourceRange(); return; } nullPos = Idx.getZExtValue(); if ((Idx.isSigned() && Idx.isNegative()) || nullPos > 1) { // FIXME: This error message could be improved, it would be nice // to say what the bounds actually are. S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_not_zero_or_one) << E->getSourceRange(); return; } } if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { const FunctionType *FT = FD->getType()->castAs<FunctionType>(); if (isa<FunctionNoProtoType>(FT)) { S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_named_arguments); return; } if (!cast<FunctionProtoType>(FT)->isVariadic()) { S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0; return; } } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { if (!MD->isVariadic()) { S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0; return; } } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) { if (!BD->isVariadic()) { S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 1; return; } } else if (const VarDecl *V = dyn_cast<VarDecl>(D)) { QualType Ty = V->getType(); if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) { const FunctionType *FT = Ty->isFunctionPointerType() ? 
D->getFunctionType() : Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>(); if (!cast<FunctionProtoType>(FT)->isVariadic()) { int m = Ty->isFunctionPointerType() ? 0 : 1; S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m; return; } } else { S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedFunctionMethodOrBlock; return; } } else { S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedFunctionMethodOrBlock; return; } D->addAttr(::new (S.Context) SentinelAttr(Attr.getRange(), S.Context, sentinel, nullPos, Attr.getAttributeSpellingListIndex())); } static void handleWarnUnusedResult(Sema &S, Decl *D, const AttributeList &Attr) { if (D->getFunctionType() && D->getFunctionType()->getReturnType()->isVoidType()) { S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method) << Attr.getName() << 0; return; } if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) if (MD->getReturnType()->isVoidType()) { S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method) << Attr.getName() << 1; return; } D->addAttr(::new (S.Context) WarnUnusedResultAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) { // weak_import only applies to variable & function declarations. bool isDef = false; if (!D->canBeWeakImported(isDef)) { if (isDef) S.Diag(Attr.getLoc(), diag::warn_attribute_invalid_on_definition) << "weak_import"; else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D) || (S.Context.getTargetInfo().getTriple().isOSDarwin() && (isa<ObjCInterfaceDecl>(D) || isa<EnumDecl>(D)))) { // Nothing to warn about here. } else S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedVariableOrFunction; return; } D->addAttr(::new (S.Context) WeakImportAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } // Handles reqd_work_group_size and work_group_size_hint. 
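// For illustration only (hypothetical OpenCL kernel, not part of the
// original source):
//   __kernel __attribute__((reqd_work_group_size(64, 1, 1)))
//   void k(__global int *p) { /* ... */ }
// Each of the three dimensions must be a nonzero 32-bit constant.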
template <typename WorkGroupAttr> static void handleWorkGroupSize(Sema &S, Decl *D, const AttributeList &Attr) { uint32_t WGSize[3]; for (unsigned i = 0; i < 3; ++i) { const Expr *E = Attr.getArgAsExpr(i); if (!checkUInt32Argument(S, Attr, E, WGSize[i], i)) return; if (WGSize[i] == 0) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero) << Attr.getName() << E->getSourceRange(); return; } } WorkGroupAttr *Existing = D->getAttr<WorkGroupAttr>(); if (Existing && !(Existing->getXDim() == WGSize[0] && Existing->getYDim() == WGSize[1] && Existing->getZDim() == WGSize[2])) S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName(); D->addAttr(::new (S.Context) WorkGroupAttr(Attr.getRange(), S.Context, WGSize[0], WGSize[1], WGSize[2], Attr.getAttributeSpellingListIndex())); } static void handleVecTypeHint(Sema &S, Decl *D, const AttributeList &Attr) { if (!Attr.hasParsedType()) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr.getName() << 1; return; } TypeSourceInfo *ParmTSI = nullptr; QualType ParmType = S.GetTypeFromParser(Attr.getTypeArg(), &ParmTSI); assert(ParmTSI && "no type source info for attribute argument"); if (!ParmType->isExtVectorType() && !ParmType->isFloatingType() && (ParmType->isBooleanType() || !ParmType->isIntegralType(S.getASTContext()))) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_vec_type_hint) << ParmType; return; } if (VecTypeHintAttr *A = D->getAttr<VecTypeHintAttr>()) { if (!S.Context.hasSameType(A->getTypeHint(), ParmType)) { S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName(); return; } } D->addAttr(::new (S.Context) VecTypeHintAttr(Attr.getLoc(), S.Context, ParmTSI, Attr.getAttributeSpellingListIndex())); } SectionAttr *Sema::mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex) { if (SectionAttr *ExistingAttr = D->getAttr<SectionAttr>()) { if (ExistingAttr->getName() == Name) return nullptr; Diag(ExistingAttr->getLocation(), diag::warn_mismatched_section); Diag(Range.getBegin(), diag::note_previous_attribute); return nullptr; } return ::new (Context) SectionAttr(Range, Context, Name, AttrSpellingListIndex); } bool Sema::checkSectionName(SourceLocation LiteralLoc, StringRef SecName) { std::string Error = Context.getTargetInfo().isValidSectionSpecifier(SecName); if (!Error.empty()) { Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error; return false; } return true; } static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Make sure that there is a string literal as the section's single // argument. StringRef Str; SourceLocation LiteralLoc; if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &LiteralLoc)) return; if (!S.checkSectionName(LiteralLoc, Str)) return; // If the target wants to validate the section specifier, make it happen. std::string Error = S.Context.getTargetInfo().isValidSectionSpecifier(Str); if (!Error.empty()) { S.Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error; return; } unsigned Index = Attr.getAttributeSpellingListIndex(); SectionAttr *NewAttr = S.mergeSectionAttr(D, Attr.getRange(), Str, Index); if (NewAttr) D->addAttr(NewAttr); } // Check for things we'd like to warn about, no errors or validation for now. // TODO: Validation should use a backend target library that specifies // the allowable subtarget features and cpus. We could use something like a // TargetCodeGenInfo hook here to do validation.
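// For illustration only (hypothetical declarations, not part of the
// original source):
//   void f() __attribute__((target("avx2")));         // stored as-is
//   void g() __attribute__((target("tune=haswell"))); // warns: "tune=" unsupported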
void Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) { for (auto Str : {"tune=", "fpmath="}) if (AttrStr.find(Str) != StringRef::npos) Diag(LiteralLoc, diag::warn_unsupported_target_attribute) << Str; } static void handleTargetAttr(Sema &S, Decl *D, const AttributeList &Attr) { StringRef Str; SourceLocation LiteralLoc; if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &LiteralLoc)) return; S.checkTargetAttr(LiteralLoc, Str); unsigned Index = Attr.getAttributeSpellingListIndex(); TargetAttr *NewAttr = ::new (S.Context) TargetAttr(Attr.getRange(), S.Context, Str, Index); D->addAttr(NewAttr); } static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) { VarDecl *VD = cast<VarDecl>(D); if (!VD->hasLocalStorage()) { S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); return; } Expr *E = Attr.getArgAsExpr(0); SourceLocation Loc = E->getExprLoc(); FunctionDecl *FD = nullptr; DeclarationNameInfo NI; // gcc only allows for simple identifiers. Since we support more than gcc, we // will warn the user. if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { if (DRE->hasQualifier()) S.Diag(Loc, diag::warn_cleanup_ext); FD = dyn_cast<FunctionDecl>(DRE->getDecl()); NI = DRE->getNameInfo(); if (!FD) { S.Diag(Loc, diag::err_attribute_cleanup_arg_not_function) << 1 << NI.getName(); return; } } else if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { if (ULE->hasExplicitTemplateArgs()) S.Diag(Loc, diag::warn_cleanup_ext); FD = S.ResolveSingleFunctionTemplateSpecialization(ULE, true); NI = ULE->getNameInfo(); if (!FD) { S.Diag(Loc, diag::err_attribute_cleanup_arg_not_function) << 2 << NI.getName(); if (ULE->getType() == S.Context.OverloadTy) S.NoteAllOverloadCandidates(ULE); return; } } else { S.Diag(Loc, diag::err_attribute_cleanup_arg_not_function) << 0; return; } if (FD->getNumParams() != 1) { S.Diag(Loc, diag::err_attribute_cleanup_func_must_take_one_arg) << NI.getName(); return; } // We're currently more strict than GCC about what function types we accept. // If this ever proves to be a problem it should be easy to fix. QualType Ty = S.Context.getPointerType(VD->getType()); QualType ParamTy = FD->getParamDecl(0)->getType(); if (S.CheckAssignmentConstraints(FD->getParamDecl(0)->getLocation(), ParamTy, Ty) != Sema::Compatible) { S.Diag(Loc, diag::err_attribute_cleanup_func_arg_incompatible_type) << NI.getName() << ParamTy << Ty; return; } D->addAttr(::new (S.Context) CleanupAttr(Attr.getRange(), S.Context, FD, Attr.getAttributeSpellingListIndex())); } /// Handle __attribute__((format_arg((idx)))) attribute based on /// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) { Expr *IdxExpr = Attr.getArgAsExpr(0); uint64_t Idx; if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 1, IdxExpr, Idx)) return; // make sure the format string is really a string QualType Ty = getFunctionOrMethodParamType(D, Idx); bool not_nsstring_type = !isNSStringType(Ty, S.Context); if (not_nsstring_type && !isCFStringType(Ty, S.Context) && (!Ty->isPointerType() || !Ty->getAs<PointerType>()->getPointeeType()->isCharType())) { S.Diag(Attr.getLoc(), diag::err_format_attribute_not) << (not_nsstring_type ? 
"a string type" : "an NSString") << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0); return; } Ty = getFunctionOrMethodResultType(D); if (!isNSStringType(Ty, S.Context) && !isCFStringType(Ty, S.Context) && (!Ty->isPointerType() || !Ty->getAs<PointerType>()->getPointeeType()->isCharType())) { S.Diag(Attr.getLoc(), diag::err_format_attribute_result_not) << (not_nsstring_type ? "string type" : "NSString") << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0); return; } // We cannot use the Idx returned from checkFunctionOrMethodParameterIndex // because that has corrected for the implicit this parameter, and is zero- // based. The attribute expects what the user wrote explicitly. llvm::APSInt Val; IdxExpr->EvaluateAsInt(Val, S.Context); D->addAttr(::new (S.Context) FormatArgAttr(Attr.getRange(), S.Context, Val.getZExtValue(), Attr.getAttributeSpellingListIndex())); } enum FormatAttrKind { CFStringFormat, NSStringFormat, StrftimeFormat, SupportedFormat, IgnoredFormat, InvalidFormat }; /// getFormatAttrKind - Map from format attribute names to supported format /// types. static FormatAttrKind getFormatAttrKind(StringRef Format) { return llvm::StringSwitch<FormatAttrKind>(Format) // Check for formats that get handled specially. .Case("NSString", NSStringFormat) .Case("CFString", CFStringFormat) .Case("strftime", StrftimeFormat) // Otherwise, check for supported formats. .Cases("scanf", "printf", "printf0", "strfmon", SupportedFormat) .Cases("cmn_err", "vcmn_err", "zcmn_err", SupportedFormat) .Case("kprintf", SupportedFormat) // OpenBSD. .Case("freebsd_kprintf", SupportedFormat) // FreeBSD. .Case("os_trace", SupportedFormat) .Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", IgnoredFormat) .Default(InvalidFormat); } /// Handle __attribute__((init_priority(priority))) attributes based on /// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html static void handleInitPriorityAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!S.getLangOpts().CPlusPlus) { S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName(); return; } if (S.getCurFunctionOrMethodDecl()) { S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr); Attr.setInvalid(); return; } QualType T = cast<VarDecl>(D)->getType(); if (S.Context.getAsArrayType(T)) T = S.Context.getBaseElementType(T); if (!T->getAs<RecordType>()) { S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr); Attr.setInvalid(); return; } Expr *E = Attr.getArgAsExpr(0); uint32_t prioritynum; if (!checkUInt32Argument(S, Attr, E, prioritynum)) { Attr.setInvalid(); return; } if (prioritynum < 101 || prioritynum > 65535) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range) << E->getSourceRange(); Attr.setInvalid(); return; } D->addAttr(::new (S.Context) InitPriorityAttr(Attr.getRange(), S.Context, prioritynum, Attr.getAttributeSpellingListIndex())); } FormatAttr *Sema::mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex) { // Check whether we already have an equivalent format attribute. for (auto *F : D->specific_attrs<FormatAttr>()) { if (F->getType() == Format && F->getFormatIdx() == FormatIdx && F->getFirstArg() == FirstArg) { // If we don't have a valid location for this attribute, adopt the // location. 
if (F->getLocation().isInvalid()) F->setRange(Range); return nullptr; } } return ::new (Context) FormatAttr(Range, Context, Format, FormatIdx, FirstArg, AttrSpellingListIndex); } /// Handle __attribute__((format(type,idx,firstarg))) attributes based on /// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!Attr.isArgIdent(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << 1 << AANT_ArgumentIdentifier; return; } // In C++ the implicit 'this' function parameter also counts, and they are // counted from one. bool HasImplicitThisParam = isInstanceMethod(D); unsigned NumArgs = getFunctionOrMethodNumParams(D) + HasImplicitThisParam; IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident; StringRef Format = II->getName(); // Normalize the argument, __foo__ becomes foo. if (Format.startswith("__") && Format.endswith("__")) { Format = Format.substr(2, Format.size() - 4); // If we've modified the string name, we need a new identifier for it. II = &S.Context.Idents.get(Format); } // Check for supported formats. FormatAttrKind Kind = getFormatAttrKind(Format); if (Kind == IgnoredFormat) return; if (Kind == InvalidFormat) { S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported) << Attr.getName() << II->getName(); return; } // checks for the 2nd argument Expr *IdxExpr = Attr.getArgAsExpr(1); uint32_t Idx; if (!checkUInt32Argument(S, Attr, IdxExpr, Idx, 2)) return; if (Idx < 1 || Idx > NumArgs) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds) << Attr.getName() << 2 << IdxExpr->getSourceRange(); return; } // FIXME: Do we need to bounds check? unsigned ArgIdx = Idx - 1; if (HasImplicitThisParam) { if (ArgIdx == 0) { S.Diag(Attr.getLoc(), diag::err_format_attribute_implicit_this_format_string) << IdxExpr->getSourceRange(); return; } ArgIdx--; } // make sure the format string is really a string QualType Ty = getFunctionOrMethodParamType(D, ArgIdx); if (Kind == CFStringFormat) { if (!isCFStringType(Ty, S.Context)) { S.Diag(Attr.getLoc(), diag::err_format_attribute_not) << "a CFString" << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, ArgIdx); return; } } else if (Kind == NSStringFormat) { // FIXME: do we need to check if the type is NSString*? What are the // semantics? if (!isNSStringType(Ty, S.Context)) { S.Diag(Attr.getLoc(), diag::err_format_attribute_not) << "an NSString" << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, ArgIdx); return; } } else if (!Ty->isPointerType() || !Ty->getAs<PointerType>()->getPointeeType()->isCharType()) { S.Diag(Attr.getLoc(), diag::err_format_attribute_not) << "a string type" << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, ArgIdx); return; } // check the 3rd argument Expr *FirstArgExpr = Attr.getArgAsExpr(2); uint32_t FirstArg; if (!checkUInt32Argument(S, Attr, FirstArgExpr, FirstArg, 3)) return; // check if the function is variadic if the 3rd argument non-zero if (FirstArg != 0) { if (isFunctionOrMethodVariadic(D)) { ++NumArgs; // +1 for ... } else { S.Diag(D->getLocation(), diag::err_format_attribute_requires_variadic); return; } } // strftime requires FirstArg to be 0 because it doesn't read from any // variable the input is just the current time + the format string. if (Kind == StrftimeFormat) { if (FirstArg != 0) { S.Diag(Attr.getLoc(), diag::err_format_strftime_third_parameter) << FirstArgExpr->getSourceRange(); return; } // if 0 it disables parameter checking (to use with e.g. 
va_list) } else if (FirstArg != 0 && FirstArg != NumArgs) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds) << Attr.getName() << 3 << FirstArgExpr->getSourceRange(); return; } FormatAttr *NewAttr = S.mergeFormatAttr(D, Attr.getRange(), II, Idx, FirstArg, Attr.getAttributeSpellingListIndex()); if (NewAttr) D->addAttr(NewAttr); } static void handleTransparentUnionAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Try to find the underlying union declaration. RecordDecl *RD = nullptr; TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D); if (TD && TD->getUnderlyingType()->isUnionType()) RD = TD->getUnderlyingType()->getAsUnionType()->getDecl(); else RD = dyn_cast<RecordDecl>(D); if (!RD || !RD->isUnion()) { S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedUnion; return; } if (!RD->isCompleteDefinition()) { S.Diag(Attr.getLoc(), diag::warn_transparent_union_attribute_not_definition); return; } RecordDecl::field_iterator Field = RD->field_begin(), FieldEnd = RD->field_end(); if (Field == FieldEnd) { S.Diag(Attr.getLoc(), diag::warn_transparent_union_attribute_zero_fields); return; } FieldDecl *FirstField = *Field; QualType FirstType = FirstField->getType(); if (FirstType->hasFloatingRepresentation() || FirstType->isVectorType()) { S.Diag(FirstField->getLocation(), diag::warn_transparent_union_attribute_floating) << FirstType->isVectorType() << FirstType; return; } uint64_t FirstSize = S.Context.getTypeSize(FirstType); uint64_t FirstAlign = S.Context.getTypeAlign(FirstType); for (; Field != FieldEnd; ++Field) { QualType FieldType = Field->getType(); // FIXME: this isn't fully correct; we also need to test whether the // members of the union would all have the same calling convention as the // first member of the union. Checking just the size and alignment isn't // sufficient (consider structs passed on the stack instead of in registers // as an example). if (S.Context.getTypeSize(FieldType) != FirstSize || S.Context.getTypeAlign(FieldType) > FirstAlign) { // Warn if we drop the attribute. bool isSize = S.Context.getTypeSize(FieldType) != FirstSize; unsigned FieldBits = isSize? S.Context.getTypeSize(FieldType) : S.Context.getTypeAlign(FieldType); S.Diag(Field->getLocation(), diag::warn_transparent_union_attribute_field_size_align) << isSize << Field->getDeclName() << FieldBits; unsigned FirstBits = isSize? FirstSize : FirstAlign; S.Diag(FirstField->getLocation(), diag::note_transparent_union_first_field_size_align) << isSize << FirstBits; return; } } RD->addAttr(::new (S.Context) TransparentUnionAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleAnnotateAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Make sure that there is a string literal as the annotation's single // argument. StringRef Str; if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str)) return; // Don't duplicate annotations that are already set. 
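// For example, writing __attribute__((annotate("my_tag"))) twice on the
// same declaration should still produce a single AnnotateAttr node.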
for (const auto *I : D->specific_attrs<AnnotateAttr>()) { if (I->getAnnotation() == Str) return; } D->addAttr(::new (S.Context) AnnotateAttr(Attr.getRange(), S.Context, Str, Attr.getAttributeSpellingListIndex())); } static void handleAlignValueAttr(Sema &S, Decl *D, const AttributeList &Attr) { S.AddAlignValueAttr(Attr.getRange(), D, Attr.getArgAsExpr(0), Attr.getAttributeSpellingListIndex()); } void Sema::AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex) { AlignValueAttr TmpAttr(AttrRange, Context, E, SpellingListIndex); SourceLocation AttrLoc = AttrRange.getBegin(); QualType T; if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) T = TD->getUnderlyingType(); else if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) T = VD->getType(); else llvm_unreachable("Unknown decl type for align_value"); if (!T->isDependentType() && !T->isAnyPointerType() && !T->isReferenceType() && !T->isMemberPointerType()) { Diag(AttrLoc, diag::warn_attribute_pointer_or_reference_only) << &TmpAttr /*TmpAttr.getName()*/ << T << D->getSourceRange(); return; } if (!E->isValueDependent()) { llvm::APSInt Alignment(32); ExprResult ICE = VerifyIntegerConstantExpression(E, &Alignment, diag::err_align_value_attribute_argument_not_int, /*AllowFold*/ false); if (ICE.isInvalid()) return; if (!Alignment.isPowerOf2()) { Diag(AttrLoc, diag::err_alignment_not_power_of_two) << E->getSourceRange(); return; } D->addAttr(::new (Context) AlignValueAttr(AttrRange, Context, ICE.get(), SpellingListIndex)); return; } // Save dependent expressions in the AST to be instantiated. D->addAttr(::new (Context) AlignValueAttr(TmpAttr)); return; } static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) { // check the attribute arguments. if (Attr.getNumArgs() > 1) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Attr.getName() << 1; return; } if (Attr.getNumArgs() == 0) { D->addAttr(::new (S.Context) AlignedAttr(Attr.getRange(), S.Context, true, nullptr, Attr.getAttributeSpellingListIndex())); return; } Expr *E = Attr.getArgAsExpr(0); if (Attr.isPackExpansion() && !E->containsUnexpandedParameterPack()) { S.Diag(Attr.getEllipsisLoc(), diag::err_pack_expansion_without_parameter_packs); return; } if (!Attr.isPackExpansion() && S.DiagnoseUnexpandedParameterPack(E)) return; if (E->isValueDependent()) { if (const auto *TND = dyn_cast<TypedefNameDecl>(D)) { if (!TND->getUnderlyingType()->isDependentType()) { S.Diag(Attr.getLoc(), diag::err_alignment_dependent_typedef_name) << E->getSourceRange(); return; } } } S.AddAlignedAttr(Attr.getRange(), D, E, Attr.getAttributeSpellingListIndex(), Attr.isPackExpansion()); } void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion) { AlignedAttr TmpAttr(AttrRange, Context, true, E, SpellingListIndex); SourceLocation AttrLoc = AttrRange.getBegin(); // C++11 alignas(...) and C11 _Alignas(...) have additional requirements. if (TmpAttr.isAlignas()) { // C++11 [dcl.align]p1: // An alignment-specifier may be applied to a variable or to a class // data member, but it shall not be applied to a bit-field, a function // parameter, the formal parameter of a catch clause, or a variable // declared with the register storage class specifier. An // alignment-specifier may also be applied to the declaration of a class // or enumeration type. 
// C11 6.7.5/2: // An alignment attribute shall not be specified in a declaration of // a typedef, or a bit-field, or a function, or a parameter, or an // object declared with the register storage-class specifier. int DiagKind = -1; if (isa<ParmVarDecl>(D)) { DiagKind = 0; } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) { if (VD->getStorageClass() == SC_Register) DiagKind = 1; if (VD->isExceptionVariable()) DiagKind = 2; } else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) { if (FD->isBitField()) DiagKind = 3; } else if (!isa<TagDecl>(D)) { Diag(AttrLoc, diag::err_attribute_wrong_decl_type) << &TmpAttr << (TmpAttr.isC11() ? ExpectedVariableOrField : ExpectedVariableFieldOrTag); return; } if (DiagKind != -1) { Diag(AttrLoc, diag::err_alignas_attribute_wrong_decl_type) << &TmpAttr << DiagKind; return; } } if (E->isTypeDependent() || E->isValueDependent()) { // Save dependent expressions in the AST to be instantiated. AlignedAttr *AA = ::new (Context) AlignedAttr(TmpAttr); AA->setPackExpansion(IsPackExpansion); D->addAttr(AA); return; } // FIXME: Cache the number on the Attr object? llvm::APSInt Alignment(32); ExprResult ICE = VerifyIntegerConstantExpression(E, &Alignment, diag::err_aligned_attribute_argument_not_int, /*AllowFold*/ false); if (ICE.isInvalid()) return; // C++11 [dcl.align]p2: // -- if the constant expression evaluates to zero, the alignment // specifier shall have no effect // C11 6.7.5p6: // An alignment specification of zero has no effect. if (!(TmpAttr.isAlignas() && !Alignment)) { if(!llvm::isPowerOf2_64(Alignment.getZExtValue())) { Diag(AttrLoc, diag::err_alignment_not_power_of_two) << E->getSourceRange(); return; } if (Context.getTargetInfo().isTLSSupported()) { if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) { if (VarDecl *VD = dyn_cast<VarDecl>(D)) { if (VD->getTLSKind()) { CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign); if (Alignment.getSExtValue() > MaxAlignChars.getQuantity()) { Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum) << (unsigned)Alignment.getZExtValue() << VD << (unsigned)MaxAlignChars.getQuantity(); return; } } } } } } // Alignment calculations can wrap around if it's greater than 2**28. unsigned MaxValidAlignment = TmpAttr.isDeclspec() ? 8192 : 268435456; if (Alignment.getZExtValue() > MaxValidAlignment) { Diag(AttrLoc, diag::err_attribute_aligned_too_great) << MaxValidAlignment << E->getSourceRange(); return; } AlignedAttr *AA = ::new (Context) AlignedAttr(AttrRange, Context, true, ICE.get(), SpellingListIndex); AA->setPackExpansion(IsPackExpansion); D->addAttr(AA); } void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS, unsigned SpellingListIndex, bool IsPackExpansion) { // FIXME: Cache the number on the Attr object if non-dependent? 
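// (Illustrative note: this overload is reached when the alignment is
// written as a type, e.g. 'alignas(double)', rather than as an integral
// constant expression, so there is no ICE to verify here.)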
// FIXME: Perform checking of type validity AlignedAttr *AA = ::new (Context) AlignedAttr(AttrRange, Context, false, TS, SpellingListIndex); AA->setPackExpansion(IsPackExpansion); D->addAttr(AA); } void Sema::CheckAlignasUnderalignment(Decl *D) { assert(D->hasAttrs() && "no attributes on decl"); QualType UnderlyingTy, DiagTy; if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) { UnderlyingTy = DiagTy = VD->getType(); } else { UnderlyingTy = DiagTy = Context.getTagDeclType(cast<TagDecl>(D)); if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) UnderlyingTy = ED->getIntegerType(); } if (DiagTy->isDependentType() || DiagTy->isIncompleteType()) return; // C++11 [dcl.align]p5, C11 6.7.5/4: // The combined effect of all alignment attributes in a declaration shall // not specify an alignment that is less strict than the alignment that // would otherwise be required for the entity being declared. AlignedAttr *AlignasAttr = nullptr; unsigned Align = 0; for (auto *I : D->specific_attrs<AlignedAttr>()) { if (I->isAlignmentDependent()) return; if (I->isAlignas()) AlignasAttr = I; Align = std::max(Align, I->getAlignment(Context)); } if (AlignasAttr && Align) { CharUnits RequestedAlign = Context.toCharUnitsFromBits(Align); CharUnits NaturalAlign = Context.getTypeAlignInChars(UnderlyingTy); if (NaturalAlign > RequestedAlign) Diag(AlignasAttr->getLocation(), diag::err_alignas_underaligned) << DiagTy << (unsigned)NaturalAlign.getQuantity(); } } bool Sema::checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling) { assert(RD->hasDefinition() && "RD has no definition!"); // We may not have seen base specifiers or any virtual methods yet. We will // have to wait until the record is defined to catch any mismatches. if (!RD->getDefinition()->isCompleteDefinition()) return false; // The unspecified model never matches what a definition could need. if (SemanticSpelling == MSInheritanceAttr::Keyword_unspecified_inheritance) return false; if (BestCase) { if (RD->calculateInheritanceModel() == SemanticSpelling) return false; } else { if (RD->calculateInheritanceModel() <= SemanticSpelling) return false; } Diag(Range.getBegin(), diag::err_mismatched_ms_inheritance) << 0 /*definition*/; Diag(RD->getDefinition()->getLocation(), diag::note_defined_here) << RD->getNameAsString(); return true; } /// handleModeAttr - This attribute modifies the width of a decl with primitive /// type. /// /// Despite what would be logical, the mode attribute is a decl attribute, not a /// type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make 'G' be /// HImode, not an intermediate pointer. static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) { // This attribute isn't documented, but glibc uses it. It changes // the width of an int or unsigned int to the specified size. if (!Attr.isArgIdent(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIdentifier; return; } IdentifierInfo *Name = Attr.getArgAsIdent(0)->Ident; StringRef Str = Name->getName(); // Normalize the attribute name, __foo__ becomes foo. 
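// e.g. __attribute__((mode(__DI__))) is treated exactly like mode(DI).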
if (Str.startswith("__") && Str.endswith("__")) Str = Str.substr(2, Str.size() - 4); unsigned DestWidth = 0; bool IntegerMode = true; bool ComplexMode = false; switch (Str.size()) { case 2: switch (Str[0]) { case 'Q': DestWidth = 8; break; case 'H': DestWidth = 16; break; case 'S': DestWidth = 32; break; case 'D': DestWidth = 64; break; case 'X': DestWidth = 96; break; case 'T': DestWidth = 128; break; } if (Str[1] == 'F') { IntegerMode = false; } else if (Str[1] == 'C') { IntegerMode = false; ComplexMode = true; } else if (Str[1] != 'I') { DestWidth = 0; } break; case 4: // FIXME: glibc uses 'word' to define register_t; this is narrower than a // pointer on PIC16 and other embedded platforms. if (Str == "word") DestWidth = S.Context.getTargetInfo().getPointerWidth(0); else if (Str == "byte") DestWidth = S.Context.getTargetInfo().getCharWidth(); break; case 7: if (Str == "pointer") DestWidth = S.Context.getTargetInfo().getPointerWidth(0); break; case 11: if (Str == "unwind_word") DestWidth = S.Context.getTargetInfo().getUnwindWordWidth(); break; } QualType OldTy; if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) OldTy = TD->getUnderlyingType(); else if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) OldTy = VD->getType(); else { S.Diag(D->getLocation(), diag::err_attr_wrong_decl) << Attr.getName() << Attr.getRange(); return; } // Base type can also be a vector type (see PR17453). // Distinguish between base type and base element type. QualType OldElemTy = OldTy; if (const VectorType *VT = OldTy->getAs<VectorType>()) OldElemTy = VT->getElementType(); if (!OldElemTy->getAs<BuiltinType>() && !OldElemTy->isComplexType()) S.Diag(Attr.getLoc(), diag::err_mode_not_primitive); else if (IntegerMode) { if (!OldElemTy->isIntegralOrEnumerationType()) S.Diag(Attr.getLoc(), diag::err_mode_wrong_type); } else if (ComplexMode) { if (!OldElemTy->isComplexType()) S.Diag(Attr.getLoc(), diag::err_mode_wrong_type); } else { if (!OldElemTy->isFloatingType()) S.Diag(Attr.getLoc(), diag::err_mode_wrong_type); } // FIXME: Sync this with InitializePredefinedMacros; we need to match int8_t // and friends, at least with glibc. // FIXME: Make sure floating-point mappings are accurate // FIXME: Support XF and TF types if (!DestWidth) { S.Diag(Attr.getLoc(), diag::err_machine_mode) << 0 /*Unknown*/ << Name; return; } QualType NewElemTy; if (IntegerMode) NewElemTy = S.Context.getIntTypeForBitwidth( DestWidth, OldElemTy->isSignedIntegerType()); else NewElemTy = S.Context.getRealTypeForBitwidth(DestWidth); if (NewElemTy.isNull()) { S.Diag(Attr.getLoc(), diag::err_machine_mode) << 1 /*Unsupported*/ << Name; return; } if (ComplexMode) { NewElemTy = S.Context.getComplexType(NewElemTy); } QualType NewTy = NewElemTy; if (const VectorType *OldVT = OldTy->getAs<VectorType>()) { // Complex machine mode does not support base vector types. if (ComplexMode) { S.Diag(Attr.getLoc(), diag::err_complex_mode_vector_type); return; } unsigned NumElements = S.Context.getTypeSize(OldElemTy) * OldVT->getNumElements() / S.Context.getTypeSize(NewElemTy); NewTy = S.Context.getVectorType(NewElemTy, NumElements, OldVT->getVectorKind()); } if (NewTy.isNull()) { S.Diag(Attr.getLoc(), diag::err_mode_wrong_type); return; } // Install the new type. 
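// (Illustrative: for 'typedef int small_t __attribute__((mode(QI)));',
// NewTy is an 8-bit integer type that replaces the typedef's underlying
// 'int' below.)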
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) TD->setModedTypeSourceInfo(TD->getTypeSourceInfo(), NewTy); else cast<ValueDecl>(D)->setType(NewTy); D->addAttr(::new (S.Context) ModeAttr(Attr.getRange(), S.Context, Name, Attr.getAttributeSpellingListIndex())); } static void handleNoDebugAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { if (!VD->hasGlobalStorage()) S.Diag(Attr.getLoc(), diag::warn_attribute_requires_functions_or_static_globals) << Attr.getName(); } else if (!isFunctionOrMethod(D)) { S.Diag(Attr.getLoc(), diag::warn_attribute_requires_functions_or_static_globals) << Attr.getName(); return; } D->addAttr(::new (S.Context) NoDebugAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } AlwaysInlineAttr *Sema::mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex) { if (OptimizeNoneAttr *Optnone = D->getAttr<OptimizeNoneAttr>()) { Diag(Range.getBegin(), diag::warn_attribute_ignored) << Ident; Diag(Optnone->getLocation(), diag::note_conflicting_attribute); return nullptr; } if (D->hasAttr<AlwaysInlineAttr>()) return nullptr; return ::new (Context) AlwaysInlineAttr(Range, Context, AttrSpellingListIndex); } MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex) { if (OptimizeNoneAttr *Optnone = D->getAttr<OptimizeNoneAttr>()) { Diag(Range.getBegin(), diag::warn_attribute_ignored) << "'minsize'"; Diag(Optnone->getLocation(), diag::note_conflicting_attribute); return nullptr; } if (D->hasAttr<MinSizeAttr>()) return nullptr; return ::new (Context) MinSizeAttr(Range, Context, AttrSpellingListIndex); } OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex) { if (AlwaysInlineAttr *Inline = D->getAttr<AlwaysInlineAttr>()) { Diag(Inline->getLocation(), diag::warn_attribute_ignored) << Inline; Diag(Range.getBegin(), diag::note_conflicting_attribute); D->dropAttr<AlwaysInlineAttr>(); } if (MinSizeAttr *MinSize = D->getAttr<MinSizeAttr>()) { Diag(MinSize->getLocation(), diag::warn_attribute_ignored) << MinSize; Diag(Range.getBegin(), diag::note_conflicting_attribute); D->dropAttr<MinSizeAttr>(); } if (D->hasAttr<OptimizeNoneAttr>()) return nullptr; return ::new (Context) OptimizeNoneAttr(Range, Context, AttrSpellingListIndex); } static void handleAlwaysInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (AlwaysInlineAttr *Inline = S.mergeAlwaysInlineAttr( D, Attr.getRange(), Attr.getName(), Attr.getAttributeSpellingListIndex())) D->addAttr(Inline); } static void handleMinSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (MinSizeAttr *MinSize = S.mergeMinSizeAttr( D, Attr.getRange(), Attr.getAttributeSpellingListIndex())) D->addAttr(MinSize); } static void handleOptimizeNoneAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (OptimizeNoneAttr *Optnone = S.mergeOptimizeNoneAttr( D, Attr.getRange(), Attr.getAttributeSpellingListIndex())) D->addAttr(Optnone); } static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) { FunctionDecl *FD = cast<FunctionDecl>(D); if (!FD->getReturnType()->isVoidType()) { SourceRange RTRange = FD->getReturnTypeSourceRange(); S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return) << FD->getType() << (RTRange.isValid() ? 
FixItHint::CreateReplacement(RTRange, "void") : FixItHint()); return; } D->addAttr(::new (S.Context) CUDAGlobalAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) { FunctionDecl *Fn = cast<FunctionDecl>(D); if (!Fn->isInlineSpecified()) { S.Diag(Attr.getLoc(), diag::warn_gnu_inline_attribute_requires_inline); return; } D->addAttr(::new (S.Context) GNUInlineAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (hasDeclarator(D)) return; // Diagnostic is emitted elsewhere: here we store the (valid) Attr // in the Decl node for syntactic reasoning, e.g., pretty-printing. CallingConv CC; if (S.CheckCallingConvAttr(Attr, CC, /*FD*/nullptr)) return; if (!isa<ObjCMethodDecl>(D)) { S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedFunctionOrMethod; return; } switch (Attr.getKind()) { case AttributeList::AT_FastCall: D->addAttr(::new (S.Context) FastCallAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_StdCall: D->addAttr(::new (S.Context) StdCallAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_ThisCall: D->addAttr(::new (S.Context) ThisCallAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_CDecl: D->addAttr(::new (S.Context) CDeclAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_Pascal: D->addAttr(::new (S.Context) PascalAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_VectorCall: D->addAttr(::new (S.Context) VectorCallAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_MSABI: D->addAttr(::new (S.Context) MSABIAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_SysVABI: D->addAttr(::new (S.Context) SysVABIAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_Pcs: { PcsAttr::PCSType PCS; switch (CC) { case CC_AAPCS: PCS = PcsAttr::AAPCS; break; case CC_AAPCS_VFP: PCS = PcsAttr::AAPCS_VFP; break; default: llvm_unreachable("unexpected calling convention in pcs attribute"); } D->addAttr(::new (S.Context) PcsAttr(Attr.getRange(), S.Context, PCS, Attr.getAttributeSpellingListIndex())); return; } case AttributeList::AT_IntelOclBicc: D->addAttr(::new (S.Context) IntelOclBiccAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; default: llvm_unreachable("unexpected attribute kind"); } } bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD) { if (attr.isInvalid()) return true; unsigned ReqArgs = attr.getKind() == AttributeList::AT_Pcs ? 1 : 0; if (!checkAttributeNumArgs(*this, attr, ReqArgs)) { attr.setInvalid(); return true; } // TODO: diagnose uses of these conventions on the wrong target. 
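// (For now a mismatch such as __attribute__((fastcall)) on a non-x86
// target is only caught below via checkCallingConvention, which typically
// demotes the convention to the target default with a warning.)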
switch (attr.getKind()) { case AttributeList::AT_CDecl: CC = CC_C; break; case AttributeList::AT_FastCall: CC = CC_X86FastCall; break; case AttributeList::AT_StdCall: CC = CC_X86StdCall; break; case AttributeList::AT_ThisCall: CC = CC_X86ThisCall; break; case AttributeList::AT_Pascal: CC = CC_X86Pascal; break; case AttributeList::AT_VectorCall: CC = CC_X86VectorCall; break; case AttributeList::AT_MSABI: CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_C : CC_X86_64Win64; break; case AttributeList::AT_SysVABI: CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_X86_64SysV : CC_C; break; case AttributeList::AT_Pcs: { StringRef StrRef; if (!checkStringLiteralArgumentAttr(attr, 0, StrRef)) { attr.setInvalid(); return true; } if (StrRef == "aapcs") { CC = CC_AAPCS; break; } else if (StrRef == "aapcs-vfp") { CC = CC_AAPCS_VFP; break; } attr.setInvalid(); Diag(attr.getLoc(), diag::err_invalid_pcs); return true; } case AttributeList::AT_IntelOclBicc: CC = CC_IntelOclBicc; break; default: llvm_unreachable("unexpected attribute kind"); } const TargetInfo &TI = Context.getTargetInfo(); TargetInfo::CallingConvCheckResult A = TI.checkCallingConvention(CC); if (A != TargetInfo::CCCR_OK) { if (A == TargetInfo::CCCR_Warning) Diag(attr.getLoc(), diag::warn_cconv_ignored) << attr.getName(); // This convention is not valid for the target. Use the default function or // method calling convention. TargetInfo::CallingConvMethodType MT = TargetInfo::CCMT_Unknown; if (FD) MT = FD->isCXXInstanceMember() ? TargetInfo::CCMT_Member : TargetInfo::CCMT_NonMember; CC = TI.getDefaultCallingConv(MT); } return false; } /// Checks a regparm attribute, returning true if it is ill-formed and /// otherwise setting numParams to the appropriate value. bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) { if (Attr.isInvalid()) return true; if (!checkAttributeNumArgs(*this, Attr, 1)) { Attr.setInvalid(); return true; } uint32_t NP; Expr *NumParamsExpr = Attr.getArgAsExpr(0); if (!checkUInt32Argument(*this, Attr, NumParamsExpr, NP)) { Attr.setInvalid(); return true; } if (Context.getTargetInfo().getRegParmMax() == 0) { Diag(Attr.getLoc(), diag::err_attribute_regparm_wrong_platform) << NumParamsExpr->getSourceRange(); Attr.setInvalid(); return true; } numParams = NP; if (numParams > Context.getTargetInfo().getRegParmMax()) { Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number) << Context.getTargetInfo().getRegParmMax() << NumParamsExpr->getSourceRange(); Attr.setInvalid(); return true; } return false; } // Checks whether an argument of launch_bounds attribute is acceptable // May output an error. static bool checkLaunchBoundsArgument(Sema &S, Expr *E, const CUDALaunchBoundsAttr &Attr, const unsigned Idx) { if (S.DiagnoseUnexpandedParameterPack(E)) return false; // Accept template arguments for now as they depend on something else. // We'll get to check them when they eventually get instantiated. if (E->isValueDependent()) return true; llvm::APSInt I(64); if (!E->isIntegerConstantExpr(I, S.Context)) { S.Diag(E->getExprLoc(), diag::err_attribute_argument_n_type) << &Attr << Idx << AANT_ArgumentIntegerConstant << E->getSourceRange(); return false; } // Make sure we can fit it in 32 bits. 
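// e.g. __launch_bounds__(1ULL << 32) evaluates as a 64-bit constant but
// is rejected here because it does not fit in 32 bits.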
if (!I.isIntN(32)) { S.Diag(E->getExprLoc(), diag::err_ice_too_large) << I.toString(10, false) << 32 << /* Unsigned */ 1; return false; } if (I < 0) S.Diag(E->getExprLoc(), diag::warn_attribute_argument_n_negative) << &Attr << Idx << E->getSourceRange(); return true; } void Sema::AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads, Expr *MinBlocks, unsigned SpellingListIndex) { CUDALaunchBoundsAttr TmpAttr(AttrRange, Context, MaxThreads, MinBlocks, SpellingListIndex); if (!checkLaunchBoundsArgument(*this, MaxThreads, TmpAttr, 0)) return; if (MinBlocks && !checkLaunchBoundsArgument(*this, MinBlocks, TmpAttr, 1)) return; D->addAttr(::new (Context) CUDALaunchBoundsAttr( AttrRange, Context, MaxThreads, MinBlocks, SpellingListIndex)); } static void handleLaunchBoundsAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkAttributeAtLeastNumArgs(S, Attr, 1) || !checkAttributeAtMostNumArgs(S, Attr, 2)) return; S.AddLaunchBoundsAttr(Attr.getRange(), D, Attr.getArgAsExpr(0), Attr.getNumArgs() > 1 ? Attr.getArgAsExpr(1) : nullptr, Attr.getAttributeSpellingListIndex()); } static void handleArgumentWithTypeTagAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!Attr.isArgIdent(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << /* arg num = */ 1 << AANT_ArgumentIdentifier; return; } if (!checkAttributeNumArgs(S, Attr, 3)) return; IdentifierInfo *ArgumentKind = Attr.getArgAsIdent(0)->Ident; if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type) << Attr.getName() << ExpectedFunctionOrMethod; return; } uint64_t ArgumentIdx; if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 2, Attr.getArgAsExpr(1), ArgumentIdx)) return; uint64_t TypeTagIdx; if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 3, Attr.getArgAsExpr(2), TypeTagIdx)) return; bool IsPointer = (Attr.getName()->getName() == "pointer_with_type_tag"); if (IsPointer) { // Ensure that buffer has a pointer type. QualType BufferTy = getFunctionOrMethodParamType(D, ArgumentIdx); if (!BufferTy->isPointerType()) { S.Diag(Attr.getLoc(), diag::err_attribute_pointers_only) << Attr.getName(); } } D->addAttr(::new (S.Context) ArgumentWithTypeTagAttr(Attr.getRange(), S.Context, ArgumentKind, ArgumentIdx, TypeTagIdx, IsPointer, Attr.getAttributeSpellingListIndex())); } static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!Attr.isArgIdent(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type) << Attr.getName() << 1 << AANT_ArgumentIdentifier; return; } if (!checkAttributeNumArgs(S, Attr, 1)) return; if (!isa<VarDecl>(D)) { S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type) << Attr.getName() << ExpectedVariable; return; } IdentifierInfo *PointerKind = Attr.getArgAsIdent(0)->Ident; TypeSourceInfo *MatchingCTypeLoc = nullptr; S.GetTypeFromParser(Attr.getMatchingCType(), &MatchingCTypeLoc); assert(MatchingCTypeLoc && "no type source info for attribute argument"); D->addAttr(::new (S.Context) TypeTagForDatatypeAttr(Attr.getRange(), S.Context, PointerKind, MatchingCTypeLoc, Attr.getLayoutCompatible(), Attr.getMustBeNull(), Attr.getAttributeSpellingListIndex())); } //===----------------------------------------------------------------------===// // Checker-specific attribute handlers. 
//===----------------------------------------------------------------------===// static bool isValidSubjectOfNSReturnsRetainedAttribute(QualType type) { return type->isDependentType() || type->isObjCRetainableType(); } static bool isValidSubjectOfNSAttribute(Sema &S, QualType type) { return type->isDependentType() || type->isObjCObjectPointerType() || S.Context.isObjCNSObjectType(type); } static bool isValidSubjectOfCFAttribute(Sema &S, QualType type) { return type->isDependentType() || type->isPointerType() || isValidSubjectOfNSAttribute(S, type); } static void handleNSConsumedAttr(Sema &S, Decl *D, const AttributeList &Attr) { ParmVarDecl *param = cast<ParmVarDecl>(D); bool typeOK, cf; if (Attr.getKind() == AttributeList::AT_NSConsumed) { typeOK = isValidSubjectOfNSAttribute(S, param->getType()); cf = false; } else { typeOK = isValidSubjectOfCFAttribute(S, param->getType()); cf = true; } if (!typeOK) { S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type) << Attr.getRange() << Attr.getName() << cf; return; } if (cf) param->addAttr(::new (S.Context) CFConsumedAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); else param->addAttr(::new (S.Context) NSConsumedAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleNSReturnsRetainedAttr(Sema &S, Decl *D, const AttributeList &Attr) { QualType returnType; if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) returnType = MD->getReturnType(); else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) && (Attr.getKind() == AttributeList::AT_NSReturnsRetained)) return; // ignore: was handled as a type attribute else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) returnType = PD->getType(); else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) returnType = FD->getReturnType(); else if (auto *Param = dyn_cast<ParmVarDecl>(D)) { returnType = Param->getType()->getPointeeType(); if (returnType.isNull()) { S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type) << Attr.getName() << /*pointer-to-CF*/2 << Attr.getRange(); return; } } else { AttributeDeclKind ExpectedDeclKind; switch (Attr.getKind()) { default: llvm_unreachable("invalid ownership attribute"); case AttributeList::AT_NSReturnsRetained: case AttributeList::AT_NSReturnsAutoreleased: case AttributeList::AT_NSReturnsNotRetained: ExpectedDeclKind = ExpectedFunctionOrMethod; break; case AttributeList::AT_CFReturnsRetained: case AttributeList::AT_CFReturnsNotRetained: ExpectedDeclKind = ExpectedFunctionMethodOrParameter; break; } S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type) << Attr.getRange() << Attr.getName() << ExpectedDeclKind; return; } bool typeOK; bool cf; switch (Attr.getKind()) { default: llvm_unreachable("invalid ownership attribute"); case AttributeList::AT_NSReturnsRetained: typeOK = isValidSubjectOfNSReturnsRetainedAttribute(returnType); cf = false; break; case AttributeList::AT_NSReturnsAutoreleased: case AttributeList::AT_NSReturnsNotRetained: typeOK = isValidSubjectOfNSAttribute(S, returnType); cf = false; break; case AttributeList::AT_CFReturnsRetained: case AttributeList::AT_CFReturnsNotRetained: typeOK = isValidSubjectOfCFAttribute(S, returnType); cf = true; break; } if (!typeOK) { if (isa<ParmVarDecl>(D)) { S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type) << Attr.getName() << /*pointer-to-CF*/2 << Attr.getRange(); } else { // Needs to be kept in sync with warn_ns_attribute_wrong_return_type. 
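// (The enumerator values below provide the %select index for that
// diagnostic, so reordering either side changes the emitted wording.)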
enum : unsigned { Function, Method, Property } SubjectKind = Function; if (isa<ObjCMethodDecl>(D)) SubjectKind = Method; else if (isa<ObjCPropertyDecl>(D)) SubjectKind = Property; S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type) << Attr.getName() << SubjectKind << cf << Attr.getRange(); } return; } switch (Attr.getKind()) { default: llvm_unreachable("invalid ownership attribute"); case AttributeList::AT_NSReturnsAutoreleased: D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_CFReturnsNotRetained: D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_NSReturnsNotRetained: D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_CFReturnsRetained: D->addAttr(::new (S.Context) CFReturnsRetainedAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; case AttributeList::AT_NSReturnsRetained: D->addAttr(::new (S.Context) NSReturnsRetainedAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); return; }; } static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D, const AttributeList &attr) { const int EP_ObjCMethod = 1; const int EP_ObjCProperty = 2; SourceLocation loc = attr.getLoc(); QualType resultType; if (isa<ObjCMethodDecl>(D)) resultType = cast<ObjCMethodDecl>(D)->getReturnType(); else resultType = cast<ObjCPropertyDecl>(D)->getType(); if (!resultType->isReferenceType() && (!resultType->isPointerType() || resultType->isObjCRetainableType())) { S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type) << SourceRange(loc) << attr.getName() << (isa<ObjCMethodDecl>(D) ? EP_ObjCMethod : EP_ObjCProperty) << /*non-retainable pointer*/ 2; // Drop the attribute. return; } D->addAttr(::new (S.Context) ObjCReturnsInnerPointerAttr( attr.getRange(), S.Context, attr.getAttributeSpellingListIndex())); } static void handleObjCRequiresSuperAttr(Sema &S, Decl *D, const AttributeList &attr) { ObjCMethodDecl *method = cast<ObjCMethodDecl>(D); DeclContext *DC = method->getDeclContext(); if (const ObjCProtocolDecl *PDecl = dyn_cast_or_null<ObjCProtocolDecl>(DC)) { S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol) << attr.getName() << 0; S.Diag(PDecl->getLocation(), diag::note_protocol_decl); return; } if (method->getMethodFamily() == OMF_dealloc) { S.Diag(D->getLocStart(), diag::warn_objc_requires_super_protocol) << attr.getName() << 1; return; } method->addAttr(::new (S.Context) ObjCRequiresSuperAttr(attr.getRange(), S.Context, attr.getAttributeSpellingListIndex())); } static void handleCFAuditedTransferAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (checkAttrMutualExclusion<CFUnknownTransferAttr>(S, D, Attr)) return; D->addAttr(::new (S.Context) CFAuditedTransferAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleCFUnknownTransferAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (checkAttrMutualExclusion<CFAuditedTransferAttr>(S, D, Attr)) return; D->addAttr(::new (S.Context) CFUnknownTransferAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleObjCBridgeAttr(Sema &S, Scope *Sc, Decl *D, const AttributeList &Attr) { IdentifierLoc * Parm = Attr.isArgIdent(0) ? 
Attr.getArgAsIdent(0) : nullptr; if (!Parm) { S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0; return; } // Typedefs only allow objc_bridge(id) and have some additional checking. if (auto TD = dyn_cast<TypedefNameDecl>(D)) { if (!Parm->Ident->isStr("id")) { S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_id) << Attr.getName(); return; } // Only allow 'cv void *'. QualType T = TD->getUnderlyingType(); if (!T->isVoidPointerType()) { S.Diag(Attr.getLoc(), diag::err_objc_attr_typedef_not_void_pointer); return; } } D->addAttr(::new (S.Context) ObjCBridgeAttr(Attr.getRange(), S.Context, Parm->Ident, Attr.getAttributeSpellingListIndex())); } static void handleObjCBridgeMutableAttr(Sema &S, Scope *Sc, Decl *D, const AttributeList &Attr) { IdentifierLoc * Parm = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0) : nullptr; if (!Parm) { S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0; return; } D->addAttr(::new (S.Context) ObjCBridgeMutableAttr(Attr.getRange(), S.Context, Parm->Ident, Attr.getAttributeSpellingListIndex())); } static void handleObjCBridgeRelatedAttr(Sema &S, Scope *Sc, Decl *D, const AttributeList &Attr) { IdentifierInfo *RelatedClass = Attr.isArgIdent(0) ? Attr.getArgAsIdent(0)->Ident : nullptr; if (!RelatedClass) { S.Diag(D->getLocStart(), diag::err_objc_attr_not_id) << Attr.getName() << 0; return; } IdentifierInfo *ClassMethod = Attr.getArgAsIdent(1) ? Attr.getArgAsIdent(1)->Ident : nullptr; IdentifierInfo *InstanceMethod = Attr.getArgAsIdent(2) ? Attr.getArgAsIdent(2)->Ident : nullptr; D->addAttr(::new (S.Context) ObjCBridgeRelatedAttr(Attr.getRange(), S.Context, RelatedClass, ClassMethod, InstanceMethod, Attr.getAttributeSpellingListIndex())); } static void handleObjCDesignatedInitializer(Sema &S, Decl *D, const AttributeList &Attr) { ObjCInterfaceDecl *IFace; if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(D->getDeclContext())) IFace = CatDecl->getClassInterface(); else IFace = cast<ObjCInterfaceDecl>(D->getDeclContext()); if (!IFace) return; IFace->setHasDesignatedInitializers(); D->addAttr(::new (S.Context) ObjCDesignatedInitializerAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleObjCRuntimeName(Sema &S, Decl *D, const AttributeList &Attr) { StringRef MetaDataName; if (!S.checkStringLiteralArgumentAttr(Attr, 0, MetaDataName)) return; D->addAttr(::new (S.Context) ObjCRuntimeNameAttr(Attr.getRange(), S.Context, MetaDataName, Attr.getAttributeSpellingListIndex())); } // when a user wants to use objc_boxable with a union or struct // but she doesn't have access to the declaration (legacy/third-party code) // then she can 'enable' this feature via trick with a typedef // e.g.: // typedef struct __attribute((objc_boxable)) legacy_struct legacy_struct; static void handleObjCBoxable(Sema &S, Decl *D, const AttributeList &Attr) { bool notify = false; RecordDecl *RD = dyn_cast<RecordDecl>(D); if (RD && RD->getDefinition()) { RD = RD->getDefinition(); notify = true; } if (RD) { ObjCBoxableAttr *BoxableAttr = ::new (S.Context) ObjCBoxableAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()); RD->addAttr(BoxableAttr); if (notify) { // we need to notify ASTReader/ASTWriter about // modification of existing declaration if (ASTMutationListener *L = S.getASTMutationListener()) L->AddedAttributeToRecord(BoxableAttr, RD); } } } static void handleObjCOwnershipAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (hasDeclarator(D)) return; S.Diag(D->getLocStart(), 
diag::err_attribute_wrong_decl_type) << Attr.getRange() << Attr.getName() << ExpectedVariable; } static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D, const AttributeList &Attr) { ValueDecl *vd = cast<ValueDecl>(D); QualType type = vd->getType(); if (!type->isDependentType() && !type->isObjCLifetimeType()) { S.Diag(Attr.getLoc(), diag::err_objc_precise_lifetime_bad_type) << type; return; } Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime(); // If we have no lifetime yet, check the lifetime we're presumably // going to infer. if (lifetime == Qualifiers::OCL_None && !type->isDependentType()) lifetime = type->getObjCARCImplicitLifetime(); switch (lifetime) { case Qualifiers::OCL_None: assert(type->isDependentType() && "didn't infer lifetime for non-dependent type?"); break; case Qualifiers::OCL_Weak: // meaningful case Qualifiers::OCL_Strong: // meaningful break; case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: S.Diag(Attr.getLoc(), diag::warn_objc_precise_lifetime_meaningless) << (lifetime == Qualifiers::OCL_Autoreleasing); break; } D->addAttr(::new (S.Context) ObjCPreciseLifetimeAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } //===----------------------------------------------------------------------===// // Microsoft specific attribute handlers. //===----------------------------------------------------------------------===// static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!S.LangOpts.CPlusPlus) { S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang) << Attr.getName() << AttributeLangSupport::C; return; } if (!isa<CXXRecordDecl>(D)) { S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << ExpectedClass; return; } StringRef StrRef; SourceLocation LiteralLoc; if (!S.checkStringLiteralArgumentAttr(Attr, 0, StrRef, &LiteralLoc)) return; // GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or // "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}", normalize to the former. if (StrRef.size() == 38 && StrRef.front() == '{' && StrRef.back() == '}') StrRef = StrRef.drop_front().drop_back(); // Validate GUID length. 
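// A normalized GUID is exactly 36 characters, e.g.
// "01234567-89ab-cdef-0123-456789abcdef": '-' at offsets 8, 13, 18 and 23
// and hex digits everywhere else, which is what the loop below enforces.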
if (StrRef.size() != 36) { S.Diag(LiteralLoc, diag::err_attribute_uuid_malformed_guid); return; } for (unsigned i = 0; i < 36; ++i) { if (i == 8 || i == 13 || i == 18 || i == 23) { if (StrRef[i] != '-') { S.Diag(LiteralLoc, diag::err_attribute_uuid_malformed_guid); return; } } else if (!isHexDigit(StrRef[i])) { S.Diag(LiteralLoc, diag::err_attribute_uuid_malformed_guid); return; } } D->addAttr(::new (S.Context) UuidAttr(Attr.getRange(), S.Context, StrRef, Attr.getAttributeSpellingListIndex())); } static void handleMSInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!S.LangOpts.CPlusPlus) { S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang) << Attr.getName() << AttributeLangSupport::C; return; } MSInheritanceAttr *IA = S.mergeMSInheritanceAttr( D, Attr.getRange(), /*BestCase=*/true, Attr.getAttributeSpellingListIndex(), (MSInheritanceAttr::Spelling)Attr.getSemanticSpelling()); if (IA) D->addAttr(IA); } static void handleDeclspecThreadAttr(Sema &S, Decl *D, const AttributeList &Attr) { VarDecl *VD = cast<VarDecl>(D); if (!S.Context.getTargetInfo().isTLSSupported()) { S.Diag(Attr.getLoc(), diag::err_thread_unsupported); return; } if (VD->getTSCSpec() != TSCS_unspecified) { S.Diag(Attr.getLoc(), diag::err_declspec_thread_on_thread_variable); return; } if (VD->hasLocalStorage()) { S.Diag(Attr.getLoc(), diag::err_thread_non_global) << "__declspec(thread)"; return; } VD->addAttr(::new (S.Context) ThreadAttr( Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } static void handleARMInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Check the attribute arguments. if (Attr.getNumArgs() > 1) { S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << Attr.getName() << 1; return; } StringRef Str; SourceLocation ArgLoc; if (Attr.getNumArgs() == 0) Str = ""; else if (!S.checkStringLiteralArgumentAttr(Attr, 0, Str, &ArgLoc)) return; ARMInterruptAttr::InterruptType Kind; if (!ARMInterruptAttr::ConvertStrToInterruptType(Str, Kind)) { S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported) << Attr.getName() << Str << ArgLoc; return; } unsigned Index = Attr.getAttributeSpellingListIndex(); D->addAttr(::new (S.Context) ARMInterruptAttr(Attr.getLoc(), S.Context, Kind, Index)); } static void handleMSP430InterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkAttributeNumArgs(S, Attr, 1)) return; if (!Attr.isArgExpr(0)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIntegerConstant; return; } // FIXME: Check for decl - it should be void ()(void). Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0)); llvm::APSInt NumParams(32); if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_type) << Attr.getName() << AANT_ArgumentIntegerConstant << NumParamsExpr->getSourceRange(); return; } unsigned Num = NumParams.getLimitedValue(255); if ((Num & 1) || Num > 30) { S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds) << Attr.getName() << (int)NumParams.getSExtValue() << NumParamsExpr->getSourceRange(); return; } D->addAttr(::new (S.Context) MSP430InterruptAttr(Attr.getLoc(), S.Context, Num, Attr.getAttributeSpellingListIndex())); D->addAttr(UsedAttr::CreateImplicit(S.Context)); } static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Dispatch the interrupt attribute based on the current target. 
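// (MSP430 takes an integer vector number, e.g.
// __attribute__((interrupt(12))), while the ARM form takes an optional
// kind string such as __attribute__((interrupt("IRQ"))).)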
if (S.Context.getTargetInfo().getTriple().getArch() == llvm::Triple::msp430) handleMSP430InterruptAttr(S, D, Attr); else handleARMInterruptAttr(S, D, Attr); } static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D, const AttributeList &Attr) { uint32_t NumRegs; Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0)); if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs)) return; D->addAttr(::new (S.Context) AMDGPUNumVGPRAttr(Attr.getLoc(), S.Context, NumRegs, Attr.getAttributeSpellingListIndex())); } static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D, const AttributeList &Attr) { uint32_t NumRegs; Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0)); if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs)) return; D->addAttr(::new (S.Context) AMDGPUNumSGPRAttr(Attr.getLoc(), S.Context, NumRegs, Attr.getAttributeSpellingListIndex())); } static void handleX86ForceAlignArgPointerAttr(Sema &S, Decl *D, const AttributeList& Attr) { // If we try to apply it to a function pointer, don't warn, but don't // do anything, either. It doesn't matter anyway, because there's nothing // special about calling a force_align_arg_pointer function. ValueDecl *VD = dyn_cast<ValueDecl>(D); if (VD && VD->getType()->isFunctionPointerType()) return; // Also don't warn on function pointer typedefs. TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D); if (TD && (TD->getUnderlyingType()->isFunctionPointerType() || TD->getUnderlyingType()->isFunctionType())) return; // Attribute can only be applied to function types. if (!isa<FunctionDecl>(D)) { S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type) << Attr.getName() << /* function */0; return; } D->addAttr(::new (S.Context) X86ForceAlignArgPointerAttr(Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex())); } DLLImportAttr *Sema::mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex) { if (D->hasAttr<DLLExportAttr>()) { Diag(Range.getBegin(), diag::warn_attribute_ignored) << "'dllimport'"; return nullptr; } if (D->hasAttr<DLLImportAttr>()) return nullptr; return ::new (Context) DLLImportAttr(Range, Context, AttrSpellingListIndex); } DLLExportAttr *Sema::mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex) { if (DLLImportAttr *Import = D->getAttr<DLLImportAttr>()) { Diag(Import->getLocation(), diag::warn_attribute_ignored) << Import; D->dropAttr<DLLImportAttr>(); } if (D->hasAttr<DLLExportAttr>()) return nullptr; return ::new (Context) DLLExportAttr(Range, Context, AttrSpellingListIndex); } static void handleDLLAttr(Sema &S, Decl *D, const AttributeList &A) { if (isa<ClassTemplatePartialSpecializationDecl>(D) && S.Context.getTargetInfo().getCXXABI().isMicrosoft()) { S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored) << A.getName(); return; } if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { if (FD->isInlined() && A.getKind() == AttributeList::AT_DLLImport && !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) { // MinGW doesn't allow dllimport on inline functions. S.Diag(A.getRange().getBegin(), diag::warn_attribute_ignored_on_inline) << A.getName(); return; } } unsigned Index = A.getAttributeSpellingListIndex(); Attr *NewAttr = A.getKind() == AttributeList::AT_DLLExport ? 
(Attr *)S.mergeDLLExportAttr(D, A.getRange(), Index) : (Attr *)S.mergeDLLImportAttr(D, A.getRange(), Index); if (NewAttr) D->addAttr(NewAttr); } MSInheritanceAttr * Sema::mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling) { if (MSInheritanceAttr *IA = D->getAttr<MSInheritanceAttr>()) { if (IA->getSemanticSpelling() == SemanticSpelling) return nullptr; Diag(IA->getLocation(), diag::err_mismatched_ms_inheritance) << 1 /*previous declaration*/; Diag(Range.getBegin(), diag::note_previous_ms_inheritance); D->dropAttr<MSInheritanceAttr>(); } CXXRecordDecl *RD = cast<CXXRecordDecl>(D); if (RD->hasDefinition()) { if (checkMSInheritanceAttrOnDefinition(RD, Range, BestCase, SemanticSpelling)) { return nullptr; } } else { if (isa<ClassTemplatePartialSpecializationDecl>(RD)) { Diag(Range.getBegin(), diag::warn_ignored_ms_inheritance) << 1 /*partial specialization*/; return nullptr; } if (RD->getDescribedClassTemplate()) { Diag(Range.getBegin(), diag::warn_ignored_ms_inheritance) << 0 /*primary template*/; return nullptr; } } return ::new (Context) MSInheritanceAttr(Range, Context, BestCase, AttrSpellingListIndex); } static void handleCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { // The capability attributes take a single string parameter for the name of // the capability they represent. The lockable attribute does not take any // parameters. However, semantically, both attributes represent the same // concept, and so they use the same semantic attribute. Eventually, the // lockable attribute will be removed. // // For backward compatibility, any capability which has no specified string // literal will be considered a "mutex." StringRef N("mutex"); SourceLocation LiteralLoc; if (Attr.getKind() == AttributeList::AT_Capability && !S.checkStringLiteralArgumentAttr(Attr, 0, N, &LiteralLoc)) return; // Currently, there are only two names allowed for a capability: role and // mutex (case insensitive). Diagnose other capability names. if (!N.equals_lower("mutex") && !N.equals_lower("role")) S.Diag(LiteralLoc, diag::warn_invalid_capability_name) << N; D->addAttr(::new (S.Context) CapabilityAttr(Attr.getRange(), S.Context, N, Attr.getAttributeSpellingListIndex())); } static void handleAssertCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { D->addAttr(::new (S.Context) AssertCapabilityAttr(Attr.getRange(), S.Context, Attr.getArgAsExpr(0), Attr.getAttributeSpellingListIndex())); } static void handleAcquireCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 1> Args; if (!checkLockFunAttrCommon(S, D, Attr, Args)) return; D->addAttr(::new (S.Context) AcquireCapabilityAttr(Attr.getRange(), S.Context, Args.data(), Args.size(), Attr.getAttributeSpellingListIndex())); } static void handleTryAcquireCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { SmallVector<Expr*, 2> Args; if (!checkTryLockFunAttrCommon(S, D, Attr, Args)) return; D->addAttr(::new (S.Context) TryAcquireCapabilityAttr(Attr.getRange(), S.Context, Attr.getArgAsExpr(0), Args.data(), Args.size(), Attr.getAttributeSpellingListIndex())); } static void handleReleaseCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { // Check that all arguments are lockable objects. 
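// Illustrative use: 'void Unlock() __attribute__((release_capability(Mu)));'
// where Mu is an object whose type carries the capability attribute.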
SmallVector<Expr *, 1> Args; checkAttrArgsAreCapabilityObjs(S, D, Attr, Args, 0, true); D->addAttr(::new (S.Context) ReleaseCapabilityAttr( Attr.getRange(), S.Context, Args.data(), Args.size(), Attr.getAttributeSpellingListIndex())); } static void handleRequiresCapabilityAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkAttributeAtLeastNumArgs(S, Attr, 1)) return; // check that all arguments are lockable objects SmallVector<Expr*, 1> Args; checkAttrArgsAreCapabilityObjs(S, D, Attr, Args); if (Args.empty()) return; RequiresCapabilityAttr *RCA = ::new (S.Context) RequiresCapabilityAttr(Attr.getRange(), S.Context, Args.data(), Args.size(), Attr.getAttributeSpellingListIndex()); D->addAttr(RCA); } static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (auto *NSD = dyn_cast<NamespaceDecl>(D)) { if (NSD->isAnonymousNamespace()) { S.Diag(Attr.getLoc(), diag::warn_deprecated_anonymous_namespace); // Do not want to attach the attribute to the namespace because that will // cause confusing diagnostic reports for uses of declarations within the // namespace. return; } } if (!S.getLangOpts().CPlusPlus14) if (Attr.isCXX11Attribute() && !(Attr.hasScope() && Attr.getScopeName()->isStr("gnu"))) S.Diag(Attr.getLoc(), diag::ext_deprecated_attr_is_a_cxx14_extension); handleAttrWithMessage<DeprecatedAttr>(S, D, Attr); } static void handleNoSanitizeAttr(Sema &S, Decl *D, const AttributeList &Attr) { if (!checkAttributeAtLeastNumArgs(S, Attr, 1)) return; std::vector<std::string> Sanitizers; for (unsigned I = 0, E = Attr.getNumArgs(); I != E; ++I) { StringRef SanitizerName; SourceLocation LiteralLoc; if (!S.checkStringLiteralArgumentAttr(Attr, I, SanitizerName, &LiteralLoc)) return; if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) == 0) S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName; Sanitizers.push_back(SanitizerName); } D->addAttr(::new (S.Context) NoSanitizeAttr( Attr.getRange(), S.Context, Sanitizers.data(), Sanitizers.size(), Attr.getAttributeSpellingListIndex())); } static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D, const AttributeList &Attr) { std::string SanitizerName = llvm::StringSwitch<std::string>(Attr.getName()->getName()) .Case("no_address_safety_analysis", "address") .Case("no_sanitize_address", "address") .Case("no_sanitize_thread", "thread") .Case("no_sanitize_memory", "memory"); D->addAttr(::new (S.Context) NoSanitizeAttr(Attr.getRange(), S.Context, &SanitizerName, 1, Attr.getAttributeSpellingListIndex())); } /// Handles semantic checking for features that are common to all attributes, /// such as checking whether a parameter was properly specified, or the correct /// number of arguments were passed, etc. static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D, const AttributeList &Attr) { // Several attributes carry different semantics than the parsing requires, so // those are opted out of the common handling. // // We also bail on unknown and ignored attributes because those are handled // as part of the target-specific handling logic. if (Attr.hasCustomParsing() || Attr.getKind() == AttributeList::UnknownAttribute) return false; // Check whether the attribute requires specific language extensions to be // enabled. if (!Attr.diagnoseLangOpts(S)) return true; if (Attr.getMinArgs() == Attr.getMaxArgs()) { // If there are no optional arguments, then checking for the argument count // is trivial. 
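// (This also covers attributes that take no arguments at all, where
// MinArgs == MaxArgs == 0.)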
if (!checkAttributeNumArgs(S, Attr, Attr.getMinArgs())) return true; } else { // There are optional arguments, so checking is slightly more involved. if (Attr.getMinArgs() && !checkAttributeAtLeastNumArgs(S, Attr, Attr.getMinArgs())) return true; else if (!Attr.hasVariadicArg() && Attr.getMaxArgs() && !checkAttributeAtMostNumArgs(S, Attr, Attr.getMaxArgs())) return true; } // Check whether the attribute appertains to the given subject. if (!Attr.diagnoseAppertainsTo(S, D)) return true; return false; } //===----------------------------------------------------------------------===// // Top Level Sema Entry Points //===----------------------------------------------------------------------===// /// ProcessDeclAttribute - Apply the specific attribute to the specified decl if /// the attribute applies to decls. If the attribute is a type attribute, just /// silently ignore it if a GNU attribute. static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const AttributeList &Attr, bool IncludeCXX11Attributes) { if (Attr.isInvalid() || Attr.getKind() == AttributeList::IgnoredAttribute) return; // Ignore C++11 attributes on declarator chunks: they appertain to the type // instead. if (Attr.isCXX11Attribute() && !IncludeCXX11Attributes) return; // Unknown attributes are automatically warned on. Target-specific attributes // which do not apply to the current target architecture are treated as // though they were unknown attributes. if (Attr.getKind() == AttributeList::UnknownAttribute || !Attr.existsInTarget(S.Context.getTargetInfo().getTriple())) { S.Diag(Attr.getLoc(), Attr.isDeclspecAttribute() ? diag::warn_unhandled_ms_attribute_ignored : diag::warn_unknown_attribute_ignored) << Attr.getName(); return; } if (handleCommonAttributeFeatures(S, scope, D, Attr)) return; switch (Attr.getKind()) { default: // HLSL Change Starts if (S.LangOpts.HLSL) { bool Handled = false; hlsl::HandleDeclAttributeForHLSL(S, D, Attr, Handled); if (Handled) break; } // HLSL Change Ends // Type attributes are handled elsewhere; silently move on. 
assert(Attr.isTypeAttr() && "Non-type attribute not handled"); break; case AttributeList::AT_Interrupt: handleInterruptAttr(S, D, Attr); break; case AttributeList::AT_X86ForceAlignArgPointer: handleX86ForceAlignArgPointerAttr(S, D, Attr); break; case AttributeList::AT_DLLExport: case AttributeList::AT_DLLImport: handleDLLAttr(S, D, Attr); break; case AttributeList::AT_Mips16: handleSimpleAttribute<Mips16Attr>(S, D, Attr); break; case AttributeList::AT_NoMips16: handleSimpleAttribute<NoMips16Attr>(S, D, Attr); break; case AttributeList::AT_AMDGPUNumVGPR: handleAMDGPUNumVGPRAttr(S, D, Attr); break; case AttributeList::AT_AMDGPUNumSGPR: handleAMDGPUNumSGPRAttr(S, D, Attr); break; case AttributeList::AT_IBAction: handleSimpleAttribute<IBActionAttr>(S, D, Attr); break; case AttributeList::AT_IBOutlet: handleIBOutlet(S, D, Attr); break; case AttributeList::AT_IBOutletCollection: handleIBOutletCollection(S, D, Attr); break; case AttributeList::AT_Alias: handleAliasAttr(S, D, Attr); break; case AttributeList::AT_Aligned: handleAlignedAttr(S, D, Attr); break; case AttributeList::AT_AlignValue: handleAlignValueAttr(S, D, Attr); break; case AttributeList::AT_AlwaysInline: handleAlwaysInlineAttr(S, D, Attr); break; case AttributeList::AT_AnalyzerNoReturn: handleAnalyzerNoReturnAttr(S, D, Attr); break; case AttributeList::AT_TLSModel: handleTLSModelAttr(S, D, Attr); break; case AttributeList::AT_Annotate: handleAnnotateAttr(S, D, Attr); break; case AttributeList::AT_Availability: handleAvailabilityAttr(S, D, Attr); break; case AttributeList::AT_CarriesDependency: handleDependencyAttr(S, scope, D, Attr); break; case AttributeList::AT_Common: handleCommonAttr(S, D, Attr); break; case AttributeList::AT_CUDAConstant: handleSimpleAttribute<CUDAConstantAttr>(S, D, Attr); break; case AttributeList::AT_Constructor: handleConstructorAttr(S, D, Attr); break; case AttributeList::AT_CXX11NoReturn: handleSimpleAttribute<CXX11NoReturnAttr>(S, D, Attr); break; case AttributeList::AT_Deprecated: handleDeprecatedAttr(S, D, Attr); break; case AttributeList::AT_Destructor: handleDestructorAttr(S, D, Attr); break; case AttributeList::AT_EnableIf: handleEnableIfAttr(S, D, Attr); break; case AttributeList::AT_ExtVectorType: handleExtVectorTypeAttr(S, scope, D, Attr); break; case AttributeList::AT_MinSize: handleMinSizeAttr(S, D, Attr); break; case AttributeList::AT_OptimizeNone: handleOptimizeNoneAttr(S, D, Attr); break; case AttributeList::AT_FlagEnum: handleSimpleAttribute<FlagEnumAttr>(S, D, Attr); break; case AttributeList::AT_Flatten: handleSimpleAttribute<FlattenAttr>(S, D, Attr); break; case AttributeList::AT_Format: handleFormatAttr(S, D, Attr); break; case AttributeList::AT_FormatArg: handleFormatArgAttr(S, D, Attr); break; case AttributeList::AT_CUDAGlobal: handleGlobalAttr(S, D, Attr); break; case AttributeList::AT_CUDADevice: handleSimpleAttribute<CUDADeviceAttr>(S, D, Attr); break; case AttributeList::AT_CUDAHost: handleSimpleAttribute<CUDAHostAttr>(S, D, Attr); break; case AttributeList::AT_GNUInline: handleGNUInlineAttr(S, D, Attr); break; case AttributeList::AT_CUDALaunchBounds: handleLaunchBoundsAttr(S, D, Attr); break; case AttributeList::AT_Restrict: handleRestrictAttr(S, D, Attr); break; case AttributeList::AT_MayAlias: handleSimpleAttribute<MayAliasAttr>(S, D, Attr); break; case AttributeList::AT_Mode: handleModeAttr(S, D, Attr); break; case AttributeList::AT_NoCommon: handleSimpleAttribute<NoCommonAttr>(S, D, Attr); break; case AttributeList::AT_NoSplitStack: handleSimpleAttribute<NoSplitStackAttr>(S, 
D, Attr); break; case AttributeList::AT_NonNull: if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(D)) handleNonNullAttrParameter(S, PVD, Attr); else handleNonNullAttr(S, D, Attr); break; case AttributeList::AT_ReturnsNonNull: handleReturnsNonNullAttr(S, D, Attr); break; case AttributeList::AT_AssumeAligned: handleAssumeAlignedAttr(S, D, Attr); break; case AttributeList::AT_Overloadable: handleSimpleAttribute<OverloadableAttr>(S, D, Attr); break; case AttributeList::AT_Ownership: handleOwnershipAttr(S, D, Attr); break; case AttributeList::AT_Cold: handleColdAttr(S, D, Attr); break; case AttributeList::AT_Hot: handleHotAttr(S, D, Attr); break; case AttributeList::AT_Naked: handleSimpleAttribute<NakedAttr>(S, D, Attr); break; case AttributeList::AT_NoReturn: handleNoReturnAttr(S, D, Attr); break; case AttributeList::AT_NoThrow: handleSimpleAttribute<NoThrowAttr>(S, D, Attr); break; case AttributeList::AT_CUDAShared: handleSimpleAttribute<CUDASharedAttr>(S, D, Attr); break; case AttributeList::AT_VecReturn: handleVecReturnAttr(S, D, Attr); break; case AttributeList::AT_ObjCOwnership: handleObjCOwnershipAttr(S, D, Attr); break; case AttributeList::AT_ObjCPreciseLifetime: handleObjCPreciseLifetimeAttr(S, D, Attr); break; case AttributeList::AT_ObjCReturnsInnerPointer: handleObjCReturnsInnerPointerAttr(S, D, Attr); break; case AttributeList::AT_ObjCRequiresSuper: handleObjCRequiresSuperAttr(S, D, Attr); break; case AttributeList::AT_ObjCBridge: handleObjCBridgeAttr(S, scope, D, Attr); break; case AttributeList::AT_ObjCBridgeMutable: handleObjCBridgeMutableAttr(S, scope, D, Attr); break; case AttributeList::AT_ObjCBridgeRelated: handleObjCBridgeRelatedAttr(S, scope, D, Attr); break; case AttributeList::AT_ObjCDesignatedInitializer: handleObjCDesignatedInitializer(S, D, Attr); break; case AttributeList::AT_ObjCRuntimeName: handleObjCRuntimeName(S, D, Attr); break; case AttributeList::AT_ObjCBoxable: handleObjCBoxable(S, D, Attr); break; case AttributeList::AT_CFAuditedTransfer: handleCFAuditedTransferAttr(S, D, Attr); break; case AttributeList::AT_CFUnknownTransfer: handleCFUnknownTransferAttr(S, D, Attr); break; case AttributeList::AT_CFConsumed: case AttributeList::AT_NSConsumed: handleNSConsumedAttr(S, D, Attr); break; case AttributeList::AT_NSConsumesSelf: handleSimpleAttribute<NSConsumesSelfAttr>(S, D, Attr); break; case AttributeList::AT_NSReturnsAutoreleased: case AttributeList::AT_NSReturnsNotRetained: case AttributeList::AT_CFReturnsNotRetained: case AttributeList::AT_NSReturnsRetained: case AttributeList::AT_CFReturnsRetained: handleNSReturnsRetainedAttr(S, D, Attr); break; case AttributeList::AT_WorkGroupSizeHint: handleWorkGroupSize<WorkGroupSizeHintAttr>(S, D, Attr); break; case AttributeList::AT_ReqdWorkGroupSize: handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, Attr); break; case AttributeList::AT_VecTypeHint: handleVecTypeHint(S, D, Attr); break; case AttributeList::AT_InitPriority: handleInitPriorityAttr(S, D, Attr); break; case AttributeList::AT_Packed: handlePackedAttr(S, D, Attr); break; case AttributeList::AT_Section: handleSectionAttr(S, D, Attr); break; case AttributeList::AT_Target: handleTargetAttr(S, D, Attr); break; case AttributeList::AT_Unavailable: handleAttrWithMessage<UnavailableAttr>(S, D, Attr); break; case AttributeList::AT_ArcWeakrefUnavailable: handleSimpleAttribute<ArcWeakrefUnavailableAttr>(S, D, Attr); break; case AttributeList::AT_ObjCRootClass: handleSimpleAttribute<ObjCRootClassAttr>(S, D, Attr); break; case AttributeList::AT_ObjCExplicitProtocolImpl: 
handleObjCSuppresProtocolAttr(S, D, Attr); break; case AttributeList::AT_ObjCRequiresPropertyDefs: handleSimpleAttribute<ObjCRequiresPropertyDefsAttr>(S, D, Attr); break; case AttributeList::AT_Unused: handleSimpleAttribute<UnusedAttr>(S, D, Attr); break; case AttributeList::AT_ReturnsTwice: handleSimpleAttribute<ReturnsTwiceAttr>(S, D, Attr); break; case AttributeList::AT_Used: handleUsedAttr(S, D, Attr); break; case AttributeList::AT_Visibility: handleVisibilityAttr(S, D, Attr, false); break; case AttributeList::AT_TypeVisibility: handleVisibilityAttr(S, D, Attr, true); break; case AttributeList::AT_WarnUnused: handleSimpleAttribute<WarnUnusedAttr>(S, D, Attr); break; case AttributeList::AT_WarnUnusedResult: handleWarnUnusedResult(S, D, Attr); break; case AttributeList::AT_Weak: handleSimpleAttribute<WeakAttr>(S, D, Attr); break; case AttributeList::AT_WeakRef: handleWeakRefAttr(S, D, Attr); break; case AttributeList::AT_WeakImport: handleWeakImportAttr(S, D, Attr); break; case AttributeList::AT_TransparentUnion: handleTransparentUnionAttr(S, D, Attr); break; case AttributeList::AT_ObjCException: handleSimpleAttribute<ObjCExceptionAttr>(S, D, Attr); break; case AttributeList::AT_ObjCMethodFamily: handleObjCMethodFamilyAttr(S, D, Attr); break; case AttributeList::AT_ObjCNSObject: handleObjCNSObject(S, D, Attr); break; case AttributeList::AT_ObjCIndependentClass: handleObjCIndependentClass(S, D, Attr); break; case AttributeList::AT_Blocks: handleBlocksAttr(S, D, Attr); break; case AttributeList::AT_Sentinel: handleSentinelAttr(S, D, Attr); break; case AttributeList::AT_Const: handleSimpleAttribute<ConstAttr>(S, D, Attr); break; case AttributeList::AT_Pure: handleSimpleAttribute<PureAttr>(S, D, Attr); break; case AttributeList::AT_Cleanup: handleCleanupAttr(S, D, Attr); break; case AttributeList::AT_NoDebug: handleNoDebugAttr(S, D, Attr); break; case AttributeList::AT_NoDuplicate: handleSimpleAttribute<NoDuplicateAttr>(S, D, Attr); break; case AttributeList::AT_NoInline: handleSimpleAttribute<NoInlineAttr>(S, D, Attr); break; case AttributeList::AT_NoInstrumentFunction: // Interacts with -pg. 
handleSimpleAttribute<NoInstrumentFunctionAttr>(S, D, Attr); break; case AttributeList::AT_StdCall: case AttributeList::AT_CDecl: case AttributeList::AT_FastCall: case AttributeList::AT_ThisCall: case AttributeList::AT_Pascal: case AttributeList::AT_VectorCall: case AttributeList::AT_MSABI: case AttributeList::AT_SysVABI: case AttributeList::AT_Pcs: case AttributeList::AT_IntelOclBicc: handleCallConvAttr(S, D, Attr); break; case AttributeList::AT_OpenCLKernel: handleSimpleAttribute<OpenCLKernelAttr>(S, D, Attr); break; case AttributeList::AT_OpenCLImageAccess: handleSimpleAttribute<OpenCLImageAccessAttr>(S, D, Attr); break; // Microsoft attributes: case AttributeList::AT_MSNoVTable: handleSimpleAttribute<MSNoVTableAttr>(S, D, Attr); break; case AttributeList::AT_MSStruct: handleSimpleAttribute<MSStructAttr>(S, D, Attr); break; case AttributeList::AT_Uuid: handleUuidAttr(S, D, Attr); break; case AttributeList::AT_MSInheritance: handleMSInheritanceAttr(S, D, Attr); break; case AttributeList::AT_SelectAny: handleSimpleAttribute<SelectAnyAttr>(S, D, Attr); break; case AttributeList::AT_Thread: handleDeclspecThreadAttr(S, D, Attr); break; // Thread safety attributes: case AttributeList::AT_AssertExclusiveLock: handleAssertExclusiveLockAttr(S, D, Attr); break; case AttributeList::AT_AssertSharedLock: handleAssertSharedLockAttr(S, D, Attr); break; case AttributeList::AT_GuardedVar: handleSimpleAttribute<GuardedVarAttr>(S, D, Attr); break; case AttributeList::AT_PtGuardedVar: handlePtGuardedVarAttr(S, D, Attr); break; case AttributeList::AT_ScopedLockable: handleSimpleAttribute<ScopedLockableAttr>(S, D, Attr); break; case AttributeList::AT_NoSanitize: handleNoSanitizeAttr(S, D, Attr); break; case AttributeList::AT_NoSanitizeSpecific: handleNoSanitizeSpecificAttr(S, D, Attr); break; case AttributeList::AT_NoThreadSafetyAnalysis: handleSimpleAttribute<NoThreadSafetyAnalysisAttr>(S, D, Attr); break; case AttributeList::AT_GuardedBy: handleGuardedByAttr(S, D, Attr); break; case AttributeList::AT_PtGuardedBy: handlePtGuardedByAttr(S, D, Attr); break; case AttributeList::AT_ExclusiveTrylockFunction: handleExclusiveTrylockFunctionAttr(S, D, Attr); break; case AttributeList::AT_LockReturned: handleLockReturnedAttr(S, D, Attr); break; case AttributeList::AT_LocksExcluded: handleLocksExcludedAttr(S, D, Attr); break; case AttributeList::AT_SharedTrylockFunction: handleSharedTrylockFunctionAttr(S, D, Attr); break; case AttributeList::AT_AcquiredBefore: handleAcquiredBeforeAttr(S, D, Attr); break; case AttributeList::AT_AcquiredAfter: handleAcquiredAfterAttr(S, D, Attr); break; // Capability analysis attributes. case AttributeList::AT_Capability: case AttributeList::AT_Lockable: handleCapabilityAttr(S, D, Attr); break; case AttributeList::AT_RequiresCapability: handleRequiresCapabilityAttr(S, D, Attr); break; case AttributeList::AT_AssertCapability: handleAssertCapabilityAttr(S, D, Attr); break; case AttributeList::AT_AcquireCapability: handleAcquireCapabilityAttr(S, D, Attr); break; case AttributeList::AT_ReleaseCapability: handleReleaseCapabilityAttr(S, D, Attr); break; case AttributeList::AT_TryAcquireCapability: handleTryAcquireCapabilityAttr(S, D, Attr); break; // Consumed analysis attributes. 
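// Editor's sketch (hedged; 'Handle', 'close', and 'read' are hypothetical
// names): the consumed-analysis handlers dispatched below correspond to
// user-level spellings such as:
//
//   class __attribute__((consumable(unconsumed))) Handle {
//     void close() __attribute__((set_typestate(consumed)));
//     int  read()  __attribute__((callable_when(unconsumed)));
//   };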
case AttributeList::AT_Consumable: handleConsumableAttr(S, D, Attr); break; case AttributeList::AT_ConsumableAutoCast: handleSimpleAttribute<ConsumableAutoCastAttr>(S, D, Attr); break; case AttributeList::AT_ConsumableSetOnRead: handleSimpleAttribute<ConsumableSetOnReadAttr>(S, D, Attr); break; case AttributeList::AT_CallableWhen: handleCallableWhenAttr(S, D, Attr); break; case AttributeList::AT_ParamTypestate: handleParamTypestateAttr(S, D, Attr); break; case AttributeList::AT_ReturnTypestate: handleReturnTypestateAttr(S, D, Attr); break; case AttributeList::AT_SetTypestate: handleSetTypestateAttr(S, D, Attr); break; case AttributeList::AT_TestTypestate: handleTestTypestateAttr(S, D, Attr); break; // Type safety attributes. case AttributeList::AT_ArgumentWithTypeTag: handleArgumentWithTypeTagAttr(S, D, Attr); break; case AttributeList::AT_TypeTagForDatatype: handleTypeTagForDatatypeAttr(S, D, Attr); break; } } /// ProcessDeclAttributeList - Apply all the decl attributes in the specified /// attribute list to the specified decl, ignoring any type attributes. void Sema::ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AttrList, bool IncludeCXX11Attributes) { for (const AttributeList* l = AttrList; l; l = l->getNext()) ProcessDeclAttribute(*this, S, D, *l, IncludeCXX11Attributes); // FIXME: We should be able to handle these cases in TableGen. // GCC accepts // static int a9 __attribute__((weakref)); // but that looks really pointless. We reject it. if (D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) { Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias) << cast<NamedDecl>(D); D->dropAttr<WeakRefAttr>(); return; } // FIXME: We should be able to handle this in TableGen as well. It would be // good to have a way to specify "these attributes must appear as a group", // for these. Additionally, it would be good to have a way to specify "these // attributes must never appear as a group" for attributes like cold and hot. if (!D->hasAttr<OpenCLKernelAttr>()) { // These attributes cannot be applied to a non-kernel function. if (Attr *A = D->getAttr<ReqdWorkGroupSizeAttr>()) { // FIXME: This emits a different error message than // diag::err_attribute_wrong_decl_type + ExpectedKernelFunction. Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A; D->setInvalidDecl(); } else if (Attr *A = D->getAttr<WorkGroupSizeHintAttr>()) { Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A; D->setInvalidDecl(); } else if (Attr *A = D->getAttr<VecTypeHintAttr>()) { Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A; D->setInvalidDecl(); } else if (Attr *A = D->getAttr<AMDGPUNumVGPRAttr>()) { Diag(D->getLocation(), diag::err_attribute_wrong_decl_type) << A << ExpectedKernelFunction; D->setInvalidDecl(); } else if (Attr *A = D->getAttr<AMDGPUNumSGPRAttr>()) { Diag(D->getLocation(), diag::err_attribute_wrong_decl_type) << A << ExpectedKernelFunction; D->setInvalidDecl(); } } } // Annotation attributes are the only attributes allowed after an access // specifier.
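// Editor's illustration (hedged; 'note' and 'C' are hypothetical): the
// check below accepts GNU annotate attributes placed on an access
// specifier and diagnoses anything else there, e.g.:
//
//   class C {
//   public __attribute__((annotate("note"))):   // OK: annotate is allowed
//     int X;
//   };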
bool Sema::ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList) { for (const AttributeList* l = AttrList; l; l = l->getNext()) { if (l->getKind() == AttributeList::AT_Annotate) { ProcessDeclAttribute(*this, nullptr, ASDecl, *l, l->isCXX11Attribute()); } else { Diag(l->getLoc(), diag::err_only_annotate_after_access_spec); return true; } } return false; } /// checkUnusedDeclAttributes - Check a list of attributes to see if it /// contains any decl attributes that we should warn about. static void checkUnusedDeclAttributes(Sema &S, const AttributeList *A) { for ( ; A; A = A->getNext()) { // Only warn if the attribute is an unignored, non-type attribute. if (A->isUsedAsTypeAttr() || A->isInvalid()) continue; if (A->getKind() == AttributeList::IgnoredAttribute) continue; if (A->getKind() == AttributeList::UnknownAttribute) { S.Diag(A->getLoc(), diag::warn_unknown_attribute_ignored) << A->getName() << A->getRange(); } else { S.Diag(A->getLoc(), diag::warn_attribute_not_on_decl) << A->getName() << A->getRange(); } } } /// checkUnusedDeclAttributes - Given a declarator which is not being /// used to build a declaration, complain about any decl attributes /// which might be lying around on it. void Sema::checkUnusedDeclAttributes(Declarator &D) { ::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes().getList()); ::checkUnusedDeclAttributes(*this, D.getAttributes()); for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) ::checkUnusedDeclAttributes(*this, D.getTypeObject(i).getAttrs()); } /// DeclClonePragmaWeak - clone existing decl (maybe definition), /// \#pragma weak needs a non-definition decl and source may not have one. NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc) { assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND)); NamedDecl *NewD = nullptr; if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { FunctionDecl *NewFD; // FIXME: Missing call to CheckFunctionDeclaration(). // FIXME: Mangling? // FIXME: Is the qualifier info correct? // FIXME: Is the DeclContext correct? NewFD = FunctionDecl::Create(FD->getASTContext(), FD->getDeclContext(), Loc, Loc, DeclarationName(II), FD->getType(), FD->getTypeSourceInfo(), SC_None, false/*isInlineSpecified*/, FD->hasPrototype(), false/*isConstexprSpecified*/); NewD = NewFD; if (FD->getQualifier()) NewFD->setQualifierInfo(FD->getQualifierLoc()); // Fake up parameter variables; they are declared as if this were // a typedef. QualType FDTy = FD->getType(); if (const FunctionProtoType *FT = FDTy->getAs<FunctionProtoType>()) { SmallVector<ParmVarDecl*, 16> Params; for (const auto &AI : FT->param_types()) { ParmVarDecl *Param = BuildParmVarDeclForTypedef(NewFD, Loc, AI); Param->setScopeInfo(0, Params.size()); Params.push_back(Param); } NewFD->setParams(Params); } } else if (VarDecl *VD = dyn_cast<VarDecl>(ND)) { NewD = VarDecl::Create(VD->getASTContext(), VD->getDeclContext(), VD->getInnerLocStart(), VD->getLocation(), II, VD->getType(), VD->getTypeSourceInfo(), VD->getStorageClass()); if (VD->getQualifier()) { VarDecl *NewVD = cast<VarDecl>(NewD); NewVD->setQualifierInfo(VD->getQualifierLoc()); } } return NewD; } /// DeclApplyPragmaWeak - A declaration (maybe definition) needs \#pragma weak /// applied to it, possibly with an alias. 
void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) { if (W.getUsed()) return; // only do this once W.setUsed(true); if (W.getAlias()) { // clone decl, impersonate __attribute(weak,alias(...)) IdentifierInfo *NDId = ND->getIdentifier(); NamedDecl *NewD = DeclClonePragmaWeak(ND, W.getAlias(), W.getLocation()); NewD->addAttr(AliasAttr::CreateImplicit(Context, NDId->getName(), W.getLocation())); NewD->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation())); WeakTopLevelDecl.push_back(NewD); // FIXME: "hideous" code from Sema::LazilyCreateBuiltin // to insert Decl at TU scope, sorry. DeclContext *SavedContext = CurContext; CurContext = Context.getTranslationUnitDecl(); NewD->setDeclContext(CurContext); NewD->setLexicalDeclContext(CurContext); PushOnScopeChains(NewD, S); CurContext = SavedContext; } else { // just add weak to existing ND->addAttr(WeakAttr::CreateImplicit(Context, W.getLocation())); } } void Sema::ProcessPragmaWeak(Scope *S, Decl *D) { // It's valid to "forward-declare" #pragma weak, in which case we // have to do this. LoadExternalWeakUndeclaredIdentifiers(); if (!WeakUndeclaredIdentifiers.empty()) { NamedDecl *ND = nullptr; if (VarDecl *VD = dyn_cast<VarDecl>(D)) if (VD->isExternC()) ND = VD; if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) if (FD->isExternC()) ND = FD; if (ND) { if (IdentifierInfo *Id = ND->getIdentifier()) { auto I = WeakUndeclaredIdentifiers.find(Id); if (I != WeakUndeclaredIdentifiers.end()) { WeakInfo W = I->second; DeclApplyPragmaWeak(S, ND, W); WeakUndeclaredIdentifiers[Id] = W; } } } } } /// ProcessDeclAttributes - Given a declarator (PD) with attributes indicated in /// it, apply them to D. This is a bit tricky because PD can have attributes /// specified in many different places, and we need to find and apply them all. void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) { // Apply decl attributes from the DeclSpec if present. if (const AttributeList *Attrs = PD.getDeclSpec().getAttributes().getList()) ProcessDeclAttributeList(S, D, Attrs); // Walk the declarator structure, applying decl attributes that were in a type // position to the decl itself. This handles cases like: // int *__attr__(x)** D; // when X is a decl attribute. for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i) if (const AttributeList *Attrs = PD.getTypeObject(i).getAttrs()) ProcessDeclAttributeList(S, D, Attrs, /*IncludeCXX11Attributes=*/false); // Finally, apply any attributes on the decl itself. if (const AttributeList *Attrs = PD.getAttributes()) ProcessDeclAttributeList(S, D, Attrs); // HLSL Change Starts - Get custom attributes from our declarator object and append to AST Decl object for (std::vector<InheritableAttr*>::const_iterator i = PD.customAttributesList.begin(), e = PD.customAttributesList.end(); i != e; ++i) { D->addAttr(*i); } // HLSL Change Ends } /// Is the given declaration allowed to use a forbidden type? static bool isForbiddenTypeAllowed(Sema &S, Decl *decl) { // Private ivars are always okay. Unfortunately, people don't // always properly make their ivars private, even in system headers. // Plus we need to make fields okay, too. // Function declarations in sys headers will be marked unavailable. if (!isa<FieldDecl>(decl) && !isa<ObjCPropertyDecl>(decl) && !isa<FunctionDecl>(decl)) return false; // Require it to be declared in a system header. return S.Context.getSourceManager().isInSystemHeader(decl->getLocation()); } /// Handle a delayed forbidden-type diagnostic. 
static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag, Decl *decl) { if (decl && isForbiddenTypeAllowed(S, decl)) { decl->addAttr(UnavailableAttr::CreateImplicit(S.Context, "this system declaration uses an unsupported type", diag.Loc)); return; } if (S.getLangOpts().ObjCAutoRefCount) if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(decl)) { // FIXME: we may want to suppress diagnostics for all // kind of forbidden type messages on unavailable functions. if (FD->hasAttr<UnavailableAttr>() && diag.getForbiddenTypeDiagnostic() == diag::err_arc_array_param_no_ownership) { diag.Triggered = true; return; } } S.Diag(diag.Loc, diag.getForbiddenTypeDiagnostic()) << diag.getForbiddenTypeOperand() << diag.getForbiddenTypeArgument(); diag.Triggered = true; } static bool isDeclDeprecated(Decl *D) { do { if (D->isDeprecated()) return true; // A category implicitly has the availability of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D)) if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface()) return Interface->isDeprecated(); } while ((D = cast_or_null<Decl>(D->getDeclContext()))); return false; } static bool isDeclUnavailable(Decl *D) { do { if (D->isUnavailable()) return true; // A category implicitly has the availability of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D)) if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface()) return Interface->isUnavailable(); } while ((D = cast_or_null<Decl>(D->getDeclContext()))); return false; } static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K, Decl *Ctx, const NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess) { // Diagnostics for deprecated or unavailable. unsigned diag, diag_message, diag_fwdclass_message; // Matches 'diag::note_property_attribute' options. unsigned property_note_select; // Matches diag::note_availability_specified_here. unsigned available_here_select_kind; // Don't warn if our current context is deprecated or unavailable. switch (K) { case Sema::AD_Deprecation: if (isDeclDeprecated(Ctx) || isDeclUnavailable(Ctx)) return; diag = !ObjCPropertyAccess ? diag::warn_deprecated : diag::warn_property_method_deprecated; diag_message = diag::warn_deprecated_message; diag_fwdclass_message = diag::warn_deprecated_fwdclass_message; property_note_select = /* deprecated */ 0; available_here_select_kind = /* deprecated */ 2; break; case Sema::AD_Unavailable: if (isDeclUnavailable(Ctx)) return; diag = !ObjCPropertyAccess ? 
diag::err_unavailable : diag::err_property_method_unavailable; diag_message = diag::err_unavailable_message; diag_fwdclass_message = diag::warn_unavailable_fwdclass_message; property_note_select = /* unavailable */ 1; available_here_select_kind = /* unavailable */ 0; break; case Sema::AD_Partial: diag = diag::warn_partial_availability; diag_message = diag::warn_partial_message; diag_fwdclass_message = diag::warn_partial_fwdclass_message; property_note_select = /* partial */ 2; available_here_select_kind = /* partial */ 3; break; } if (!Message.empty()) { S.Diag(Loc, diag_message) << D << Message; if (ObjCProperty) S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute) << ObjCProperty->getDeclName() << property_note_select; } else if (!UnknownObjCClass) { S.Diag(Loc, diag) << D; if (ObjCProperty) S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute) << ObjCProperty->getDeclName() << property_note_select; } else { S.Diag(Loc, diag_fwdclass_message) << D; S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class); } S.Diag(D->getLocation(), diag::note_availability_specified_here) << D << available_here_select_kind; if (K == Sema::AD_Partial) S.Diag(Loc, diag::note_partial_availability_silence) << D; } static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD, Decl *Ctx) { assert(DD.Kind == DelayedDiagnostic::Deprecation || DD.Kind == DelayedDiagnostic::Unavailable); Sema::AvailabilityDiagnostic AD = DD.Kind == DelayedDiagnostic::Deprecation ? Sema::AD_Deprecation : Sema::AD_Unavailable; DD.Triggered = true; DoEmitAvailabilityWarning( S, AD, Ctx, DD.getDeprecationDecl(), DD.getDeprecationMessage(), DD.Loc, DD.getUnknownObjCClass(), DD.getObjCProperty(), false); } void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) { assert(DelayedDiagnostics.getCurrentPool()); DelayedDiagnosticPool &poppedPool = *DelayedDiagnostics.getCurrentPool(); DelayedDiagnostics.popWithoutEmitting(state); // When delaying diagnostics to run in the context of a parsed // declaration, we only want to actually emit anything if parsing // succeeds. if (!decl) return; // We emit all the active diagnostics in this pool or any of its // parents. In general, we'll get one pool for the decl spec // and a child pool for each declarator; in a decl group like: // deprecated_typedef foo, *bar, baz(); // only the declarator pops will be passed decls. This is correct; // we really do need to consider delayed diagnostics from the decl spec // for each of the different declarations. const DelayedDiagnosticPool *pool = &poppedPool; do { for (DelayedDiagnosticPool::pool_iterator i = pool->pool_begin(), e = pool->pool_end(); i != e; ++i) { // This const_cast is a bit lame. Really, Triggered should be mutable. DelayedDiagnostic &diag = const_cast<DelayedDiagnostic&>(*i); if (diag.Triggered) continue; switch (diag.Kind) { case DelayedDiagnostic::Deprecation: case DelayedDiagnostic::Unavailable: // Don't bother giving deprecation/unavailable diagnostics if // the decl is invalid. if (!decl->isInvalidDecl()) handleDelayedAvailabilityCheck(*this, diag, decl); break; case DelayedDiagnostic::Access: HandleDelayedAccessCheck(diag, decl); break; case DelayedDiagnostic::ForbiddenType: handleDelayedForbiddenType(*this, diag, decl); break; } } } while ((pool = pool->getParent())); } /// Given a set of delayed diagnostics, re-emit them as if they had /// been delayed in the current context instead of in the given pool. /// Essentially, this just moves them to the current pool. 
void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) { DelayedDiagnosticPool *curPool = DelayedDiagnostics.getCurrentPool(); assert(curPool && "re-emitting in undelayed context not supported"); curPool->steal(pool); } void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess) { // Delay if we're currently parsing a declaration. if (DelayedDiagnostics.shouldDelayDiagnostics() && AD != AD_Partial) { DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability( AD, Loc, D, UnknownObjCClass, ObjCProperty, Message, ObjCPropertyAccess)); return; } Decl *Ctx = cast<Decl>(getCurLexicalContext()); DoEmitAvailabilityWarning(*this, AD, Ctx, D, Message, Loc, UnknownObjCClass, ObjCProperty, ObjCPropertyAccess); }
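// ---------------------------------------------------------------------------
// Editor's sketch (hedged; the identifiers below are hypothetical, not from
// this file): the availability machinery above produces diagnostics for uses
// of declarations such as:
//
//   void OldApi() __attribute__((deprecated("use NewApi instead")));
//   void Gone()   __attribute__((unavailable("removed")));
//
//   void Caller() {
//     OldApi();   // warning: 'OldApi' is deprecated: use NewApi instead
//     Gone();     // error: 'Gone' is unavailable: removed
//   }
// ---------------------------------------------------------------------------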
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/CMakeLists.txt
set(LLVM_LINK_COMPONENTS HLSL # SemaHLSL.cpp references {Has|Get}UnsignedOpcode() Support ) # HLSL Change Begin add_hlsl_hctgen(DxilIntrinsicTables BUILD_DIR OUTPUT gen_intrin_main_tables_15.h) include_directories(${CMAKE_CURRENT_BINARY_DIR}) list(APPEND LLVM_COMMON_DEPENDS DxilIntrinsicTables) # HLSL Change End add_clang_library(clangSema AnalysisBasedWarnings.cpp AttributeList.cpp CodeCompleteConsumer.cpp DeclSpec.cpp DelayedDiagnostic.cpp IdentifierResolver.cpp JumpDiagnostics.cpp MultiplexExternalSemaSource.cpp Scope.cpp ScopeInfo.cpp Sema.cpp SemaAccess.cpp SemaAttr.cpp SemaCXXScopeSpec.cpp SemaCast.cpp SemaChecking.cpp SemaCodeComplete.cpp SemaConsumer.cpp SemaCUDA.cpp SemaDecl.cpp SemaDeclAttr.cpp SemaDeclCXX.cpp SemaDeclObjC.cpp SemaDXR.cpp SemaExceptionSpec.cpp SemaExpr.cpp SemaExprCXX.cpp SemaExprMember.cpp SemaExprObjC.cpp SemaFixItUtils.cpp SemaHLSL.cpp # HLSL Change SemaHLSLDiagnoseTU.cpp # HLSL Change SemaInit.cpp SemaLambda.cpp SemaLookup.cpp SemaObjCProperty.cpp SemaOpenMP.cpp SemaOverload.cpp SemaPseudoObject.cpp SemaStmt.cpp SemaStmtAsm.cpp SemaStmtAttr.cpp SemaTemplate.cpp SemaTemplateDeduction.cpp SemaTemplateInstantiate.cpp SemaTemplateInstantiateDecl.cpp SemaTemplateVariadic.cpp SemaType.cpp TypeLocBuilder.cpp LINK_LIBS clangAST clangAnalysis clangBasic clangEdit clangLex ) # HLSL Change Begin # Sema got too big for debug builds on arm64ec, which means it will hit # other targets too eventually. if(WIN32) if(MSVC) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wa,-mbig-obj") endif() endif(WIN32) # HLSL Change End
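# Editor's note (hedged sketch, not part of the original build script): a
# per-target alternative to appending to CMAKE_CXX_FLAGS globally would be
# target_compile_options on the library defined above, e.g.:
#
#   if(MSVC)
#     target_compile_options(clangSema PRIVATE /bigobj)
#   endif()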
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaAccess.cpp
//===---- SemaAccess.cpp - C++ Access Control -------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file provides Sema routines for C++ access control semantics. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DependentDiagnostic.h" #include "clang/AST/ExprCXX.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" using namespace clang; using namespace sema; /// A copy of Sema's enum without AR_delayed. enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent }; /// SetMemberAccessSpecifier - Set the access specifier of a member. /// Returns true on error (when the previous member decl access specifier /// is different from the new member decl access specifier). bool Sema::SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS) { if (!PrevMemberDecl) { // Use the lexical access specifier. MemberDecl->setAccess(LexicalAS); return false; } // C++ [class.access.spec]p3: When a member is redeclared its access // specifier must be same as its initial declaration. if (LexicalAS != AS_none && LexicalAS != PrevMemberDecl->getAccess()) { Diag(MemberDecl->getLocation(), diag::err_class_redeclared_with_different_access) << MemberDecl << LexicalAS; Diag(PrevMemberDecl->getLocation(), diag::note_previous_access_declaration) << PrevMemberDecl << PrevMemberDecl->getAccess(); MemberDecl->setAccess(LexicalAS); return true; } MemberDecl->setAccess(PrevMemberDecl->getAccess()); return false; } static CXXRecordDecl *FindDeclaringClass(NamedDecl *D) { DeclContext *DC = D->getDeclContext(); // This can only happen at top: enum decls only "publish" their // immediate members. if (isa<EnumDecl>(DC)) DC = cast<EnumDecl>(DC)->getDeclContext(); CXXRecordDecl *DeclaringClass = cast<CXXRecordDecl>(DC); while (DeclaringClass->isAnonymousStructOrUnion()) DeclaringClass = cast<CXXRecordDecl>(DeclaringClass->getDeclContext()); return DeclaringClass; } namespace { struct EffectiveContext { EffectiveContext() : Inner(nullptr), Dependent(false) {} explicit EffectiveContext(DeclContext *DC) : Inner(DC), Dependent(DC->isDependentContext()) { // C++11 [class.access.nest]p1: // A nested class is a member and as such has the same access // rights as any other member. // C++11 [class.access]p2: // A member of a class can also access all the names to which // the class has access. A local class of a member function // may access the same names that the member function itself // may access. // This almost implies that the privileges of nesting are transitive. // Technically it says nothing about the local classes of non-member // functions (which can gain privileges through friendship), but we // take that as an oversight. while (true) { // We want to add canonical declarations to the EC lists for // simplicity of checking, but we need to walk up through the // actual current DC chain. Otherwise, something like a local // extern or friend which happens to be the canonical // declaration will really mess us up. 
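// Editor's illustration (hedged; the names below are hypothetical): for an
// access occurring inside N::C::f, this walk collects the canonical
// declarations Functions = { f } and Records = { C }, then stops at the
// enclosing namespace/file context:
//
//   namespace N { struct C { void f(); }; }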
if (isa<CXXRecordDecl>(DC)) { CXXRecordDecl *Record = cast<CXXRecordDecl>(DC); Records.push_back(Record->getCanonicalDecl()); DC = Record->getDeclContext(); } else if (isa<FunctionDecl>(DC)) { FunctionDecl *Function = cast<FunctionDecl>(DC); Functions.push_back(Function->getCanonicalDecl()); if (Function->getFriendObjectKind()) DC = Function->getLexicalDeclContext(); else DC = Function->getDeclContext(); } else if (DC->isFileContext()) { break; } else { DC = DC->getParent(); } } } bool isDependent() const { return Dependent; } bool includesClass(const CXXRecordDecl *R) const { R = R->getCanonicalDecl(); return std::find(Records.begin(), Records.end(), R) != Records.end(); } /// Retrieves the innermost "useful" context. Can be null if we're /// doing access-control without privileges. DeclContext *getInnerContext() const { return Inner; } typedef SmallVectorImpl<CXXRecordDecl*>::const_iterator record_iterator; DeclContext *Inner; SmallVector<FunctionDecl*, 4> Functions; SmallVector<CXXRecordDecl*, 4> Records; bool Dependent; }; /// Like sema::AccessedEntity, but kindly lets us scribble all over /// it. struct AccessTarget : public AccessedEntity { AccessTarget(const AccessedEntity &Entity) : AccessedEntity(Entity) { initialize(); } AccessTarget(ASTContext &Context, MemberNonce _, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, QualType BaseObjectType) : AccessedEntity(Context.getDiagAllocator(), Member, NamingClass, FoundDecl, BaseObjectType) { initialize(); } AccessTarget(ASTContext &Context, BaseNonce _, CXXRecordDecl *BaseClass, CXXRecordDecl *DerivedClass, AccessSpecifier Access) : AccessedEntity(Context.getDiagAllocator(), Base, BaseClass, DerivedClass, Access) { initialize(); } bool isInstanceMember() const { return (isMemberAccess() && getTargetDecl()->isCXXInstanceMember()); } bool hasInstanceContext() const { return HasInstanceContext; } class SavedInstanceContext { public: ~SavedInstanceContext() { Target.HasInstanceContext = Has; } private: friend struct AccessTarget; explicit SavedInstanceContext(AccessTarget &Target) : Target(Target), Has(Target.HasInstanceContext) {} AccessTarget &Target; bool Has; }; SavedInstanceContext saveInstanceContext() { return SavedInstanceContext(*this); } void suppressInstanceContext() { HasInstanceContext = false; } const CXXRecordDecl *resolveInstanceContext(Sema &S) const { assert(HasInstanceContext); if (CalculatedInstanceContext) return InstanceContext; CalculatedInstanceContext = true; DeclContext *IC = S.computeDeclContext(getBaseObjectType()); InstanceContext = (IC ? cast<CXXRecordDecl>(IC)->getCanonicalDecl() : nullptr); return InstanceContext; } const CXXRecordDecl *getDeclaringClass() const { return DeclaringClass; } /// The "effective" naming class is the canonical non-anonymous /// class containing the actual naming class. 
const CXXRecordDecl *getEffectiveNamingClass() const { const CXXRecordDecl *namingClass = getNamingClass(); while (namingClass->isAnonymousStructOrUnion()) namingClass = cast<CXXRecordDecl>(namingClass->getParent()); return namingClass->getCanonicalDecl(); } private: void initialize() { HasInstanceContext = (isMemberAccess() && !getBaseObjectType().isNull() && getTargetDecl()->isCXXInstanceMember()); CalculatedInstanceContext = false; InstanceContext = nullptr; if (isMemberAccess()) DeclaringClass = FindDeclaringClass(getTargetDecl()); else DeclaringClass = getBaseClass(); DeclaringClass = DeclaringClass->getCanonicalDecl(); } bool HasInstanceContext : 1; mutable bool CalculatedInstanceContext : 1; mutable const CXXRecordDecl *InstanceContext; const CXXRecordDecl *DeclaringClass; }; } /// Checks whether one class might instantiate to the other. static bool MightInstantiateTo(const CXXRecordDecl *From, const CXXRecordDecl *To) { // Declaration names are always preserved by instantiation. if (From->getDeclName() != To->getDeclName()) return false; const DeclContext *FromDC = From->getDeclContext()->getPrimaryContext(); const DeclContext *ToDC = To->getDeclContext()->getPrimaryContext(); if (FromDC == ToDC) return true; if (FromDC->isFileContext() || ToDC->isFileContext()) return false; // Be conservative. return true; } /// Checks whether one class is derived from another, inclusively. /// Properly indicates when it couldn't be determined due to /// dependence. /// /// This should probably be donated to AST or at least Sema. static AccessResult IsDerivedFromInclusive(const CXXRecordDecl *Derived, const CXXRecordDecl *Target) { assert(Derived->getCanonicalDecl() == Derived); assert(Target->getCanonicalDecl() == Target); if (Derived == Target) return AR_accessible; bool CheckDependent = Derived->isDependentContext(); if (CheckDependent && MightInstantiateTo(Derived, Target)) return AR_dependent; AccessResult OnFailure = AR_inaccessible; SmallVector<const CXXRecordDecl*, 8> Queue; // actually a stack while (true) { if (Derived->isDependentContext() && !Derived->hasDefinition()) return AR_dependent; for (const auto &I : Derived->bases()) { const CXXRecordDecl *RD; QualType T = I.getType(); if (const RecordType *RT = T->getAs<RecordType>()) { RD = cast<CXXRecordDecl>(RT->getDecl()); } else if (const InjectedClassNameType *IT = T->getAs<InjectedClassNameType>()) { RD = IT->getDecl(); } else { assert(T->isDependentType() && "non-dependent base wasn't a record?"); OnFailure = AR_dependent; continue; } RD = RD->getCanonicalDecl(); if (RD == Target) return AR_accessible; if (CheckDependent && MightInstantiateTo(RD, Target)) OnFailure = AR_dependent; Queue.push_back(RD); } if (Queue.empty()) break; Derived = Queue.pop_back_val(); } return OnFailure; } static bool MightInstantiateTo(Sema &S, DeclContext *Context, DeclContext *Friend) { if (Friend == Context) return true; assert(!Friend->isDependentContext() && "can't handle friends with dependent contexts here"); if (!Context->isDependentContext()) return false; if (Friend->isFileContext()) return false; // TODO: this is very conservative return true; } // Asks whether the type in 'context' can ever instantiate to the type // in 'friend'. static bool MightInstantiateTo(Sema &S, CanQualType Context, CanQualType Friend) { if (Friend == Context) return true; if (!Friend->isDependentType() && !Context->isDependentType()) return false; // TODO: this is very conservative. 
return true; } static bool MightInstantiateTo(Sema &S, FunctionDecl *Context, FunctionDecl *Friend) { if (Context->getDeclName() != Friend->getDeclName()) return false; if (!MightInstantiateTo(S, Context->getDeclContext(), Friend->getDeclContext())) return false; CanQual<FunctionProtoType> FriendTy = S.Context.getCanonicalType(Friend->getType()) ->getAs<FunctionProtoType>(); CanQual<FunctionProtoType> ContextTy = S.Context.getCanonicalType(Context->getType()) ->getAs<FunctionProtoType>(); // There isn't any way that I know of to add qualifiers // during instantiation. if (FriendTy.getQualifiers() != ContextTy.getQualifiers()) return false; if (FriendTy->getNumParams() != ContextTy->getNumParams()) return false; if (!MightInstantiateTo(S, ContextTy->getReturnType(), FriendTy->getReturnType())) return false; for (unsigned I = 0, E = FriendTy->getNumParams(); I != E; ++I) if (!MightInstantiateTo(S, ContextTy->getParamType(I), FriendTy->getParamType(I))) return false; return true; } static bool MightInstantiateTo(Sema &S, FunctionTemplateDecl *Context, FunctionTemplateDecl *Friend) { return MightInstantiateTo(S, Context->getTemplatedDecl(), Friend->getTemplatedDecl()); } static AccessResult MatchesFriend(Sema &S, const EffectiveContext &EC, const CXXRecordDecl *Friend) { if (EC.includesClass(Friend)) return AR_accessible; if (EC.isDependent()) { CanQualType FriendTy = S.Context.getCanonicalType(S.Context.getTypeDeclType(Friend)); for (EffectiveContext::record_iterator I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) { CanQualType ContextTy = S.Context.getCanonicalType(S.Context.getTypeDeclType(*I)); if (MightInstantiateTo(S, ContextTy, FriendTy)) return AR_dependent; } } return AR_inaccessible; } static AccessResult MatchesFriend(Sema &S, const EffectiveContext &EC, CanQualType Friend) { if (const RecordType *RT = Friend->getAs<RecordType>()) return MatchesFriend(S, EC, cast<CXXRecordDecl>(RT->getDecl())); // TODO: we can do better than this if (Friend->isDependentType()) return AR_dependent; return AR_inaccessible; } /// Determines whether the given friend class template matches /// anything in the effective context. static AccessResult MatchesFriend(Sema &S, const EffectiveContext &EC, ClassTemplateDecl *Friend) { AccessResult OnFailure = AR_inaccessible; // Check whether the friend is the template of a class in the // context chain. for (SmallVectorImpl<CXXRecordDecl*>::const_iterator I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) { CXXRecordDecl *Record = *I; // Figure out whether the current class has a template: ClassTemplateDecl *CTD; // A specialization of the template... if (isa<ClassTemplateSpecializationDecl>(Record)) { CTD = cast<ClassTemplateSpecializationDecl>(Record) ->getSpecializedTemplate(); // ... or the template pattern itself. } else { CTD = Record->getDescribedClassTemplate(); if (!CTD) continue; } // It's a match. if (Friend == CTD->getCanonicalDecl()) return AR_accessible; // If the context isn't dependent, it can't be a dependent match. if (!EC.isDependent()) continue; // If the template names don't match, it can't be a dependent // match. if (CTD->getDeclName() != Friend->getDeclName()) continue; // If the class's context can't instantiate to the friend's // context, it can't be a dependent match. if (!MightInstantiateTo(S, CTD->getDeclContext(), Friend->getDeclContext())) continue; // Otherwise, it's a dependent match. 
OnFailure = AR_dependent; } return OnFailure; } /// Determines whether the given friend function matches anything in /// the effective context. static AccessResult MatchesFriend(Sema &S, const EffectiveContext &EC, FunctionDecl *Friend) { AccessResult OnFailure = AR_inaccessible; for (SmallVectorImpl<FunctionDecl*>::const_iterator I = EC.Functions.begin(), E = EC.Functions.end(); I != E; ++I) { if (Friend == *I) return AR_accessible; if (EC.isDependent() && MightInstantiateTo(S, *I, Friend)) OnFailure = AR_dependent; } return OnFailure; } /// Determines whether the given friend function template matches /// anything in the effective context. static AccessResult MatchesFriend(Sema &S, const EffectiveContext &EC, FunctionTemplateDecl *Friend) { if (EC.Functions.empty()) return AR_inaccessible; AccessResult OnFailure = AR_inaccessible; for (SmallVectorImpl<FunctionDecl*>::const_iterator I = EC.Functions.begin(), E = EC.Functions.end(); I != E; ++I) { FunctionTemplateDecl *FTD = (*I)->getPrimaryTemplate(); if (!FTD) FTD = (*I)->getDescribedFunctionTemplate(); if (!FTD) continue; FTD = FTD->getCanonicalDecl(); if (Friend == FTD) return AR_accessible; if (EC.isDependent() && MightInstantiateTo(S, FTD, Friend)) OnFailure = AR_dependent; } return OnFailure; } /// Determines whether the given friend declaration matches anything /// in the effective context. static AccessResult MatchesFriend(Sema &S, const EffectiveContext &EC, FriendDecl *FriendD) { // Whitelist accesses if there's an invalid or unsupported friend // declaration. if (FriendD->isInvalidDecl() || FriendD->isUnsupportedFriend()) return AR_accessible; if (TypeSourceInfo *T = FriendD->getFriendType()) return MatchesFriend(S, EC, T->getType()->getCanonicalTypeUnqualified()); NamedDecl *Friend = cast<NamedDecl>(FriendD->getFriendDecl()->getCanonicalDecl()); // FIXME: declarations with dependent or templated scope. if (isa<ClassTemplateDecl>(Friend)) return MatchesFriend(S, EC, cast<ClassTemplateDecl>(Friend)); if (isa<FunctionTemplateDecl>(Friend)) return MatchesFriend(S, EC, cast<FunctionTemplateDecl>(Friend)); if (isa<CXXRecordDecl>(Friend)) return MatchesFriend(S, EC, cast<CXXRecordDecl>(Friend)); assert(isa<FunctionDecl>(Friend) && "unknown friend decl kind"); return MatchesFriend(S, EC, cast<FunctionDecl>(Friend)); } static AccessResult GetFriendKind(Sema &S, const EffectiveContext &EC, const CXXRecordDecl *Class) { AccessResult OnFailure = AR_inaccessible; // Okay, check friends. for (auto *Friend : Class->friends()) { switch (MatchesFriend(S, EC, Friend)) { case AR_accessible: return AR_accessible; case AR_inaccessible: continue; case AR_dependent: OnFailure = AR_dependent; break; } } // That's it, give up. return OnFailure; } namespace { /// A helper class for checking for a friend which will grant access /// to a protected instance member. struct ProtectedFriendContext { Sema &S; const EffectiveContext &EC; const CXXRecordDecl *NamingClass; bool CheckDependent; bool EverDependent; /// The path down to the current base class. SmallVector<const CXXRecordDecl*, 20> CurPath; ProtectedFriendContext(Sema &S, const EffectiveContext &EC, const CXXRecordDecl *InstanceContext, const CXXRecordDecl *NamingClass) : S(S), EC(EC), NamingClass(NamingClass), CheckDependent(InstanceContext->isDependentContext() || NamingClass->isDependentContext()), EverDependent(false) {} /// Check classes in the current path for friendship, starting at /// the given index. 
bool checkFriendshipAlongPath(unsigned I) { assert(I < CurPath.size()); for (unsigned E = CurPath.size(); I != E; ++I) { switch (GetFriendKind(S, EC, CurPath[I])) { case AR_accessible: return true; case AR_inaccessible: continue; case AR_dependent: EverDependent = true; continue; } } return false; } /// Perform a search starting at the given class. /// /// PrivateDepth is the index of the last (least derived) class /// along the current path such that a notional public member of /// the final class in the path would have access in that class. bool findFriendship(const CXXRecordDecl *Cur, unsigned PrivateDepth) { // If we ever reach the naming class, check the current path for // friendship. We can also stop recursing because we obviously // won't find the naming class there again. if (Cur == NamingClass) return checkFriendshipAlongPath(PrivateDepth); if (CheckDependent && MightInstantiateTo(Cur, NamingClass)) EverDependent = true; // Recurse into the base classes. for (const auto &I : Cur->bases()) { // If this is private inheritance, then a public member of the // base will not have any access in classes derived from Cur. unsigned BasePrivateDepth = PrivateDepth; if (I.getAccessSpecifier() == AS_private) BasePrivateDepth = CurPath.size() - 1; const CXXRecordDecl *RD; QualType T = I.getType(); if (const RecordType *RT = T->getAs<RecordType>()) { RD = cast<CXXRecordDecl>(RT->getDecl()); } else if (const InjectedClassNameType *IT = T->getAs<InjectedClassNameType>()) { RD = IT->getDecl(); } else { assert(T->isDependentType() && "non-dependent base wasn't a record?"); EverDependent = true; continue; } // Recurse. We don't need to clean up if this returns true. CurPath.push_back(RD); if (findFriendship(RD->getCanonicalDecl(), BasePrivateDepth)) return true; CurPath.pop_back(); } return false; } bool findFriendship(const CXXRecordDecl *Cur) { assert(CurPath.empty()); CurPath.push_back(Cur); return findFriendship(Cur, 0); } }; } /// Search for a class P that EC is a friend of, under the constraint /// InstanceContext <= P /// if InstanceContext exists, or else /// NamingClass <= P /// and with the additional restriction that a protected member of /// NamingClass would have some natural access in P, which implicitly /// imposes the constraint that P <= NamingClass. /// /// This isn't quite the condition laid out in the standard. /// Instead of saying that a notional protected member of NamingClass /// would have to have some natural access in P, it says the actual /// target has to have some natural access in P, which opens up the /// possibility that the target (which is not necessarily a member /// of NamingClass) might be more accessible along some path not /// passing through it. That's really a bad idea, though, because it /// introduces two problems: /// - Most importantly, it breaks encapsulation because you can /// access a forbidden base class's members by directly subclassing /// it elsewhere. /// - It also makes access substantially harder to compute because it /// breaks the hill-climbing algorithm: knowing that the target is /// accessible in some base class would no longer let you change /// the question solely to whether the base class is accessible, /// because the original target might have been more accessible /// because of crazy subclassing. /// So we don't implement that. 
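// Editor's illustration (hedged; 'A', 'B', and 'F' are hypothetical
// classes): under the [class.protected] search implemented below, a friend
// of a derived class reaches the base's protected member only through the
// derived class:
//
//   class A { protected: int x; };
//   class B : public A { friend class F; };
//   class F {
//     void f(B *b) { (void)b->x; }  // OK: instance context B, F friends B
//     void g(A *a) { (void)a->x; }  // ill-formed: instance context is A
//   };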
static AccessResult GetProtectedFriendKind(Sema &S, const EffectiveContext &EC, const CXXRecordDecl *InstanceContext, const CXXRecordDecl *NamingClass) { assert(InstanceContext == nullptr || InstanceContext->getCanonicalDecl() == InstanceContext); assert(NamingClass->getCanonicalDecl() == NamingClass); // If we don't have an instance context, our constraints give us // that NamingClass <= P <= NamingClass, i.e. P == NamingClass. // This is just the usual friendship check. if (!InstanceContext) return GetFriendKind(S, EC, NamingClass); ProtectedFriendContext PRC(S, EC, InstanceContext, NamingClass); if (PRC.findFriendship(InstanceContext)) return AR_accessible; if (PRC.EverDependent) return AR_dependent; return AR_inaccessible; } static AccessResult HasAccess(Sema &S, const EffectiveContext &EC, const CXXRecordDecl *NamingClass, AccessSpecifier Access, const AccessTarget &Target) { assert(NamingClass->getCanonicalDecl() == NamingClass && "declaration should be canonicalized before being passed here"); if (Access == AS_public) return AR_accessible; assert(Access == AS_private || Access == AS_protected); AccessResult OnFailure = AR_inaccessible; for (EffectiveContext::record_iterator I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) { // All the declarations in EC have been canonicalized, so pointer // equality from this point on will work fine. const CXXRecordDecl *ECRecord = *I; // [B2] and [M2] if (Access == AS_private) { if (ECRecord == NamingClass) return AR_accessible; if (EC.isDependent() && MightInstantiateTo(ECRecord, NamingClass)) OnFailure = AR_dependent; // [B3] and [M3] } else { assert(Access == AS_protected); switch (IsDerivedFromInclusive(ECRecord, NamingClass)) { case AR_accessible: break; case AR_inaccessible: continue; case AR_dependent: OnFailure = AR_dependent; continue; } // C++ [class.protected]p1: // An additional access check beyond those described earlier in // [class.access] is applied when a non-static data member or // non-static member function is a protected member of its naming // class. As described earlier, access to a protected member is // granted because the reference occurs in a friend or member of // some class C. If the access is to form a pointer to member, // the nested-name-specifier shall name C or a class derived from // C. All other accesses involve a (possibly implicit) object // expression. In this case, the class of the object expression // shall be C or a class derived from C. // // We interpret this as a restriction on [M3]. // In this part of the code, 'C' is just our context class ECRecord. // These rules are different if we don't have an instance context. if (!Target.hasInstanceContext()) { // If it's not an instance member, these restrictions don't apply. if (!Target.isInstanceMember()) return AR_accessible; // If it's an instance member, use the pointer-to-member rule // that the naming class has to be derived from the effective // context. // Emulate a MSVC bug where the creation of pointer-to-member // to protected member of base class is allowed but only from // static member functions. if (S.getLangOpts().MSVCCompat && !EC.Functions.empty()) if (CXXMethodDecl* MD = dyn_cast<CXXMethodDecl>(EC.Functions.front())) if (MD->isStatic()) return AR_accessible; // Despite the standard's confident wording, there is a case // where you can have an instance member that's neither in a // pointer-to-member expression nor in a member access: when // it names a field in an unevaluated context that can't be an // implicit member. 
Pending clarification, we just apply the // same naming-class restriction here. // FIXME: we're probably not correctly adding the // protected-member restriction when we retroactively convert // an expression to being evaluated. // We know that ECRecord derives from NamingClass. The // restriction says to check whether NamingClass derives from // ECRecord, but that's not really necessary: two distinct // classes can't be recursively derived from each other. So // along this path, we just need to check whether the classes // are equal. if (NamingClass == ECRecord) return AR_accessible; // Otherwise, this context class tells us nothing; on to the next. continue; } assert(Target.isInstanceMember()); const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S); if (!InstanceContext) { OnFailure = AR_dependent; continue; } switch (IsDerivedFromInclusive(InstanceContext, ECRecord)) { case AR_accessible: return AR_accessible; case AR_inaccessible: continue; case AR_dependent: OnFailure = AR_dependent; continue; } } } // [M3] and [B3] say that, if the target is protected in N, we grant // access if the access occurs in a friend or member of some class P // that's a subclass of N and where the target has some natural // access in P. The 'member' aspect is easy to handle because P // would necessarily be one of the effective-context records, and we // address that above. The 'friend' aspect is completely ridiculous // to implement because there are no restrictions at all on P // *unless* the [class.protected] restriction applies. If it does, // however, we should ignore whether the naming class is a friend, // and instead rely on whether any potential P is a friend. if (Access == AS_protected && Target.isInstanceMember()) { // Compute the instance context if possible. const CXXRecordDecl *InstanceContext = nullptr; if (Target.hasInstanceContext()) { InstanceContext = Target.resolveInstanceContext(S); if (!InstanceContext) return AR_dependent; } switch (GetProtectedFriendKind(S, EC, InstanceContext, NamingClass)) { case AR_accessible: return AR_accessible; case AR_inaccessible: return OnFailure; case AR_dependent: return AR_dependent; } llvm_unreachable("impossible friendship kind"); } switch (GetFriendKind(S, EC, NamingClass)) { case AR_accessible: return AR_accessible; case AR_inaccessible: return OnFailure; case AR_dependent: return AR_dependent; } // Silence bogus warnings llvm_unreachable("impossible friendship kind"); } /// Finds the best path from the naming class to the declaring class, /// taking friend declarations into account. /// /// C++0x [class.access.base]p5: /// A member m is accessible at the point R when named in class N if /// [M1] m as a member of N is public, or /// [M2] m as a member of N is private, and R occurs in a member or /// friend of class N, or /// [M3] m as a member of N is protected, and R occurs in a member or /// friend of class N, or in a member or friend of a class P /// derived from N, where m as a member of P is public, private, /// or protected, or /// [M4] there exists a base class B of N that is accessible at R, and /// m is accessible at R when named in class B. 
/// /// C++0x [class.access.base]p4: /// A base class B of N is accessible at R, if /// [B1] an invented public member of B would be a public member of N, or /// [B2] R occurs in a member or friend of class N, and an invented public /// member of B would be a private or protected member of N, or /// [B3] R occurs in a member or friend of a class P derived from N, and an /// invented public member of B would be a private or protected member /// of P, or /// [B4] there exists a class S such that B is a base class of S accessible /// at R and S is a base class of N accessible at R. /// /// Along a single inheritance path we can restate both of these /// iteratively: /// /// First, we note that M1-4 are equivalent to B1-4 if the member is /// treated as a notional base of its declaring class with inheritance /// access equivalent to the member's access. Therefore we need only /// ask whether a class B is accessible from a class N in context R. /// /// Let B_1 .. B_n be the inheritance path in question (i.e. where /// B_1 = N, B_n = B, and for all i, B_{i+1} is a direct base class of /// B_i). For i in 1..n, we will calculate ACAB(i), the access to the /// closest accessible base in the path: /// Access(a, b) = (* access on the base specifier from a to b *) /// Merge(a, forbidden) = forbidden /// Merge(a, private) = forbidden /// Merge(a, b) = min(a,b) /// Accessible(c, forbidden) = false /// Accessible(c, private) = (R is c) || IsFriend(c, R) /// Accessible(c, protected) = (R derived from c) || IsFriend(c, R) /// Accessible(c, public) = true /// ACAB(n) = public /// ACAB(i) = /// let AccessToBase = Merge(Access(B_i, B_{i+1}), ACAB(i+1)) in /// if Accessible(B_i, AccessToBase) then public else AccessToBase /// /// B is an accessible base of N at R iff ACAB(1) = public. /// /// \param FinalAccess the access of the "final step", or AS_public if /// there is no final step. /// \return null if friendship is dependent static CXXBasePath *FindBestPath(Sema &S, const EffectiveContext &EC, AccessTarget &Target, AccessSpecifier FinalAccess, CXXBasePaths &Paths) { // Derive the paths to the desired base. const CXXRecordDecl *Derived = Target.getNamingClass(); const CXXRecordDecl *Base = Target.getDeclaringClass(); // FIXME: fail correctly when there are dependent paths. bool isDerived = Derived->isDerivedFrom(const_cast<CXXRecordDecl*>(Base), Paths); assert(isDerived && "derived class not actually derived from base"); (void) isDerived; CXXBasePath *BestPath = nullptr; assert(FinalAccess != AS_none && "forbidden access after declaring class"); bool AnyDependent = false; // Derive the friend-modified access along each path. for (CXXBasePaths::paths_iterator PI = Paths.begin(), PE = Paths.end(); PI != PE; ++PI) { AccessTarget::SavedInstanceContext _ = Target.saveInstanceContext(); // Walk through the path backwards. AccessSpecifier PathAccess = FinalAccess; CXXBasePath::iterator I = PI->end(), E = PI->begin(); while (I != E) { --I; assert(PathAccess != AS_none); // If the declaration is a private member of a base class, there // is no level of friendship in derived classes that can make it // accessible. 
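// Illustrative example of why private access ends the walk (hypothetical
// types, not from this file):
//   class B { private: int x; };
//   class D : public B { friend void g(D &); };
//   void g(D &d) { /* d.x remains ill-formed: friendship with D cannot
//                     restore access to B's private member */ }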
if (PathAccess == AS_private) { PathAccess = AS_none; break; } const CXXRecordDecl *NC = I->Class->getCanonicalDecl(); AccessSpecifier BaseAccess = I->Base->getAccessSpecifier(); PathAccess = std::max(PathAccess, BaseAccess); switch (HasAccess(S, EC, NC, PathAccess, Target)) { case AR_inaccessible: break; case AR_accessible: PathAccess = AS_public; // Future tests are not against members and so do not have // instance context. Target.suppressInstanceContext(); break; case AR_dependent: AnyDependent = true; goto Next; } } // Note that we modify the path's Access field to the // friend-modified access. if (BestPath == nullptr || PathAccess < BestPath->Access) { BestPath = &*PI; BestPath->Access = PathAccess; // Short-circuit if we found a public path. if (BestPath->Access == AS_public) return BestPath; } Next: ; } assert((!BestPath || BestPath->Access != AS_public) && "fell out of loop with public path"); // We didn't find a public path, but at least one path was subject // to dependent friendship, so delay the check. if (AnyDependent) return nullptr; return BestPath; } /// Given that an entity has protected natural access, check whether /// access might be denied because of the protected member access /// restriction. /// /// \return true if a note was emitted static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC, AccessTarget &Target) { // Only applies to instance accesses. if (!Target.isInstanceMember()) return false; assert(Target.isMemberAccess()); const CXXRecordDecl *NamingClass = Target.getEffectiveNamingClass(); for (EffectiveContext::record_iterator I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) { const CXXRecordDecl *ECRecord = *I; switch (IsDerivedFromInclusive(ECRecord, NamingClass)) { case AR_accessible: break; case AR_inaccessible: continue; case AR_dependent: continue; } // The effective context is a subclass of the declaring class. // Check whether the [class.protected] restriction is limiting // access. // To get this exactly right, this might need to be checked more // holistically; it's not necessarily the case that gaining // access here would grant us access overall. NamedDecl *D = Target.getTargetDecl(); // If we don't have an instance context, [class.protected] says the // naming class has to equal the context class. if (!Target.hasInstanceContext()) { // If it does, the restriction doesn't apply. if (NamingClass == ECRecord) continue; // TODO: it would be great to have a fixit here, since this is // such an obvious error. S.Diag(D->getLocation(), diag::note_access_protected_restricted_noobject) << S.Context.getTypeDeclType(ECRecord); return true; } const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S); assert(InstanceContext && "diagnosing dependent access"); switch (IsDerivedFromInclusive(InstanceContext, ECRecord)) { case AR_accessible: continue; case AR_dependent: continue; case AR_inaccessible: break; } // Okay, the restriction seems to be what's limiting us. // Use a special diagnostic for constructors and destructors. if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D) || (isa<FunctionTemplateDecl>(D) && isa<CXXConstructorDecl>( cast<FunctionTemplateDecl>(D)->getTemplatedDecl()))) { return S.Diag(D->getLocation(), diag::note_access_protected_restricted_ctordtor) << isa<CXXDestructorDecl>(D->getAsFunction()); } // Otherwise, use the generic diagnostic. 
return S.Diag(D->getLocation(), diag::note_access_protected_restricted_object) << S.Context.getTypeDeclType(ECRecord); } return false; } /// We are unable to access a given declaration due to its direct /// access control; diagnose that. static void diagnoseBadDirectAccess(Sema &S, const EffectiveContext &EC, AccessTarget &entity) { assert(entity.isMemberAccess()); NamedDecl *D = entity.getTargetDecl(); if (D->getAccess() == AS_protected && TryDiagnoseProtectedAccess(S, EC, entity)) return; // Find an original declaration. while (D->isOutOfLine()) { NamedDecl *PrevDecl = nullptr; if (VarDecl *VD = dyn_cast<VarDecl>(D)) PrevDecl = VD->getPreviousDecl(); else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) PrevDecl = FD->getPreviousDecl(); else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(D)) PrevDecl = TND->getPreviousDecl(); else if (TagDecl *TD = dyn_cast<TagDecl>(D)) { if (isa<RecordDecl>(D) && cast<RecordDecl>(D)->isInjectedClassName()) break; PrevDecl = TD->getPreviousDecl(); } if (!PrevDecl) break; D = PrevDecl; } CXXRecordDecl *DeclaringClass = FindDeclaringClass(D); Decl *ImmediateChild; if (D->getDeclContext() == DeclaringClass) ImmediateChild = D; else { DeclContext *DC = D->getDeclContext(); while (DC->getParent() != DeclaringClass) DC = DC->getParent(); ImmediateChild = cast<Decl>(DC); } // Check whether there's an AccessSpecDecl preceding this in the // chain of the DeclContext. bool isImplicit = true; for (const auto *I : DeclaringClass->decls()) { if (I == ImmediateChild) break; if (isa<AccessSpecDecl>(I)) { isImplicit = false; break; } } S.Diag(D->getLocation(), diag::note_access_natural) << (unsigned) (D->getAccess() == AS_protected) << isImplicit; } /// Diagnose the path which caused the given declaration or base class /// to become inaccessible. static void DiagnoseAccessPath(Sema &S, const EffectiveContext &EC, AccessTarget &entity) { // Save the instance context to preserve invariants. AccessTarget::SavedInstanceContext _ = entity.saveInstanceContext(); // This basically repeats the main algorithm but keeps some more // information. // The natural access so far. AccessSpecifier accessSoFar = AS_public; // Check whether we have special rights to the declaring class. if (entity.isMemberAccess()) { NamedDecl *D = entity.getTargetDecl(); accessSoFar = D->getAccess(); const CXXRecordDecl *declaringClass = entity.getDeclaringClass(); switch (HasAccess(S, EC, declaringClass, accessSoFar, entity)) { // If the declaration is accessible when named in its declaring // class, then we must be constrained by the path. case AR_accessible: accessSoFar = AS_public; entity.suppressInstanceContext(); break; case AR_inaccessible: if (accessSoFar == AS_private || declaringClass == entity.getEffectiveNamingClass()) return diagnoseBadDirectAccess(S, EC, entity); break; case AR_dependent: llvm_unreachable("cannot diagnose dependent access"); } } CXXBasePaths paths; CXXBasePath &path = *FindBestPath(S, EC, entity, accessSoFar, paths); assert(path.Access != AS_public); CXXBasePath::iterator i = path.end(), e = path.begin(); CXXBasePath::iterator constrainingBase = i; while (i != e) { --i; assert(accessSoFar != AS_none && accessSoFar != AS_private); // Is the entity accessible when named in the deriving class, as // modified by the base specifier? const CXXRecordDecl *derivingClass = i->Class->getCanonicalDecl(); const CXXBaseSpecifier *base = i->Base; // If the access to this base is worse than the access we have to // the declaration, remember it. 
AccessSpecifier baseAccess = base->getAccessSpecifier(); if (baseAccess > accessSoFar) { constrainingBase = i; accessSoFar = baseAccess; } switch (HasAccess(S, EC, derivingClass, accessSoFar, entity)) { case AR_inaccessible: break; case AR_accessible: accessSoFar = AS_public; entity.suppressInstanceContext(); constrainingBase = nullptr; break; case AR_dependent: llvm_unreachable("cannot diagnose dependent access"); } // If this was private inheritance, but we don't have access to // the deriving class, we're done. if (accessSoFar == AS_private) { assert(baseAccess == AS_private); assert(constrainingBase == i); break; } } // If we don't have a constraining base, the access failure must be // due to the original declaration. if (constrainingBase == path.end()) return diagnoseBadDirectAccess(S, EC, entity); // We're constrained by inheritance, but we want to say // "declared private here" if we're diagnosing a hierarchy // conversion and this is the final step. unsigned diagnostic; if (entity.isMemberAccess() || constrainingBase + 1 != path.end()) { diagnostic = diag::note_access_constrained_by_path; } else { diagnostic = diag::note_access_natural; } const CXXBaseSpecifier *base = constrainingBase->Base; S.Diag(base->getSourceRange().getBegin(), diagnostic) << base->getSourceRange() << (base->getAccessSpecifier() == AS_protected) << (base->getAccessSpecifierAsWritten() == AS_none); if (entity.isMemberAccess()) S.Diag(entity.getTargetDecl()->getLocation(), diag::note_member_declared_at); } static void DiagnoseBadAccess(Sema &S, SourceLocation Loc, const EffectiveContext &EC, AccessTarget &Entity) { const CXXRecordDecl *NamingClass = Entity.getNamingClass(); const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass(); NamedDecl *D = (Entity.isMemberAccess() ? Entity.getTargetDecl() : nullptr); S.Diag(Loc, Entity.getDiag()) << (Entity.getAccess() == AS_protected) << (D ? D->getDeclName() : DeclarationName()) << S.Context.getTypeDeclType(NamingClass) << S.Context.getTypeDeclType(DeclaringClass); DiagnoseAccessPath(S, EC, Entity); } /// MSVC has a bug where if during an using declaration name lookup, /// the declaration found is unaccessible (private) and that declaration /// was bring into scope via another using declaration whose target /// declaration is accessible (public) then no error is generated. /// Example: /// class A { /// public: /// int f(); /// }; /// class B : public A { /// private: /// using A::f; /// }; /// class C : public B { /// private: /// using B::f; /// }; /// /// Here, B::f is private so this should fail in Standard C++, but /// because B::f refers to A::f which is public MSVC accepts it. static bool IsMicrosoftUsingDeclarationAccessBug(Sema& S, SourceLocation AccessLoc, AccessTarget &Entity) { if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(Entity.getTargetDecl())) { const NamedDecl *OrigDecl = Entity.getTargetDecl()->getUnderlyingDecl(); if (Entity.getTargetDecl()->getAccess() == AS_private && (OrigDecl->getAccess() == AS_public || OrigDecl->getAccess() == AS_protected)) { S.Diag(AccessLoc, diag::ext_ms_using_declaration_inaccessible) << Shadow->getUsingDecl()->getQualifiedNameAsString() << OrigDecl->getQualifiedNameAsString(); return true; } } return false; } /// Determines whether the accessed entity is accessible. Public members /// have been weeded out by this point. static AccessResult IsAccessible(Sema &S, const EffectiveContext &EC, AccessTarget &Entity) { // Determine the actual naming class. 
const CXXRecordDecl *NamingClass = Entity.getEffectiveNamingClass(); AccessSpecifier UnprivilegedAccess = Entity.getAccess(); assert(UnprivilegedAccess != AS_public && "public access not weeded out"); // Before we try to recalculate access paths, try to white-list // accesses which just trade in on the final step, i.e. accesses // which don't require [M4] or [B4]. These are by far the most // common forms of privileged access. if (UnprivilegedAccess != AS_none) { switch (HasAccess(S, EC, NamingClass, UnprivilegedAccess, Entity)) { case AR_dependent: // This is actually an interesting policy decision. We don't // *have* to delay immediately here: we can do the full access // calculation in the hope that friendship on some intermediate // class will make the declaration accessible non-dependently. // But that's not cheap, and odds are very good (note: assertion // made without data) that the friend declaration will determine // access. return AR_dependent; case AR_accessible: return AR_accessible; case AR_inaccessible: break; } } AccessTarget::SavedInstanceContext _ = Entity.saveInstanceContext(); // We lower member accesses to base accesses by pretending that the // member is a base class of its declaring class. AccessSpecifier FinalAccess; if (Entity.isMemberAccess()) { // Determine if the declaration is accessible from EC when named // in its declaring class. NamedDecl *Target = Entity.getTargetDecl(); const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass(); FinalAccess = Target->getAccess(); switch (HasAccess(S, EC, DeclaringClass, FinalAccess, Entity)) { case AR_accessible: // Target is accessible at EC when named in its declaring class. // We can now hill-climb and simply check whether the declaring // class is accessible as a base of the naming class. This is // equivalent to checking the access of a notional public // member with no instance context. FinalAccess = AS_public; Entity.suppressInstanceContext(); break; case AR_inaccessible: break; case AR_dependent: return AR_dependent; // see above } if (DeclaringClass == NamingClass) return (FinalAccess == AS_public ? AR_accessible : AR_inaccessible); } else { FinalAccess = AS_public; } assert(Entity.getDeclaringClass() != NamingClass); // Append the declaration's access if applicable. CXXBasePaths Paths; CXXBasePath *Path = FindBestPath(S, EC, Entity, FinalAccess, Paths); if (!Path) return AR_dependent; assert(Path->Access <= UnprivilegedAccess && "access along best path worse than direct?"); if (Path->Access == AS_public) return AR_accessible; return AR_inaccessible; } static void DelayDependentAccess(Sema &S, const EffectiveContext &EC, SourceLocation Loc, const AccessTarget &Entity) { assert(EC.isDependent() && "delaying non-dependent access"); DeclContext *DC = EC.getInnerContext(); assert(DC->isDependentContext() && "delaying non-dependent access"); DependentDiagnostic::Create(S.Context, DC, DependentDiagnostic::Access, Loc, Entity.isMemberAccess(), Entity.getAccess(), Entity.getTargetDecl(), Entity.getNamingClass(), Entity.getBaseObjectType(), Entity.getDiag()); } /// Checks access to an entity from the given effective context. 
static AccessResult CheckEffectiveAccess(Sema &S, const EffectiveContext &EC, SourceLocation Loc, AccessTarget &Entity) { assert(Entity.getAccess() != AS_public && "called for public access!"); switch (IsAccessible(S, EC, Entity)) { case AR_dependent: DelayDependentAccess(S, EC, Loc, Entity); return AR_dependent; case AR_inaccessible: if (S.getLangOpts().MSVCCompat && IsMicrosoftUsingDeclarationAccessBug(S, Loc, Entity)) return AR_accessible; if (!Entity.isQuiet()) DiagnoseBadAccess(S, Loc, EC, Entity); return AR_inaccessible; case AR_accessible: return AR_accessible; } // silence unnecessary warning llvm_unreachable("invalid access result"); } static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc, AccessTarget &Entity) { // If the access path is public, it's accessible everywhere. if (Entity.getAccess() == AS_public) return Sema::AR_accessible; // If we're currently parsing a declaration, we may need to delay // access control checking, because our effective context might be // different based on what the declaration comes out as. // // For example, we might be parsing a declaration with a scope // specifier, like this: // A::private_type A::foo() { ... } // // Or we might be parsing something that will turn out to be a friend: // void foo(A::private_type); // void B::foo(A::private_type); if (S.DelayedDiagnostics.shouldDelayDiagnostics()) { S.DelayedDiagnostics.add(DelayedDiagnostic::makeAccess(Loc, Entity)); return Sema::AR_delayed; } EffectiveContext EC(S.CurContext); switch (CheckEffectiveAccess(S, EC, Loc, Entity)) { case AR_accessible: return Sema::AR_accessible; case AR_inaccessible: return Sema::AR_inaccessible; case AR_dependent: return Sema::AR_dependent; } llvm_unreachable("invalid access result"); } void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *D) { // Access control for names used in the declarations of functions // and function templates should normally be evaluated in the context // of the declaration, just in case it's a friend of something. // However, this does not apply to local extern declarations. 
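// A hypothetical example of why the check was delayed (names are
// illustrative only):
//   class A { class Priv {}; friend Priv h(); };
//   A::Priv h() { return A::Priv(); }  // legal only because h is A's friend
// While parsing the return type of h we do not yet know that h will turn
// out to be a friend of A, so the access to A::Priv is re-evaluated here
// with the finished declaration as the effective context.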
DeclContext *DC = D->getDeclContext(); if (D->isLocalExternDecl()) { DC = D->getLexicalDeclContext(); } else if (FunctionDecl *FN = dyn_cast<FunctionDecl>(D)) { DC = FN; } else if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) { DC = cast<DeclContext>(TD->getTemplatedDecl()); } EffectiveContext EC(DC); AccessTarget Target(DD.getAccessData()); if (CheckEffectiveAccess(*this, EC, DD.Loc, Target) == ::AR_inaccessible) DD.Triggered = true; } void Sema::HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs) { SourceLocation Loc = DD.getAccessLoc(); AccessSpecifier Access = DD.getAccess(); Decl *NamingD = FindInstantiatedDecl(Loc, DD.getAccessNamingClass(), TemplateArgs); if (!NamingD) return; Decl *TargetD = FindInstantiatedDecl(Loc, DD.getAccessTarget(), TemplateArgs); if (!TargetD) return; if (DD.isAccessToMember()) { CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(NamingD); NamedDecl *TargetDecl = cast<NamedDecl>(TargetD); QualType BaseObjectType = DD.getAccessBaseObjectType(); if (!BaseObjectType.isNull()) { BaseObjectType = SubstType(BaseObjectType, TemplateArgs, Loc, DeclarationName()); if (BaseObjectType.isNull()) return; } AccessTarget Entity(Context, AccessTarget::Member, NamingClass, DeclAccessPair::make(TargetDecl, Access), BaseObjectType); Entity.setDiag(DD.getDiagnostic()); CheckAccess(*this, Loc, Entity); } else { AccessTarget Entity(Context, AccessTarget::Base, cast<CXXRecordDecl>(TargetD), cast<CXXRecordDecl>(NamingD), Access); Entity.setDiag(DD.getDiagnostic()); CheckAccess(*this, Loc, Entity); } } Sema::AccessResult Sema::CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair Found) { if (!getLangOpts().AccessControl || !E->getNamingClass() || Found.getAccess() == AS_public) return AR_accessible; AccessTarget Entity(Context, AccessTarget::Member, E->getNamingClass(), Found, QualType()); Entity.setDiag(diag::err_access) << E->getSourceRange(); return CheckAccess(*this, E->getNameLoc(), Entity); } /// Perform access-control checking on a previously-unresolved member /// access which has now been resolved to a member. Sema::AccessResult Sema::CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair Found) { if (!getLangOpts().AccessControl || Found.getAccess() == AS_public) return AR_accessible; QualType BaseType = E->getBaseType(); if (E->isArrow()) BaseType = BaseType->getAs<PointerType>()->getPointeeType(); AccessTarget Entity(Context, AccessTarget::Member, E->getNamingClass(), Found, BaseType); Entity.setDiag(diag::err_access) << E->getSourceRange(); return CheckAccess(*this, E->getMemberLoc(), Entity); } /// Is the given special member function accessible for the purposes of /// deciding whether to define a special member function as deleted? bool Sema::isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType) { // Fast path. if (access == AS_public || !getLangOpts().AccessControl) return true; AccessTarget entity(Context, AccessTarget::Member, decl->getParent(), DeclAccessPair::make(decl, access), objectType); // Suppress diagnostics. 
entity.setDiag(PDiag()); switch (CheckAccess(*this, SourceLocation(), entity)) { case AR_accessible: return true; case AR_inaccessible: return false; case AR_dependent: llvm_unreachable("dependent for =delete computation"); case AR_delayed: llvm_unreachable("cannot delay =delete computation"); } llvm_unreachable("bad access result"); } Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType ObjectTy) { if (!getLangOpts().AccessControl) return AR_accessible; // There's never a path involved when checking implicit destructor access. AccessSpecifier Access = Dtor->getAccess(); if (Access == AS_public) return AR_accessible; CXXRecordDecl *NamingClass = Dtor->getParent(); if (ObjectTy.isNull()) ObjectTy = Context.getTypeDeclType(NamingClass); AccessTarget Entity(Context, AccessTarget::Member, NamingClass, DeclAccessPair::make(Dtor, Access), ObjectTy); Entity.setDiag(PDiag); // TODO: avoid copy return CheckAccess(*this, Loc, Entity); } /// Checks access to a constructor. Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc, CXXConstructorDecl *Constructor, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp) { if (!getLangOpts().AccessControl || Access == AS_public) return AR_accessible; PartialDiagnostic PD(PDiag()); switch (Entity.getKind()) { default: PD = PDiag(IsCopyBindingRefToTemp ? diag::ext_rvalue_to_reference_access_ctor : diag::err_access_ctor); break; case InitializedEntity::EK_Base: PD = PDiag(diag::err_access_base_ctor); PD << Entity.isInheritedVirtualBase() << Entity.getBaseSpecifier()->getType() << getSpecialMember(Constructor); break; case InitializedEntity::EK_Member: { const FieldDecl *Field = cast<FieldDecl>(Entity.getDecl()); PD = PDiag(diag::err_access_field_ctor); PD << Field->getType() << getSpecialMember(Constructor); break; } case InitializedEntity::EK_LambdaCapture: { StringRef VarName = Entity.getCapturedVarName(); PD = PDiag(diag::err_access_lambda_capture); PD << VarName << Entity.getType() << getSpecialMember(Constructor); break; } } return CheckConstructorAccess(UseLoc, Constructor, Entity, Access, PD); } /// Checks access to a constructor. Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc, CXXConstructorDecl *Constructor, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PD) { if (!getLangOpts().AccessControl || Access == AS_public) return AR_accessible; CXXRecordDecl *NamingClass = Constructor->getParent(); // Initializing a base sub-object is an instance method call on an // object of the derived class. Otherwise, we have an instance method // call on an object of the constructed type. CXXRecordDecl *ObjectClass; if (Entity.getKind() == InitializedEntity::EK_Base) { ObjectClass = cast<CXXConstructorDecl>(CurContext)->getParent(); } else { ObjectClass = NamingClass; } AccessTarget AccessEntity(Context, AccessTarget::Member, NamingClass, DeclAccessPair::make(Constructor, Access), Context.getTypeDeclType(ObjectClass)); AccessEntity.setDiag(PD); return CheckAccess(*this, UseLoc, AccessEntity); } /// Checks access to an overloaded operator new or delete. 
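/// For example (illustrative only):
///   struct S { private: static void *operator new(std::size_t); };
///   S *p = new S;  // ill-formed outside S: the private operator new is
///                  // found by lookup but is not accessible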
Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair Found, bool Diagnose) { if (!getLangOpts().AccessControl || !NamingClass || Found.getAccess() == AS_public) return AR_accessible; AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found, QualType()); if (Diagnose) Entity.setDiag(diag::err_access) << PlacementRange; return CheckAccess(*this, OpLoc, Entity); } /// \brief Checks access to a member. Sema::AccessResult Sema::CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found) { if (!getLangOpts().AccessControl || !NamingClass || Found.getAccess() == AS_public) return AR_accessible; AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found, QualType()); return CheckAccess(*this, UseLoc, Entity); } /// Checks access to an overloaded member operator, including /// conversion operators. Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair Found) { if (!getLangOpts().AccessControl || Found.getAccess() == AS_public) return AR_accessible; const RecordType *RT = ObjectExpr->getType()->castAs<RecordType>(); CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(RT->getDecl()); AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found, ObjectExpr->getType()); Entity.setDiag(diag::err_access) << ObjectExpr->getSourceRange() << (ArgExpr ? ArgExpr->getSourceRange() : SourceRange()); return CheckAccess(*this, OpLoc, Entity); } /// Checks access to the target of a friend declaration. Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) { assert(isa<CXXMethodDecl>(target->getAsFunction())); // Friendship lookup is a redeclaration lookup, so there's never an // inheritance path modifying access. AccessSpecifier access = target->getAccess(); if (!getLangOpts().AccessControl || access == AS_public) return AR_accessible; CXXMethodDecl *method = cast<CXXMethodDecl>(target->getAsFunction()); AccessTarget entity(Context, AccessTarget::Member, cast<CXXRecordDecl>(target->getDeclContext()), DeclAccessPair::make(target, access), /*no instance context*/ QualType()); entity.setDiag(diag::err_access_friend_function) << (method->getQualifier() ? method->getQualifierLoc().getSourceRange() : method->getNameInfo().getSourceRange()); // We need to bypass delayed-diagnostics because we might be called // while the ParsingDeclarator is active. EffectiveContext EC(CurContext); switch (CheckEffectiveAccess(*this, EC, target->getLocation(), entity)) { case ::AR_accessible: return Sema::AR_accessible; // HLSL Change - disambiguate enum case ::AR_inaccessible: return Sema::AR_inaccessible; // HLSL Change - disambiguate enum case ::AR_dependent: return Sema::AR_dependent; // HLSL Change - disambiguate enum } llvm_unreachable("falling off end"); } Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair Found) { if (!getLangOpts().AccessControl || Found.getAccess() == AS_none || Found.getAccess() == AS_public) return AR_accessible; OverloadExpr *Ovl = OverloadExpr::find(OvlExpr).Expression; CXXRecordDecl *NamingClass = Ovl->getNamingClass(); AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found, /*no instance context*/ QualType()); Entity.setDiag(diag::err_access) << Ovl->getSourceRange(); return CheckAccess(*this, Ovl->getNameLoc(), Entity); } /// Checks access for a hierarchy conversion. 
/// /// \param ForceCheck true if this check should be performed even if access /// control is disabled; some things rely on this for semantics /// \param ForceUnprivileged true if this check should proceed as if the /// context had no special privileges Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck, bool ForceUnprivileged) { if (!ForceCheck && !getLangOpts().AccessControl) return AR_accessible; if (Path.Access == AS_public) return AR_accessible; CXXRecordDecl *BaseD, *DerivedD; BaseD = cast<CXXRecordDecl>(Base->getAs<RecordType>()->getDecl()); DerivedD = cast<CXXRecordDecl>(Derived->getAs<RecordType>()->getDecl()); AccessTarget Entity(Context, AccessTarget::Base, BaseD, DerivedD, Path.Access); if (DiagID) Entity.setDiag(DiagID) << Derived << Base; if (ForceUnprivileged) { switch (CheckEffectiveAccess(*this, EffectiveContext(), AccessLoc, Entity)) { case ::AR_accessible: return Sema::AR_accessible; case ::AR_inaccessible: return Sema::AR_inaccessible; case ::AR_dependent: return Sema::AR_dependent; } llvm_unreachable("unexpected result from CheckEffectiveAccess"); } return CheckAccess(*this, AccessLoc, Entity); } /// Checks access to all the declarations in the given result set. void Sema::CheckLookupAccess(const LookupResult &R) { assert(getLangOpts().AccessControl && "performing access check without access control"); assert(R.getNamingClass() && "performing access check without naming class"); for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { if (I.getAccess() != AS_public) { AccessTarget Entity(Context, AccessedEntity::Member, R.getNamingClass(), I.getPair(), R.getBaseObjectType()); Entity.setDiag(diag::err_access); CheckAccess(*this, R.getNameLoc(), Entity); } } } /// Checks access to Decl from the given class. The check will take access /// specifiers into account, but no member access expressions and such. /// /// \param Decl the declaration to check if it can be accessed /// \param Ctx the class/context from which to start the search /// \return true if the Decl is accessible from the Class, false otherwise. bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) { if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) { if (!Decl->isCXXClassMember()) return true; QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal(); AccessTarget Entity(Context, AccessedEntity::Member, Class, DeclAccessPair::make(Decl, Decl->getAccess()), qType); if (Entity.getAccess() == AS_public) return true; EffectiveContext EC(CurContext); return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible; } if (ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(Decl)) { // @public and @package ivars are always accessible. if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Public || Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Package) return true; // If we are inside a class or category implementation, determine the // interface we're in. 
ObjCInterfaceDecl *ClassOfMethodDecl = nullptr; if (ObjCMethodDecl *MD = getCurMethodDecl()) ClassOfMethodDecl = MD->getClassInterface(); else if (FunctionDecl *FD = getCurFunctionDecl()) { if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(FD->getLexicalDeclContext())) { if (ObjCImplementationDecl *IMPD = dyn_cast<ObjCImplementationDecl>(Impl)) ClassOfMethodDecl = IMPD->getClassInterface(); else if (ObjCCategoryImplDecl* CatImplClass = dyn_cast<ObjCCategoryImplDecl>(Impl)) ClassOfMethodDecl = CatImplClass->getClassInterface(); } } // If we're not in an interface, this ivar is inaccessible. if (!ClassOfMethodDecl) return false; // If we're inside the same interface that owns the ivar, we're fine. if (declaresSameEntity(ClassOfMethodDecl, Ivar->getContainingInterface())) return true; // If the ivar is private, it's inaccessible. if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Private) return false; return Ivar->getContainingInterface()->isSuperClassOf(ClassOfMethodDecl); } return true; }
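// Illustrative note (an assumption about callers, not something stated in
// this file): IsSimplyAccessible is the kind of query a client such as
// code completion can use; e.g. given
//   class C { int priv_; public: int pub_; };
// it can ask, without building a member access expression, whether priv_
// and pub_ would be accessible from the current context and filter or
// annotate results accordingly.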
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaLambda.cpp
//===--- SemaLambda.cpp - Semantic Analysis for C++11 Lambdas -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for C++ lambda expressions. // //===----------------------------------------------------------------------===// #include "clang/Sema/DeclSpec.h" #include "TypeLocBuilder.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ExprCXX.h" #include "clang/Basic/TargetInfo.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/SemaLambda.h" using namespace clang; using namespace sema; // HLSL Change Starts // No lambda support, so simply skip all of this compilation. // Here are enough stubs to link the current targets. #if 1 /// \brief Determine whether the given context is or is enclosed in an inline /// function. static bool isInInlineFunction(const DeclContext *DC) { while (!DC->isFileContext()) { if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) if (FD->isInlined()) return true; DC = DC->getLexicalParent(); } return false; } Optional<unsigned> clang::getStackIndexOfNearestEnclosingCaptureCapableLambda( ArrayRef<const sema::FunctionScopeInfo *> FunctionScopes, VarDecl *VarToCapture, Sema &S) { llvm_unreachable("HLSL does not support lambdas"); } CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault) { llvm_unreachable("HLSL does not support lambdas"); } MangleNumberingContext * Sema::getCurrentMangleNumberContext(const DeclContext *DC, Decl *&ManglingContextDecl) { // Compute the context for allocating mangling numbers in the current // expression, if the ABI requires them. ManglingContextDecl = ExprEvalContexts.back().ManglingContextDecl; enum ContextKind { Normal, DefaultArgument, DataMember, StaticDataMember } Kind = Normal; // Default arguments of member function parameters that appear in a class // definition, as well as the initializers of data members, receive special // treatment. Identify them. if (ManglingContextDecl) { if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(ManglingContextDecl)) { if (const DeclContext *LexicalDC = Param->getDeclContext()->getLexicalParent()) if (LexicalDC->isRecord()) Kind = DefaultArgument; } else if (VarDecl *Var = dyn_cast<VarDecl>(ManglingContextDecl)) { if (Var->getDeclContext()->isRecord()) Kind = StaticDataMember; } else if (isa<FieldDecl>(ManglingContextDecl)) { Kind = DataMember; } } // Itanium ABI [5.1.7]: // In the following contexts [...] 
the one-definition rule requires closure // types in different translation units to "correspond": bool IsInNonspecializedTemplate = !ActiveTemplateInstantiations.empty() || CurContext->isDependentContext(); switch (Kind) { case Normal: // -- the bodies of non-exported nonspecialized template functions // -- the bodies of inline functions if ((IsInNonspecializedTemplate && !(ManglingContextDecl && isa<ParmVarDecl>(ManglingContextDecl))) || isInInlineFunction(CurContext)) { ManglingContextDecl = nullptr; return &Context.getManglingNumberContext(DC); } ManglingContextDecl = nullptr; return nullptr; case StaticDataMember: // -- the initializers of nonspecialized static members of template classes if (!IsInNonspecializedTemplate) { ManglingContextDecl = nullptr; return nullptr; } // Fall through to get the current context. LLVM_FALLTHROUGH; // HLSL Change case DataMember: // -- the in-class initializers of class members case DefaultArgument: // -- default arguments appearing in class definitions return &ExprEvalContexts.back().getMangleNumberingContext(Context); } llvm_unreachable("unexpected context"); } MangleNumberingContext & Sema::ExpressionEvaluationContextRecord::getMangleNumberingContext( ASTContext &Ctx) { assert(ManglingContextDecl && "Need to have a context declaration"); if (!MangleNumbering) MangleNumbering = Ctx.createMangleNumberingContext(); return *MangleNumbering; } CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodTypeInfo, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params) { llvm_unreachable("HLSL does not support lambdas"); } void Sema::buildLambdaScope(LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable) { llvm_unreachable("HLSL does not support lambdas"); } void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) { llvm_unreachable("HLSL does not support lambdas"); } void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) { llvm_unreachable("HLSL does not support lambdas"); } void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) { llvm_unreachable("HLSL does not support lambdas"); } QualType Sema::performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init) { llvm_unreachable("HLSL does not support lambdas"); } VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init) { llvm_unreachable("HLSL does not support lambdas"); } FieldDecl *Sema::buildInitCaptureField(LambdaScopeInfo *LSI, VarDecl *Var) { llvm_unreachable("HLSL does not support lambdas"); } void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope) { llvm_unreachable("HLSL does not support lambdas"); } void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation) { llvm_unreachable("HLSL does not support lambdas"); } ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope) { llvm_unreachable("HLSL does not support lambdas"); return ExprError(); } ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, LambdaScopeInfo *LSI) { llvm_unreachable("HLSL does not support lambdas"); } ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src) { 
llvm_unreachable("HLSL does not support lambdas"); } #else // HLSL Change Ends /// \brief Examines the FunctionScopeInfo stack to determine the nearest /// enclosing lambda (to the current lambda) that is 'capture-ready' for /// the variable referenced in the current lambda (i.e. \p VarToCapture). /// If successful, returns the index into Sema's FunctionScopeInfo stack /// of the capture-ready lambda's LambdaScopeInfo. /// /// Climbs down the stack of lambdas (deepest nested lambda - i.e. current /// lambda - is on top) to determine the index of the nearest enclosing/outer /// lambda that is ready to capture the \p VarToCapture being referenced in /// the current lambda. /// As we climb down the stack, we want the index of the first such lambda - /// that is the lambda with the highest index that is 'capture-ready'. /// /// A lambda 'L' is capture-ready for 'V' (var or this) if: /// - its enclosing context is non-dependent /// - and if the chain of lambdas between L and the lambda in which /// V is potentially used (i.e. the lambda at the top of the scope info /// stack), can all capture or have already captured V. /// If \p VarToCapture is 'null' then we are trying to capture 'this'. /// /// Note that a lambda that is deemed 'capture-ready' still needs to be checked /// for whether it is 'capture-capable' (see /// getStackIndexOfNearestEnclosingCaptureCapableLambda), before it can truly /// capture. /// /// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a /// LambdaScopeInfo inherits from). The current/deepest/innermost lambda /// is at the top of the stack and has the highest index. /// \param VarToCapture - the variable to capture. If NULL, capture 'this'. /// /// \returns An Optional<unsigned> Index that if evaluates to 'true' contains /// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda /// which is capture-ready. If the return value evaluates to 'false' then /// no lambda is capture-ready for \p VarToCapture. static inline Optional<unsigned> getStackIndexOfNearestEnclosingCaptureReadyLambda( ArrayRef<const clang::sema::FunctionScopeInfo *> FunctionScopes, VarDecl *VarToCapture) { // Label failure to capture. const Optional<unsigned> NoLambdaIsCaptureReady; assert( isa<clang::sema::LambdaScopeInfo>( FunctionScopes[FunctionScopes.size() - 1]) && "The function on the top of sema's function-info stack must be a lambda"); // If VarToCapture is null, we are attempting to capture 'this'. const bool IsCapturingThis = !VarToCapture; const bool IsCapturingVariable = !IsCapturingThis; // Start with the current lambda at the top of the stack (highest index). unsigned CurScopeIndex = FunctionScopes.size() - 1; DeclContext *EnclosingDC = cast<sema::LambdaScopeInfo>(FunctionScopes[CurScopeIndex])->CallOperator; do { const clang::sema::LambdaScopeInfo *LSI = cast<sema::LambdaScopeInfo>(FunctionScopes[CurScopeIndex]); // IF we have climbed down to an intervening enclosing lambda that contains // the variable declaration - it obviously can/must not capture the // variable. // Since its enclosing DC is dependent, all the lambdas between it and the // innermost nested lambda are dependent (otherwise we wouldn't have // arrived here) - so we don't yet have a lambda that can capture the // variable. if (IsCapturingVariable && VarToCapture->getDeclContext()->Equals(EnclosingDC)) return NoLambdaIsCaptureReady; // For an enclosing lambda to be capture ready for an entity, all // intervening lambda's have to be able to capture that entity. 
If even // one of the intervening lambda's is not capable of capturing the entity // then no enclosing lambda can ever capture that entity. // For e.g. // const int x = 10; // [=](auto a) { #1 // [](auto b) { #2 <-- an intervening lambda that can never capture 'x' // [=](auto c) { #3 // f(x, c); <-- can not lead to x's speculative capture by #1 or #2 // }; }; }; // If they do not have a default implicit capture, check to see // if the entity has already been explicitly captured. // If even a single dependent enclosing lambda lacks the capability // to ever capture this variable, there is no further enclosing // non-dependent lambda that can capture this variable. if (LSI->ImpCaptureStyle == sema::LambdaScopeInfo::ImpCap_None) { if (IsCapturingVariable && !LSI->isCaptured(VarToCapture)) return NoLambdaIsCaptureReady; if (IsCapturingThis && !LSI->isCXXThisCaptured()) return NoLambdaIsCaptureReady; } EnclosingDC = getLambdaAwareParentOfDeclContext(EnclosingDC); assert(CurScopeIndex); --CurScopeIndex; } while (!EnclosingDC->isTranslationUnit() && EnclosingDC->isDependentContext() && isLambdaCallOperator(EnclosingDC)); assert(CurScopeIndex < (FunctionScopes.size() - 1)); // If the enclosingDC is not dependent, then the immediately nested lambda // (one index above) is capture-ready. if (!EnclosingDC->isDependentContext()) return CurScopeIndex + 1; return NoLambdaIsCaptureReady; } /// \brief Examines the FunctionScopeInfo stack to determine the nearest /// enclosing lambda (to the current lambda) that is 'capture-capable' for /// the variable referenced in the current lambda (i.e. \p VarToCapture). /// If successful, returns the index into Sema's FunctionScopeInfo stack /// of the capture-capable lambda's LambdaScopeInfo. /// /// Given the current stack of lambdas being processed by Sema and /// the variable of interest, to identify the nearest enclosing lambda (to the /// current lambda at the top of the stack) that can truly capture /// a variable, it has to have the following two properties: /// a) 'capture-ready' - be the innermost lambda that is 'capture-ready': /// - climb down the stack (i.e. starting from the innermost and examining /// each outer lambda step by step) checking if each enclosing /// lambda can either implicitly or explicitly capture the variable. /// Record the first such lambda that is enclosed in a non-dependent /// context. If no such lambda currently exists return failure. /// b) 'capture-capable' - make sure the 'capture-ready' lambda can truly /// capture the variable by checking all its enclosing lambdas: /// - check if all outer lambdas enclosing the 'capture-ready' lambda /// identified above in 'a' can also capture the variable (this is done /// via tryCaptureVariable for variables and CheckCXXThisCapture for /// 'this' by passing in the index of the Lambda identified in step 'a') /// /// \param FunctionScopes - Sema's stack of nested FunctionScopeInfo's (which a /// LambdaScopeInfo inherits from). The current/deepest/innermost lambda /// is at the top of the stack. /// /// \param VarToCapture - the variable to capture. If NULL, capture 'this'. /// /// /// \returns An Optional<unsigned> Index that if evaluates to 'true' contains /// the index (into Sema's FunctionScopeInfo stack) of the innermost lambda /// which is capture-capable. If the return value evaluates to 'false' then /// no lambda is capture-capable for \p VarToCapture. 
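/// A hypothetical illustration of the stack walk:
///   int x = 0;
///   auto L1 = [=](auto a) {        // enclosing context is non-dependent
///     auto L2 = [&](auto b) {
///       return x + a + b;          // use of 'x' inside the innermost lambda
///     };
///     return L2(1);
///   };
/// While L2's body is being processed the stack is [L1, L2]; the search
/// walks outward from L2 and, since both lambdas can capture 'x', reports
/// L1 as the nearest capture-ready candidate, which is then verified to be
/// capture-capable.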
Optional<unsigned> clang::getStackIndexOfNearestEnclosingCaptureCapableLambda( ArrayRef<const sema::FunctionScopeInfo *> FunctionScopes, VarDecl *VarToCapture, Sema &S) { const Optional<unsigned> NoLambdaIsCaptureCapable; const Optional<unsigned> OptionalStackIndex = getStackIndexOfNearestEnclosingCaptureReadyLambda(FunctionScopes, VarToCapture); if (!OptionalStackIndex) return NoLambdaIsCaptureCapable; const unsigned IndexOfCaptureReadyLambda = OptionalStackIndex.getValue(); assert(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) || S.getCurGenericLambda()) && "The capture ready lambda for a potential capture can only be the " "current lambda if it is a generic lambda"); const sema::LambdaScopeInfo *const CaptureReadyLambdaLSI = cast<sema::LambdaScopeInfo>(FunctionScopes[IndexOfCaptureReadyLambda]); // If VarToCapture is null, we are attempting to capture 'this' const bool IsCapturingThis = !VarToCapture; const bool IsCapturingVariable = !IsCapturingThis; if (IsCapturingVariable) { // Check if the capture-ready lambda can truly capture the variable, by // checking whether all enclosing lambdas of the capture-ready lambda allow // the capture - i.e. make sure it is capture-capable. QualType CaptureType, DeclRefType; const bool CanCaptureVariable = !S.tryCaptureVariable(VarToCapture, /*ExprVarIsUsedInLoc*/ SourceLocation(), clang::Sema::TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/ false, CaptureType, DeclRefType, &IndexOfCaptureReadyLambda); if (!CanCaptureVariable) return NoLambdaIsCaptureCapable; } else { // Check if the capture-ready lambda can truly capture 'this' by checking // whether all enclosing lambdas of the capture-ready lambda can capture // 'this'. const bool CanCaptureThis = !S.CheckCXXThisCapture( CaptureReadyLambdaLSI->PotentialThisCaptureLocation, /*Explicit*/ false, /*BuildAndDiagnose*/ false, &IndexOfCaptureReadyLambda); if (!CanCaptureThis) return NoLambdaIsCaptureCapable; } return IndexOfCaptureReadyLambda; } static inline TemplateParameterList * getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) { if (LSI->GLTemplateParameterList) return LSI->GLTemplateParameterList; if (LSI->AutoTemplateParams.size()) { SourceRange IntroRange = LSI->IntroducerRange; SourceLocation LAngleLoc = IntroRange.getBegin(); SourceLocation RAngleLoc = IntroRange.getEnd(); LSI->GLTemplateParameterList = TemplateParameterList::Create( SemaRef.Context, /*Template kw loc*/ SourceLocation(), LAngleLoc, (NamedDecl **)LSI->AutoTemplateParams.data(), LSI->AutoTemplateParams.size(), RAngleLoc); } return LSI->GLTemplateParameterList; } CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault) { DeclContext *DC = CurContext; while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext())) DC = DC->getParent(); bool IsGenericLambda = getGenericLambdaTemplateParameterList(getCurLambda(), *this); // Start constructing the lambda class. CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, Info, IntroducerRange.getBegin(), KnownDependent, IsGenericLambda, CaptureDefault); DC->addDecl(Class); return Class; } /// \brief Determine whether the given context is or is enclosed in an inline /// function. 
static bool isInInlineFunction(const DeclContext *DC) { while (!DC->isFileContext()) { if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) if (FD->isInlined()) return true; DC = DC->getLexicalParent(); } return false; } MangleNumberingContext * Sema::getCurrentMangleNumberContext(const DeclContext *DC, Decl *&ManglingContextDecl) { // Compute the context for allocating mangling numbers in the current // expression, if the ABI requires them. ManglingContextDecl = ExprEvalContexts.back().ManglingContextDecl; enum ContextKind { Normal, DefaultArgument, DataMember, StaticDataMember } Kind = Normal; // Default arguments of member function parameters that appear in a class // definition, as well as the initializers of data members, receive special // treatment. Identify them. if (ManglingContextDecl) { if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(ManglingContextDecl)) { if (const DeclContext *LexicalDC = Param->getDeclContext()->getLexicalParent()) if (LexicalDC->isRecord()) Kind = DefaultArgument; } else if (VarDecl *Var = dyn_cast<VarDecl>(ManglingContextDecl)) { if (Var->getDeclContext()->isRecord()) Kind = StaticDataMember; } else if (isa<FieldDecl>(ManglingContextDecl)) { Kind = DataMember; } } // Itanium ABI [5.1.7]: // In the following contexts [...] the one-definition rule requires closure // types in different translation units to "correspond": bool IsInNonspecializedTemplate = !ActiveTemplateInstantiations.empty() || CurContext->isDependentContext(); switch (Kind) { case Normal: // -- the bodies of non-exported nonspecialized template functions // -- the bodies of inline functions if ((IsInNonspecializedTemplate && !(ManglingContextDecl && isa<ParmVarDecl>(ManglingContextDecl))) || isInInlineFunction(CurContext)) { ManglingContextDecl = nullptr; return &Context.getManglingNumberContext(DC); } ManglingContextDecl = nullptr; return nullptr; case StaticDataMember: // -- the initializers of nonspecialized static members of template classes if (!IsInNonspecializedTemplate) { ManglingContextDecl = nullptr; return nullptr; } // Fall through to get the current context. case DataMember: // -- the in-class initializers of class members case DefaultArgument: // -- default arguments appearing in class definitions return &ExprEvalContexts.back().getMangleNumberingContext(Context); } llvm_unreachable("unexpected context"); } MangleNumberingContext & Sema::ExpressionEvaluationContextRecord::getMangleNumberingContext( ASTContext &Ctx) { assert(ManglingContextDecl && "Need to have a context declaration"); if (!MangleNumbering) MangleNumbering = Ctx.createMangleNumberingContext(); return *MangleNumbering; } CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodTypeInfo, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params) { QualType MethodType = MethodTypeInfo->getType(); TemplateParameterList *TemplateParams = getGenericLambdaTemplateParameterList(getCurLambda(), *this); // If a lambda appears in a dependent context or is a generic lambda (has // template parameters) and has an 'auto' return type, deduce it to a // dependent type. 
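// For example (illustrative): in
//   auto id = [](auto v) { return v; };
// the call operator is a function template, so the 'auto' return type
// cannot be deduced yet; it is treated as dependent here and only becomes
// concrete at each instantiation, e.g. id(42) deduces int.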
if (Class->isDependentContext() || TemplateParams) { const FunctionProtoType *FPT = MethodType->castAs<FunctionProtoType>(); QualType Result = FPT->getReturnType(); if (Result->isUndeducedType()) { Result = SubstAutoType(Result, Context.DependentTy); MethodType = Context.getFunctionType(Result, FPT->getParamTypes(), FPT->getExtProtoInfo()); } } // C++11 [expr.prim.lambda]p5: // The closure type for a lambda-expression has a public inline function // call operator (13.5.4) whose parameters and return type are described by // the lambda-expression's parameter-declaration-clause and // trailing-return-type respectively. DeclarationName MethodName = Context.DeclarationNames.getCXXOperatorName(OO_Call); DeclarationNameLoc MethodNameLoc; MethodNameLoc.CXXOperatorName.BeginOpNameLoc = IntroducerRange.getBegin().getRawEncoding(); MethodNameLoc.CXXOperatorName.EndOpNameLoc = IntroducerRange.getEnd().getRawEncoding(); CXXMethodDecl *Method = CXXMethodDecl::Create(Context, Class, EndLoc, DeclarationNameInfo(MethodName, IntroducerRange.getBegin(), MethodNameLoc), MethodType, MethodTypeInfo, SC_None, /*isInline=*/true, /*isConstExpr=*/false, EndLoc); Method->setAccess(AS_public); // Temporarily set the lexical declaration context to the current // context, so that the Scope stack matches the lexical nesting. Method->setLexicalDeclContext(CurContext); // Create a function template if we have a template parameter list FunctionTemplateDecl *const TemplateMethod = TemplateParams ? FunctionTemplateDecl::Create(Context, Class, Method->getLocation(), MethodName, TemplateParams, Method) : nullptr; if (TemplateMethod) { TemplateMethod->setLexicalDeclContext(CurContext); TemplateMethod->setAccess(AS_public); Method->setDescribedFunctionTemplate(TemplateMethod); } // Add parameters. if (!Params.empty()) { Method->setParams(Params); CheckParmsForFunctionDef(const_cast<ParmVarDecl **>(Params.begin()), const_cast<ParmVarDecl **>(Params.end()), /*CheckParameterNames=*/false); for (auto P : Method->params()) P->setOwningFunction(Method); } Decl *ManglingContextDecl; if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(Class->getDeclContext(), ManglingContextDecl)) { unsigned ManglingNumber = MCtx->getManglingNumber(Method); Class->setLambdaMangling(ManglingNumber, ManglingContextDecl); } return Method; } void Sema::buildLambdaScope(LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable) { LSI->CallOperator = CallOperator; CXXRecordDecl *LambdaClass = CallOperator->getParent(); LSI->Lambda = LambdaClass; if (CaptureDefault == LCD_ByCopy) LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByval; else if (CaptureDefault == LCD_ByRef) LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByref; LSI->CaptureDefaultLoc = CaptureDefaultLoc; LSI->IntroducerRange = IntroducerRange; LSI->ExplicitParams = ExplicitParams; LSI->Mutable = Mutable; if (ExplicitResultType) { LSI->ReturnType = CallOperator->getReturnType(); if (!LSI->ReturnType->isDependentType() && !LSI->ReturnType->isVoidType()) { if (RequireCompleteType(CallOperator->getLocStart(), LSI->ReturnType, diag::err_lambda_incomplete_result)) { // Do nothing. 
} } } else { LSI->HasImplicitReturnType = true; } } void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) { LSI->finishedExplicitCaptures(); } void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) { // Introduce our parameters into the function scope for (unsigned p = 0, NumParams = CallOperator->getNumParams(); p < NumParams; ++p) { ParmVarDecl *Param = CallOperator->getParamDecl(p); // If this has an identifier, add it to the scope stack. if (CurScope && Param->getIdentifier()) { CheckShadow(CurScope, Param); PushOnScopeChains(Param, CurScope); } } } /// If this expression is an enumerator-like expression of some type /// T, return the type T; otherwise, return null. /// /// Pointer comparisons on the result here should always work because /// it's derived from either the parent of an EnumConstantDecl /// (i.e. the definition) or the declaration returned by /// EnumType::getDecl() (i.e. the definition). static EnumDecl *findEnumForBlockReturn(Expr *E) { // An expression is an enumerator-like expression of type T if, // ignoring parens and parens-like expressions: E = E->IgnoreParens(); // - it is an enumerator whose enum type is T or if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { if (EnumConstantDecl *D = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { return cast<EnumDecl>(D->getDeclContext()); } return nullptr; } // - it is a comma expression whose RHS is an enumerator-like // expression of type T or if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { if (BO->getOpcode() == BO_Comma) return findEnumForBlockReturn(BO->getRHS()); return nullptr; } // - it is a statement-expression whose value expression is an // enumerator-like expression of type T or if (StmtExpr *SE = dyn_cast<StmtExpr>(E)) { if (Expr *last = dyn_cast_or_null<Expr>(SE->getSubStmt()->body_back())) return findEnumForBlockReturn(last); return nullptr; } // - it is a ternary conditional operator (not the GNU ?: // extension) whose second and third operands are // enumerator-like expressions of type T or if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { if (EnumDecl *ED = findEnumForBlockReturn(CO->getTrueExpr())) if (ED == findEnumForBlockReturn(CO->getFalseExpr())) return ED; return nullptr; } // (implicitly:) // - it is an implicit integral conversion applied to an // enumerator-like expression of type T or if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { // We can sometimes see integral conversions in valid // enumerator-like expressions. if (ICE->getCastKind() == CK_IntegralCast) return findEnumForBlockReturn(ICE->getSubExpr()); // Otherwise, just rely on the type. } // - it is an expression of that formal enum type. if (const EnumType *ET = E->getType()->getAs<EnumType>()) { return ET->getDecl(); } // Otherwise, nope. return nullptr; } /// Attempt to find a type T for which the returned expression of the /// given statement is an enumerator-like expression of that type. static EnumDecl *findEnumForBlockReturn(ReturnStmt *ret) { if (Expr *retValue = ret->getRetValue()) return findEnumForBlockReturn(retValue); return nullptr; } /// Attempt to find a common type T for which all of the returned /// expressions in a block are enumerator-like expressions of that /// type. static EnumDecl *findCommonEnumForBlockReturns(ArrayRef<ReturnStmt*> returns) { ArrayRef<ReturnStmt*>::iterator i = returns.begin(), e = returns.end(); // Try to find one for the first return. 
EnumDecl *ED = findEnumForBlockReturn(*i); if (!ED) return nullptr; // Check that the rest of the returns have the same enum. for (++i; i != e; ++i) { if (findEnumForBlockReturn(*i) != ED) return nullptr; } // Never infer an anonymous enum type. if (!ED->hasNameForLinkage()) return nullptr; return ED; } /// Adjust the given return statements so that they formally return /// the given type. It should require, at most, an IntegralCast. static void adjustBlockReturnsToEnum(Sema &S, ArrayRef<ReturnStmt*> returns, QualType returnType) { for (ArrayRef<ReturnStmt*>::iterator i = returns.begin(), e = returns.end(); i != e; ++i) { ReturnStmt *ret = *i; Expr *retValue = ret->getRetValue(); if (S.Context.hasSameType(retValue->getType(), returnType)) continue; // Right now we only support integral fixup casts. assert(returnType->isIntegralOrUnscopedEnumerationType()); assert(retValue->getType()->isIntegralOrUnscopedEnumerationType()); ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(retValue); Expr *E = (cleanups ? cleanups->getSubExpr() : retValue); E = ImplicitCastExpr::Create(S.Context, returnType, CK_IntegralCast, E, /*base path*/ nullptr, VK_RValue); if (cleanups) { cleanups->setSubExpr(E); } else { ret->setRetValue(E); } } } void Sema::deduceClosureReturnType(CapturingScopeInfo &CSI) { assert(CSI.HasImplicitReturnType); // If it was ever a placeholder, it had to have been deduced to DependentTy. assert(CSI.ReturnType.isNull() || !CSI.ReturnType->isUndeducedType()); // C++ core issue 975: // If a lambda-expression does not include a trailing-return-type, // it is as if the trailing-return-type denotes the following type: // - if there are no return statements in the compound-statement, // or all return statements return either an expression of type // void or no expression or braced-init-list, the type void; // - otherwise, if all return statements return an expression // and the types of the returned expressions after // lvalue-to-rvalue conversion (4.1 [conv.lval]), // array-to-pointer conversion (4.2 [conv.array]), and // function-to-pointer conversion (4.3 [conv.func]) are the // same, that common type; // - otherwise, the program is ill-formed. // // C++ core issue 1048 additionally removes top-level cv-qualifiers // from the types of returned expressions to match the C++14 auto // deduction rules. // // In addition, in blocks in non-C++ modes, if all of the return // statements are enumerator-like expressions of some type T, where // T has a name for linkage, then we infer the return type of the // block to be that type. // First case: no return statements, implicit void return type. ASTContext &Ctx = getASTContext(); if (CSI.Returns.empty()) { // It's possible there were simply no /valid/ return statements. // In this case, the first one we found may have at least given us a type. if (CSI.ReturnType.isNull()) CSI.ReturnType = Ctx.VoidTy; return; } // Second case: at least one return statement has dependent type. // Delay type checking until instantiation. assert(!CSI.ReturnType.isNull() && "We should have a tentative return type."); if (CSI.ReturnType->isDependentType()) return; // Try to apply the enum-fuzz rule. if (!getLangOpts().CPlusPlus) { assert(isa<BlockScopeInfo>(CSI)); const EnumDecl *ED = findCommonEnumForBlockReturns(CSI.Returns); if (ED) { CSI.ReturnType = Context.getTypeDeclType(ED); adjustBlockReturnsToEnum(*this, CSI.Returns, CSI.ReturnType); return; } } // Third case: only one return statement. Don't bother doing extra work! 
SmallVectorImpl<ReturnStmt*>::iterator I = CSI.Returns.begin(), E = CSI.Returns.end(); if (I+1 == E) return; // General case: many return statements. // Check that they all have compatible return types. // We require the return types to strictly match here. // Note that we've already done the required promotions as part of // processing the return statement. for (; I != E; ++I) { const ReturnStmt *RS = *I; const Expr *RetE = RS->getRetValue(); QualType ReturnType = (RetE ? RetE->getType() : Context.VoidTy).getUnqualifiedType(); if (Context.hasSameType(ReturnType, CSI.ReturnType)) continue; // FIXME: This is a poor diagnostic for ReturnStmts without expressions. // TODO: It's possible that the *first* return is the divergent one. Diag(RS->getLocStart(), diag::err_typecheck_missing_return_type_incompatible) << ReturnType << CSI.ReturnType << isa<LambdaScopeInfo>(CSI); // Continue iterating so that we keep emitting diagnostics. } } QualType Sema::performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init) { // We do not need to distinguish between direct-list-initialization // and copy-list-initialization here, because we will always deduce // std::initializer_list<T>, and direct- and copy-list-initialization // always behave the same for such a type. // FIXME: We should model whether an '=' was present. const bool IsDirectInit = isa<ParenListExpr>(Init) || isa<InitListExpr>(Init); // Create an 'auto' or 'auto&' TypeSourceInfo that we can use to // deduce against. QualType DeductType = Context.getAutoDeductType(); TypeLocBuilder TLB; TLB.pushTypeSpec(DeductType).setNameLoc(Loc); if (ByRef) { DeductType = BuildReferenceType(DeductType, true, Loc, Id); assert(!DeductType.isNull() && "can't build reference to auto"); TLB.push<ReferenceTypeLoc>(DeductType).setSigilLoc(Loc); } TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, DeductType); // Are we a non-list direct initialization? ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init); Expr *DeduceInit = Init; // Initializer could be a C++ direct-initializer. Deduction only works if it // contains exactly one expression. if (CXXDirectInit) { if (CXXDirectInit->getNumExprs() == 0) { Diag(CXXDirectInit->getLocStart(), diag::err_init_capture_no_expression) << DeclarationName(Id) << TSI->getType() << Loc; return QualType(); } else if (CXXDirectInit->getNumExprs() > 1) { Diag(CXXDirectInit->getExpr(1)->getLocStart(), diag::err_init_capture_multiple_expressions) << DeclarationName(Id) << TSI->getType() << Loc; return QualType(); } else { DeduceInit = CXXDirectInit->getExpr(0); if (isa<InitListExpr>(DeduceInit)) Diag(CXXDirectInit->getLocStart(), diag::err_init_capture_paren_braces) << DeclarationName(Id) << Loc; } } // Now deduce against the initialization expression and store the deduced // type below. QualType DeducedType; if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) { if (isa<InitListExpr>(Init)) Diag(Loc, diag::err_init_capture_deduction_failure_from_init_list) << DeclarationName(Id) << (DeduceInit->getType().isNull() ? TSI->getType() : DeduceInit->getType()) << DeduceInit->getSourceRange(); else Diag(Loc, diag::err_init_capture_deduction_failure) << DeclarationName(Id) << TSI->getType() << (DeduceInit->getType().isNull() ? TSI->getType() : DeduceInit->getType()) << DeduceInit->getSourceRange(); } if (DeducedType.isNull()) return QualType(); // Perform initialization analysis and ensure any implicit conversions // (such as lvalue-to-rvalue) are enforced. 
InitializedEntity Entity = InitializedEntity::InitializeLambdaCapture(Id, DeducedType, Loc); InitializationKind Kind = IsDirectInit ? (CXXDirectInit ? InitializationKind::CreateDirect( Loc, Init->getLocStart(), Init->getLocEnd()) : InitializationKind::CreateDirectList(Loc)) : InitializationKind::CreateCopy(Loc, Init->getLocStart()); MultiExprArg Args = Init; if (CXXDirectInit) Args = MultiExprArg(CXXDirectInit->getExprs(), CXXDirectInit->getNumExprs()); QualType DclT; InitializationSequence InitSeq(*this, Entity, Kind, Args); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT); if (Result.isInvalid()) return QualType(); Init = Result.getAs<Expr>(); // The init-capture initialization is a full-expression that must be // processed as one before we enter the DeclContext of the lambda's // call-operator. Result = ActOnFinishFullExpr(Init, Loc, /*DiscardedValue*/ false, /*IsConstexpr*/ false, /*IsLambdaInitCaptureInitalizer*/ true); if (Result.isInvalid()) return QualType(); Init = Result.getAs<Expr>(); return DeducedType; } VarDecl *Sema::createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init) { TypeSourceInfo *TSI = Context.getTrivialTypeSourceInfo(InitCaptureType, Loc); // Create a dummy variable representing the init-capture. This is not actually // used as a variable, and only exists as a way to name and refer to the // init-capture. // FIXME: Pass in separate source locations for '&' and identifier. VarDecl *NewVD = VarDecl::Create(Context, CurContext, Loc, Loc, Id, InitCaptureType, TSI, SC_Auto); NewVD->setInitCapture(true); NewVD->setReferenced(true); NewVD->markUsed(Context); NewVD->setInit(Init); return NewVD; } FieldDecl *Sema::buildInitCaptureField(LambdaScopeInfo *LSI, VarDecl *Var) { FieldDecl *Field = FieldDecl::Create( Context, LSI->Lambda, Var->getLocation(), Var->getLocation(), nullptr, Var->getType(), Var->getTypeSourceInfo(), nullptr, false, ICIS_NoInit); Field->setImplicit(true); Field->setAccess(AS_private); LSI->Lambda->addDecl(Field); LSI->addCapture(Var, /*isBlock*/false, Var->getType()->isReferenceType(), /*isNested*/false, Var->getLocation(), SourceLocation(), Var->getType(), Var->getInit()); return Field; } void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope) { // Determine if we're within a context where we know that the lambda will // be dependent, because there are template parameters in scope. bool KnownDependent = false; LambdaScopeInfo *const LSI = getCurLambda(); assert(LSI && "LambdaScopeInfo should be on stack!"); TemplateParameterList *TemplateParams = getGenericLambdaTemplateParameterList(LSI, *this); if (Scope *TmplScope = CurScope->getTemplateParamParent()) { // Since we have our own TemplateParams, check whether an outer scope // also has template params; only then are we in a dependent scope. if (TemplateParams) { TmplScope = TmplScope->getParent(); TmplScope = TmplScope ? TmplScope->getTemplateParamParent() : nullptr; } if (TmplScope && !TmplScope->decl_empty()) KnownDependent = true; } // Determine the signature of the call operator. TypeSourceInfo *MethodTyInfo; bool ExplicitParams = true; bool ExplicitResultType = true; bool ContainsUnexpandedParameterPack = false; SourceLocation EndLoc; SmallVector<ParmVarDecl *, 8> Params; if (ParamInfo.getNumTypeObjects() == 0) { // C++11 [expr.prim.lambda]p4: // If a lambda-expression does not include a lambda-declarator, it is as // if the lambda-declarator were (). 
FunctionProtoType::ExtProtoInfo EPI(Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true)); EPI.HasTrailingReturn = true; EPI.TypeQuals |= DeclSpec::TQ_const; // C++1y [expr.prim.lambda]: // The lambda return type is 'auto', which is replaced by the // trailing-return type if provided and/or deduced from 'return' // statements // We don't do this before C++1y, because we don't support deduced return // types there. QualType DefaultTypeForNoTrailingReturn = getLangOpts().CPlusPlus14 ? Context.getAutoDeductType() : Context.DependentTy; QualType MethodTy = Context.getFunctionType(DefaultTypeForNoTrailingReturn, None, EPI); MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy); ExplicitParams = false; ExplicitResultType = false; EndLoc = Intro.Range.getEnd(); } else { assert(ParamInfo.isFunctionDeclarator() && "lambda-declarator is a function"); DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo(); // C++11 [expr.prim.lambda]p5: // This function call operator is declared const (9.3.1) if and only if // the lambda-expression's parameter-declaration-clause is not followed // by mutable. It is neither virtual nor declared volatile. [...] if (!FTI.hasMutableQualifier()) FTI.TypeQuals |= DeclSpec::TQ_const; MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope); assert(MethodTyInfo && "no type from lambda-declarator"); EndLoc = ParamInfo.getSourceRange().getEnd(); ExplicitResultType = FTI.hasTrailingReturnType(); if (FTIHasNonVoidParameters(FTI)) { Params.reserve(FTI.NumParams); for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) Params.push_back(cast<ParmVarDecl>(FTI.Params[i].Param)); } // Check for unexpanded parameter packs in the method type. if (MethodTyInfo->getType()->containsUnexpandedParameterPack()) ContainsUnexpandedParameterPack = true; } CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo, KnownDependent, Intro.Default); CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params); if (ExplicitParams) CheckCXXDefaultArguments(Method); // Attributes on the lambda apply to the method. ProcessDeclAttributes(CurScope, Method, ParamInfo); // Introduce the function call operator as the current declaration context. PushDeclContext(CurScope, Method); // Build the lambda scope. buildLambdaScope(LSI, Method, Intro.Range, Intro.Default, Intro.DefaultLoc, ExplicitParams, ExplicitResultType, !Method->isConst()); // C++11 [expr.prim.lambda]p9: // A lambda-expression whose smallest enclosing scope is a block scope is a // local lambda expression; any other lambda expression shall not have a // capture-default or simple-capture in its lambda-introducer. // // For simple-captures, this is covered by the check below that any named // entity is a variable that can be captured. // // For DR1632, we also allow a capture-default in any context where we can // odr-use 'this' (in particular, in a default initializer for a non-static // data member). if (Intro.Default != LCD_None && !Class->getParent()->isFunctionOrMethod() && (getCurrentThisType().isNull() || CheckCXXThisCapture(SourceLocation(), /*Explicit*/true, /*BuildAndDiagnose*/false))) Diag(Intro.DefaultLoc, diag::err_capture_default_non_local); // Distinct capture names, for diagnostics. llvm::SmallSet<IdentifierInfo*, 8> CaptureNames; // Handle explicit captures. SourceLocation PrevCaptureLoc = Intro.Default == LCD_None? 
Intro.Range.getBegin() : Intro.DefaultLoc; for (auto C = Intro.Captures.begin(), E = Intro.Captures.end(); C != E; PrevCaptureLoc = C->Loc, ++C) { if (C->Kind == LCK_This) { // C++11 [expr.prim.lambda]p8: // An identifier or this shall not appear more than once in a // lambda-capture. if (LSI->isCXXThisCaptured()) { Diag(C->Loc, diag::err_capture_more_than_once) << "'this'" << SourceRange(LSI->getCXXThisCapture().getLocation()) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); continue; } // C++11 [expr.prim.lambda]p8: // If a lambda-capture includes a capture-default that is =, the // lambda-capture shall not contain this [...]. if (Intro.Default == LCD_ByCopy) { Diag(C->Loc, diag::err_this_capture_with_copy_default) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); continue; } // C++11 [expr.prim.lambda]p12: // If this is captured by a local lambda expression, its nearest // enclosing function shall be a non-static member function. QualType ThisCaptureType = getCurrentThisType(); if (ThisCaptureType.isNull()) { Diag(C->Loc, diag::err_this_capture) << true; continue; } CheckCXXThisCapture(C->Loc, /*Explicit=*/true); continue; } assert(C->Id && "missing identifier for capture"); if (C->Init.isInvalid()) continue; VarDecl *Var = nullptr; if (C->Init.isUsable()) { Diag(C->Loc, getLangOpts().CPlusPlus14 ? diag::warn_cxx11_compat_init_capture : diag::ext_init_capture); if (C->Init.get()->containsUnexpandedParameterPack()) ContainsUnexpandedParameterPack = true; // If the initializer expression is usable, but the InitCaptureType // is not, then an error has occurred - so ignore the capture for now. // for e.g., [n{0}] { }; <-- if no <initializer_list> is included. // FIXME: we should create the init capture variable and mark it invalid // in this case. if (C->InitCaptureType.get().isNull()) continue; Var = createLambdaInitCaptureVarDecl(C->Loc, C->InitCaptureType.get(), C->Id, C->Init.get()); // C++1y [expr.prim.lambda]p11: // An init-capture behaves as if it declares and explicitly // captures a variable [...] whose declarative region is the // lambda-expression's compound-statement if (Var) PushOnScopeChains(Var, CurScope, false); } else { // C++11 [expr.prim.lambda]p8: // If a lambda-capture includes a capture-default that is &, the // identifiers in the lambda-capture shall not be preceded by &. // If a lambda-capture includes a capture-default that is =, [...] // each identifier it contains shall be preceded by &. if (C->Kind == LCK_ByRef && Intro.Default == LCD_ByRef) { Diag(C->Loc, diag::err_reference_capture_with_reference_default) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); continue; } else if (C->Kind == LCK_ByCopy && Intro.Default == LCD_ByCopy) { Diag(C->Loc, diag::err_copy_capture_with_copy_default) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); continue; } // C++11 [expr.prim.lambda]p10: // The identifiers in a capture-list are looked up using the usual // rules for unqualified name lookup (3.4.1) DeclarationNameInfo Name(C->Id, C->Loc); LookupResult R(*this, Name, LookupOrdinaryName); LookupName(R, CurScope); if (R.isAmbiguous()) continue; if (R.empty()) { // FIXME: Disable corrections that would add qualification? 
CXXScopeSpec ScopeSpec; if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R, llvm::make_unique<DeclFilterCCC<VarDecl>>())) continue; } Var = R.getAsSingle<VarDecl>(); if (Var && DiagnoseUseOfDecl(Var, C->Loc)) continue; } // C++11 [expr.prim.lambda]p8: // An identifier or this shall not appear more than once in a // lambda-capture. if (!CaptureNames.insert(C->Id).second) { if (Var && LSI->isCaptured(Var)) { Diag(C->Loc, diag::err_capture_more_than_once) << C->Id << SourceRange(LSI->getCapture(Var).getLocation()) << FixItHint::CreateRemoval( SourceRange(getLocForEndOfToken(PrevCaptureLoc), C->Loc)); } else // Previous capture captured something different (one or both was // an init-capture): no fixit. Diag(C->Loc, diag::err_capture_more_than_once) << C->Id; continue; } // C++11 [expr.prim.lambda]p10: // [...] each such lookup shall find a variable with automatic storage // duration declared in the reaching scope of the local lambda expression. // Note that the 'reaching scope' check happens in tryCaptureVariable(). if (!Var) { Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id; continue; } // Ignore invalid decls; they'll just confuse the code later. if (Var->isInvalidDecl()) continue; if (!Var->hasLocalStorage()) { Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id; Diag(Var->getLocation(), diag::note_previous_decl) << C->Id; continue; } // C++11 [expr.prim.lambda]p23: // A capture followed by an ellipsis is a pack expansion (14.5.3). SourceLocation EllipsisLoc; if (C->EllipsisLoc.isValid()) { if (Var->isParameterPack()) { EllipsisLoc = C->EllipsisLoc; } else { Diag(C->EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << SourceRange(C->Loc); // Just ignore the ellipsis. } } else if (Var->isParameterPack()) { ContainsUnexpandedParameterPack = true; } if (C->Init.isUsable()) { buildInitCaptureField(LSI, Var); } else { TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef : TryCapture_ExplicitByVal; tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc); } } finishLambdaExplicitCaptures(LSI); LSI->ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack; // Add lambda parameters into scope. addLambdaParameters(Method, CurScope); // Enter a new evaluation context to insulate the lambda from any // cleanups from the enclosing full-expression. PushExpressionEvaluationContext(PotentiallyEvaluated); } void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation) { LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(FunctionScopes.back()); // Leave the expression-evaluation context. DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); // Leave the context of the lambda. if (!IsInstantiation) PopDeclContext(); // Finalize the lambda. CXXRecordDecl *Class = LSI->Lambda; Class->setInvalidDecl(); SmallVector<Decl*, 4> Fields(Class->fields()); ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(), SourceLocation(), nullptr); CheckCompletedCXXClass(Class); PopFunctionScopeInfo(); } /// \brief Add a lambda's conversion to function pointer, as described in /// C++11 [expr.prim.lambda]p6. static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange, CXXRecordDecl *Class, CXXMethodDecl *CallOperator) { // Add the conversion to function pointer. 
const FunctionProtoType *CallOpProto = CallOperator->getType()->getAs<FunctionProtoType>(); const FunctionProtoType::ExtProtoInfo CallOpExtInfo = CallOpProto->getExtProtoInfo(); QualType PtrToFunctionTy; QualType InvokerFunctionTy; { FunctionProtoType::ExtProtoInfo InvokerExtInfo = CallOpExtInfo; CallingConv CC = S.Context.getDefaultCallingConvention( CallOpProto->isVariadic(), /*IsCXXMethod=*/false); InvokerExtInfo.ExtInfo = InvokerExtInfo.ExtInfo.withCallingConv(CC); InvokerExtInfo.TypeQuals = 0; assert(InvokerExtInfo.RefQualifier == RQ_None && "Lambda's call operator should not have a reference qualifier"); InvokerFunctionTy = S.Context.getFunctionType(CallOpProto->getReturnType(), CallOpProto->getParamTypes(), InvokerExtInfo); PtrToFunctionTy = S.Context.getPointerType(InvokerFunctionTy); } // Create the type of the conversion function. FunctionProtoType::ExtProtoInfo ConvExtInfo( S.Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true)); // The conversion function is always const. ConvExtInfo.TypeQuals = Qualifiers::Const; QualType ConvTy = S.Context.getFunctionType(PtrToFunctionTy, None, ConvExtInfo); SourceLocation Loc = IntroducerRange.getBegin(); DeclarationName ConversionName = S.Context.DeclarationNames.getCXXConversionFunctionName( S.Context.getCanonicalType(PtrToFunctionTy)); DeclarationNameLoc ConvNameLoc; // Construct a TypeSourceInfo for the conversion function, and wire // all the parameters appropriately for the FunctionProtoTypeLoc // so that everything works during transformation/instantiation of // generic lambdas. // The main reason for wiring up the parameters of the conversion // function with that of the call operator is so that constructs // like the following work: // auto L = [](auto b) { <-- 1 // return [](auto a) -> decltype(a) { <-- 2 // return a; // }; // }; // int (*fp)(int) = L(5); // Because the trailing return type can contain DeclRefExprs that refer // to the original call operator's variables, we hijack the call // operator's ParmVarDecls below. TypeSourceInfo *ConvNamePtrToFunctionTSI = S.Context.getTrivialTypeSourceInfo(PtrToFunctionTy, Loc); ConvNameLoc.NamedType.TInfo = ConvNamePtrToFunctionTSI; // The conversion function is a conversion to a pointer-to-function. TypeSourceInfo *ConvTSI = S.Context.getTrivialTypeSourceInfo(ConvTy, Loc); FunctionProtoTypeLoc ConvTL = ConvTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>(); // Get the result of the conversion function which is a pointer-to-function. PointerTypeLoc PtrToFunctionTL = ConvTL.getReturnLoc().getAs<PointerTypeLoc>(); // Do the same for the TypeSourceInfo that is used to name the conversion // operator. PointerTypeLoc ConvNamePtrToFunctionTL = ConvNamePtrToFunctionTSI->getTypeLoc().getAs<PointerTypeLoc>(); // Get the underlying function types that the conversion function will // be converting to (should match the type of the call operator). FunctionProtoTypeLoc CallOpConvTL = PtrToFunctionTL.getPointeeLoc().getAs<FunctionProtoTypeLoc>(); FunctionProtoTypeLoc CallOpConvNameTL = ConvNamePtrToFunctionTL.getPointeeLoc().getAs<FunctionProtoTypeLoc>(); // Wire up the FunctionProtoTypeLocs with the call operator's parameters. // These parameters are essentially used to transform the name and // the type of the conversion operator. By using the same parameters // as the call operator's, we don't have to fix any back references that // the call operator's trailing return type uses (such as // decltype(some_type<decltype(a)>::type{} + decltype(a){}) etc.) 
// - we can simply use the return type of the call operator, and // everything should work. SmallVector<ParmVarDecl *, 4> InvokerParams; for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) { ParmVarDecl *From = CallOperator->getParamDecl(I); InvokerParams.push_back(ParmVarDecl::Create(S.Context, // Temporarily add to the TU. This is set to the invoker below. S.Context.getTranslationUnitDecl(), From->getLocStart(), From->getLocation(), From->getIdentifier(), From->getType(), From->getTypeSourceInfo(), From->getStorageClass(), /*DefaultArg=*/nullptr)); CallOpConvTL.setParam(I, From); CallOpConvNameTL.setParam(I, From); } CXXConversionDecl *Conversion = CXXConversionDecl::Create(S.Context, Class, Loc, DeclarationNameInfo(ConversionName, Loc, ConvNameLoc), ConvTy, ConvTSI, /*isInline=*/true, /*isExplicit=*/false, /*isConstexpr=*/false, CallOperator->getBody()->getLocEnd()); Conversion->setAccess(AS_public); Conversion->setImplicit(true); if (Class->isGenericLambda()) { // Create a template version of the conversion operator, using the template // parameter list of the function call operator. FunctionTemplateDecl *TemplateCallOperator = CallOperator->getDescribedFunctionTemplate(); FunctionTemplateDecl *ConversionTemplate = FunctionTemplateDecl::Create(S.Context, Class, Loc, ConversionName, TemplateCallOperator->getTemplateParameters(), Conversion); ConversionTemplate->setAccess(AS_public); ConversionTemplate->setImplicit(true); Conversion->setDescribedFunctionTemplate(ConversionTemplate); Class->addDecl(ConversionTemplate); } else Class->addDecl(Conversion); // Add a non-static member function that will be the result of // the conversion with a certain unique ID. DeclarationName InvokerName = &S.Context.Idents.get( getLambdaStaticInvokerName()); // FIXME: Instead of passing in the CallOperator->getTypeSourceInfo() // we should get a prebuilt TrivialTypeSourceInfo from Context // using FunctionTy & Loc and get its TypeLoc as a FunctionProtoTypeLoc // then rewire the parameters accordingly, by hoisting up the InvokeParams // loop below and then use its Params to set Invoke->setParams(...) below. // This would avoid the 'const' qualifier of the call operator from // contaminating the type of the invoker, which is currently adjusted // in SemaTemplateDeduction.cpp:DeduceTemplateArguments. Fixing the // trailing return type of the invoker would require a visitor to rebuild // the trailing return type and adjusting all back DeclRefExpr's to refer // to the new static invoker parameters - not the call operator's. 
CXXMethodDecl *Invoke = CXXMethodDecl::Create(S.Context, Class, Loc, DeclarationNameInfo(InvokerName, Loc), InvokerFunctionTy, CallOperator->getTypeSourceInfo(), SC_Static, /*IsInline=*/true, /*IsConstexpr=*/false, CallOperator->getBody()->getLocEnd()); for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) InvokerParams[I]->setOwningFunction(Invoke); Invoke->setParams(InvokerParams); Invoke->setAccess(AS_private); Invoke->setImplicit(true); if (Class->isGenericLambda()) { FunctionTemplateDecl *TemplateCallOperator = CallOperator->getDescribedFunctionTemplate(); FunctionTemplateDecl *StaticInvokerTemplate = FunctionTemplateDecl::Create( S.Context, Class, Loc, InvokerName, TemplateCallOperator->getTemplateParameters(), Invoke); StaticInvokerTemplate->setAccess(AS_private); StaticInvokerTemplate->setImplicit(true); Invoke->setDescribedFunctionTemplate(StaticInvokerTemplate); Class->addDecl(StaticInvokerTemplate); } else Class->addDecl(Invoke); } /// \brief Add a lambda's conversion to block pointer. static void addBlockPointerConversion(Sema &S, SourceRange IntroducerRange, CXXRecordDecl *Class, CXXMethodDecl *CallOperator) { const FunctionProtoType *Proto = CallOperator->getType()->getAs<FunctionProtoType>(); // The function type inside the block pointer type is the same as the call // operator with some tweaks. The calling convention is the default free // function convention, and the type qualifications are lost. FunctionProtoType::ExtProtoInfo BlockEPI = Proto->getExtProtoInfo(); BlockEPI.ExtInfo = BlockEPI.ExtInfo.withCallingConv(S.Context.getDefaultCallingConvention( Proto->isVariadic(), /*IsCXXMethod=*/false)); BlockEPI.TypeQuals = 0; QualType FunctionTy = S.Context.getFunctionType( Proto->getReturnType(), Proto->getParamTypes(), BlockEPI); QualType BlockPtrTy = S.Context.getBlockPointerType(FunctionTy); FunctionProtoType::ExtProtoInfo ConversionEPI( S.Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true)); ConversionEPI.TypeQuals = Qualifiers::Const; QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, None, ConversionEPI); SourceLocation Loc = IntroducerRange.getBegin(); DeclarationName Name = S.Context.DeclarationNames.getCXXConversionFunctionName( S.Context.getCanonicalType(BlockPtrTy)); DeclarationNameLoc NameLoc; NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc); CXXConversionDecl *Conversion = CXXConversionDecl::Create(S.Context, Class, Loc, DeclarationNameInfo(Name, Loc, NameLoc), ConvTy, S.Context.getTrivialTypeSourceInfo(ConvTy, Loc), /*isInline=*/true, /*isExplicit=*/false, /*isConstexpr=*/false, CallOperator->getBody()->getLocEnd()); Conversion->setAccess(AS_public); Conversion->setImplicit(true); Class->addDecl(Conversion); } static ExprResult performLambdaVarCaptureInitialization( Sema &S, LambdaScopeInfo::Capture &Capture, FieldDecl *Field, SmallVectorImpl<VarDecl *> &ArrayIndexVars, SmallVectorImpl<unsigned> &ArrayIndexStarts) { assert(Capture.isVariableCapture() && "not a variable capture"); auto *Var = Capture.getVariable(); SourceLocation Loc = Capture.getLocation(); // C++11 [expr.prim.lambda]p21: // When the lambda-expression is evaluated, the entities that // are captured by copy are used to direct-initialize each // corresponding non-static data member of the resulting closure // object. (For array members, the array elements are // direct-initialized in increasing subscript order.) 
These // initializations are performed in the (unspecified) order in // which the non-static data members are declared. // C++ [expr.prim.lambda]p12: // An entity captured by a lambda-expression is odr-used (3.2) in // the scope containing the lambda-expression. ExprResult RefResult = S.BuildDeclarationNameExpr( CXXScopeSpec(), DeclarationNameInfo(Var->getDeclName(), Loc), Var); if (RefResult.isInvalid()) return ExprError(); Expr *Ref = RefResult.get(); QualType FieldType = Field->getType(); // When the variable has array type, create index variables for each // dimension of the array. We use these index variables to subscript // the source array, and other clients (e.g., CodeGen) will perform // the necessary iteration with these index variables. // // FIXME: This is dumb. Add a proper AST representation for array // copy-construction and use it here. SmallVector<VarDecl *, 4> IndexVariables; QualType BaseType = FieldType; QualType SizeType = S.Context.getSizeType(); ArrayIndexStarts.push_back(ArrayIndexVars.size()); while (const ConstantArrayType *Array = S.Context.getAsConstantArrayType(BaseType)) { // Create the iteration variable for this array index. IdentifierInfo *IterationVarName = nullptr; { SmallString<8> Str; llvm::raw_svector_ostream OS(Str); OS << "__i" << IndexVariables.size(); IterationVarName = &S.Context.Idents.get(OS.str()); } VarDecl *IterationVar = VarDecl::Create( S.Context, S.CurContext, Loc, Loc, IterationVarName, SizeType, S.Context.getTrivialTypeSourceInfo(SizeType, Loc), SC_None); IterationVar->setImplicit(); IndexVariables.push_back(IterationVar); ArrayIndexVars.push_back(IterationVar); // Create a reference to the iteration variable. ExprResult IterationVarRef = S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc); assert(!IterationVarRef.isInvalid() && "Reference to invented variable cannot fail!"); IterationVarRef = S.DefaultLvalueConversion(IterationVarRef.get()); assert(!IterationVarRef.isInvalid() && "Conversion of invented variable cannot fail!"); // Subscript the array with this iteration variable. ExprResult Subscript = S.CreateBuiltinArraySubscriptExpr(Ref, Loc, IterationVarRef.get(), Loc); if (Subscript.isInvalid()) return ExprError(); Ref = Subscript.get(); BaseType = Array->getElementType(); } // Construct the entity that we will be initializing. For an array, this // will be first element in the array, which may require several levels // of array-subscript entities. 
SmallVector<InitializedEntity, 4> Entities; Entities.reserve(1 + IndexVariables.size()); Entities.push_back(InitializedEntity::InitializeLambdaCapture( Var->getIdentifier(), FieldType, Loc)); for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I) Entities.push_back( InitializedEntity::InitializeElement(S.Context, 0, Entities.back())); InitializationKind InitKind = InitializationKind::CreateDirect(Loc, Loc, Loc); InitializationSequence Init(S, Entities.back(), InitKind, Ref); return Init.Perform(S, Entities.back(), InitKind, Ref); } ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope) { LambdaScopeInfo LSI = *cast<LambdaScopeInfo>(FunctionScopes.back()); ActOnFinishFunctionBody(LSI.CallOperator, Body); return BuildLambdaExpr(StartLoc, Body->getLocEnd(), &LSI); } static LambdaCaptureDefault mapImplicitCaptureStyle(CapturingScopeInfo::ImplicitCaptureStyle ICS) { switch (ICS) { case CapturingScopeInfo::ImpCap_None: return LCD_None; case CapturingScopeInfo::ImpCap_LambdaByval: return LCD_ByCopy; case CapturingScopeInfo::ImpCap_CapturedRegion: case CapturingScopeInfo::ImpCap_LambdaByref: return LCD_ByRef; case CapturingScopeInfo::ImpCap_Block: llvm_unreachable("block capture in lambda"); } llvm_unreachable("Unknown implicit capture style"); } ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, LambdaScopeInfo *LSI) { // Collect information from the lambda scope. SmallVector<LambdaCapture, 4> Captures; SmallVector<Expr *, 4> CaptureInits; SourceLocation CaptureDefaultLoc = LSI->CaptureDefaultLoc; LambdaCaptureDefault CaptureDefault = mapImplicitCaptureStyle(LSI->ImpCaptureStyle); CXXRecordDecl *Class; CXXMethodDecl *CallOperator; SourceRange IntroducerRange; bool ExplicitParams; bool ExplicitResultType; bool LambdaExprNeedsCleanups; bool ContainsUnexpandedParameterPack; SmallVector<VarDecl *, 4> ArrayIndexVars; SmallVector<unsigned, 4> ArrayIndexStarts; { CallOperator = LSI->CallOperator; Class = LSI->Lambda; IntroducerRange = LSI->IntroducerRange; ExplicitParams = LSI->ExplicitParams; ExplicitResultType = !LSI->HasImplicitReturnType; LambdaExprNeedsCleanups = LSI->ExprNeedsCleanups; ContainsUnexpandedParameterPack = LSI->ContainsUnexpandedParameterPack; CallOperator->setLexicalDeclContext(Class); Decl *TemplateOrNonTemplateCallOperatorDecl = CallOperator->getDescribedFunctionTemplate() ? CallOperator->getDescribedFunctionTemplate() : cast<Decl>(CallOperator); TemplateOrNonTemplateCallOperatorDecl->setLexicalDeclContext(Class); Class->addDecl(TemplateOrNonTemplateCallOperatorDecl); PopExpressionEvaluationContext(); // Translate captures. auto CurField = Class->field_begin(); for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I, ++CurField) { LambdaScopeInfo::Capture From = LSI->Captures[I]; assert(!From.isBlockCapture() && "Cannot capture __block variables"); bool IsImplicit = I >= LSI->NumExplicitCaptures; // Handle 'this' capture. if (From.isThisCapture()) { Captures.push_back( LambdaCapture(From.getLocation(), IsImplicit, LCK_This)); CaptureInits.push_back(new (Context) CXXThisExpr(From.getLocation(), getCurrentThisType(), /*isImplicit=*/true)); ArrayIndexStarts.push_back(ArrayIndexVars.size()); continue; } if (From.isVLATypeCapture()) { Captures.push_back( LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType)); CaptureInits.push_back(nullptr); ArrayIndexStarts.push_back(ArrayIndexVars.size()); continue; } VarDecl *Var = From.getVariable(); LambdaCaptureKind Kind = From.isCopyCapture() ? 
LCK_ByCopy : LCK_ByRef; Captures.push_back(LambdaCapture(From.getLocation(), IsImplicit, Kind, Var, From.getEllipsisLoc())); Expr *Init = From.getInitExpr(); if (!Init) { auto InitResult = performLambdaVarCaptureInitialization( *this, From, *CurField, ArrayIndexVars, ArrayIndexStarts); if (InitResult.isInvalid()) return ExprError(); Init = InitResult.get(); } else { ArrayIndexStarts.push_back(ArrayIndexVars.size()); } CaptureInits.push_back(Init); } // C++11 [expr.prim.lambda]p6: // The closure type for a lambda-expression with no lambda-capture // has a public non-virtual non-explicit const conversion function // to pointer to function having the same parameter and return // types as the closure type's function call operator. if (Captures.empty() && CaptureDefault == LCD_None) addFunctionPointerConversion(*this, IntroducerRange, Class, CallOperator); // Objective-C++: // The closure type for a lambda-expression has a public non-virtual // non-explicit const conversion function to a block pointer having the // same parameter and return types as the closure type's function call // operator. // FIXME: Fix generic lambda to block conversions. if (getLangOpts().Blocks && getLangOpts().ObjC1 && !Class->isGenericLambda()) addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator); // Finalize the lambda class. SmallVector<Decl*, 4> Fields(Class->fields()); ActOnFields(nullptr, Class->getLocation(), Class, Fields, SourceLocation(), SourceLocation(), nullptr); CheckCompletedCXXClass(Class); } if (LambdaExprNeedsCleanups) ExprNeedsCleanups = true; LambdaExpr *Lambda = LambdaExpr::Create(Context, Class, IntroducerRange, CaptureDefault, CaptureDefaultLoc, Captures, ExplicitParams, ExplicitResultType, CaptureInits, ArrayIndexVars, ArrayIndexStarts, EndLoc, ContainsUnexpandedParameterPack); if (!CurContext->isDependentContext()) { switch (ExprEvalContexts.back().Context) { // C++11 [expr.prim.lambda]p2: // A lambda-expression shall not appear in an unevaluated operand // (Clause 5). case Unevaluated: case UnevaluatedAbstract: // C++1y [expr.const]p2: // A conditional-expression e is a core constant expression unless the // evaluation of e, following the rules of the abstract machine, would // evaluate [...] a lambda-expression. // // This is technically incorrect, there are some constant evaluated contexts // where this should be allowed. We should probably fix this when DR1607 is // ratified, it lays out the exact set of conditions where we shouldn't // allow a lambda-expression. case ConstantEvaluated: // We don't actually diagnose this case immediately, because we // could be within a context where we might find out later that // the expression is potentially evaluated (e.g., for typeid). ExprEvalContexts.back().Lambdas.push_back(Lambda); break; case PotentiallyEvaluated: case PotentiallyEvaluatedIfUsed: break; } } return MaybeBindToTemporary(Lambda); } ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src) { // Make sure that the lambda call operator is marked used. 
CXXRecordDecl *Lambda = Conv->getParent(); CXXMethodDecl *CallOperator = cast<CXXMethodDecl>( Lambda->lookup( Context.DeclarationNames.getCXXOperatorName(OO_Call)).front()); CallOperator->setReferenced(); CallOperator->markUsed(Context); ExprResult Init = PerformCopyInitialization( InitializedEntity::InitializeBlock(ConvLocation, Src->getType(), /*NRVO=*/false), CurrentLocation, Src); if (!Init.isInvalid()) Init = ActOnFinishFullExpr(Init.get()); if (Init.isInvalid()) return ExprError(); // Create the new block to be returned. BlockDecl *Block = BlockDecl::Create(Context, CurContext, ConvLocation); // Set the type information. Block->setSignatureAsWritten(CallOperator->getTypeSourceInfo()); Block->setIsVariadic(CallOperator->isVariadic()); Block->setBlockMissingReturnType(false); // Add parameters. SmallVector<ParmVarDecl *, 4> BlockParams; for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) { ParmVarDecl *From = CallOperator->getParamDecl(I); BlockParams.push_back(ParmVarDecl::Create(Context, Block, From->getLocStart(), From->getLocation(), From->getIdentifier(), From->getType(), From->getTypeSourceInfo(), From->getStorageClass(), /*DefaultArg=*/nullptr)); } Block->setParams(BlockParams); Block->setIsConversionFromLambda(true); // Add capture. The capture uses a fake variable, which doesn't correspond // to any actual memory location. However, the initializer copy-initializes // the lambda object. TypeSourceInfo *CapVarTSI = Context.getTrivialTypeSourceInfo(Src->getType()); VarDecl *CapVar = VarDecl::Create(Context, Block, ConvLocation, ConvLocation, nullptr, Src->getType(), CapVarTSI, SC_None); BlockDecl::Capture Capture(/*Variable=*/CapVar, /*ByRef=*/false, /*Nested=*/false, /*Copy=*/Init.get()); Block->setCaptures(Context, &Capture, &Capture + 1, /*CapturesCXXThis=*/false); // Add a fake function body to the block. IR generation is responsible // for filling in the actual body, which cannot be expressed as an AST. Block->setBody(new (Context) CompoundStmt(ConvLocation)); // Create the block literal expression. Expr *BuildBlock = new (Context) BlockExpr(Block, Conv->getConversionType()); ExprCleanupObjects.push_back(Block); ExprNeedsCleanups = true; return BuildBlock; } #endif // HLSL Change
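// --- Editorial note (not part of the original file): a minimal sketch of the
// source-level behavior the functions above implement. addFunctionPointerConversion
// is what lets a capture-less closure type convert to a plain function pointer, and
// performLambdaInitCaptureInitialization is what deduces an init-capture's type as
// if by 'auto'. Self-contained C++14:
namespace lambda_semantics_sketch {
inline int demo() {
  auto L = [](int x) { return x + 1; };   // no lambda-capture, no capture-default
  int (*fp)(int) = L;                     // OK: uses the synthesized conversion operator
  int n = 41;
  auto M = [m = n + 1] { return m; };     // init-capture: 'm' deduced to int
  return fp(n) + M();                     // 42 + 42 == 84
}
} // namespace lambda_semantics_sketch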
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaStmtAttr.cpp
//===--- SemaStmtAttr.cpp - Statement Attribute Handling ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements stmt-related attribute processing. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTContext.h" #include "clang/Basic/SourceManager.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/ScopeInfo.h" #include "llvm/ADT/StringExtras.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change using namespace clang; using namespace sema; static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const AttributeList &A, SourceRange Range) { if (!isa<NullStmt>(St)) { S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_wrong_target) << St->getLocStart(); if (isa<SwitchCase>(St)) { SourceLocation L = S.getLocForEndOfToken(Range.getEnd()); S.Diag(L, diag::note_fallthrough_insert_semi_fixit) << FixItHint::CreateInsertion(L, ";"); } return nullptr; } if (S.getCurFunction()->SwitchStack.empty()) { S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_outside_switch); return nullptr; } return ::new (S.Context) FallThroughAttr(A.getRange(), S.Context, A.getAttributeSpellingListIndex()); } static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const AttributeList &A, SourceRange) { IdentifierLoc *PragmaNameLoc = A.getArgAsIdent(0); IdentifierLoc *OptionLoc = A.getArgAsIdent(1); IdentifierLoc *StateLoc = A.getArgAsIdent(2); Expr *ValueExpr = A.getArgAsExpr(3); bool PragmaUnroll = PragmaNameLoc->Ident->getName() == "unroll"; bool PragmaNoUnroll = PragmaNameLoc->Ident->getName() == "nounroll"; if (St->getStmtClass() != Stmt::DoStmtClass && St->getStmtClass() != Stmt::ForStmtClass && St->getStmtClass() != Stmt::CXXForRangeStmtClass && St->getStmtClass() != Stmt::WhileStmtClass) { const char *Pragma = llvm::StringSwitch<const char *>(PragmaNameLoc->Ident->getName()) .Case("unroll", "#pragma unroll") .Case("nounroll", "#pragma nounroll") .Default("#pragma clang loop"); S.Diag(St->getLocStart(), diag::err_pragma_loop_precedes_nonloop) << Pragma; return nullptr; } LoopHintAttr::OptionType Option; LoopHintAttr::Spelling Spelling; if (PragmaUnroll) { Option = ValueExpr ? 
LoopHintAttr::UnrollCount : LoopHintAttr::Unroll; Spelling = LoopHintAttr::Pragma_unroll; } else if (PragmaNoUnroll) { Option = LoopHintAttr::Unroll; Spelling = LoopHintAttr::Pragma_nounroll; } else { assert(OptionLoc && OptionLoc->Ident && "Attribute must have valid option info."); IdentifierInfo *OptionInfo = OptionLoc->Ident; Option = llvm::StringSwitch<LoopHintAttr::OptionType>(OptionInfo->getName()) .Case("vectorize", LoopHintAttr::Vectorize) .Case("vectorize_width", LoopHintAttr::VectorizeWidth) .Case("interleave", LoopHintAttr::Interleave) .Case("interleave_count", LoopHintAttr::InterleaveCount) .Case("unroll", LoopHintAttr::Unroll) .Case("unroll_count", LoopHintAttr::UnrollCount) .Default(LoopHintAttr::Vectorize); Spelling = LoopHintAttr::Pragma_clang_loop; } LoopHintAttr::LoopHintState State = LoopHintAttr::Default; if (PragmaNoUnroll) { State = LoopHintAttr::Disable; } else if (Option == LoopHintAttr::VectorizeWidth || Option == LoopHintAttr::InterleaveCount || Option == LoopHintAttr::UnrollCount) { assert(ValueExpr && "Attribute must have a valid value expression."); if (S.CheckLoopHintExpr(ValueExpr, St->getLocStart())) return nullptr; } else if (Option == LoopHintAttr::Vectorize || Option == LoopHintAttr::Interleave || Option == LoopHintAttr::Unroll) { // Default state is assumed if StateLoc is not specified, such as with // '#pragma unroll'. if (StateLoc && StateLoc->Ident) { if (StateLoc->Ident->isStr("disable")) State = LoopHintAttr::Disable; else if (StateLoc->Ident->isStr("assume_safety")) State = LoopHintAttr::AssumeSafety; else State = LoopHintAttr::Enable; } } return LoopHintAttr::CreateImplicit(S.Context, Spelling, Option, State, ValueExpr, A.getRange()); } static void CheckForIncompatibleAttributes(Sema &S, const SmallVectorImpl<const Attr *> &Attrs) { // There are 3 categories of loop hint attributes: vectorize, interleave, // and unroll. Each comes in two variants: a state form and a numeric form. // The state form selectively defaults/enables/disables the transformation // for the loop (for unroll, default indicates full unrolling rather than // enabling the transformation). The numeric form provides an integer // hint (for example, unroll count) to the transformer. The following array // accumulates the hints encountered while iterating through the attributes // to check for compatibility. struct { const LoopHintAttr *StateAttr; const LoopHintAttr *NumericAttr; } HintAttrs[] = {{nullptr, nullptr}, {nullptr, nullptr}, {nullptr, nullptr}}; for (const auto *I : Attrs) { const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(I); // Skip non-loop-hint attributes. if (!LH) continue; int Option = LH->getOption(); int Category; enum { Vectorize, Interleave, Unroll }; switch (Option) { case LoopHintAttr::Vectorize: case LoopHintAttr::VectorizeWidth: Category = Vectorize; break; case LoopHintAttr::Interleave: case LoopHintAttr::InterleaveCount: Category = Interleave; break; case LoopHintAttr::Unroll: case LoopHintAttr::UnrollCount: Category = Unroll; break; }; auto &CategoryState = HintAttrs[Category]; const LoopHintAttr *PrevAttr; if (Option == LoopHintAttr::Vectorize || Option == LoopHintAttr::Interleave || Option == LoopHintAttr::Unroll) { // Enable|Disable|AssumeSafety hint. For example, vectorize(enable). PrevAttr = CategoryState.StateAttr; CategoryState.StateAttr = LH; } else { // Numeric hint. For example, vectorize_width(8). 
PrevAttr = CategoryState.NumericAttr; CategoryState.NumericAttr = LH; } PrintingPolicy Policy(S.Context.getLangOpts()); SourceLocation OptionLoc = LH->getRange().getBegin(); if (PrevAttr) // Cannot specify same type of attribute twice. S.Diag(OptionLoc, diag::err_pragma_loop_compatibility) << /*Duplicate=*/true << PrevAttr->getDiagnosticName(Policy) << LH->getDiagnosticName(Policy); if (CategoryState.StateAttr && CategoryState.NumericAttr && (Category == Unroll || CategoryState.StateAttr->getState() == LoopHintAttr::Disable)) { // Disable hints are not compatible with numeric hints of the same // category. As a special case, numeric unroll hints are also not // compatible with "enable" form of the unroll pragma, unroll(full). S.Diag(OptionLoc, diag::err_pragma_loop_compatibility) << /*Duplicate=*/false << CategoryState.StateAttr->getDiagnosticName(Policy) << CategoryState.NumericAttr->getDiagnosticName(Policy); } } } static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const AttributeList &A, SourceRange Range) { switch (A.getKind()) { case AttributeList::UnknownAttribute: S.Diag(A.getLoc(), A.isDeclspecAttribute() ? diag::warn_unhandled_ms_attribute_ignored : diag::warn_unknown_attribute_ignored) << A.getName(); return nullptr; case AttributeList::AT_FallThrough: return handleFallThroughAttr(S, St, A, Range); case AttributeList::AT_LoopHint: return handleLoopHintAttr(S, St, A, Range); default: // HLSL Change Starts { bool Handled = false; Attr* hlslResult = hlsl::ProcessStmtAttributeForHLSL(S, St, A, Range, Handled); if (Handled) return hlslResult; } // HLSL Change Ends // if we're here, then we parsed a known attribute, but didn't recognize // it as a statement attribute => it is declaration attribute S.Diag(A.getRange().getBegin(), diag::err_attribute_invalid_on_stmt) << A.getName() << St->getLocStart(); return nullptr; } } StmtResult Sema::ProcessStmtAttributes(Stmt *S, AttributeList *AttrList, SourceRange Range) { SmallVector<const Attr*, 8> Attrs; for (const AttributeList* l = AttrList; l; l = l->getNext()) { if (Attr *a = ProcessStmtAttribute(*this, S, *l, Range)) Attrs.push_back(a); } CheckForIncompatibleAttributes(*this, Attrs); if (Attrs.empty()) return S; return ActOnAttributedStmt(Range.getBegin(), Attrs, S); }
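// --- Editorial note (not part of the original file): what the handlers above
// accept at the source level. handleLoopHintAttr maps each pragma option to a
// LoopHintAttr; a state hint and a numeric hint from *different* categories on
// one directive are compatible:
namespace loop_hint_sketch {
inline void scale(int *a, int n) {
#pragma clang loop vectorize(enable) interleave_count(2)
  for (int i = 0; i < n; ++i)
    a[i] *= 2;
}
} // namespace loop_hint_sketch
// By contrast, CheckForIncompatibleAttributes emits err_pragma_loop_compatibility
// for a 'disable' state hint paired with a numeric hint of the same category,
// e.g. '#pragma clang loop vectorize(disable) vectorize_width(4)', and for any
// numeric unroll hint paired with an unroll state hint, e.g.
// '#pragma clang loop unroll(full) unroll_count(4)'.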
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/TreeTransform.h
//===------- TreeTransform.h - Semantic Tree Transformation -----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. //===----------------------------------------------------------------------===// // // This file implements a semantic tree transformation that takes a given // AST and rebuilds it, possibly transforming some nodes in the process. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H #define LLVM_CLANG_LIB_SEMA_TREETRANSFORM_H #include "TypeLocBuilder.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/Sema/Designator.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaDiagnostic.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change #include "llvm/ADT/ArrayRef.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> namespace clang { using namespace sema; /// \brief A semantic tree transformation that allows one to transform one /// abstract syntax tree into another. /// /// A new tree transformation is defined by creating a new subclass \c X of /// \c TreeTransform<X> and then overriding certain operations to provide /// behavior specific to that transformation. For example, template /// instantiation is implemented as a tree transformation where the /// transformation of TemplateTypeParmType nodes involves substituting the /// template arguments for their corresponding template parameters; a similar /// transformation is performed for non-type template parameters and /// template template parameters. /// /// This tree-transformation template uses static polymorphism to allow /// subclasses to customize any of its operations. Thus, a subclass can /// override any of the transformation or rebuild operators by providing an /// operation with the same signature as the default implementation. The /// overriding function should not be virtual. /// /// Semantic tree transformations are split into two stages, either of which /// can be replaced by a subclass. The "transform" step transforms an AST node /// or the parts of an AST node using the various transformation functions, /// then passes the pieces on to the "rebuild" step, which constructs a new AST /// node of the appropriate kind from the pieces. The default transformation /// routines recursively transform the operands to composite AST nodes (e.g., /// the pointee type of a PointerType node) and, if any of those operand nodes /// were changed by the transformation, invokes the rebuild operation to create /// a new AST node. /// /// Subclasses can customize the transformation at various levels. The /// most coarse-grained transformations involve replacing TransformType(), /// TransformExpr(), TransformDecl(), TransformNestedNameSpecifierLoc(), /// TransformTemplateName(), or TransformTemplateArgument() with entirely /// new implementations. 
/// /// For more fine-grained transformations, subclasses can replace any of the /// \c TransformXXX functions (where XXX is the name of an AST node, e.g., /// PointerType, StmtExpr) to alter the transformation. As mentioned previously, /// replacing TransformTemplateTypeParmType() allows template instantiation /// to substitute template arguments for their corresponding template /// parameters. Additionally, subclasses can override the \c RebuildXXX /// functions to control how AST nodes are rebuilt when their operands change. /// By default, \c TreeTransform will invoke semantic analysis to rebuild /// AST nodes. However, certain other tree transformations (e.g., cloning) may /// be able to use more efficient rebuild steps. /// /// There are a handful of other functions that can be overridden, allowing one /// to avoid traversing nodes that don't need any transformation /// (\c AlreadyTransformed()), force rebuilding AST nodes even when their /// operands have not changed (\c AlwaysRebuild()), and customize the /// default locations and entity names used for type-checking /// (\c getBaseLocation(), \c getBaseEntity()). template<typename Derived> class TreeTransform { /// \brief Private RAII object that helps us forget and then re-remember /// the template argument corresponding to a partially-substituted parameter /// pack. class ForgetPartiallySubstitutedPackRAII { Derived &Self; TemplateArgument Old; public: ForgetPartiallySubstitutedPackRAII(Derived &Self) : Self(Self) { Old = Self.ForgetPartiallySubstitutedPack(); } ~ForgetPartiallySubstitutedPackRAII() { Self.RememberPartiallySubstitutedPack(Old); } }; protected: Sema &SemaRef; /// \brief The set of local declarations that have been transformed, for /// cases where we are forced to build new declarations within the transformer /// rather than in the subclass (e.g., lambda closure types). llvm::DenseMap<Decl *, Decl *> TransformedLocalDecls; public: /// \brief Initializes a new tree transformer. TreeTransform(Sema &SemaRef) : SemaRef(SemaRef) { } /// \brief Retrieves a reference to the derived class. Derived &getDerived() { return static_cast<Derived&>(*this); } /// \brief Retrieves a reference to the derived class. const Derived &getDerived() const { return static_cast<const Derived&>(*this); } static inline ExprResult Owned(Expr *E) { return E; } static inline StmtResult Owned(Stmt *S) { return S; } /// \brief Retrieves a reference to the semantic analysis object used for /// this tree transform. Sema &getSema() const { return SemaRef; } /// \brief Whether the transformation should always rebuild AST nodes, even /// if none of the children have changed. /// /// Subclasses may override this function to specify when the transformation /// should rebuild all AST nodes. /// /// We must always rebuild all AST nodes when performing variadic template /// pack expansion, in order to avoid violating the AST invariant that each /// statement node appears at most once in its containing declaration. bool AlwaysRebuild() { return SemaRef.ArgumentPackSubstitutionIndex != -1; } /// \brief Returns the location of the entity being transformed, if that /// information was not available elsewhere in the AST. /// /// By default, returns no source-location information. Subclasses can /// provide an alternative implementation that provides better location /// information. 
  SourceLocation getBaseLocation() { return SourceLocation(); }

  /// \brief Returns the name of the entity being transformed, if that
  /// information was not available elsewhere in the AST.
  ///
  /// By default, returns an empty name. Subclasses can provide an alternative
  /// implementation with a more precise name.
  DeclarationName getBaseEntity() { return DeclarationName(); }

  /// \brief Sets the "base" location and entity when that
  /// information is known based on another transformation.
  ///
  /// By default, the source location and entity are ignored. Subclasses can
  /// override this function to provide a customized implementation.
  void setBase(SourceLocation Loc, DeclarationName Entity) { }

  /// \brief RAII object that temporarily sets the base location and entity
  /// used for reporting diagnostics in types.
  class TemporaryBase {
    TreeTransform &Self;
    SourceLocation OldLocation;
    DeclarationName OldEntity;

  public:
    TemporaryBase(TreeTransform &Self, SourceLocation Location,
                  DeclarationName Entity) : Self(Self) {
      OldLocation = Self.getDerived().getBaseLocation();
      OldEntity = Self.getDerived().getBaseEntity();

      if (Location.isValid())
        Self.getDerived().setBase(Location, Entity);
    }

    ~TemporaryBase() {
      Self.getDerived().setBase(OldLocation, OldEntity);
    }
  };

  /// \brief Determine whether the given type \p T has already been
  /// transformed.
  ///
  /// Subclasses can provide an alternative implementation of this routine
  /// to short-circuit evaluation when it is known that a given type will
  /// not change. For example, template instantiation need not traverse
  /// non-dependent types.
  bool AlreadyTransformed(QualType T) {
    return T.isNull();
  }

  /// \brief Determine whether the given call argument should be dropped, e.g.,
  /// because it is a default argument.
  ///
  /// Subclasses can provide an alternative implementation of this routine to
  /// determine which kinds of call arguments get dropped. By default,
  /// CXXDefaultArgument nodes are dropped (prior to transformation).
  bool DropCallArgument(Expr *E) {
    return E->isDefaultArgument();
  }

  /// \brief Determine whether we should expand a pack expansion with the
  /// given set of parameter packs into separate arguments by repeatedly
  /// transforming the pattern.
  ///
  /// By default, the transformer never tries to expand pack expansions.
  /// Subclasses can override this routine to provide different behavior.
  ///
  /// \param EllipsisLoc The location of the ellipsis that identifies the
  /// pack expansion.
  ///
  /// \param PatternRange The source range that covers the entire pattern of
  /// the pack expansion.
  ///
  /// \param Unexpanded The set of unexpanded parameter packs within the
  /// pattern.
  ///
  /// \param ShouldExpand Will be set to \c true if the transformer should
  /// expand the corresponding pack expansions into separate arguments. When
  /// set, \c NumExpansions must also be set.
  ///
  /// \param RetainExpansion Whether the caller should add an unexpanded
  /// pack expansion after all of the expanded arguments. This is used
  /// when extending explicitly-specified template argument packs per
  /// C++0x [temp.arg.explicit]p9.
  ///
  /// \param NumExpansions The number of separate arguments that will be in
  /// the expanded form of the corresponding pack expansion. This is both an
  /// input and an output parameter, which can be set by the caller if the
  /// number of expansions is known a priori (e.g., due to a prior substitution)
  /// and will be set by the callee when the number of expansions is known.
  /// The callee must set this value when \c ShouldExpand is \c true; it may
  /// set this value in other cases.
  ///
  /// \returns true if an error occurred (e.g., because the parameter packs
  /// are to be instantiated with arguments of different lengths), false
  /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
  /// must be set.
  bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
                               SourceRange PatternRange,
                               ArrayRef<UnexpandedParameterPack> Unexpanded,
                               bool &ShouldExpand,
                               bool &RetainExpansion,
                               Optional<unsigned> &NumExpansions) {
    ShouldExpand = false;
    return false;
  }

  /// \brief "Forget" about the partially-substituted pack template argument,
  /// when performing an instantiation that must preserve the parameter pack
  /// use.
  ///
  /// This routine is meant to be overridden by the template instantiator.
  TemplateArgument ForgetPartiallySubstitutedPack() {
    return TemplateArgument();
  }

  /// \brief "Remember" the partially-substituted pack template argument
  /// after performing an instantiation that must preserve the parameter pack
  /// use.
  ///
  /// This routine is meant to be overridden by the template instantiator.
  void RememberPartiallySubstitutedPack(TemplateArgument Arg) { }

  /// \brief Note to the derived class when a function parameter pack is
  /// being expanded.
  void ExpandingFunctionParameterPack(ParmVarDecl *Pack) { }

  /// \brief Transforms the given type into another type.
  ///
  /// By default, this routine transforms a type by creating a
  /// TypeSourceInfo for it and delegating to the appropriate
  /// function. This is expensive, but we don't mind, because
  /// this method is deprecated anyway; all users should be
  /// switched to storing TypeSourceInfos.
  ///
  /// \returns the transformed type.
  QualType TransformType(QualType T);

  /// \brief Transforms the given type-with-location into a new
  /// type-with-location.
  ///
  /// By default, this routine transforms a type by delegating to the
  /// appropriate TransformXXXType to build a new type. Subclasses
  /// may override this function (to take over all type
  /// transformations) or some set of the TransformXXXType functions
  /// to alter the transformation.
  TypeSourceInfo *TransformType(TypeSourceInfo *DI);

  /// \brief Transform the given type-with-location into a new
  /// type, collecting location information in the given builder
  /// as necessary.
  ///
  QualType TransformType(TypeLocBuilder &TLB, TypeLoc TL);

  /// \brief Transform the given statement.
  ///
  /// By default, this routine transforms a statement by delegating to the
  /// appropriate TransformXXXStmt function to transform a specific kind of
  /// statement or the TransformExpr() function to transform an expression.
  /// Subclasses may override this function to transform statements using some
  /// other mechanism.
  ///
  /// \returns the transformed statement.
  StmtResult TransformStmt(Stmt *S);

  /// \brief Transform the given OpenMP clause.
  ///
  /// By default, this routine transforms a clause by delegating to the
  /// appropriate TransformOMPXXXClause function to transform a specific kind
  /// of clause. Subclasses may override this function to transform clauses
  /// using some other mechanism.
  ///
  /// \returns the transformed OpenMP clause.
  OMPClause *TransformOMPClause(OMPClause *S);

  /// \brief Transform the given attribute.
  ///
  /// By default, this routine transforms a statement by delegating to the
  /// appropriate TransformXXXAttr function to transform a specific kind
  /// of attribute.
  /// Subclasses may override this function to transform
  /// attributed statements using some other mechanism.
  ///
  /// \returns the transformed attribute
  const Attr *TransformAttr(const Attr *S);

  /// \brief Transform the specified attribute.
  ///
  /// Subclasses should override the transformation of attributes with a pragma
  /// spelling to transform expressions stored within the attribute.
  ///
  /// \returns the transformed attribute.
#define ATTR(X)
#define PRAGMA_SPELLING_ATTR(X)                                                \
  const X##Attr *Transform##X##Attr(const X##Attr *R) { return R; }
#include "clang/Basic/AttrList.inc"

  /// \brief Transform the given expression.
  ///
  /// By default, this routine transforms an expression by delegating to the
  /// appropriate TransformXXXExpr function to build a new expression.
  /// Subclasses may override this function to transform expressions using some
  /// other mechanism.
  ///
  /// \returns the transformed expression.
  ExprResult TransformExpr(Expr *E);

  /// \brief Transform the given initializer.
  ///
  /// By default, this routine transforms an initializer by stripping off the
  /// semantic nodes added by initialization, then passing the result to
  /// TransformExpr or TransformExprs.
  ///
  /// \returns the transformed initializer.
  ExprResult TransformInitializer(Expr *Init, bool NotCopyInit);

  /// \brief Transform the given list of expressions.
  ///
  /// This routine transforms a list of expressions by invoking
  /// \c TransformExpr() for each subexpression. However, it also provides
  /// support for variadic templates by expanding any pack expansions (if the
  /// derived class permits such expansion) along the way. When pack expansions
  /// are present, the number of outputs may not equal the number of inputs.
  ///
  /// \param Inputs The set of expressions to be transformed.
  ///
  /// \param NumInputs The number of expressions in \c Inputs.
  ///
  /// \param IsCall If \c true, then this transform is being performed on
  /// function-call arguments, and any arguments that should be dropped, will
  /// be.
  ///
  /// \param Outputs The transformed input expressions will be added to this
  /// vector.
  ///
  /// \param ArgChanged If non-NULL, will be set \c true if any argument changed
  /// due to transformation.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool TransformExprs(Expr **Inputs, unsigned NumInputs, bool IsCall,
                      SmallVectorImpl<Expr *> &Outputs,
                      bool *ArgChanged = nullptr);

  /// \brief Transform the given declaration, which is referenced from a type
  /// or expression.
  ///
  /// By default, acts as the identity function on declarations, unless the
  /// transformer has had to transform the declaration itself. Subclasses
  /// may override this function to provide alternate behavior.
  Decl *TransformDecl(SourceLocation Loc, Decl *D) {
    llvm::DenseMap<Decl *, Decl *>::iterator Known
      = TransformedLocalDecls.find(D);
    if (Known != TransformedLocalDecls.end())
      return Known->second;

    return D;
  }

  /// \brief Transform the attributes associated with the given declaration and
  /// place them on the new declaration.
  ///
  /// By default, this operation does nothing. Subclasses may override this
  /// behavior to transform attributes.
  void transformAttrs(Decl *Old, Decl *New) { }

  /// \brief Note that a local declaration has been transformed by this
  /// transformer.
  ///
  /// Local declarations are typically transformed via a call to
  /// TransformDefinition. However, in some cases (e.g., lambda expressions),
  /// the transformer itself has to transform the declarations.
  /// This routine can be overridden by a subclass that keeps track of such
  /// mappings.
  void transformedLocalDecl(Decl *Old, Decl *New) {
    TransformedLocalDecls[Old] = New;
  }

  /// \brief Transform the definition of the given declaration.
  ///
  /// By default, invokes TransformDecl() to transform the declaration.
  /// Subclasses may override this function to provide alternate behavior.
  Decl *TransformDefinition(SourceLocation Loc, Decl *D) {
    return getDerived().TransformDecl(Loc, D);
  }

  /// \brief Transform the given declaration, which was the first part of a
  /// nested-name-specifier in a member access expression.
  ///
  /// This specific declaration transformation only applies to the first
  /// identifier in a nested-name-specifier of a member access expression, e.g.,
  /// the \c T in \c x->T::member
  ///
  /// By default, invokes TransformDecl() to transform the declaration.
  /// Subclasses may override this function to provide alternate behavior.
  NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc) {
    return cast_or_null<NamedDecl>(getDerived().TransformDecl(Loc, D));
  }

  /// \brief Transform the given nested-name-specifier with source-location
  /// information.
  ///
  /// By default, transforms all of the types and declarations within the
  /// nested-name-specifier. Subclasses may override this function to provide
  /// alternate behavior.
  NestedNameSpecifierLoc
  TransformNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                                  QualType ObjectType = QualType(),
                                  NamedDecl *FirstQualifierInScope = nullptr);

  /// \brief Transform the given declaration name.
  ///
  /// By default, transforms the types of conversion function, constructor,
  /// and destructor names and then (if needed) rebuilds the declaration name.
  /// Identifiers and selectors are returned unmodified. Subclasses may
  /// override this function to provide alternate behavior.
  DeclarationNameInfo
  TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo);

  /// \brief Transform the given template name.
  ///
  /// \param SS The nested-name-specifier that qualifies the template
  /// name. This nested-name-specifier must already have been transformed.
  ///
  /// \param Name The template name to transform.
  ///
  /// \param NameLoc The source location of the template name.
  ///
  /// \param ObjectType If we're translating a template name within a member
  /// access expression, this is the type of the object whose member template
  /// is being referenced.
  ///
  /// \param FirstQualifierInScope If the first part of a nested-name-specifier
  /// also refers to a name within the current (lexical) scope, this is the
  /// declaration it refers to.
  ///
  /// By default, transforms the template name by transforming the declarations
  /// and nested-name-specifiers that occur within the template name.
  /// Subclasses may override this function to provide alternate behavior.
  TemplateName
  TransformTemplateName(CXXScopeSpec &SS, TemplateName Name,
                        SourceLocation NameLoc,
                        QualType ObjectType = QualType(),
                        NamedDecl *FirstQualifierInScope = nullptr);

  /// \brief Transform the given template argument.
  ///
  /// By default, this operation transforms the type, expression, or
  /// declaration stored within the template argument and constructs a
  /// new template argument from the transformed result. Subclasses may
  /// override this function to provide alternate behavior.
  ///
  /// Returns true if there was an error.
  bool TransformTemplateArgument(const TemplateArgumentLoc &Input,
                                 TemplateArgumentLoc &Output);

  /// \brief Transform the given set of template arguments.
  ///
  /// By default, this operation transforms all of the template arguments
  /// in the input set using \c TransformTemplateArgument(), and appends
  /// the transformed arguments to the output list.
  ///
  /// Note that this overload of \c TransformTemplateArguments() is merely
  /// a convenience function. Subclasses that wish to override this behavior
  /// should override the iterator-based member template version.
  ///
  /// \param Inputs The set of template arguments to be transformed.
  ///
  /// \param NumInputs The number of template arguments in \p Inputs.
  ///
  /// \param Outputs The set of transformed template arguments output by this
  /// routine.
  ///
  /// Returns true if an error occurred.
  bool TransformTemplateArguments(const TemplateArgumentLoc *Inputs,
                                  unsigned NumInputs,
                                  TemplateArgumentListInfo &Outputs) {
    return TransformTemplateArguments(Inputs, Inputs + NumInputs, Outputs);
  }

  /// \brief Transform the given set of template arguments.
  ///
  /// By default, this operation transforms all of the template arguments
  /// in the input set using \c TransformTemplateArgument(), and appends
  /// the transformed arguments to the output list.
  ///
  /// \param First An iterator to the first template argument.
  ///
  /// \param Last An iterator one step past the last template argument.
  ///
  /// \param Outputs The set of transformed template arguments output by this
  /// routine.
  ///
  /// Returns true if an error occurred.
  template<typename InputIterator>
  bool TransformTemplateArguments(InputIterator First,
                                  InputIterator Last,
                                  TemplateArgumentListInfo &Outputs);

  /// \brief Fakes up a TemplateArgumentLoc for a given TemplateArgument.
  void InventTemplateArgumentLoc(const TemplateArgument &Arg,
                                 TemplateArgumentLoc &ArgLoc);

  /// \brief Fakes up a TypeSourceInfo for a type.
  TypeSourceInfo *InventTypeSourceInfo(QualType T) {
    return SemaRef.Context.getTrivialTypeSourceInfo(T,
                                             getDerived().getBaseLocation());
  }

#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT)                                                 \
  QualType Transform##CLASS##Type(TypeLocBuilder &TLB, CLASS##TypeLoc T);
#include "clang/AST/TypeLocNodes.def"

  template<typename Fn>
  QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
                                      FunctionProtoTypeLoc TL,
                                      CXXRecordDecl *ThisContext,
                                      unsigned ThisTypeQuals,
                                      Fn TransformExceptionSpec);

  bool TransformExceptionSpec(SourceLocation Loc,
                              FunctionProtoType::ExceptionSpecInfo &ESI,
                              SmallVectorImpl<QualType> &Exceptions,
                              bool &Changed);

  StmtResult TransformSEHHandler(Stmt *Handler);

  QualType
  TransformTemplateSpecializationType(TypeLocBuilder &TLB,
                                      TemplateSpecializationTypeLoc TL,
                                      TemplateName Template);

  QualType
  TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
                                      DependentTemplateSpecializationTypeLoc TL,
                                               TemplateName Template,
                                               CXXScopeSpec &SS);

  QualType TransformDependentTemplateSpecializationType(
      TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL,
      NestedNameSpecifierLoc QualifierLoc);

  /// \brief Transforms the parameters of a function type into the
  /// given vectors.
  ///
  /// The result vectors should be kept in sync; null entries in the
  /// variables vector are acceptable.
  ///
  /// Return true on error.
  bool TransformFunctionTypeParams(SourceLocation Loc,
                                   ParmVarDecl **Params, unsigned NumParams,
                                   const QualType *ParamTypes,
                                   SmallVectorImpl<QualType> &PTypes,
                                   SmallVectorImpl<ParmVarDecl*> *PVars);

  /// \brief Transforms a single function-type parameter. Return null
  /// on error.
  ///
  /// \param indexAdjustment - A number to add to the parameter's
  /// scope index; can be negative
  ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
                                          int indexAdjustment,
                                          Optional<unsigned> NumExpansions,
                                          bool ExpectParameterPack);

  QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL);

  StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr);
  ExprResult TransformCXXNamedCastExpr(CXXNamedCastExpr *E);

  TemplateParameterList *TransformTemplateParameterList(
        TemplateParameterList *TPL) {
    return TPL;
  }

  ExprResult TransformAddressOfOperand(Expr *E);

  ExprResult TransformDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E,
                                                bool IsAddressOfOperand,
                                                TypeSourceInfo **RecoveryTSI);

  ExprResult TransformParenDependentScopeDeclRefExpr(
      ParenExpr *PE, DependentScopeDeclRefExpr *DRE, bool IsAddressOfOperand,
      TypeSourceInfo **RecoveryTSI);

  StmtResult TransformOMPExecutableDirective(OMPExecutableDirective *S);

  // FIXME: We use LLVM_ATTRIBUTE_NOINLINE because inlining causes a ridiculous
  // amount of stack usage with clang.
#define STMT(Node, Parent)                                                     \
  LLVM_ATTRIBUTE_NOINLINE                                                      \
  StmtResult Transform##Node(Node *S);
#define EXPR(Node, Parent)                                                     \
  LLVM_ATTRIBUTE_NOINLINE                                                      \
  ExprResult Transform##Node(Node *E);
#define ABSTRACT_STMT(Stmt)
#include "clang/AST/StmtNodes.inc"

#define OPENMP_CLAUSE(Name, Class)                                             \
  LLVM_ATTRIBUTE_NOINLINE                                                      \
  OMPClause *Transform ## Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"

  /// \brief Build a new pointer type given its pointee type.
  ///
  /// By default, performs semantic analysis when building the pointer type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildPointerType(QualType PointeeType, SourceLocation Sigil);

  /// \brief Build a new block pointer type given its pointee type.
  ///
  /// By default, performs semantic analysis when building the block pointer
  /// type. Subclasses may override this routine to provide different behavior.
  QualType RebuildBlockPointerType(QualType PointeeType, SourceLocation Sigil);

  /// \brief Build a new reference type given the type it references.
  ///
  /// By default, performs semantic analysis when building the
  /// reference type. Subclasses may override this routine to provide
  /// different behavior.
  ///
  /// \param LValue whether the type was written with an lvalue sigil
  /// or an rvalue sigil.
  QualType RebuildReferenceType(QualType ReferentType,
                                bool LValue,
                                SourceLocation Sigil);

  /// \brief Build a new member pointer type given the pointee type and the
  /// class type it refers into.
  ///
  /// By default, performs semantic analysis when building the member pointer
  /// type. Subclasses may override this routine to provide different behavior.
  QualType RebuildMemberPointerType(QualType PointeeType, QualType ClassType,
                                    SourceLocation Sigil);

  /// \brief Build an Objective-C object type.
  ///
  /// By default, performs semantic analysis when building the object type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildObjCObjectType(QualType BaseType,
                                 SourceLocation Loc,
                                 SourceLocation TypeArgsLAngleLoc,
                                 ArrayRef<TypeSourceInfo *> TypeArgs,
                                 SourceLocation TypeArgsRAngleLoc,
                                 SourceLocation ProtocolLAngleLoc,
                                 ArrayRef<ObjCProtocolDecl *> Protocols,
                                 ArrayRef<SourceLocation> ProtocolLocs,
                                 SourceLocation ProtocolRAngleLoc);

  /// \brief Build a new Objective-C object pointer type given the pointee type.
  ///
  /// By default, directly builds the pointer type, with no additional semantic
  /// analysis.
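  /// In effect, the default is a direct query of the ASTContext (a sketch of
  /// that behavior, not a normative statement of the implementation):
  /// \code
  ///   return SemaRef.Context.getObjCObjectPointerType(PointeeType);
  /// \endcode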
  QualType RebuildObjCObjectPointerType(QualType PointeeType,
                                        SourceLocation Star);

  /// \brief Build a new array type given the element type, size
  /// modifier, size of the array (if known), size expression, and index type
  /// qualifiers.
  ///
  /// By default, performs semantic analysis when building the array type.
  /// Subclasses may override this routine to provide different behavior.
  /// Also by default, all of the other Rebuild*Array functions below are
  /// implemented in terms of this function.
  QualType RebuildArrayType(QualType ElementType,
                            ArrayType::ArraySizeModifier SizeMod,
                            const llvm::APInt *Size,
                            Expr *SizeExpr,
                            unsigned IndexTypeQuals,
                            SourceRange BracketsRange);

  /// \brief Build a new constant array type given the element type, size
  /// modifier, (known) size of the array, and index type qualifiers.
  ///
  /// By default, performs semantic analysis when building the array type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildConstantArrayType(QualType ElementType,
                                    ArrayType::ArraySizeModifier SizeMod,
                                    const llvm::APInt &Size,
                                    unsigned IndexTypeQuals,
                                    SourceRange BracketsRange);

  /// \brief Build a new incomplete array type given the element type, size
  /// modifier, and index type qualifiers.
  ///
  /// By default, performs semantic analysis when building the array type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildIncompleteArrayType(QualType ElementType,
                                      ArrayType::ArraySizeModifier SizeMod,
                                      unsigned IndexTypeQuals,
                                      SourceRange BracketsRange);

  /// \brief Build a new variable-length array type given the element type,
  /// size modifier, size expression, and index type qualifiers.
  ///
  /// By default, performs semantic analysis when building the array type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildVariableArrayType(QualType ElementType,
                                    ArrayType::ArraySizeModifier SizeMod,
                                    Expr *SizeExpr,
                                    unsigned IndexTypeQuals,
                                    SourceRange BracketsRange);

  /// \brief Build a new dependent-sized array type given the element type,
  /// size modifier, size expression, and index type qualifiers.
  ///
  /// By default, performs semantic analysis when building the array type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildDependentSizedArrayType(QualType ElementType,
                                          ArrayType::ArraySizeModifier SizeMod,
                                          Expr *SizeExpr,
                                          unsigned IndexTypeQuals,
                                          SourceRange BracketsRange);

  /// \brief Build a new vector type given the element type and
  /// number of elements.
  ///
  /// By default, performs semantic analysis when building the vector type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildVectorType(QualType ElementType, unsigned NumElements,
                             VectorType::VectorKind VecKind);

  /// \brief Build a new extended vector type given the element type and
  /// number of elements.
  ///
  /// By default, performs semantic analysis when building the vector type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildExtVectorType(QualType ElementType, unsigned NumElements,
                                SourceLocation AttributeLoc);

  /// \brief Build a new potentially dependently-sized extended vector type
  /// given the element type and number of elements.
  ///
  /// By default, performs semantic analysis when building the vector type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildDependentSizedExtVectorType(QualType ElementType,
                                              Expr *SizeExpr,
                                              SourceLocation AttributeLoc);

  /// \brief Build a new function type.
  ///
  /// By default, performs semantic analysis when building the function type.
  /// Subclasses may override this routine to provide different behavior.
  // HLSL Change - FIX - We should move param mods to parameter QualTypes
  QualType RebuildFunctionProtoType(QualType T,
                                    MutableArrayRef<QualType> ParamTypes,
                                    ArrayRef<hlsl::ParameterModifier> ParamMods,
                                    const FunctionProtoType::ExtProtoInfo &EPI);
  // HLSL Change - End

  /// \brief Build a new unprototyped function type.
  QualType RebuildFunctionNoProtoType(QualType ResultType);

  /// \brief Rebuild an unresolved typename type, given the decl that
  /// the UnresolvedUsingTypenameDecl was transformed to.
  QualType RebuildUnresolvedUsingType(Decl *D);

  /// \brief Build a new typedef type.
  QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
    return SemaRef.Context.getTypeDeclType(Typedef);
  }

  /// \brief Build a new class/struct/union type.
  QualType RebuildRecordType(RecordDecl *Record) {
    return SemaRef.Context.getTypeDeclType(Record);
  }

  /// \brief Build a new Enum type.
  QualType RebuildEnumType(EnumDecl *Enum) {
    return SemaRef.Context.getTypeDeclType(Enum);
  }

  /// \brief Build a new typeof(expr) type.
  ///
  /// By default, performs semantic analysis when building the typeof type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildTypeOfExprType(Expr *Underlying, SourceLocation Loc);

  /// \brief Build a new typeof(type) type.
  ///
  /// By default, builds a new TypeOfType with the given underlying type.
  QualType RebuildTypeOfType(QualType Underlying);

  /// \brief Build a new unary transform type.
  QualType RebuildUnaryTransformType(QualType BaseType,
                                     UnaryTransformType::UTTKind UKind,
                                     SourceLocation Loc);

  /// \brief Build a new C++11 decltype type.
  ///
  /// By default, performs semantic analysis when building the decltype type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildDecltypeType(Expr *Underlying, SourceLocation Loc);

  /// \brief Build a new C++11 auto type.
  ///
  /// By default, builds a new AutoType with the given deduced type.
  QualType RebuildAutoType(QualType Deduced, bool IsDecltypeAuto) {
    // Note, IsDependent is always false here: we implicitly convert an 'auto'
    // which has been deduced to a dependent type into an undeduced 'auto', so
    // that we'll retry deduction after the transformation.
    return SemaRef.Context.getAutoType(Deduced, IsDecltypeAuto,
                                       /*IsDependent*/ false);
  }

  /// \brief Build a new template specialization type.
  ///
  /// By default, performs semantic analysis when building the template
  /// specialization type. Subclasses may override this routine to provide
  /// different behavior.
  QualType RebuildTemplateSpecializationType(TemplateName Template,
                                             SourceLocation TemplateLoc,
                                             TemplateArgumentListInfo &Args);

  /// \brief Build a new parenthesized type.
  ///
  /// By default, builds a new ParenType type from the inner type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildParenType(QualType InnerType) {
    return SemaRef.Context.getParenType(InnerType);
  }

  /// \brief Build a new qualified name type.
  ///
  /// By default, builds a new ElaboratedType type from the keyword,
  /// the nested-name-specifier and the named type.
  /// Subclasses may override this routine to provide different behavior.
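  /// For example (an illustrative sketch; the local names are hypothetical),
  /// rebuilding the type written \c typename \c N::T combines the keyword,
  /// the transformed qualifier, and the named type:
  /// \code
  ///   QualType ET = RebuildElaboratedType(KwLoc, ETK_Typename, QualLoc, NamedT);
  /// \endcode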
  QualType RebuildElaboratedType(SourceLocation KeywordLoc,
                                 ElaboratedTypeKeyword Keyword,
                                 NestedNameSpecifierLoc QualifierLoc,
                                 QualType Named) {
    return SemaRef.Context.getElaboratedType(Keyword,
                                         QualifierLoc.getNestedNameSpecifier(),
                                             Named);
  }

  /// \brief Build a new typename type that refers to a template-id.
  ///
  /// By default, builds a new DependentNameType type from the
  /// nested-name-specifier and the given type. Subclasses may override
  /// this routine to provide different behavior.
  QualType RebuildDependentTemplateSpecializationType(
                                          ElaboratedTypeKeyword Keyword,
                                          NestedNameSpecifierLoc QualifierLoc,
                                          const IdentifierInfo *Name,
                                          SourceLocation NameLoc,
                                          TemplateArgumentListInfo &Args) {
    // Rebuild the template name.
    // TODO: avoid TemplateName abstraction
    CXXScopeSpec SS;
    SS.Adopt(QualifierLoc);
    TemplateName InstName
      = getDerived().RebuildTemplateName(SS, *Name, NameLoc, QualType(),
                                         nullptr);

    if (InstName.isNull())
      return QualType();

    // If it's still dependent, make a dependent specialization.
    if (InstName.getAsDependentTemplateName())
      return SemaRef.Context.getDependentTemplateSpecializationType(Keyword,
                                          QualifierLoc.getNestedNameSpecifier(),
                                                                    Name,
                                                                    Args);

    // Otherwise, make an elaborated type wrapping a non-dependent
    // specialization.
    QualType T =
        getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args);
    if (T.isNull()) return QualType();

    if (Keyword == ETK_None && QualifierLoc.getNestedNameSpecifier() == nullptr)
      return T;

    return SemaRef.Context.getElaboratedType(Keyword,
                                         QualifierLoc.getNestedNameSpecifier(),
                                             T);
  }

  /// \brief Build a new typename type that refers to an identifier.
  ///
  /// By default, performs semantic analysis when building the typename type
  /// (or elaborated type). Subclasses may override this routine to provide
  /// different behavior.
  QualType RebuildDependentNameType(ElaboratedTypeKeyword Keyword,
                                    SourceLocation KeywordLoc,
                                    NestedNameSpecifierLoc QualifierLoc,
                                    const IdentifierInfo *Id,
                                    SourceLocation IdLoc) {
    CXXScopeSpec SS;
    SS.Adopt(QualifierLoc);

    if (QualifierLoc.getNestedNameSpecifier()->isDependent()) {
      // If the name is still dependent, just build a new dependent name type.
      if (!SemaRef.computeDeclContext(SS))
        return SemaRef.Context.getDependentNameType(Keyword,
                                          QualifierLoc.getNestedNameSpecifier(),
                                                    Id);
    }

    if (Keyword == ETK_None || Keyword == ETK_Typename)
      return SemaRef.CheckTypenameType(Keyword, KeywordLoc, QualifierLoc,
                                       *Id, IdLoc);

    TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);

    // We had a dependent elaborated-type-specifier that has been transformed
    // into a non-dependent elaborated-type-specifier. Find the tag we're
    // referring to.
    LookupResult Result(SemaRef, Id, IdLoc, Sema::LookupTagName);
    DeclContext *DC = SemaRef.computeDeclContext(SS, false);
    if (!DC)
      return QualType();

    if (SemaRef.RequireCompleteDeclContext(SS, DC))
      return QualType();

    TagDecl *Tag = nullptr;
    SemaRef.LookupQualifiedName(Result, DC);
    switch (Result.getResultKind()) {
      case LookupResult::NotFound:
      case LookupResult::NotFoundInCurrentInstantiation:
        break;

      case LookupResult::Found:
        Tag = Result.getAsSingle<TagDecl>();
        break;

      case LookupResult::FoundOverloaded:
      case LookupResult::FoundUnresolvedValue:
        llvm_unreachable("Tag lookup cannot find non-tags");

      case LookupResult::Ambiguous:
        // Let the LookupResult structure handle ambiguities.
        return QualType();
    }

    if (!Tag) {
      // Check where the name exists but isn't a tag type and use that to emit
      // better diagnostics.
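      // (This second qualified lookup is purely diagnostic: it re-runs the
      // lookup without the tag filter's consequences so the error below can
      // describe the kind of non-tag declaration the name actually denotes.)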
      LookupResult Result(SemaRef, Id, IdLoc, Sema::LookupTagName);
      SemaRef.LookupQualifiedName(Result, DC);
      switch (Result.getResultKind()) {
        case LookupResult::Found:
        case LookupResult::FoundOverloaded:
        case LookupResult::FoundUnresolvedValue: {
          NamedDecl *SomeDecl = Result.getRepresentativeDecl();
          unsigned Kind = 0;
          if (isa<TypedefDecl>(SomeDecl)) Kind = 1;
          else if (isa<TypeAliasDecl>(SomeDecl)) Kind = 2;
          else if (isa<ClassTemplateDecl>(SomeDecl)) Kind = 3;
          SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag) << Kind;
          SemaRef.Diag(SomeDecl->getLocation(), diag::note_declared_at);
          break;
        }
        default:
          SemaRef.Diag(IdLoc, diag::err_not_tag_in_scope)
              << Kind << Id << DC << QualifierLoc.getSourceRange();
          break;
      }
      return QualType();
    }

    if (!SemaRef.isAcceptableTagRedeclaration(Tag, Kind, /*isDefinition*/false,
                                              IdLoc, Id)) {
      SemaRef.Diag(KeywordLoc, diag::err_use_with_wrong_tag) << Id;
      SemaRef.Diag(Tag->getLocation(), diag::note_previous_use);
      return QualType();
    }

    // Build the elaborated-type-specifier type.
    QualType T = SemaRef.Context.getTypeDeclType(Tag);
    return SemaRef.Context.getElaboratedType(Keyword,
                                         QualifierLoc.getNestedNameSpecifier(),
                                             T);
  }

  /// \brief Build a new pack expansion type.
  ///
  /// By default, builds a new PackExpansionType type from the given pattern.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildPackExpansionType(QualType Pattern,
                                    SourceRange PatternRange,
                                    SourceLocation EllipsisLoc,
                                    Optional<unsigned> NumExpansions) {
    return getSema().CheckPackExpansion(Pattern, PatternRange, EllipsisLoc,
                                        NumExpansions);
  }

  /// \brief Build a new atomic type given its value type.
  ///
  /// By default, performs semantic analysis when building the atomic type.
  /// Subclasses may override this routine to provide different behavior.
  QualType RebuildAtomicType(QualType ValueType, SourceLocation KWLoc);

  /// \brief Build a new template name given a nested name specifier, a flag
  /// indicating whether the "template" keyword was provided, and the template
  /// that the template name refers to.
  ///
  /// By default, builds the new template name directly. Subclasses may override
  /// this routine to provide different behavior.
  TemplateName RebuildTemplateName(CXXScopeSpec &SS,
                                   bool TemplateKW,
                                   TemplateDecl *Template);

  /// \brief Build a new template name given a nested name specifier and the
  /// name that is referred to as a template.
  ///
  /// By default, performs semantic analysis to determine whether the name can
  /// be resolved to a specific template, then builds the appropriate kind of
  /// template name. Subclasses may override this routine to provide different
  /// behavior.
  TemplateName RebuildTemplateName(CXXScopeSpec &SS,
                                   const IdentifierInfo &Name,
                                   SourceLocation NameLoc,
                                   QualType ObjectType,
                                   NamedDecl *FirstQualifierInScope);

  /// \brief Build a new template name given a nested name specifier and the
  /// overloaded operator name that is referred to as a template.
  ///
  /// By default, performs semantic analysis to determine whether the name can
  /// be resolved to a specific template, then builds the appropriate kind of
  /// template name. Subclasses may override this routine to provide different
  /// behavior.
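  /// For example (a sketch; \c SS, \c NameLoc, and \c ObjectType are
  /// hypothetical locals), resolving a dependent \c x.operator+<...> callee:
  /// \code
  ///   TemplateName TN = RebuildTemplateName(SS, OO_Plus, NameLoc, ObjectType);
  /// \endcode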
  TemplateName RebuildTemplateName(CXXScopeSpec &SS,
                                   OverloadedOperatorKind Operator,
                                   SourceLocation NameLoc,
                                   QualType ObjectType);

  /// \brief Build a new template name given a template template parameter pack
  /// and the argument pack that will be substituted for it.
  ///
  /// By default, performs semantic analysis to determine whether the name can
  /// be resolved to a specific template, then builds the appropriate kind of
  /// template name. Subclasses may override this routine to provide different
  /// behavior.
  TemplateName RebuildTemplateName(TemplateTemplateParmDecl *Param,
                                   const TemplateArgument &ArgPack) {
    return getSema().Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
  }

  /// \brief Build a new compound statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildCompoundStmt(SourceLocation LBraceLoc,
                                 MultiStmtArg Statements,
                                 SourceLocation RBraceLoc,
                                 bool IsStmtExpr) {
    return getSema().ActOnCompoundStmt(LBraceLoc, RBraceLoc, Statements,
                                       IsStmtExpr);
  }

  /// \brief Build a new case statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildCaseStmt(SourceLocation CaseLoc,
                             Expr *LHS,
                             SourceLocation EllipsisLoc,
                             Expr *RHS,
                             SourceLocation ColonLoc) {
    return getSema().ActOnCaseStmt(CaseLoc, LHS, EllipsisLoc, RHS, ColonLoc);
  }

  /// \brief Attach the body to a new case statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildCaseStmtBody(Stmt *S, Stmt *Body) {
    getSema().ActOnCaseStmtBody(S, Body);
    return S;
  }

  /// \brief Build a new default statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildDefaultStmt(SourceLocation DefaultLoc,
                                SourceLocation ColonLoc,
                                Stmt *SubStmt) {
    return getSema().ActOnDefaultStmt(DefaultLoc, ColonLoc, SubStmt,
                                      /*CurScope=*/nullptr);
  }

  /// \brief Build a new label statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildLabelStmt(SourceLocation IdentLoc, LabelDecl *L,
                              SourceLocation ColonLoc, Stmt *SubStmt) {
    return SemaRef.ActOnLabelStmt(IdentLoc, L, ColonLoc, SubStmt);
  }

  /// \brief Build a new attributed statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildAttributedStmt(SourceLocation AttrLoc,
                                   ArrayRef<const Attr*> Attrs,
                                   Stmt *SubStmt) {
    return SemaRef.ActOnAttributedStmt(AttrLoc, Attrs, SubStmt);
  }

  /// \brief Build a new "if" statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildIfStmt(SourceLocation IfLoc, Sema::FullExprArg Cond,
                           VarDecl *CondVar, Stmt *Then,
                           SourceLocation ElseLoc, Stmt *Else) {
    return getSema().ActOnIfStmt(IfLoc, Cond, CondVar, Then, ElseLoc, Else);
  }

  /// \brief Start building a new switch statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
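  /// A switch is rebuilt in two steps, as in this sketch (the local names
  /// are hypothetical):
  /// \code
  ///   StmtResult Switch = RebuildSwitchStmtStart(SwitchLoc, CondE, CondVar);
  ///   if (!Switch.isInvalid())
  ///     Switch = RebuildSwitchStmtBody(SwitchLoc, Switch.get(), NewBody);
  /// \endcode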
  StmtResult RebuildSwitchStmtStart(SourceLocation SwitchLoc,
                                    Expr *Cond, VarDecl *CondVar) {
    return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Cond, CondVar);
  }

  /// \brief Attach the body to the switch statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildSwitchStmtBody(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body) {
    return getSema().ActOnFinishSwitchStmt(SwitchLoc, Switch, Body);
  }

  /// \brief Build a new while statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildWhileStmt(SourceLocation WhileLoc, Sema::FullExprArg Cond,
                              VarDecl *CondVar, Stmt *Body) {
    return getSema().ActOnWhileStmt(WhileLoc, Cond, CondVar, Body);
  }

  /// \brief Build a new do-while statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildDoStmt(SourceLocation DoLoc, Stmt *Body,
                           SourceLocation WhileLoc, SourceLocation LParenLoc,
                           Expr *Cond, SourceLocation RParenLoc) {
    return getSema().ActOnDoStmt(DoLoc, Body, WhileLoc, LParenLoc,
                                 Cond, RParenLoc);
  }

  /// \brief Build a new for statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
                            Stmt *Init, Sema::FullExprArg Cond,
                            VarDecl *CondVar, Sema::FullExprArg Inc,
                            SourceLocation RParenLoc, Stmt *Body) {
    return getSema().ActOnForStmt(ForLoc, LParenLoc, Init, Cond,
                                  CondVar, Inc, RParenLoc, Body);
  }

  /// \brief Build a new goto statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
                             LabelDecl *Label) {
    return getSema().ActOnGotoStmt(GotoLoc, LabelLoc, Label);
  }

  /// \brief Build a new indirect goto statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildIndirectGotoStmt(SourceLocation GotoLoc,
                                     SourceLocation StarLoc,
                                     Expr *Target) {
    return getSema().ActOnIndirectGotoStmt(GotoLoc, StarLoc, Target);
  }

  /// \brief Build a new return statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildReturnStmt(SourceLocation ReturnLoc, Expr *Result) {
    return getSema().BuildReturnStmt(ReturnLoc, Result);
  }

  /// \brief Build a new declaration statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildDeclStmt(MutableArrayRef<Decl *> Decls,
                             SourceLocation StartLoc, SourceLocation EndLoc) {
    Sema::DeclGroupPtrTy DG = getSema().BuildDeclaratorGroup(Decls);
    return getSema().ActOnDeclStmt(DG, StartLoc, EndLoc);
  }

  /// \brief Build a new inline asm statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
                               bool IsVolatile, unsigned NumOutputs,
                               unsigned NumInputs, IdentifierInfo **Names,
                               MultiExprArg Constraints, MultiExprArg Exprs,
                               Expr *AsmString, MultiExprArg Clobbers,
                               SourceLocation RParenLoc) {
    return getSema().ActOnGCCAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
                                     NumInputs, Names, Constraints, Exprs,
                                     AsmString, Clobbers, RParenLoc);
  }

  /// \brief Build a new MS style inline asm statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
                              ArrayRef<Token> AsmToks,
                              StringRef AsmString,
                              unsigned NumOutputs, unsigned NumInputs,
                              ArrayRef<StringRef> Constraints,
                              ArrayRef<StringRef> Clobbers,
                              ArrayRef<Expr*> Exprs,
                              SourceLocation EndLoc) {
    return getSema().ActOnMSAsmStmt(AsmLoc, LBraceLoc, AsmToks, AsmString,
                                    NumOutputs, NumInputs,
                                    Constraints, Clobbers, Exprs, EndLoc);
  }

  /// \brief Build a new Objective-C \@try statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildObjCAtTryStmt(SourceLocation AtLoc,
                                  Stmt *TryBody,
                                  MultiStmtArg CatchStmts,
                                  Stmt *Finally) {
    return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, CatchStmts, Finally);
  }

  /// \brief Rebuild an Objective-C exception declaration.
  ///
  /// By default, performs semantic analysis to build the new declaration.
  /// Subclasses may override this routine to provide different behavior.
  VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
                                    TypeSourceInfo *TInfo, QualType T) {
    return getSema().BuildObjCExceptionDecl(TInfo, T,
                                            ExceptionDecl->getInnerLocStart(),
                                            ExceptionDecl->getLocation(),
                                            ExceptionDecl->getIdentifier());
  }

  /// \brief Build a new Objective-C \@catch statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildObjCAtCatchStmt(SourceLocation AtLoc,
                                    SourceLocation RParenLoc,
                                    VarDecl *Var,
                                    Stmt *Body) {
    return getSema().ActOnObjCAtCatchStmt(AtLoc, RParenLoc, Var, Body);
  }

  /// \brief Build a new Objective-C \@finally statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildObjCAtFinallyStmt(SourceLocation AtLoc,
                                      Stmt *Body) {
    return getSema().ActOnObjCAtFinallyStmt(AtLoc, Body);
  }

  /// \brief Build a new Objective-C \@throw statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildObjCAtThrowStmt(SourceLocation AtLoc,
                                    Expr *Operand) {
    return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
  }

  /// \brief Build a new OpenMP executable directive.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildOMPExecutableDirective(OpenMPDirectiveKind Kind,
                                           DeclarationNameInfo DirName,
                                           OpenMPDirectiveKind CancelRegion,
                                           ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt, SourceLocation StartLoc,
                                           SourceLocation EndLoc) {
    return getSema().ActOnOpenMPExecutableDirective(
        Kind, DirName, CancelRegion, Clauses, AStmt, StartLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'if' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPIfClause(Expr *Condition,
                                SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation EndLoc) {
    return getSema().ActOnOpenMPIfClause(Condition, StartLoc,
                                         LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'final' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc) {
    return getSema().ActOnOpenMPFinalClause(Condition, StartLoc, LParenLoc,
                                            EndLoc);
  }

  /// \brief Build a new OpenMP 'num_threads' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPNumThreadsClause(Expr *NumThreads,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc) {
    return getSema().ActOnOpenMPNumThreadsClause(NumThreads, StartLoc,
                                                 LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'safelen' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPSafelenClause(Expr *Len, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc) {
    return getSema().ActOnOpenMPSafelenClause(Len, StartLoc, LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'collapse' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc) {
    return getSema().ActOnOpenMPCollapseClause(Num, StartLoc, LParenLoc,
                                               EndLoc);
  }

  /// \brief Build a new OpenMP 'default' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                     SourceLocation KindKwLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc) {
    return getSema().ActOnOpenMPDefaultClause(Kind, KindKwLoc,
                                              StartLoc, LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'proc_bind' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                      SourceLocation KindKwLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc) {
    return getSema().ActOnOpenMPProcBindClause(Kind, KindKwLoc,
                                               StartLoc, LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'schedule' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPScheduleClause(OpenMPScheduleClauseKind Kind,
                                      Expr *ChunkSize,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation KindLoc,
                                      SourceLocation CommaLoc,
                                      SourceLocation EndLoc) {
    return getSema().ActOnOpenMPScheduleClause(
        Kind, ChunkSize, StartLoc, LParenLoc, KindLoc, CommaLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'private' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPPrivateClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc) {
    return getSema().ActOnOpenMPPrivateClause(VarList, StartLoc, LParenLoc,
                                              EndLoc);
  }

  /// \brief Build a new OpenMP 'firstprivate' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc) {
    return getSema().ActOnOpenMPFirstprivateClause(VarList, StartLoc,
                                                   LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'lastprivate' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPLastprivateClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc) {
    return getSema().ActOnOpenMPLastprivateClause(VarList, StartLoc,
                                                  LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'shared' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPSharedClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc) {
    return getSema().ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc,
                                             EndLoc);
  }

  /// \brief Build a new OpenMP 'reduction' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPReductionClause(ArrayRef<Expr *> VarList,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ColonLoc,
                                       SourceLocation EndLoc,
                                       CXXScopeSpec &ReductionIdScopeSpec,
                                       const DeclarationNameInfo &ReductionId) {
    return getSema().ActOnOpenMPReductionClause(
        VarList, StartLoc, LParenLoc, ColonLoc, EndLoc, ReductionIdScopeSpec,
        ReductionId);
  }

  /// \brief Build a new OpenMP 'linear' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation ColonLoc,
                                    SourceLocation EndLoc) {
    return getSema().ActOnOpenMPLinearClause(VarList, Step, StartLoc,
                                             LParenLoc, ColonLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'aligned' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc) {
    return getSema().ActOnOpenMPAlignedClause(VarList, Alignment, StartLoc,
                                              LParenLoc, ColonLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'copyin' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
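  /// For example (a sketch; \c Vars and the source locations are hypothetical
  /// locals), rebuilding \c copyin(x, \c y):
  /// \code
  ///   OMPClause *C = RebuildOMPCopyinClause(Vars, StartLoc, LParenLoc, EndLoc);
  /// \endcode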
  OMPClause *RebuildOMPCopyinClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc) {
    return getSema().ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc,
                                             EndLoc);
  }

  /// \brief Build a new OpenMP 'copyprivate' clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc) {
    return getSema().ActOnOpenMPCopyprivateClause(VarList, StartLoc,
                                                  LParenLoc, EndLoc);
  }

  /// \brief Build a new OpenMP 'flush' pseudo clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *RebuildOMPFlushClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc) {
    return getSema().ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc,
                                            EndLoc);
  }

  /// \brief Build a new OpenMP 'depend' pseudo clause.
  ///
  /// By default, performs semantic analysis to build the new OpenMP clause.
  /// Subclasses may override this routine to provide different behavior.
  OMPClause *
  RebuildOMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
                         SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
                         SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation EndLoc) {
    return getSema().ActOnOpenMPDependClause(DepKind, DepLoc, ColonLoc, VarList,
                                             StartLoc, LParenLoc, EndLoc);
  }

  /// \brief Rebuild the operand to an Objective-C \@synchronized statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  ExprResult RebuildObjCAtSynchronizedOperand(SourceLocation atLoc,
                                              Expr *object) {
    return getSema().ActOnObjCAtSynchronizedOperand(atLoc, object);
  }

  /// \brief Build a new Objective-C \@synchronized statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildObjCAtSynchronizedStmt(SourceLocation AtLoc,
                                           Expr *Object, Stmt *Body) {
    return getSema().ActOnObjCAtSynchronizedStmt(AtLoc, Object, Body);
  }

  /// \brief Build a new Objective-C \@autoreleasepool statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildObjCAutoreleasePoolStmt(SourceLocation AtLoc,
                                            Stmt *Body) {
    return getSema().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
  }

  /// \brief Build a new Objective-C fast enumeration statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildObjCForCollectionStmt(SourceLocation ForLoc,
                                          Stmt *Element,
                                          Expr *Collection,
                                          SourceLocation RParenLoc,
                                          Stmt *Body) {
    StmtResult ForEachStmt = getSema().ActOnObjCForCollectionStmt(ForLoc,
                                                                  Element,
                                                                  Collection,
                                                                  RParenLoc);
    if (ForEachStmt.isInvalid())
      return StmtError();

    return getSema().FinishObjCForCollectionStmt(ForEachStmt.get(), Body);
  }

  /// \brief Build a new C++ exception declaration.
  ///
  /// By default, performs semantic analysis to build the new declaration.
  /// Subclasses may override this routine to provide different behavior.
  VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
                                TypeSourceInfo *Declarator,
                                SourceLocation StartLoc,
                                SourceLocation IdLoc,
                                IdentifierInfo *Id) {
    VarDecl *Var = getSema().BuildExceptionDeclaration(nullptr, Declarator,
                                                       StartLoc, IdLoc, Id);
    if (Var)
      getSema().CurContext->addDecl(Var);
    return Var;
  }

  /// \brief Build a new C++ catch statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildCXXCatchStmt(SourceLocation CatchLoc,
                                 VarDecl *ExceptionDecl,
                                 Stmt *Handler) {
    return Owned(new (getSema().Context) CXXCatchStmt(CatchLoc, ExceptionDecl,
                                                      Handler));
  }

  /// \brief Build a new C++ try statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildCXXTryStmt(SourceLocation TryLoc, Stmt *TryBlock,
                               ArrayRef<Stmt *> Handlers) {
    return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, Handlers);
  }

  /// \brief Build a new C++0x range-based for statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildCXXForRangeStmt(SourceLocation ForLoc,
                                    SourceLocation ColonLoc,
                                    Stmt *Range, Stmt *BeginEnd,
                                    Expr *Cond, Expr *Inc,
                                    Stmt *LoopVar,
                                    SourceLocation RParenLoc) {
    // If we've just learned that the range is actually an Objective-C
    // collection, treat this as an Objective-C fast enumeration loop.
    if (DeclStmt *RangeStmt = dyn_cast<DeclStmt>(Range)) {
      if (RangeStmt->isSingleDecl()) {
        if (VarDecl *RangeVar = dyn_cast<VarDecl>(RangeStmt->getSingleDecl())) {
          if (RangeVar->isInvalidDecl())
            return StmtError();

          Expr *RangeExpr = RangeVar->getInit();
          if (!RangeExpr->isTypeDependent() &&
              RangeExpr->getType()->isObjCObjectPointerType())
            return getSema().ActOnObjCForCollectionStmt(ForLoc, LoopVar,
                                                        RangeExpr, RParenLoc);
        }
      }
    }

    return getSema().BuildCXXForRangeStmt(ForLoc, ColonLoc, Range, BeginEnd,
                                          Cond, Inc, LoopVar, RParenLoc,
                                          Sema::BFRK_Rebuild);
  }

  /// \brief Build a new Microsoft __if_exists or __if_not_exists statement.
  ///
  /// By default, performs semantic analysis to build the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult RebuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                          bool IsIfExists,
                                          NestedNameSpecifierLoc QualifierLoc,
                                          DeclarationNameInfo NameInfo,
                                          Stmt *Nested) {
    return getSema().BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
                                                QualifierLoc, NameInfo, Nested);
  }

  /// \brief Attach body to a C++0x range-based for statement.
  ///
  /// By default, performs semantic analysis to finish the new statement.
  /// Subclasses may override this routine to provide different behavior.
  StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body) {
    return getSema().FinishCXXForRangeStmt(ForRange, Body);
  }

  StmtResult RebuildSEHTryStmt(bool IsCXXTry, SourceLocation TryLoc,
                               Stmt *TryBlock, Stmt *Handler) {
    return getSema().ActOnSEHTryBlock(IsCXXTry, TryLoc, TryBlock, Handler);
  }

  StmtResult RebuildSEHExceptStmt(SourceLocation Loc, Expr *FilterExpr,
                                  Stmt *Block) {
    return getSema().ActOnSEHExceptBlock(Loc, FilterExpr, Block);
  }

  StmtResult RebuildSEHFinallyStmt(SourceLocation Loc, Stmt *Block) {
    return SEHFinallyStmt::Create(getSema().getASTContext(), Loc, Block);
  }

  /// \brief Build a new predefined expression.
  ///
  /// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior. ExprResult RebuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT) { return getSema().BuildPredefinedExpr(Loc, IT); } /// \brief Build a new expression that references a declaration. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool RequiresADL) { return getSema().BuildDeclarationNameExpr(SS, R, RequiresADL); } /// \brief Build a new expression that references a declaration. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildDeclRefExpr(NestedNameSpecifierLoc QualifierLoc, ValueDecl *VD, const DeclarationNameInfo &NameInfo, TemplateArgumentListInfo *TemplateArgs) { CXXScopeSpec SS; SS.Adopt(QualifierLoc); // FIXME: loses template args. return getSema().BuildDeclarationNameExpr(SS, NameInfo, VD); } /// \brief Build a new expression in parentheses. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildParenExpr(Expr *SubExpr, SourceLocation LParen, SourceLocation RParen) { return getSema().ActOnParenExpr(LParen, RParen, SubExpr); } /// \brief Build a new pseudo-destructor expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXPseudoDestructorExpr(Expr *Base, SourceLocation OperatorLoc, bool isArrow, CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage Destroyed); /// \brief Build a new unary operator expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildUnaryOperator(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *SubExpr) { return getSema().BuildUnaryOp(/*Scope=*/nullptr, OpLoc, Opc, SubExpr); } /// \brief Build a new builtin offsetof expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildOffsetOfExpr(SourceLocation OperatorLoc, TypeSourceInfo *Type, Sema::OffsetOfComponent *Components, unsigned NumComponents, SourceLocation RParenLoc) { return getSema().BuildBuiltinOffsetOf(OperatorLoc, Type, Components, NumComponents, RParenLoc); } /// \brief Build a new sizeof, alignof or vec_step expression with a /// type argument. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildUnaryExprOrTypeTrait(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R) { return getSema().CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, R); } /// \brief Build a new sizeof, alignof or vec step expression with an /// expression argument. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. 
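  /// This overload handles the expression form, e.g. sizeof x; the
  /// TypeSourceInfo overload above handles the type form, e.g. sizeof(int).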
ExprResult RebuildUnaryExprOrTypeTrait(Expr *SubExpr, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R) { ExprResult Result = getSema().CreateUnaryExprOrTypeTraitExpr(SubExpr, OpLoc, ExprKind); if (Result.isInvalid()) return ExprError(); return Result; } /// \brief Build a new array subscript expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildArraySubscriptExpr(Expr *LHS, SourceLocation LBracketLoc, Expr *RHS, SourceLocation RBracketLoc) { return getSema().ActOnArraySubscriptExpr(/*Scope=*/nullptr, LHS, LBracketLoc, RHS, RBracketLoc); } /// \brief Build a new call expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr) { return getSema().ActOnCallExpr(/*Scope=*/nullptr, Callee, LParenLoc, Args, RParenLoc, ExecConfig); } /// \brief Build a new member access expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildMemberExpr(Expr *Base, SourceLocation OpLoc, bool isArrow, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, const DeclarationNameInfo &MemberNameInfo, ValueDecl *Member, NamedDecl *FoundDecl, const TemplateArgumentListInfo *ExplicitTemplateArgs, NamedDecl *FirstQualifierInScope) { ExprResult BaseResult = getSema().PerformMemberExprBaseConversion(Base, isArrow); if (!Member->getDeclName()) { // We have a reference to an unnamed field. This is always the // base of an anonymous struct/union member access, i.e. the // field is always of record type. assert(!QualifierLoc && "Can't have an unnamed field with a qualifier!"); assert(Member->getType()->isRecordType() && "unnamed member not of record type?"); BaseResult = getSema().PerformObjectMemberConversion(BaseResult.get(), QualifierLoc.getNestedNameSpecifier(), FoundDecl, Member); if (BaseResult.isInvalid()) return ExprError(); Base = BaseResult.get(); ExprValueKind VK = isArrow ? VK_LValue : Base->getValueKind(); MemberExpr *ME = new (getSema().Context) MemberExpr(Base, isArrow, OpLoc, Member, MemberNameInfo, cast<FieldDecl>(Member)->getType(), VK, OK_Ordinary); return ME; } CXXScopeSpec SS; SS.Adopt(QualifierLoc); Base = BaseResult.get(); QualType BaseType = Base->getType(); // FIXME: this involves duplicating earlier analysis in a lot of // cases; we should avoid this when possible. LookupResult R(getSema(), MemberNameInfo, Sema::LookupMemberName); R.addDecl(FoundDecl); R.resolveKind(); return getSema().BuildMemberReferenceExpr(Base, BaseType, OpLoc, isArrow, SS, TemplateKWLoc, FirstQualifierInScope, R, ExplicitTemplateArgs); } /// \brief Build a new binary operator expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildBinaryOperator(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHS, Expr *RHS) { return getSema().BuildBinOp(/*Scope=*/nullptr, OpLoc, Opc, LHS, RHS); } /// \brief Build a new conditional operator expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. 
ExprResult RebuildConditionalOperator(Expr *Cond, SourceLocation QuestionLoc, Expr *LHS, SourceLocation ColonLoc, Expr *RHS) { return getSema().ActOnConditionalOp(QuestionLoc, ColonLoc, Cond, LHS, RHS); } /// \brief Build a new C-style cast expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *SubExpr) { return getSema().BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, SubExpr); } /// \brief Build a new compound literal expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *Init) { return getSema().BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, Init); } /// \brief Build a new extended vector element access expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildExtVectorElementExpr(Expr *Base, SourceLocation OpLoc, SourceLocation AccessorLoc, IdentifierInfo &Accessor) { CXXScopeSpec SS; DeclarationNameInfo NameInfo(&Accessor, AccessorLoc); return getSema().BuildMemberReferenceExpr(Base, Base->getType(), OpLoc, /*IsArrow*/ false, SS, SourceLocation(), /*FirstQualifierInScope*/ nullptr, NameInfo, /* TemplateArgs */ nullptr); } // HLSL Changes Start /// \brief Build a new extended matrix element access expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildExtMatrixElementExpr(Expr *Base, SourceLocation OpLoc, SourceLocation AccessorLoc, IdentifierInfo &Accessor) { const bool IsArrowFalse = false; ExprResult result; DeclarationName Name(&Accessor); return hlsl::LookupMatrixMemberExprForHLSL(&getSema(), *Base, Name, IsArrowFalse, OpLoc, AccessorLoc); } /// \brief Build a new extended vector element access expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildHLSLVectorElementExpr(Expr *Base, SourceLocation OpLoc, SourceLocation AccessorLoc, IdentifierInfo &Accessor) { const bool IsArrowFalse = false; ExprResult result; DeclarationName Name(&Accessor); ExprResult ER = hlsl::MaybeConvertMemberAccess(&getSema(), Base); if (ER.isInvalid()) { return ExprError(); } return hlsl::LookupVectorMemberExprForHLSL(&getSema(), *ER.get(), Name, IsArrowFalse, OpLoc, AccessorLoc); } // HLSL Changes End /// \brief Build a new initializer list expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildInitList(SourceLocation LBraceLoc, MultiExprArg Inits, SourceLocation RBraceLoc, QualType ResultTy) { ExprResult Result = SemaRef.ActOnInitList(LBraceLoc, Inits, RBraceLoc); if (Result.isInvalid() || ResultTy->isDependentType()) return Result; // Patch in the result type we were given, which may have been computed // when the initial InitListExpr was built. 
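    // ActOnInitList always produces an InitListExpr, so the unchecked cast
    // below is safe once the result is known to be valid.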
InitListExpr *ILE = cast<InitListExpr>((Expr *)Result.get()); ILE->setType(ResultTy); return Result; } /// \brief Build a new designated initializer expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildDesignatedInitExpr(Designation &Desig, MultiExprArg ArrayExprs, SourceLocation EqualOrColonLoc, bool GNUSyntax, Expr *Init) { ExprResult Result = SemaRef.ActOnDesignatedInitializer(Desig, EqualOrColonLoc, GNUSyntax, Init); if (Result.isInvalid()) return ExprError(); return Result; } /// \brief Build a new value-initialized expression. /// /// By default, builds the implicit value initialization without performing /// any semantic analysis. Subclasses may override this routine to provide /// different behavior. ExprResult RebuildImplicitValueInitExpr(QualType T) { return new (SemaRef.Context) ImplicitValueInitExpr(T); } /// \brief Build a new \c va_arg expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildVAArgExpr(SourceLocation BuiltinLoc, Expr *SubExpr, TypeSourceInfo *TInfo, SourceLocation RParenLoc) { return getSema().BuildVAArgExpr(BuiltinLoc, SubExpr, TInfo, RParenLoc); } /// \brief Build a new expression list in parentheses. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildParenListExpr(SourceLocation LParenLoc, MultiExprArg SubExprs, SourceLocation RParenLoc) { return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, SubExprs); } /// \brief Build a new address-of-label expression. /// /// By default, performs semantic analysis, using the name of the label /// rather than attempting to map the label statement itself. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildAddrLabelExpr(SourceLocation AmpAmpLoc, SourceLocation LabelLoc, LabelDecl *Label) { return getSema().ActOnAddrLabel(AmpAmpLoc, LabelLoc, Label); } /// \brief Build a new GNU statement expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildStmtExpr(SourceLocation LParenLoc, Stmt *SubStmt, SourceLocation RParenLoc) { return getSema().ActOnStmtExpr(LParenLoc, SubStmt, RParenLoc); } /// \brief Build a new __builtin_choose_expr expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildChooseExpr(SourceLocation BuiltinLoc, Expr *Cond, Expr *LHS, Expr *RHS, SourceLocation RParenLoc) { return SemaRef.ActOnChooseExpr(BuiltinLoc, Cond, LHS, RHS, RParenLoc); } /// \brief Build a new generic selection expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs) { return getSema().CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc, ControllingExpr, Types, Exprs); } /// \brief Build a new overloaded operator call expression. /// /// By default, performs semantic analysis to build the new expression. 
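  /// For example, a dependent expression a + b may, once transformed, resolve
  /// either to a call to an overloaded operator+ or to a builtin addition.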
/// The semantic analysis provides the behavior of template instantiation, /// copying with transformations that turn what looks like an overloaded /// operator call into a use of a builtin operator, performing /// argument-dependent lookup, etc. Subclasses may override this routine to /// provide different behavior. ExprResult RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op, SourceLocation OpLoc, Expr *Callee, Expr *First, Expr *Second); /// \brief Build a new C++ "named" cast expression, such as static_cast or /// reinterpret_cast. /// /// By default, this routine dispatches to one of the more-specific routines /// for a particular named case, e.g., RebuildCXXStaticCastExpr(). /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXNamedCastExpr(SourceLocation OpLoc, Stmt::StmtClass Class, SourceLocation LAngleLoc, TypeSourceInfo *TInfo, SourceLocation RAngleLoc, SourceLocation LParenLoc, Expr *SubExpr, SourceLocation RParenLoc) { switch (Class) { case Stmt::CXXStaticCastExprClass: return getDerived().RebuildCXXStaticCastExpr(OpLoc, LAngleLoc, TInfo, RAngleLoc, LParenLoc, SubExpr, RParenLoc); case Stmt::CXXDynamicCastExprClass: return getDerived().RebuildCXXDynamicCastExpr(OpLoc, LAngleLoc, TInfo, RAngleLoc, LParenLoc, SubExpr, RParenLoc); case Stmt::CXXReinterpretCastExprClass: return getDerived().RebuildCXXReinterpretCastExpr(OpLoc, LAngleLoc, TInfo, RAngleLoc, LParenLoc, SubExpr, RParenLoc); case Stmt::CXXConstCastExprClass: return getDerived().RebuildCXXConstCastExpr(OpLoc, LAngleLoc, TInfo, RAngleLoc, LParenLoc, SubExpr, RParenLoc); default: llvm_unreachable("Invalid C++ named cast"); } } /// \brief Build a new C++ static_cast expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXStaticCastExpr(SourceLocation OpLoc, SourceLocation LAngleLoc, TypeSourceInfo *TInfo, SourceLocation RAngleLoc, SourceLocation LParenLoc, Expr *SubExpr, SourceLocation RParenLoc) { return getSema().BuildCXXNamedCast(OpLoc, tok::kw_static_cast, TInfo, SubExpr, SourceRange(LAngleLoc, RAngleLoc), SourceRange(LParenLoc, RParenLoc)); } /// \brief Build a new C++ dynamic_cast expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXDynamicCastExpr(SourceLocation OpLoc, SourceLocation LAngleLoc, TypeSourceInfo *TInfo, SourceLocation RAngleLoc, SourceLocation LParenLoc, Expr *SubExpr, SourceLocation RParenLoc) { return getSema().BuildCXXNamedCast(OpLoc, tok::kw_dynamic_cast, TInfo, SubExpr, SourceRange(LAngleLoc, RAngleLoc), SourceRange(LParenLoc, RParenLoc)); } /// \brief Build a new C++ reinterpret_cast expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXReinterpretCastExpr(SourceLocation OpLoc, SourceLocation LAngleLoc, TypeSourceInfo *TInfo, SourceLocation RAngleLoc, SourceLocation LParenLoc, Expr *SubExpr, SourceLocation RParenLoc) { return getSema().BuildCXXNamedCast(OpLoc, tok::kw_reinterpret_cast, TInfo, SubExpr, SourceRange(LAngleLoc, RAngleLoc), SourceRange(LParenLoc, RParenLoc)); } /// \brief Build a new C++ const_cast expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. 
ExprResult RebuildCXXConstCastExpr(SourceLocation OpLoc, SourceLocation LAngleLoc, TypeSourceInfo *TInfo, SourceLocation RAngleLoc, SourceLocation LParenLoc, Expr *SubExpr, SourceLocation RParenLoc) { return getSema().BuildCXXNamedCast(OpLoc, tok::kw_const_cast, TInfo, SubExpr, SourceRange(LAngleLoc, RAngleLoc), SourceRange(LParenLoc, RParenLoc)); } /// \brief Build a new C++ functional-style cast expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *Sub, SourceLocation RParenLoc) { return getSema().BuildCXXTypeConstructExpr(TInfo, LParenLoc, MultiExprArg(&Sub, 1), RParenLoc); } /// \brief Build a new C++ typeid(type) expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXTypeidExpr(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc) { return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand, RParenLoc); } /// \brief Build a new C++ typeid(expr) expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXTypeidExpr(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc) { return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand, RParenLoc); } /// \brief Build a new C++ __uuidof(type) expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc) { return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand, RParenLoc); } /// \brief Build a new C++ __uuidof(expr) expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc) { return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand, RParenLoc); } /// \brief Build a new C++ "this" expression. /// /// By default, builds a new "this" expression without performing any /// semantic analysis. Subclasses may override this routine to provide /// different behavior. ExprResult RebuildCXXThisExpr(SourceLocation ThisLoc, QualType ThisType, bool isImplicit) { getSema().CheckCXXThisCapture(ThisLoc); // HLSL Change Begin - adjust this from T* to T&-like if (getSema().getLangOpts().HLSL) return getSema().genereateHLSLThis(ThisLoc, ThisType, isImplicit); // HLSL Change End - adjust this from T* to T&-like return new (getSema().Context) CXXThisExpr(ThisLoc, ThisType, isImplicit); } /// \brief Build a new C++ throw expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXThrowExpr(SourceLocation ThrowLoc, Expr *Sub, bool IsThrownVariableInScope) { return getSema().BuildCXXThrow(ThrowLoc, Sub, IsThrownVariableInScope); } /// \brief Build a new C++ default-argument expression. 
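  /// (That is, a use of the parameter's default argument at a call site.)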
/// /// By default, builds a new default-argument expression, which does not /// require any semantic analysis. Subclasses may override this routine to /// provide different behavior. ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc, ParmVarDecl *Param) { return CXXDefaultArgExpr::Create(getSema().Context, Loc, Param); } /// \brief Build a new C++11 default-initialization expression. /// /// By default, builds a new default field initialization expression, which /// does not require any semantic analysis. Subclasses may override this /// routine to provide different behavior. ExprResult RebuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) { return CXXDefaultInitExpr::Create(getSema().Context, Loc, Field); } /// \brief Build a new C++ zero-initialization expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXScalarValueInitExpr(TypeSourceInfo *TSInfo, SourceLocation LParenLoc, SourceLocation RParenLoc) { return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, None, RParenLoc); } /// \brief Build a new C++ "new" expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXNewExpr(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocatedType, TypeSourceInfo *AllocatedTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer) { return getSema().BuildCXXNew(StartLoc, UseGlobal, PlacementLParen, PlacementArgs, PlacementRParen, TypeIdParens, AllocatedType, AllocatedTypeInfo, ArraySize, DirectInitRange, Initializer); } /// \brief Build a new C++ "delete" expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXDeleteExpr(SourceLocation StartLoc, bool IsGlobalDelete, bool IsArrayForm, Expr *Operand) { return getSema().ActOnCXXDelete(StartLoc, IsGlobalDelete, IsArrayForm, Operand); } /// \brief Build a new type trait expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildTypeTrait(TypeTrait Trait, SourceLocation StartLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc) { return getSema().BuildTypeTrait(Trait, StartLoc, Args, RParenLoc); } /// \brief Build a new array type trait expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildArrayTypeTrait(ArrayTypeTrait Trait, SourceLocation StartLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParenLoc) { return getSema().BuildArrayTypeTrait(Trait, StartLoc, TSInfo, DimExpr, RParenLoc); } /// \brief Build a new expression trait expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildExpressionTrait(ExpressionTrait Trait, SourceLocation StartLoc, Expr *Queried, SourceLocation RParenLoc) { return getSema().BuildExpressionTrait(Trait, StartLoc, Queried, RParenLoc); } /// \brief Build a new (previously unresolved) declaration reference /// expression. 
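  /// (Used for qualified names whose declaration could not be resolved until
  /// template arguments were substituted, e.g. T::value.)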
/// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildDependentScopeDeclRefExpr( NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI) { CXXScopeSpec SS; SS.Adopt(QualifierLoc); if (TemplateArgs || TemplateKWLoc.isValid()) return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs); return getSema().BuildQualifiedDeclarationNameExpr( SS, NameInfo, IsAddressOfOperand, RecoveryTSI); } /// \brief Build a new template-id expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs) { return getSema().BuildTemplateIdExpr(SS, TemplateKWLoc, R, RequiresADL, TemplateArgs); } /// \brief Build a new object-construction expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXConstructExpr(QualType T, SourceLocation Loc, CXXConstructorDecl *Constructor, bool IsElidable, MultiExprArg Args, bool HadMultipleCandidates, bool ListInitialization, bool StdInitListInitialization, bool RequiresZeroInit, CXXConstructExpr::ConstructionKind ConstructKind, SourceRange ParenRange) { SmallVector<Expr*, 8> ConvertedArgs; if (getSema().CompleteConstructorCall(Constructor, Args, Loc, ConvertedArgs)) return ExprError(); return getSema().BuildCXXConstructExpr(Loc, T, Constructor, IsElidable, ConvertedArgs, HadMultipleCandidates, ListInitialization, StdInitListInitialization, RequiresZeroInit, ConstructKind, ParenRange); } /// \brief Build a new object-construction expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXTemporaryObjectExpr(TypeSourceInfo *TSInfo, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc) { return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, Args, RParenLoc); } /// \brief Build a new object-construction expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXUnresolvedConstructExpr(TypeSourceInfo *TSInfo, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc) { return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc, Args, RParenLoc); } /// \brief Build a new member reference expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. 
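  /// For instance, in code along these lines, the dependent access t.size()
  /// is rebuilt through this hook when the template is instantiated:
  /// \code
  ///   template <typename T> unsigned f(T t) { return t.size(); }
  /// \endcode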
ExprResult RebuildCXXDependentScopeMemberExpr(Expr *BaseE, QualType BaseType, bool IsArrow, SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &MemberNameInfo, const TemplateArgumentListInfo *TemplateArgs) { CXXScopeSpec SS; SS.Adopt(QualifierLoc); return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType, OperatorLoc, IsArrow, SS, TemplateKWLoc, FirstQualifierInScope, MemberNameInfo, TemplateArgs); } /// \brief Build a new member reference expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildUnresolvedMemberExpr(Expr *BaseE, QualType BaseType, SourceLocation OperatorLoc, bool IsArrow, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs) { CXXScopeSpec SS; SS.Adopt(QualifierLoc); return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType, OperatorLoc, IsArrow, SS, TemplateKWLoc, FirstQualifierInScope, R, TemplateArgs); } /// \brief Build a new noexcept expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildCXXNoexceptExpr(SourceRange Range, Expr *Arg) { return SemaRef.BuildCXXNoexceptExpr(Range.getBegin(), Arg, Range.getEnd()); } /// \brief Build a new expression to compute the length of a parameter pack. ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc, NamedDecl *Pack, SourceLocation PackLoc, SourceLocation RParenLoc, Optional<unsigned> Length) { if (Length) return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(), OperatorLoc, Pack, PackLoc, RParenLoc, *Length); return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(), OperatorLoc, Pack, PackLoc, RParenLoc); } /// \brief Build a new Objective-C boxed expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) { return getSema().BuildObjCBoxedExpr(SR, ValueExpr); } /// \brief Build a new Objective-C array literal. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildObjCArrayLiteral(SourceRange Range, Expr **Elements, unsigned NumElements) { return getSema().BuildObjCArrayLiteral(Range, MultiExprArg(Elements, NumElements)); } ExprResult RebuildObjCSubscriptRefExpr(SourceLocation RB, Expr *Base, Expr *Key, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod) { return getSema().BuildObjCSubscriptExpression(RB, Base, Key, getterMethod, setterMethod); } /// \brief Build a new Objective-C dictionary literal. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildObjCDictionaryLiteral(SourceRange Range, ObjCDictionaryElement *Elements, unsigned NumElements) { return getSema().BuildObjCDictionaryLiteral(Range, Elements, NumElements); } /// \brief Build a new Objective-C \@encode expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. 
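  /// (An \@encode expression yields the type-encoding string for its operand;
  /// for example, \@encode(int) yields "i".)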
ExprResult RebuildObjCEncodeExpr(SourceLocation AtLoc, TypeSourceInfo *EncodeTypeInfo, SourceLocation RParenLoc) { return SemaRef.BuildObjCEncodeExpression(AtLoc, EncodeTypeInfo, RParenLoc); } /// \brief Build a new Objective-C class message. ExprResult RebuildObjCMessageExpr(TypeSourceInfo *ReceiverTypeInfo, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, SourceLocation LBracLoc, MultiExprArg Args, SourceLocation RBracLoc) { return SemaRef.BuildClassMessage(ReceiverTypeInfo, ReceiverTypeInfo->getType(), /*SuperLoc=*/SourceLocation(), Sel, Method, LBracLoc, SelectorLocs, RBracLoc, Args); } /// \brief Build a new Objective-C instance message. ExprResult RebuildObjCMessageExpr(Expr *Receiver, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, SourceLocation LBracLoc, MultiExprArg Args, SourceLocation RBracLoc) { return SemaRef.BuildInstanceMessage(Receiver, Receiver->getType(), /*SuperLoc=*/SourceLocation(), Sel, Method, LBracLoc, SelectorLocs, RBracLoc, Args); } /// \brief Build a new Objective-C instance/class message to 'super'. ExprResult RebuildObjCMessageExpr(SourceLocation SuperLoc, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, SourceLocation LBracLoc, MultiExprArg Args, SourceLocation RBracLoc) { ObjCInterfaceDecl *Class = Method->getClassInterface(); QualType ReceiverTy = SemaRef.Context.getObjCInterfaceType(Class); return Method->isInstanceMethod() ? SemaRef.BuildInstanceMessage(nullptr, ReceiverTy, SuperLoc, Sel, Method, LBracLoc, SelectorLocs, RBracLoc, Args) : SemaRef.BuildClassMessage(nullptr, ReceiverTy, SuperLoc, Sel, Method, LBracLoc, SelectorLocs, RBracLoc, Args); } /// \brief Build a new Objective-C ivar reference expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildObjCIvarRefExpr(Expr *BaseArg, ObjCIvarDecl *Ivar, SourceLocation IvarLoc, bool IsArrow, bool IsFreeIvar) { // FIXME: We lose track of the IsFreeIvar bit. CXXScopeSpec SS; DeclarationNameInfo NameInfo(Ivar->getDeclName(), IvarLoc); return getSema().BuildMemberReferenceExpr(BaseArg, BaseArg->getType(), /*FIXME:*/IvarLoc, IsArrow, SS, SourceLocation(), /*FirstQualifierInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr); } /// \brief Build a new Objective-C property reference expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildObjCPropertyRefExpr(Expr *BaseArg, ObjCPropertyDecl *Property, SourceLocation PropertyLoc) { CXXScopeSpec SS; DeclarationNameInfo NameInfo(Property->getDeclName(), PropertyLoc); return getSema().BuildMemberReferenceExpr(BaseArg, BaseArg->getType(), /*FIXME:*/PropertyLoc, /*IsArrow=*/false, SS, SourceLocation(), /*FirstQualifierInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr); } /// \brief Build a new Objective-C property reference expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildObjCPropertyRefExpr(Expr *Base, QualType T, ObjCMethodDecl *Getter, ObjCMethodDecl *Setter, SourceLocation PropertyLoc) { // Since these expressions can only be value-dependent, we do not // need to perform semantic analysis again. 
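    // Reuse the getter/setter pair that was resolved when the original
    // expression was built.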
return Owned( new (getSema().Context) ObjCPropertyRefExpr(Getter, Setter, T, VK_LValue, OK_ObjCProperty, PropertyLoc, Base)); } /// \brief Build a new Objective-C "isa" expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildObjCIsaExpr(Expr *BaseArg, SourceLocation IsaLoc, SourceLocation OpLoc, bool IsArrow) { CXXScopeSpec SS; DeclarationNameInfo NameInfo(&getSema().Context.Idents.get("isa"), IsaLoc); return getSema().BuildMemberReferenceExpr(BaseArg, BaseArg->getType(), OpLoc, IsArrow, SS, SourceLocation(), /*FirstQualifierInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr); } /// \brief Build a new shuffle vector expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildShuffleVectorExpr(SourceLocation BuiltinLoc, MultiExprArg SubExprs, SourceLocation RParenLoc) { // Find the declaration for __builtin_shufflevector const IdentifierInfo &Name = SemaRef.Context.Idents.get("__builtin_shufflevector"); TranslationUnitDecl *TUDecl = SemaRef.Context.getTranslationUnitDecl(); DeclContext::lookup_result Lookup = TUDecl->lookup(DeclarationName(&Name)); assert(!Lookup.empty() && "No __builtin_shufflevector?"); // Build a reference to the __builtin_shufflevector builtin FunctionDecl *Builtin = cast<FunctionDecl>(Lookup.front()); Expr *Callee = new (SemaRef.Context) DeclRefExpr(Builtin, false, SemaRef.Context.BuiltinFnTy, VK_RValue, BuiltinLoc); QualType CalleePtrTy = SemaRef.Context.getPointerType(Builtin->getType()); Callee = SemaRef.ImpCastExprToType(Callee, CalleePtrTy, CK_BuiltinFnToFnPtr).get(); // Build the CallExpr ExprResult TheCall = new (SemaRef.Context) CallExpr( SemaRef.Context, Callee, SubExprs, Builtin->getCallResultType(), Expr::getValueKindForType(Builtin->getReturnType()), RParenLoc); // Type-check the __builtin_shufflevector expression. return SemaRef.SemaBuiltinShuffleVector(cast<CallExpr>(TheCall.get())); } /// \brief Build a new convert vector expression. ExprResult RebuildConvertVectorExpr(SourceLocation BuiltinLoc, Expr *SrcExpr, TypeSourceInfo *DstTInfo, SourceLocation RParenLoc) { return SemaRef.SemaConvertVectorExpr(SrcExpr, DstTInfo, BuiltinLoc, RParenLoc); } /// \brief Build a new template argument pack expansion. /// /// By default, performs semantic analysis to build a new pack expansion /// for a template argument. Subclasses may override this routine to provide /// different behavior. 
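  /// Returns a null TemplateArgumentLoc if the pack expansion is invalid.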
TemplateArgumentLoc RebuildPackExpansion(TemplateArgumentLoc Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions) { switch (Pattern.getArgument().getKind()) { case TemplateArgument::Expression: { ExprResult Result = getSema().CheckPackExpansion(Pattern.getSourceExpression(), EllipsisLoc, NumExpansions); if (Result.isInvalid()) return TemplateArgumentLoc(); return TemplateArgumentLoc(Result.get(), Result.get()); } case TemplateArgument::Template: return TemplateArgumentLoc(TemplateArgument( Pattern.getArgument().getAsTemplate(), NumExpansions), Pattern.getTemplateQualifierLoc(), Pattern.getTemplateNameLoc(), EllipsisLoc); case TemplateArgument::Null: case TemplateArgument::Integral: case TemplateArgument::Declaration: case TemplateArgument::Pack: case TemplateArgument::TemplateExpansion: case TemplateArgument::NullPtr: llvm_unreachable("Pack expansion pattern has no parameter packs"); case TemplateArgument::Type: if (TypeSourceInfo *Expansion = getSema().CheckPackExpansion(Pattern.getTypeSourceInfo(), EllipsisLoc, NumExpansions)) return TemplateArgumentLoc(TemplateArgument(Expansion->getType()), Expansion); break; } return TemplateArgumentLoc(); } /// \brief Build a new expression pack expansion. /// /// By default, performs semantic analysis to build a new pack expansion /// for an expression. Subclasses may override this routine to provide /// different behavior. ExprResult RebuildPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions) { return getSema().CheckPackExpansion(Pattern, EllipsisLoc, NumExpansions); } /// \brief Build a new C++1z fold-expression. /// /// By default, performs semantic analysis in order to build a new fold /// expression. ExprResult RebuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc) { return getSema().BuildCXXFoldExpr(LParenLoc, LHS, Operator, EllipsisLoc, RHS, RParenLoc); } /// \brief Build an empty C++1z fold-expression with the given operator. /// /// By default, produces the fallback value for the fold-expression, or /// produce an error if there is no fallback value. ExprResult RebuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator) { return getSema().BuildEmptyCXXFoldExpr(EllipsisLoc, Operator); } /// \brief Build a new atomic operation expression. /// /// By default, performs semantic analysis to build the new expression. /// Subclasses may override this routine to provide different behavior. ExprResult RebuildAtomicExpr(SourceLocation BuiltinLoc, MultiExprArg SubExprs, QualType RetTy, AtomicExpr::AtomicOp Op, SourceLocation RParenLoc) { // Just create the expression; there is not any interesting semantic // analysis here because we can't actually build an AtomicExpr until // we are sure it is semantically sound. 
return new (SemaRef.Context) AtomicExpr(BuiltinLoc, SubExprs, RetTy, Op, RParenLoc); } private: TypeLoc TransformTypeInObjectScope(TypeLoc TL, QualType ObjectType, NamedDecl *FirstQualifierInScope, CXXScopeSpec &SS); TypeSourceInfo *TransformTypeInObjectScope(TypeSourceInfo *TSInfo, QualType ObjectType, NamedDecl *FirstQualifierInScope, CXXScopeSpec &SS); TypeSourceInfo *TransformTSIInObjectScope(TypeLoc TL, QualType ObjectType, NamedDecl *FirstQualifierInScope, CXXScopeSpec &SS); }; template<typename Derived> StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S) { if (!S) return S; switch (S->getStmtClass()) { case Stmt::NoStmtClass: break; // Transform individual statement nodes #define STMT(Node, Parent) \ case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(S)); #define ABSTRACT_STMT(Node) #define EXPR(Node, Parent) #include "clang/AST/StmtNodes.inc" // Transform expressions by calling TransformExpr. #define STMT(Node, Parent) #define ABSTRACT_STMT(Stmt) #define EXPR(Node, Parent) case Stmt::Node##Class: #include "clang/AST/StmtNodes.inc" { ExprResult E = getDerived().TransformExpr(cast<Expr>(S)); if (E.isInvalid()) return StmtError(); return getSema().ActOnExprStmt(E); } } return S; } template<typename Derived> OMPClause *TreeTransform<Derived>::TransformOMPClause(OMPClause *S) { if (!S) return S; switch (S->getClauseKind()) { default: break; // Transform individual clause nodes #define OPENMP_CLAUSE(Name, Class) \ case OMPC_ ## Name : \ return getDerived().Transform ## Class(cast<Class>(S)); #include "clang/Basic/OpenMPKinds.def" } return S; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformExpr(Expr *E) { if (!E) return E; switch (E->getStmtClass()) { case Stmt::NoStmtClass: break; #define STMT(Node, Parent) case Stmt::Node##Class: break; #define ABSTRACT_STMT(Stmt) #define EXPR(Node, Parent) \ case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(E)); #include "clang/AST/StmtNodes.inc" } return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init, bool NotCopyInit) { // Initializers are instantiated like expressions, except that various outer // layers are stripped. if (!Init) return Init; if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init)) Init = ExprTemp->getSubExpr(); if (MaterializeTemporaryExpr *MTE = dyn_cast<MaterializeTemporaryExpr>(Init)) Init = MTE->GetTemporaryExpr(); while (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(Init)) Init = Binder->getSubExpr(); if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Init)) Init = ICE->getSubExprAsWritten(); if (CXXStdInitializerListExpr *ILE = dyn_cast<CXXStdInitializerListExpr>(Init)) return TransformInitializer(ILE->getSubExpr(), NotCopyInit); // If this is copy-initialization, we only need to reconstruct // InitListExprs. Other forms of copy-initialization will be a no-op if // the initializer is already the right type. CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init); if (!NotCopyInit && !(Construct && Construct->isListInitialization())) return getDerived().TransformExpr(Init); // Revert value-initialization back to empty parens. if (CXXScalarValueInitExpr *VIE = dyn_cast<CXXScalarValueInitExpr>(Init)) { SourceRange Parens = VIE->getSourceRange(); return getDerived().RebuildParenListExpr(Parens.getBegin(), None, Parens.getEnd()); } // FIXME: We shouldn't build ImplicitValueInitExprs for direct-initialization. 
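  // An ImplicitValueInitExpr carries no source locations, so it is reverted
  // to an empty paren list here.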
if (isa<ImplicitValueInitExpr>(Init)) return getDerived().RebuildParenListExpr(SourceLocation(), None, SourceLocation()); // Revert initialization by constructor back to a parenthesized or braced list // of expressions. Any other form of initializer can just be reused directly. if (!Construct || isa<CXXTemporaryObjectExpr>(Construct)) return getDerived().TransformExpr(Init); // If the initialization implicitly converted an initializer list to a // std::initializer_list object, unwrap the std::initializer_list too. if (Construct && Construct->isStdInitListInitialization()) return TransformInitializer(Construct->getArg(0), NotCopyInit); SmallVector<Expr*, 8> NewArgs; bool ArgChanged = false; if (getDerived().TransformExprs(Construct->getArgs(), Construct->getNumArgs(), /*IsCall*/true, NewArgs, &ArgChanged)) return ExprError(); // If this was list initialization, revert to list form. if (Construct->isListInitialization()) return getDerived().RebuildInitList(Construct->getLocStart(), NewArgs, Construct->getLocEnd(), Construct->getType()); // Build a ParenListExpr to represent anything else. SourceRange Parens = Construct->getParenOrBraceRange(); if (Parens.isInvalid()) { // This was a variable declaration's initialization for which no initializer // was specified. assert(NewArgs.empty() && "no parens or braces but have direct init with arguments?"); return ExprEmpty(); } return getDerived().RebuildParenListExpr(Parens.getBegin(), NewArgs, Parens.getEnd()); } template<typename Derived> bool TreeTransform<Derived>::TransformExprs(Expr **Inputs, unsigned NumInputs, bool IsCall, SmallVectorImpl<Expr *> &Outputs, bool *ArgChanged) { for (unsigned I = 0; I != NumInputs; ++I) { // If requested, drop call arguments that need to be dropped. if (IsCall && getDerived().DropCallArgument(Inputs[I])) { if (ArgChanged) *ArgChanged = true; break; } if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(Inputs[I])) { Expr *Pattern = Expansion->getPattern(); SmallVector<UnexpandedParameterPack, 2> Unexpanded; getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded); assert(!Unexpanded.empty() && "Pack expansion without parameter packs?"); // Determine whether the set of unexpanded parameter packs can and should // be expanded. bool Expand = true; bool RetainExpansion = false; Optional<unsigned> OrigNumExpansions = Expansion->getNumExpansions(); Optional<unsigned> NumExpansions = OrigNumExpansions; if (getDerived().TryExpandParameterPacks(Expansion->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded, Expand, RetainExpansion, NumExpansions)) return true; if (!Expand) { // The transform has determined that we should perform a simple // transformation on the pack expansion, producing another pack // expansion. Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); ExprResult OutPattern = getDerived().TransformExpr(Pattern); if (OutPattern.isInvalid()) return true; ExprResult Out = getDerived().RebuildPackExpansion(OutPattern.get(), Expansion->getEllipsisLoc(), NumExpansions); if (Out.isInvalid()) return true; if (ArgChanged) *ArgChanged = true; Outputs.push_back(Out.get()); continue; } // Record right away that the argument was changed. This needs // to happen even if the array expands to nothing. if (ArgChanged) *ArgChanged = true; // The transform has determined that we should perform an elementwise // expansion of the pattern. Do so. 
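      // Each iteration substitutes the pattern with the pack substitution
      // index set to I, selecting the I'th element of every expanded pack.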
for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I); ExprResult Out = getDerived().TransformExpr(Pattern); if (Out.isInvalid()) return true; // FIXME: Can this happen? We should not try to expand the pack // in this case. if (Out.get()->containsUnexpandedParameterPack()) { Out = getDerived().RebuildPackExpansion( Out.get(), Expansion->getEllipsisLoc(), OrigNumExpansions); if (Out.isInvalid()) return true; } Outputs.push_back(Out.get()); } // If we're supposed to retain a pack expansion, do so by temporarily // forgetting the partially-substituted parameter pack. if (RetainExpansion) { ForgetPartiallySubstitutedPackRAII Forget(getDerived()); ExprResult Out = getDerived().TransformExpr(Pattern); if (Out.isInvalid()) return true; Out = getDerived().RebuildPackExpansion( Out.get(), Expansion->getEllipsisLoc(), OrigNumExpansions); if (Out.isInvalid()) return true; Outputs.push_back(Out.get()); } continue; } ExprResult Result = IsCall ? getDerived().TransformInitializer(Inputs[I], /*DirectInit*/false) : getDerived().TransformExpr(Inputs[I]); if (Result.isInvalid()) return true; if (Result.get() != Inputs[I] && ArgChanged) *ArgChanged = true; Outputs.push_back(Result.get()); } return false; } template<typename Derived> NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc( NestedNameSpecifierLoc NNS, QualType ObjectType, NamedDecl *FirstQualifierInScope) { SmallVector<NestedNameSpecifierLoc, 4> Qualifiers; for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier; Qualifier = Qualifier.getPrefix()) Qualifiers.push_back(Qualifier); CXXScopeSpec SS; while (!Qualifiers.empty()) { NestedNameSpecifierLoc Q = Qualifiers.pop_back_val(); NestedNameSpecifier *QNNS = Q.getNestedNameSpecifier(); switch (QNNS->getKind()) { case NestedNameSpecifier::Identifier: if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr, *QNNS->getAsIdentifier(), Q.getLocalBeginLoc(), Q.getLocalEndLoc(), ObjectType, false, SS, FirstQualifierInScope, false)) return NestedNameSpecifierLoc(); break; case NestedNameSpecifier::Namespace: { NamespaceDecl *NS = cast_or_null<NamespaceDecl>( getDerived().TransformDecl( Q.getLocalBeginLoc(), QNNS->getAsNamespace())); SS.Extend(SemaRef.Context, NS, Q.getLocalBeginLoc(), Q.getLocalEndLoc()); break; } case NestedNameSpecifier::NamespaceAlias: { NamespaceAliasDecl *Alias = cast_or_null<NamespaceAliasDecl>( getDerived().TransformDecl(Q.getLocalBeginLoc(), QNNS->getAsNamespaceAlias())); SS.Extend(SemaRef.Context, Alias, Q.getLocalBeginLoc(), Q.getLocalEndLoc()); break; } case NestedNameSpecifier::Global: // There is no meaningful transformation that one could perform on the // global scope. 
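      // Just rebuild '::' with its original location.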
      SS.MakeGlobal(SemaRef.Context, Q.getBeginLoc());
      break;

    case NestedNameSpecifier::Super: {
      CXXRecordDecl *RD =
          cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
              SourceLocation(), QNNS->getAsRecordDecl()));
      SS.MakeSuper(SemaRef.Context, RD, Q.getBeginLoc(), Q.getEndLoc());
      break;
    }

    case NestedNameSpecifier::TypeSpecWithTemplate:
    case NestedNameSpecifier::TypeSpec: {
      TypeLoc TL = TransformTypeInObjectScope(Q.getTypeLoc(), ObjectType,
                                              FirstQualifierInScope, SS);
      if (!TL)
        return NestedNameSpecifierLoc();

      if (TL.getType()->isDependentType() || TL.getType()->isRecordType() ||
          ((SemaRef.getLangOpts().CPlusPlus11 ||
            SemaRef.getLangOpts().HLSL) &&
           TL.getType()->isEnumeralType())) {
        assert(!TL.getType().hasLocalQualifiers() &&
               "Can't get cv-qualifiers here");
        if (TL.getType()->isEnumeralType())
          SemaRef.Diag(TL.getBeginLoc(),
                       diag::warn_cxx98_compat_enum_nested_name_spec);
        SS.Extend(SemaRef.Context, /*FIXME:*/SourceLocation(), TL,
                  Q.getLocalEndLoc());
        break;
      }

      // If the nested-name-specifier is an invalid typedef, don't emit an
      // error because a previous error should have already been emitted.
      TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>();
      if (!TTL || !TTL.getTypedefNameDecl()->isInvalidDecl()) {
        SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
          << TL.getType() << SS.getRange();
      }
      return NestedNameSpecifierLoc();
    }
    }

    // The qualifier-in-scope and object type only apply to the leftmost entity.
    FirstQualifierInScope = nullptr;
    ObjectType = QualType();
  }

  // Don't rebuild the nested-name-specifier if we don't have to.
  if (SS.getScopeRep() == NNS.getNestedNameSpecifier() &&
      !getDerived().AlwaysRebuild())
    return NNS;

  // If we can re-use the source-location data from the original
  // nested-name-specifier, do so.
  if (SS.location_size() == NNS.getDataLength() &&
      memcmp(SS.location_data(), NNS.getOpaqueData(), SS.location_size()) == 0)
    return NestedNameSpecifierLoc(SS.getScopeRep(), NNS.getOpaqueData());

  // Allocate new nested-name-specifier location information.
return SS.getWithLocInContext(SemaRef.Context); } template<typename Derived> DeclarationNameInfo TreeTransform<Derived> ::TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo) { DeclarationName Name = NameInfo.getName(); if (!Name) return DeclarationNameInfo(); switch (Name.getNameKind()) { case DeclarationName::Identifier: case DeclarationName::ObjCZeroArgSelector: case DeclarationName::ObjCOneArgSelector: case DeclarationName::ObjCMultiArgSelector: case DeclarationName::CXXOperatorName: case DeclarationName::CXXLiteralOperatorName: case DeclarationName::CXXUsingDirective: return NameInfo; case DeclarationName::CXXConstructorName: case DeclarationName::CXXDestructorName: case DeclarationName::CXXConversionFunctionName: { TypeSourceInfo *NewTInfo; CanQualType NewCanTy; if (TypeSourceInfo *OldTInfo = NameInfo.getNamedTypeInfo()) { NewTInfo = getDerived().TransformType(OldTInfo); if (!NewTInfo) return DeclarationNameInfo(); NewCanTy = SemaRef.Context.getCanonicalType(NewTInfo->getType()); } else { NewTInfo = nullptr; TemporaryBase Rebase(*this, NameInfo.getLoc(), Name); QualType NewT = getDerived().TransformType(Name.getCXXNameType()); if (NewT.isNull()) return DeclarationNameInfo(); NewCanTy = SemaRef.Context.getCanonicalType(NewT); } DeclarationName NewName = SemaRef.Context.DeclarationNames.getCXXSpecialName(Name.getNameKind(), NewCanTy); DeclarationNameInfo NewNameInfo(NameInfo); NewNameInfo.setName(NewName); NewNameInfo.setNamedTypeInfo(NewTInfo); return NewNameInfo; } } llvm_unreachable("Unknown name kind."); } template<typename Derived> TemplateName TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS, TemplateName Name, SourceLocation NameLoc, QualType ObjectType, NamedDecl *FirstQualifierInScope) { if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) { TemplateDecl *Template = QTN->getTemplateDecl(); assert(Template && "qualified template name must refer to a template"); TemplateDecl *TransTemplate = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc, Template)); if (!TransTemplate) return TemplateName(); if (!getDerived().AlwaysRebuild() && SS.getScopeRep() == QTN->getQualifier() && TransTemplate == Template) return Name; return getDerived().RebuildTemplateName(SS, QTN->hasTemplateKeyword(), TransTemplate); } if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) { if (SS.getScopeRep()) { // These apply to the scope specifier, not the template. 
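      // Clear them so they are not applied to the template name below.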
ObjectType = QualType(); FirstQualifierInScope = nullptr; } if (!getDerived().AlwaysRebuild() && SS.getScopeRep() == DTN->getQualifier() && ObjectType.isNull()) return Name; if (DTN->isIdentifier()) { return getDerived().RebuildTemplateName(SS, *DTN->getIdentifier(), NameLoc, ObjectType, FirstQualifierInScope); } return getDerived().RebuildTemplateName(SS, DTN->getOperator(), NameLoc, ObjectType); } if (TemplateDecl *Template = Name.getAsTemplateDecl()) { TemplateDecl *TransTemplate = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc, Template)); if (!TransTemplate) return TemplateName(); if (!getDerived().AlwaysRebuild() && TransTemplate == Template) return Name; return TemplateName(TransTemplate); } if (SubstTemplateTemplateParmPackStorage *SubstPack = Name.getAsSubstTemplateTemplateParmPack()) { TemplateTemplateParmDecl *TransParam = cast_or_null<TemplateTemplateParmDecl>( getDerived().TransformDecl(NameLoc, SubstPack->getParameterPack())); if (!TransParam) return TemplateName(); if (!getDerived().AlwaysRebuild() && TransParam == SubstPack->getParameterPack()) return Name; return getDerived().RebuildTemplateName(TransParam, SubstPack->getArgumentPack()); } // These should be getting filtered out before they reach the AST. llvm_unreachable("overloaded function decl survived to here"); } template<typename Derived> void TreeTransform<Derived>::InventTemplateArgumentLoc( const TemplateArgument &Arg, TemplateArgumentLoc &Output) { SourceLocation Loc = getDerived().getBaseLocation(); switch (Arg.getKind()) { case TemplateArgument::Null: llvm_unreachable("null template argument in TreeTransform"); break; case TemplateArgument::Type: Output = TemplateArgumentLoc(Arg, SemaRef.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc)); break; case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: { NestedNameSpecifierLocBuilder Builder; TemplateName Template = Arg.getAsTemplate(); if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) Builder.MakeTrivial(SemaRef.Context, DTN->getQualifier(), Loc); else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName()) Builder.MakeTrivial(SemaRef.Context, QTN->getQualifier(), Loc); if (Arg.getKind() == TemplateArgument::Template) Output = TemplateArgumentLoc(Arg, Builder.getWithLocInContext(SemaRef.Context), Loc); else Output = TemplateArgumentLoc(Arg, Builder.getWithLocInContext(SemaRef.Context), Loc, Loc); break; } case TemplateArgument::Expression: Output = TemplateArgumentLoc(Arg, Arg.getAsExpr()); break; case TemplateArgument::Declaration: case TemplateArgument::Integral: case TemplateArgument::Pack: case TemplateArgument::NullPtr: Output = TemplateArgumentLoc(Arg, TemplateArgumentLocInfo()); break; } } template<typename Derived> bool TreeTransform<Derived>::TransformTemplateArgument( const TemplateArgumentLoc &Input, TemplateArgumentLoc &Output) { const TemplateArgument &Arg = Input.getArgument(); switch (Arg.getKind()) { case TemplateArgument::Null: case TemplateArgument::Integral: case TemplateArgument::Pack: case TemplateArgument::Declaration: case TemplateArgument::NullPtr: llvm_unreachable("Unexpected TemplateArgument"); case TemplateArgument::Type: { TypeSourceInfo *DI = Input.getTypeSourceInfo(); if (!DI) DI = InventTypeSourceInfo(Input.getArgument().getAsType()); DI = getDerived().TransformType(DI); if (!DI) return true; Output = TemplateArgumentLoc(TemplateArgument(DI->getType()), DI); return false; } case TemplateArgument::Template: { NestedNameSpecifierLoc QualifierLoc = 
Input.getTemplateQualifierLoc(); if (QualifierLoc) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc); if (!QualifierLoc) return true; } CXXScopeSpec SS; SS.Adopt(QualifierLoc); TemplateName Template = getDerived().TransformTemplateName(SS, Arg.getAsTemplate(), Input.getTemplateNameLoc()); if (Template.isNull()) return true; Output = TemplateArgumentLoc(TemplateArgument(Template), QualifierLoc, Input.getTemplateNameLoc()); return false; } case TemplateArgument::TemplateExpansion: llvm_unreachable("Caller should expand pack expansions"); case TemplateArgument::Expression: { // Template argument expressions are constant expressions. EnterExpressionEvaluationContext Unevaluated(getSema(), Sema::ConstantEvaluated); Expr *InputExpr = Input.getSourceExpression(); if (!InputExpr) InputExpr = Input.getArgument().getAsExpr(); ExprResult E = getDerived().TransformExpr(InputExpr); E = SemaRef.ActOnConstantExpression(E); if (E.isInvalid()) return true; Output = TemplateArgumentLoc(TemplateArgument(E.get()), E.get()); return false; } } // Work around bogus GCC warning return true; } /// \brief Iterator adaptor that invents template argument location information /// for each of the template arguments in its underlying iterator. template<typename Derived, typename InputIterator> class TemplateArgumentLocInventIterator { TreeTransform<Derived> &Self; InputIterator Iter; public: typedef TemplateArgumentLoc value_type; typedef TemplateArgumentLoc reference; typedef typename std::iterator_traits<InputIterator>::difference_type difference_type; typedef std::input_iterator_tag iterator_category; class pointer { TemplateArgumentLoc Arg; public: explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { } const TemplateArgumentLoc *operator->() const { return &Arg; } }; TemplateArgumentLocInventIterator() { } explicit TemplateArgumentLocInventIterator(TreeTransform<Derived> &Self, InputIterator Iter) : Self(Self), Iter(Iter) { } TemplateArgumentLocInventIterator &operator++() { ++Iter; return *this; } TemplateArgumentLocInventIterator operator++(int) { TemplateArgumentLocInventIterator Old(*this); ++(*this); return Old; } reference operator*() const { TemplateArgumentLoc Result; Self.InventTemplateArgumentLoc(*Iter, Result); return Result; } pointer operator->() const { return pointer(**this); } friend bool operator==(const TemplateArgumentLocInventIterator &X, const TemplateArgumentLocInventIterator &Y) { return X.Iter == Y.Iter; } friend bool operator!=(const TemplateArgumentLocInventIterator &X, const TemplateArgumentLocInventIterator &Y) { return X.Iter != Y.Iter; } }; template<typename Derived> template<typename InputIterator> bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First, InputIterator Last, TemplateArgumentListInfo &Outputs) { for (; First != Last; ++First) { TemplateArgumentLoc Out; TemplateArgumentLoc In = *First; if (In.getArgument().getKind() == TemplateArgument::Pack) { // Unpack argument packs, which we translate into separate // arguments. // FIXME: We could do much better if we could guarantee that the // TemplateArgumentLocInfo for the pack expansion would be usable for // all of the template arguments in the argument pack.
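  // For instance (illustrative): a pack argument holding {int, float*} is
  // flattened by the recursive call below into the two separate arguments
  // 'int' and 'float*', inventing trivial location info for each of them.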
typedef TemplateArgumentLocInventIterator<Derived, TemplateArgument::pack_iterator> PackLocIterator; if (TransformTemplateArguments(PackLocIterator(*this, In.getArgument().pack_begin()), PackLocIterator(*this, In.getArgument().pack_end()), Outputs)) return true; continue; } if (In.getArgument().isPackExpansion()) { // We have a pack expansion, for which we will be substituting into // the pattern. SourceLocation Ellipsis; Optional<unsigned> OrigNumExpansions; TemplateArgumentLoc Pattern = getSema().getTemplateArgumentPackExpansionPattern( In, Ellipsis, OrigNumExpansions); SmallVector<UnexpandedParameterPack, 2> Unexpanded; getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded); assert(!Unexpanded.empty() && "Pack expansion without parameter packs?"); // Determine whether the set of unexpanded parameter packs can and should // be expanded. bool Expand = true; bool RetainExpansion = false; Optional<unsigned> NumExpansions = OrigNumExpansions; if (getDerived().TryExpandParameterPacks(Ellipsis, Pattern.getSourceRange(), Unexpanded, Expand, RetainExpansion, NumExpansions)) return true; if (!Expand) { // The transform has determined that we should perform a simple // transformation on the pack expansion, producing another pack // expansion. TemplateArgumentLoc OutPattern; Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); if (getDerived().TransformTemplateArgument(Pattern, OutPattern)) return true; Out = getDerived().RebuildPackExpansion(OutPattern, Ellipsis, NumExpansions); if (Out.getArgument().isNull()) return true; Outputs.addArgument(Out); continue; } // The transform has determined that we should perform an elementwise // expansion of the pattern. Do so. for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I); if (getDerived().TransformTemplateArgument(Pattern, Out)) return true; if (Out.getArgument().containsUnexpandedParameterPack()) { Out = getDerived().RebuildPackExpansion(Out, Ellipsis, OrigNumExpansions); if (Out.getArgument().isNull()) return true; } Outputs.addArgument(Out); } // If we're supposed to retain a pack expansion, do so by temporarily // forgetting the partially-substituted parameter pack. if (RetainExpansion) { ForgetPartiallySubstitutedPackRAII Forget(getDerived()); if (getDerived().TransformTemplateArgument(Pattern, Out)) return true; Out = getDerived().RebuildPackExpansion(Out, Ellipsis, OrigNumExpansions); if (Out.getArgument().isNull()) return true; Outputs.addArgument(Out); } continue; } // The simple case: if (getDerived().TransformTemplateArgument(In, Out)) return true; Outputs.addArgument(Out); } return false; } //===----------------------------------------------------------------------===// // Type transformation //===----------------------------------------------------------------------===// template<typename Derived> QualType TreeTransform<Derived>::TransformType(QualType T) { if (getDerived().AlreadyTransformed(T)) return T; // Temporary workaround. All of these transformations should // eventually turn into transformations on TypeLocs. TypeSourceInfo *DI = getSema().Context.getTrivialTypeSourceInfo(T, getDerived().getBaseLocation()); TypeSourceInfo *NewDI = getDerived().TransformType(DI); if (!NewDI) return QualType(); return NewDI->getType(); } template<typename Derived> TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI) { // Refine the base location to the type's location. 
TemporaryBase Rebase(*this, DI->getTypeLoc().getBeginLoc(), getDerived().getBaseEntity()); if (getDerived().AlreadyTransformed(DI->getType())) return DI; TypeLocBuilder TLB; TypeLoc TL = DI->getTypeLoc(); TLB.reserve(TL.getFullDataSize()); QualType Result = getDerived().TransformType(TLB, TL); if (Result.isNull()) return nullptr; return TLB.getTypeSourceInfo(SemaRef.Context, Result); } template<typename Derived> QualType TreeTransform<Derived>::TransformType(TypeLocBuilder &TLB, TypeLoc T) { switch (T.getTypeLocClass()) { #define ABSTRACT_TYPELOC(CLASS, PARENT) #define TYPELOC(CLASS, PARENT) \ case TypeLoc::CLASS: \ return getDerived().Transform##CLASS##Type(TLB, \ T.castAs<CLASS##TypeLoc>()); #include "clang/AST/TypeLocNodes.def" } llvm_unreachable("unhandled type loc!"); } /// FIXME: By default, this routine adds type qualifiers only to types /// that can have qualifiers, and silently suppresses those qualifiers /// that are not permitted (e.g., qualifiers on reference or function /// types). This is the right thing for template instantiation, but /// probably not for other clients. template<typename Derived> QualType TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB, QualifiedTypeLoc T) { Qualifiers Quals = T.getType().getLocalQualifiers(); QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc()); if (Result.isNull()) return QualType(); // Silently suppress qualifiers if the result type can't be qualified. // FIXME: this is the right thing for template instantiation, but // probably not for other clients. if (Result->isFunctionType() || Result->isReferenceType()) return Result; // Suppress Objective-C lifetime qualifiers if they don't make sense for the // resulting type. if (Quals.hasObjCLifetime()) { if (!Result->isObjCLifetimeType() && !Result->isDependentType()) Quals.removeObjCLifetime(); else if (Result.getObjCLifetime()) { // Objective-C ARC: // A lifetime qualifier applied to a substituted template parameter // overrides the lifetime qualifier from the template argument. const AutoType *AutoTy; if (const SubstTemplateTypeParmType *SubstTypeParam = dyn_cast<SubstTemplateTypeParmType>(Result)) { QualType Replacement = SubstTypeParam->getReplacementType(); Qualifiers Qs = Replacement.getQualifiers(); Qs.removeObjCLifetime(); Replacement = SemaRef.Context.getQualifiedType(Replacement.getUnqualifiedType(), Qs); Result = SemaRef.Context.getSubstTemplateTypeParmType( SubstTypeParam->getReplacedParameter(), Replacement); TLB.TypeWasModifiedSafely(Result); } else if ((AutoTy = dyn_cast<AutoType>(Result)) && AutoTy->isDeduced()) { // 'auto' types behave the same way as template parameters. QualType Deduced = AutoTy->getDeducedType(); Qualifiers Qs = Deduced.getQualifiers(); Qs.removeObjCLifetime(); Deduced = SemaRef.Context.getQualifiedType(Deduced.getUnqualifiedType(), Qs); Result = SemaRef.Context.getAutoType(Deduced, AutoTy->isDecltypeAuto(), AutoTy->isDependentType()); TLB.TypeWasModifiedSafely(Result); } else { // Otherwise, complain about the addition of a qualifier to an // already-qualified type. SourceRange R = T.getUnqualifiedLoc().getSourceRange(); SemaRef.Diag(R.getBegin(), diag::err_attr_objc_ownership_redundant) << Result << R; Quals.removeObjCLifetime(); } } } if (!Quals.empty()) { Result = SemaRef.BuildQualifiedType(Result, T.getBeginLoc(), Quals); // BuildQualifiedType might not add qualifiers if they are invalid. if (Result.hasLocalQualifiers()) TLB.push<QualifiedTypeLoc>(Result); // No location information to preserve. 
} return Result; } template<typename Derived> TypeLoc TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL, QualType ObjectType, NamedDecl *UnqualLookup, CXXScopeSpec &SS) { if (getDerived().AlreadyTransformed(TL.getType())) return TL; TypeSourceInfo *TSI = TransformTSIInObjectScope(TL, ObjectType, UnqualLookup, SS); if (TSI) return TSI->getTypeLoc(); return TypeLoc(); } template<typename Derived> TypeSourceInfo * TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo, QualType ObjectType, NamedDecl *UnqualLookup, CXXScopeSpec &SS) { if (getDerived().AlreadyTransformed(TSInfo->getType())) return TSInfo; return TransformTSIInObjectScope(TSInfo->getTypeLoc(), ObjectType, UnqualLookup, SS); } template <typename Derived> TypeSourceInfo *TreeTransform<Derived>::TransformTSIInObjectScope( TypeLoc TL, QualType ObjectType, NamedDecl *UnqualLookup, CXXScopeSpec &SS) { QualType T = TL.getType(); assert(!getDerived().AlreadyTransformed(T)); TypeLocBuilder TLB; QualType Result; if (isa<TemplateSpecializationType>(T)) { TemplateSpecializationTypeLoc SpecTL = TL.castAs<TemplateSpecializationTypeLoc>(); TemplateName Template = getDerived().TransformTemplateName(SS, SpecTL.getTypePtr()->getTemplateName(), SpecTL.getTemplateNameLoc(), ObjectType, UnqualLookup); if (Template.isNull()) return nullptr; Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL, Template); } else if (isa<DependentTemplateSpecializationType>(T)) { DependentTemplateSpecializationTypeLoc SpecTL = TL.castAs<DependentTemplateSpecializationTypeLoc>(); TemplateName Template = getDerived().RebuildTemplateName(SS, *SpecTL.getTypePtr()->getIdentifier(), SpecTL.getTemplateNameLoc(), ObjectType, UnqualLookup); if (Template.isNull()) return nullptr; Result = getDerived().TransformDependentTemplateSpecializationType(TLB, SpecTL, Template, SS); } else { // Nothing special needs to be done for these. Result = getDerived().TransformType(TLB, TL); } if (Result.isNull()) return nullptr; return TLB.getTypeSourceInfo(SemaRef.Context, Result); } template <class TyLoc> static inline QualType TransformTypeSpecType(TypeLocBuilder &TLB, TyLoc T) { TyLoc NewT = TLB.push<TyLoc>(T.getType()); NewT.setNameLoc(T.getNameLoc()); return T.getType(); } template<typename Derived> QualType TreeTransform<Derived>::TransformBuiltinType(TypeLocBuilder &TLB, BuiltinTypeLoc T) { BuiltinTypeLoc NewT = TLB.push<BuiltinTypeLoc>(T.getType()); NewT.setBuiltinLoc(T.getBuiltinLoc()); if (T.needsExtraLocalData()) NewT.getWrittenBuiltinSpecs() = T.getWrittenBuiltinSpecs(); return T.getType(); } template<typename Derived> QualType TreeTransform<Derived>::TransformComplexType(TypeLocBuilder &TLB, ComplexTypeLoc T) { // FIXME: recurse? return TransformTypeSpecType(TLB, T); } template <typename Derived> QualType TreeTransform<Derived>::TransformAdjustedType(TypeLocBuilder &TLB, AdjustedTypeLoc TL) { // Adjustments applied during transformation are handled elsewhere. return getDerived().TransformType(TLB, TL.getOriginalLoc()); } template<typename Derived> QualType TreeTransform<Derived>::TransformDecayedType(TypeLocBuilder &TLB, DecayedTypeLoc TL) { QualType OriginalType = getDerived().TransformType(TLB, TL.getOriginalLoc()); if (OriginalType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || OriginalType != TL.getOriginalLoc().getType()) Result = SemaRef.Context.getDecayedType(OriginalType); TLB.push<DecayedTypeLoc>(Result); // Nothing to set for DecayedTypeLoc. 
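  // For example: a parameter written as 'int arr[4]' or 'void fn()' decays
  // here to 'int *' or 'void (*)()' respectively, once the underlying
  // original type has been transformed.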
return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB, PointerTypeLoc TL) { QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc()); if (PointeeType.isNull()) return QualType(); QualType Result = TL.getType(); if (PointeeType->getAs<ObjCObjectType>()) { // A dependent pointer type 'T *' is being transformed such // that an Objective-C class type is being replaced for 'T'. The // resulting pointer type is an ObjCObjectPointerType, not a // PointerType. Result = SemaRef.Context.getObjCObjectPointerType(PointeeType); ObjCObjectPointerTypeLoc NewT = TLB.push<ObjCObjectPointerTypeLoc>(Result); NewT.setStarLoc(TL.getStarLoc()); return Result; } if (getDerived().AlwaysRebuild() || PointeeType != TL.getPointeeLoc().getType()) { Result = getDerived().RebuildPointerType(PointeeType, TL.getSigilLoc()); if (Result.isNull()) return QualType(); } // Objective-C ARC can add lifetime qualifiers to the type that we're // pointing to. TLB.TypeWasModifiedSafely(Result->getPointeeType()); PointerTypeLoc NewT = TLB.push<PointerTypeLoc>(Result); NewT.setSigilLoc(TL.getSigilLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformBlockPointerType(TypeLocBuilder &TLB, BlockPointerTypeLoc TL) { QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc()); if (PointeeType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || PointeeType != TL.getPointeeLoc().getType()) { Result = getDerived().RebuildBlockPointerType(PointeeType, TL.getSigilLoc()); if (Result.isNull()) return QualType(); } BlockPointerTypeLoc NewT = TLB.push<BlockPointerTypeLoc>(Result); NewT.setSigilLoc(TL.getSigilLoc()); return Result; } /// Transforms a reference type. Note that somewhat paradoxically we /// don't care whether the type itself is an l-value type or an r-value /// type; we only care if the type was *written* as an l-value type /// or an r-value type. template<typename Derived> QualType TreeTransform<Derived>::TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL) { const ReferenceType *T = TL.getTypePtr(); // Note that this works with the pointee-as-written. QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc()); if (PointeeType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || PointeeType != T->getPointeeTypeAsWritten()) { Result = getDerived().RebuildReferenceType(PointeeType, T->isSpelledAsLValue(), TL.getSigilLoc()); if (Result.isNull()) return QualType(); } // Objective-C ARC can add lifetime qualifiers to the type that we're // referring to. TLB.TypeWasModifiedSafely( Result->getAs<ReferenceType>()->getPointeeTypeAsWritten()); // r-value references can be rebuilt as l-value references.
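  // For example: by reference collapsing, substituting T = int& into
  // 'T &&' yields 'int &', so an r-value reference written in the source
  // can come back as an LValueReferenceType; both TypeLoc kinds share one
  // layout, so either push below is safe.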
ReferenceTypeLoc NewTL; if (isa<LValueReferenceType>(Result)) NewTL = TLB.push<LValueReferenceTypeLoc>(Result); else NewTL = TLB.push<RValueReferenceTypeLoc>(Result); NewTL.setSigilLoc(TL.getSigilLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformLValueReferenceType(TypeLocBuilder &TLB, LValueReferenceTypeLoc TL) { return TransformReferenceType(TLB, TL); } template<typename Derived> QualType TreeTransform<Derived>::TransformRValueReferenceType(TypeLocBuilder &TLB, RValueReferenceTypeLoc TL) { return TransformReferenceType(TLB, TL); } template<typename Derived> QualType TreeTransform<Derived>::TransformMemberPointerType(TypeLocBuilder &TLB, MemberPointerTypeLoc TL) { QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc()); if (PointeeType.isNull()) return QualType(); TypeSourceInfo* OldClsTInfo = TL.getClassTInfo(); TypeSourceInfo *NewClsTInfo = nullptr; if (OldClsTInfo) { NewClsTInfo = getDerived().TransformType(OldClsTInfo); if (!NewClsTInfo) return QualType(); } const MemberPointerType *T = TL.getTypePtr(); QualType OldClsType = QualType(T->getClass(), 0); QualType NewClsType; if (NewClsTInfo) NewClsType = NewClsTInfo->getType(); else { NewClsType = getDerived().TransformType(OldClsType); if (NewClsType.isNull()) return QualType(); } QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || PointeeType != T->getPointeeType() || NewClsType != OldClsType) { Result = getDerived().RebuildMemberPointerType(PointeeType, NewClsType, TL.getStarLoc()); if (Result.isNull()) return QualType(); } // If we had to adjust the pointee type when building a member pointer, make // sure to push TypeLoc info for it. const MemberPointerType *MPT = Result->getAs<MemberPointerType>(); if (MPT && PointeeType != MPT->getPointeeType()) { assert(isa<AdjustedType>(MPT->getPointeeType())); TLB.push<AdjustedTypeLoc>(MPT->getPointeeType()); } MemberPointerTypeLoc NewTL = TLB.push<MemberPointerTypeLoc>(Result); NewTL.setSigilLoc(TL.getSigilLoc()); NewTL.setClassTInfo(NewClsTInfo); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformConstantArrayType(TypeLocBuilder &TLB, ConstantArrayTypeLoc TL) { const ConstantArrayType *T = TL.getTypePtr(); QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc()); if (ElementType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ElementType != T->getElementType()) { Result = getDerived().RebuildConstantArrayType(ElementType, T->getSizeModifier(), T->getSize(), T->getIndexTypeCVRQualifiers(), TL.getBracketsRange()); if (Result.isNull()) return QualType(); } // We might have either a ConstantArrayType or a VariableArrayType now: // a ConstantArrayType is allowed to have an element type which is a // VariableArrayType if the type is dependent. Fortunately, all array // types have the same location layout. 
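  // For instance (illustrative): rebuilding 'T arr[4]' where 'T' turns out
  // to be a variable-length array type yields a VariableArrayType, so the
  // generic ArrayTypeLoc push below must accommodate both results.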
ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result); NewTL.setLBracketLoc(TL.getLBracketLoc()); NewTL.setRBracketLoc(TL.getRBracketLoc()); Expr *Size = TL.getSizeExpr(); if (Size) { EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::ConstantEvaluated); Size = getDerived().TransformExpr(Size).template getAs<Expr>(); Size = SemaRef.ActOnConstantExpression(Size).get(); } NewTL.setSizeExpr(Size); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformIncompleteArrayType( TypeLocBuilder &TLB, IncompleteArrayTypeLoc TL) { const IncompleteArrayType *T = TL.getTypePtr(); QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc()); if (ElementType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ElementType != T->getElementType()) { Result = getDerived().RebuildIncompleteArrayType(ElementType, T->getSizeModifier(), T->getIndexTypeCVRQualifiers(), TL.getBracketsRange()); if (Result.isNull()) return QualType(); } IncompleteArrayTypeLoc NewTL = TLB.push<IncompleteArrayTypeLoc>(Result); NewTL.setLBracketLoc(TL.getLBracketLoc()); NewTL.setRBracketLoc(TL.getRBracketLoc()); NewTL.setSizeExpr(nullptr); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB, VariableArrayTypeLoc TL) { const VariableArrayType *T = TL.getTypePtr(); QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc()); if (ElementType.isNull()) return QualType(); ExprResult SizeResult = getDerived().TransformExpr(T->getSizeExpr()); if (SizeResult.isInvalid()) return QualType(); Expr *Size = SizeResult.get(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ElementType != T->getElementType() || Size != T->getSizeExpr()) { Result = getDerived().RebuildVariableArrayType(ElementType, T->getSizeModifier(), Size, T->getIndexTypeCVRQualifiers(), TL.getBracketsRange()); if (Result.isNull()) return QualType(); } // We might have a constant size array now, but fortunately it has the same // location layout. ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result); NewTL.setLBracketLoc(TL.getLBracketLoc()); NewTL.setRBracketLoc(TL.getRBracketLoc()); NewTL.setSizeExpr(Size); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB, DependentSizedArrayTypeLoc TL) { const DependentSizedArrayType *T = TL.getTypePtr(); QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc()); if (ElementType.isNull()) return QualType(); // Array bounds are constant expressions. EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::ConstantEvaluated); // Prefer the expression from the TypeLoc; the other may have been uniqued. Expr *origSize = TL.getSizeExpr(); if (!origSize) origSize = T->getSizeExpr(); ExprResult sizeResult = getDerived().TransformExpr(origSize); sizeResult = SemaRef.ActOnConstantExpression(sizeResult); if (sizeResult.isInvalid()) return QualType(); Expr *size = sizeResult.get(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ElementType != T->getElementType() || size != origSize) { Result = getDerived().RebuildDependentSizedArrayType(ElementType, T->getSizeModifier(), size, T->getIndexTypeCVRQualifiers(), TL.getBracketsRange()); if (Result.isNull()) return QualType(); } // We might have any sort of array type now, but fortunately they // all have the same location layout.
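  // E.g. (illustrative): 'T arr[N]' with a dependent bound 'N' becomes a
  // ConstantArrayType once 'N' is substituted with a constant, and the
  // ArrayTypeLoc written below fits whichever array type results.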
ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result); NewTL.setLBracketLoc(TL.getLBracketLoc()); NewTL.setRBracketLoc(TL.getRBracketLoc()); NewTL.setSizeExpr(size); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType( TypeLocBuilder &TLB, DependentSizedExtVectorTypeLoc TL) { const DependentSizedExtVectorType *T = TL.getTypePtr(); // FIXME: ext vector locs should be nested QualType ElementType = getDerived().TransformType(T->getElementType()); if (ElementType.isNull()) return QualType(); // Vector sizes are constant expressions. EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::ConstantEvaluated); ExprResult Size = getDerived().TransformExpr(T->getSizeExpr()); Size = SemaRef.ActOnConstantExpression(Size); if (Size.isInvalid()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ElementType != T->getElementType() || Size.get() != T->getSizeExpr()) { Result = getDerived().RebuildDependentSizedExtVectorType(ElementType, Size.get(), T->getAttributeLoc()); if (Result.isNull()) return QualType(); } // Result might be dependent or not. if (isa<DependentSizedExtVectorType>(Result)) { DependentSizedExtVectorTypeLoc NewTL = TLB.push<DependentSizedExtVectorTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); } else { ExtVectorTypeLoc NewTL = TLB.push<ExtVectorTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); } return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB, VectorTypeLoc TL) { const VectorType *T = TL.getTypePtr(); QualType ElementType = getDerived().TransformType(T->getElementType()); if (ElementType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ElementType != T->getElementType()) { Result = getDerived().RebuildVectorType(ElementType, T->getNumElements(), T->getVectorKind()); if (Result.isNull()) return QualType(); } VectorTypeLoc NewTL = TLB.push<VectorTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB, ExtVectorTypeLoc TL) { const VectorType *T = TL.getTypePtr(); QualType ElementType = getDerived().TransformType(T->getElementType()); if (ElementType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ElementType != T->getElementType()) { Result = getDerived().RebuildExtVectorType(ElementType, T->getNumElements(), /*FIXME*/ SourceLocation()); if (Result.isNull()) return QualType(); } ExtVectorTypeLoc NewTL = TLB.push<ExtVectorTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template <typename Derived> ParmVarDecl *TreeTransform<Derived>::TransformFunctionTypeParam( ParmVarDecl *OldParm, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack) { TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo(); TypeSourceInfo *NewDI = nullptr; if (NumExpansions && isa<PackExpansionType>(OldDI->getType())) { // If we're substituting into a pack expansion type and we know the // length we want to expand to, just substitute for the pattern. 
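  // For example (illustrative): for a parameter declared 'Ts ...args' with
  // a known substitution {int, char}, only the pattern 'Ts' is transformed
  // for the current pack index, and the result is re-wrapped as a pack
  // expansion of the known length.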
TypeLoc OldTL = OldDI->getTypeLoc(); PackExpansionTypeLoc OldExpansionTL = OldTL.castAs<PackExpansionTypeLoc>(); TypeLocBuilder TLB; TypeLoc NewTL = OldDI->getTypeLoc(); TLB.reserve(NewTL.getFullDataSize()); QualType Result = getDerived().TransformType(TLB, OldExpansionTL.getPatternLoc()); if (Result.isNull()) return nullptr; Result = RebuildPackExpansionType(Result, OldExpansionTL.getPatternLoc().getSourceRange(), OldExpansionTL.getEllipsisLoc(), NumExpansions); if (Result.isNull()) return nullptr; PackExpansionTypeLoc NewExpansionTL = TLB.push<PackExpansionTypeLoc>(Result); NewExpansionTL.setEllipsisLoc(OldExpansionTL.getEllipsisLoc()); NewDI = TLB.getTypeSourceInfo(SemaRef.Context, Result); } else NewDI = getDerived().TransformType(OldDI); if (!NewDI) return nullptr; if (NewDI == OldDI && indexAdjustment == 0) return OldParm; ParmVarDecl *newParm = ParmVarDecl::Create(SemaRef.Context, OldParm->getDeclContext(), OldParm->getInnerLocStart(), OldParm->getLocation(), OldParm->getIdentifier(), NewDI->getType(), NewDI, OldParm->getStorageClass(), /* DefArg */ nullptr, OldParm->getParamModifiers()); // HLSL Change newParm->setScopeInfo(OldParm->getFunctionScopeDepth(), OldParm->getFunctionScopeIndex() + indexAdjustment); return newParm; } template<typename Derived> bool TreeTransform<Derived>:: TransformFunctionTypeParams(SourceLocation Loc, ParmVarDecl **Params, unsigned NumParams, const QualType *ParamTypes, SmallVectorImpl<QualType> &OutParamTypes, SmallVectorImpl<ParmVarDecl*> *PVars) { int indexAdjustment = 0; for (unsigned i = 0; i != NumParams; ++i) { if (ParmVarDecl *OldParm = Params[i]) { assert(OldParm->getFunctionScopeIndex() == i); Optional<unsigned> NumExpansions; ParmVarDecl *NewParm = nullptr; if (OldParm->isParameterPack()) { // We have a function parameter pack that may need to be expanded. SmallVector<UnexpandedParameterPack, 2> Unexpanded; // Find the parameter packs that could be expanded. TypeLoc TL = OldParm->getTypeSourceInfo()->getTypeLoc(); PackExpansionTypeLoc ExpansionTL = TL.castAs<PackExpansionTypeLoc>(); TypeLoc Pattern = ExpansionTL.getPatternLoc(); SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded); assert(Unexpanded.size() > 0 && "Could not find parameter packs!"); // Determine whether we should expand the parameter packs. bool ShouldExpand = false; bool RetainExpansion = false; Optional<unsigned> OrigNumExpansions = ExpansionTL.getTypePtr()->getNumExpansions(); NumExpansions = OrigNumExpansions; if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(), Pattern.getSourceRange(), Unexpanded, ShouldExpand, RetainExpansion, NumExpansions)) { return true; } if (ShouldExpand) { // Expand the function parameter pack into multiple, separate // parameters. getDerived().ExpandingFunctionParameterPack(OldParm); for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I); ParmVarDecl *NewParm = getDerived().TransformFunctionTypeParam(OldParm, indexAdjustment++, OrigNumExpansions, /*ExpectParameterPack=*/false); if (!NewParm) return true; OutParamTypes.push_back(NewParm->getType()); if (PVars) PVars->push_back(NewParm); } // If we're supposed to retain a pack expansion, do so by temporarily // forgetting the partially-substituted parameter pack. 
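  // For instance (illustrative): with 'template<class... Ts> void f(Ts...)'
  // called as 'f<int>(...)', the prefix {int} is already substituted while
  // the tail of the pack remains open, so one extra pack-expansion
  // parameter is emitted below to stand for the unexpanded remainder.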
if (RetainExpansion) { ForgetPartiallySubstitutedPackRAII Forget(getDerived()); ParmVarDecl *NewParm = getDerived().TransformFunctionTypeParam(OldParm, indexAdjustment++, OrigNumExpansions, /*ExpectParameterPack=*/false); if (!NewParm) return true; OutParamTypes.push_back(NewParm->getType()); if (PVars) PVars->push_back(NewParm); } // The next parameter should have the same adjustment as the // last thing we pushed, but we post-incremented indexAdjustment // on every push. Also, if we push nothing, the adjustment should // go down by one. indexAdjustment--; // We're done with the pack expansion. continue; } // We'll substitute the parameter now without expanding the pack // expansion. Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); NewParm = getDerived().TransformFunctionTypeParam(OldParm, indexAdjustment, NumExpansions, /*ExpectParameterPack=*/true); } else { NewParm = getDerived().TransformFunctionTypeParam( OldParm, indexAdjustment, None, /*ExpectParameterPack=*/ false); } if (!NewParm) return true; OutParamTypes.push_back(NewParm->getType()); if (PVars) PVars->push_back(NewParm); continue; } // Deal with the possibility that we don't have a parameter // declaration for this parameter. QualType OldType = ParamTypes[i]; bool IsPackExpansion = false; Optional<unsigned> NumExpansions; QualType NewType; if (const PackExpansionType *Expansion = dyn_cast<PackExpansionType>(OldType)) { // We have a function parameter pack that may need to be expanded. QualType Pattern = Expansion->getPattern(); SmallVector<UnexpandedParameterPack, 2> Unexpanded; getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded); // Determine whether we should expand the parameter packs. bool ShouldExpand = false; bool RetainExpansion = false; if (getDerived().TryExpandParameterPacks(Loc, SourceRange(), Unexpanded, ShouldExpand, RetainExpansion, NumExpansions)) { return true; } if (ShouldExpand) { // Expand the function parameter pack into multiple, separate // parameters. for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I); QualType NewType = getDerived().TransformType(Pattern); if (NewType.isNull()) return true; OutParamTypes.push_back(NewType); if (PVars) PVars->push_back(nullptr); } // We're done with the pack expansion. continue; } // If we're supposed to retain a pack expansion, do so by temporarily // forgetting the partially-substituted parameter pack. if (RetainExpansion) { ForgetPartiallySubstitutedPackRAII Forget(getDerived()); QualType NewType = getDerived().TransformType(Pattern); if (NewType.isNull()) return true; OutParamTypes.push_back(NewType); if (PVars) PVars->push_back(nullptr); } // We'll substitute the parameter now without expanding the pack // expansion. 
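  // E.g. (illustrative): when the pack's size is not yet known, only the
  // pattern is transformed (with no active pack index) and the result is
  // re-wrapped as a pack expansion below.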
OldType = Expansion->getPattern(); IsPackExpansion = true; Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); NewType = getDerived().TransformType(OldType); } else { NewType = getDerived().TransformType(OldType); } if (NewType.isNull()) return true; if (IsPackExpansion) NewType = getSema().Context.getPackExpansionType(NewType, NumExpansions); OutParamTypes.push_back(NewType); if (PVars) PVars->push_back(nullptr); } #ifndef NDEBUG if (PVars) { for (unsigned i = 0, e = PVars->size(); i != e; ++i) if (ParmVarDecl *parm = (*PVars)[i]) assert(parm->getFunctionScopeIndex() == i); } #endif return false; } template<typename Derived> QualType TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB, FunctionProtoTypeLoc TL) { SmallVector<QualType, 4> ExceptionStorage; TreeTransform *This = this; // Work around gcc.gnu.org/PR56135. return getDerived().TransformFunctionProtoType( TLB, TL, nullptr, 0, [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) { return This->TransformExceptionSpec(TL.getBeginLoc(), ESI, ExceptionStorage, Changed); }); } template<typename Derived> template<typename Fn> QualType TreeTransform<Derived>::TransformFunctionProtoType( TypeLocBuilder &TLB, FunctionProtoTypeLoc TL, CXXRecordDecl *ThisContext, unsigned ThisTypeQuals, Fn TransformExceptionSpec) { // Transform the parameters and return type. // // We are required to instantiate the params and return type in source order. // When the function has a trailing return type, we instantiate the // parameters before the return type, since the return type can then refer // to the parameters themselves (via decltype, sizeof, etc.). // SmallVector<QualType, 4> ParamTypes; SmallVector<ParmVarDecl*, 4> ParamDecls; const FunctionProtoType *T = TL.getTypePtr(); QualType ResultType; if (T->hasTrailingReturn()) { if (getDerived().TransformFunctionTypeParams( TL.getBeginLoc(), TL.getParmArray(), TL.getNumParams(), TL.getTypePtr()->param_type_begin(), ParamTypes, &ParamDecls)) return QualType(); { // C++11 [expr.prim.general]p3: // If a declaration declares a member function or member function // template of a class X, the expression this is a prvalue of type // "pointer to cv-qualifier-seq X" between the optional cv-qualifier-seq // and the end of the function-definition, member-declarator, or // declarator. Sema::CXXThisScopeRAII ThisScope(SemaRef, ThisContext, ThisTypeQuals); ResultType = getDerived().TransformType(TLB, TL.getReturnLoc()); if (ResultType.isNull()) return QualType(); } } else { ResultType = getDerived().TransformType(TLB, TL.getReturnLoc()); if (ResultType.isNull()) return QualType(); if (getDerived().TransformFunctionTypeParams( TL.getBeginLoc(), TL.getParmArray(), TL.getNumParams(), TL.getTypePtr()->param_type_begin(), ParamTypes, &ParamDecls)) return QualType(); } FunctionProtoType::ExtProtoInfo EPI = T->getExtProtoInfo(); bool EPIChanged = false; if (TransformExceptionSpec(EPI.ExceptionSpec, EPIChanged)) return QualType(); // FIXME: Need to transform ConsumedParameters for variadic template // expansion.
QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType() || T->getNumParams() != ParamTypes.size() || !std::equal(T->param_type_begin(), T->param_type_end(), ParamTypes.begin()) || EPIChanged) { // HLSL Change - FIX - We should move param mods to parameter QualTypes Result = getDerived().RebuildFunctionProtoType(ResultType, ParamTypes, T->getParamMods(), EPI); // HLSL Change - End if (Result.isNull()) return QualType(); } FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result); NewTL.setLocalRangeBegin(TL.getLocalRangeBegin()); NewTL.setLParenLoc(TL.getLParenLoc()); NewTL.setRParenLoc(TL.getRParenLoc()); NewTL.setLocalRangeEnd(TL.getLocalRangeEnd()); for (unsigned i = 0, e = NewTL.getNumParams(); i != e; ++i) NewTL.setParam(i, ParamDecls[i]); return Result; } template<typename Derived> bool TreeTransform<Derived>::TransformExceptionSpec( SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &Exceptions, bool &Changed) { assert(ESI.Type != EST_Uninstantiated && ESI.Type != EST_Unevaluated); // Instantiate a dynamic noexcept expression, if any. if (ESI.Type == EST_ComputedNoexcept) { EnterExpressionEvaluationContext Unevaluated(getSema(), Sema::ConstantEvaluated); ExprResult NoexceptExpr = getDerived().TransformExpr(ESI.NoexceptExpr); if (NoexceptExpr.isInvalid()) return true; NoexceptExpr = getSema().CheckBooleanCondition( NoexceptExpr.get(), NoexceptExpr.get()->getLocStart()); if (NoexceptExpr.isInvalid()) return true; if (!NoexceptExpr.get()->isValueDependent()) { NoexceptExpr = getSema().VerifyIntegerConstantExpression( NoexceptExpr.get(), nullptr, diag::err_noexcept_needs_constant_expression, /*AllowFold*/false); if (NoexceptExpr.isInvalid()) return true; } if (ESI.NoexceptExpr != NoexceptExpr.get()) Changed = true; ESI.NoexceptExpr = NoexceptExpr.get(); } if (ESI.Type != EST_Dynamic) return false; // Instantiate a dynamic exception specification's type. for (QualType T : ESI.Exceptions) { if (const PackExpansionType *PackExpansion = T->getAs<PackExpansionType>()) { Changed = true; // We have a pack expansion. Instantiate it. SmallVector<UnexpandedParameterPack, 2> Unexpanded; SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(), Unexpanded); assert(!Unexpanded.empty() && "Pack expansion without parameter packs?"); // Determine whether the set of unexpanded parameter packs can and // should // be expanded. bool Expand = false; bool RetainExpansion = false; Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions(); // FIXME: Track the location of the ellipsis (and track source location // information for the types in the exception specification in general). if (getDerived().TryExpandParameterPacks( Loc, SourceRange(), Unexpanded, Expand, RetainExpansion, NumExpansions)) return true; if (!Expand) { // We can't expand this pack expansion into separate arguments yet; // just substitute into the pattern and create a new pack expansion // type. Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); QualType U = getDerived().TransformType(PackExpansion->getPattern()); if (U.isNull()) return true; U = SemaRef.Context.getPackExpansionType(U, NumExpansions); Exceptions.push_back(U); continue; } // Substitute into the pack expansion pattern for each slice of the // pack. 
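  // For example: an exception specification written as 'throw(Ts...)' with
  // Ts = {int, char} expands below into the two concrete exception types
  // 'int' and 'char', each checked as it is produced.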
for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), ArgIdx); QualType U = getDerived().TransformType(PackExpansion->getPattern()); if (U.isNull() || SemaRef.CheckSpecifiedExceptionType(U, Loc)) return true; Exceptions.push_back(U); } } else { QualType U = getDerived().TransformType(T); if (U.isNull() || SemaRef.CheckSpecifiedExceptionType(U, Loc)) return true; if (T != U) Changed = true; Exceptions.push_back(U); } } ESI.Exceptions = Exceptions; return false; } template<typename Derived> QualType TreeTransform<Derived>::TransformFunctionNoProtoType( TypeLocBuilder &TLB, FunctionNoProtoTypeLoc TL) { const FunctionNoProtoType *T = TL.getTypePtr(); QualType ResultType = getDerived().TransformType(TLB, TL.getReturnLoc()); if (ResultType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ResultType != T->getReturnType()) Result = getDerived().RebuildFunctionNoProtoType(ResultType); FunctionNoProtoTypeLoc NewTL = TLB.push<FunctionNoProtoTypeLoc>(Result); NewTL.setLocalRangeBegin(TL.getLocalRangeBegin()); NewTL.setLParenLoc(TL.getLParenLoc()); NewTL.setRParenLoc(TL.getRParenLoc()); NewTL.setLocalRangeEnd(TL.getLocalRangeEnd()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB, UnresolvedUsingTypeLoc TL) { const UnresolvedUsingType *T = TL.getTypePtr(); Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()); if (!D) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || D != T->getDecl()) { Result = getDerived().RebuildUnresolvedUsingType(D); if (Result.isNull()) return QualType(); } // We might get an arbitrary type spec type back. We should at // least always get a type spec type, though. 
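  // For instance (illustrative): a dependent 'using Base<T>::type;' may
  // resolve after substitution to a typedef, an enum, or a class; the
  // pushTypeSpec below is agnostic to which type spec kind comes back.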
TypeSpecTypeLoc NewTL = TLB.pushTypeSpec(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) { const TypedefType *T = TL.getTypePtr(); TypedefNameDecl *Typedef = cast_or_null<TypedefNameDecl>(getDerived().TransformDecl(TL.getNameLoc(), T->getDecl())); if (!Typedef) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || Typedef != T->getDecl()) { Result = getDerived().RebuildTypedefType(Typedef); if (Result.isNull()) return QualType(); } TypedefTypeLoc NewTL = TLB.push<TypedefTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB, TypeOfExprTypeLoc TL) { // typeof expressions are not potentially evaluated contexts EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, Sema::ReuseLambdaContextDecl); ExprResult E = getDerived().TransformExpr(TL.getUnderlyingExpr()); if (E.isInvalid()) return QualType(); E = SemaRef.HandleExprEvaluationContextForTypeof(E.get()); if (E.isInvalid()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || E.get() != TL.getUnderlyingExpr()) { Result = getDerived().RebuildTypeOfExprType(E.get(), TL.getTypeofLoc()); if (Result.isNull()) return QualType(); } else E.get(); TypeOfExprTypeLoc NewTL = TLB.push<TypeOfExprTypeLoc>(Result); NewTL.setTypeofLoc(TL.getTypeofLoc()); NewTL.setLParenLoc(TL.getLParenLoc()); NewTL.setRParenLoc(TL.getRParenLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB, TypeOfTypeLoc TL) { TypeSourceInfo* Old_Under_TI = TL.getUnderlyingTInfo(); TypeSourceInfo* New_Under_TI = getDerived().TransformType(Old_Under_TI); if (!New_Under_TI) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || New_Under_TI != Old_Under_TI) { Result = getDerived().RebuildTypeOfType(New_Under_TI->getType()); if (Result.isNull()) return QualType(); } TypeOfTypeLoc NewTL = TLB.push<TypeOfTypeLoc>(Result); NewTL.setTypeofLoc(TL.getTypeofLoc()); NewTL.setLParenLoc(TL.getLParenLoc()); NewTL.setRParenLoc(TL.getRParenLoc()); NewTL.setUnderlyingTInfo(New_Under_TI); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB, DecltypeTypeLoc TL) { const DecltypeType *T = TL.getTypePtr(); // decltype expressions are not potentially evaluated contexts EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, nullptr, /*IsDecltype=*/ true); ExprResult E = getDerived().TransformExpr(T->getUnderlyingExpr()); if (E.isInvalid()) return QualType(); E = getSema().ActOnDecltypeExpression(E.get()); if (E.isInvalid()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || E.get() != T->getUnderlyingExpr()) { Result = getDerived().RebuildDecltypeType(E.get(), TL.getNameLoc()); if (Result.isNull()) return QualType(); } else E.get(); DecltypeTypeLoc NewTL = TLB.push<DecltypeTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformUnaryTransformType( TypeLocBuilder &TLB, UnaryTransformTypeLoc TL) { QualType Result = TL.getType(); if (Result->isDependentType()) { const UnaryTransformType *T = TL.getTypePtr(); QualType NewBase = 
getDerived().TransformType(TL.getUnderlyingTInfo())->getType(); Result = getDerived().RebuildUnaryTransformType(NewBase, T->getUTTKind(), TL.getKWLoc()); if (Result.isNull()) return QualType(); } UnaryTransformTypeLoc NewTL = TLB.push<UnaryTransformTypeLoc>(Result); NewTL.setKWLoc(TL.getKWLoc()); NewTL.setParensRange(TL.getParensRange()); NewTL.setUnderlyingTInfo(TL.getUnderlyingTInfo()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) { const AutoType *T = TL.getTypePtr(); QualType OldDeduced = T->getDeducedType(); QualType NewDeduced; if (!OldDeduced.isNull()) { NewDeduced = getDerived().TransformType(OldDeduced); if (NewDeduced.isNull()) return QualType(); } QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced || T->isDependentType()) { Result = getDerived().RebuildAutoType(NewDeduced, T->isDecltypeAuto()); if (Result.isNull()) return QualType(); } AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB, RecordTypeLoc TL) { const RecordType *T = TL.getTypePtr(); RecordDecl *Record = cast_or_null<RecordDecl>(getDerived().TransformDecl(TL.getNameLoc(), T->getDecl())); if (!Record) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || Record != T->getDecl()) { Result = getDerived().RebuildRecordType(Record); if (Result.isNull()) return QualType(); } RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformEnumType(TypeLocBuilder &TLB, EnumTypeLoc TL) { const EnumType *T = TL.getTypePtr(); EnumDecl *Enum = cast_or_null<EnumDecl>(getDerived().TransformDecl(TL.getNameLoc(), T->getDecl())); if (!Enum) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || Enum != T->getDecl()) { Result = getDerived().RebuildEnumType(Enum); if (Result.isNull()) return QualType(); } EnumTypeLoc NewTL = TLB.push<EnumTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformInjectedClassNameType( TypeLocBuilder &TLB, InjectedClassNameTypeLoc TL) { Decl *D = getDerived().TransformDecl(TL.getNameLoc(), TL.getTypePtr()->getDecl()); if (!D) return QualType(); QualType T = SemaRef.Context.getTypeDeclType(cast<TypeDecl>(D)); TLB.pushTypeSpec(T).setNameLoc(TL.getNameLoc()); return T; } template<typename Derived> QualType TreeTransform<Derived>::TransformTemplateTypeParmType( TypeLocBuilder &TLB, TemplateTypeParmTypeLoc TL) { return TransformTypeSpecType(TLB, TL); } template<typename Derived> QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType( TypeLocBuilder &TLB, SubstTemplateTypeParmTypeLoc TL) { const SubstTemplateTypeParmType *T = TL.getTypePtr(); // Substitute into the replacement type, which itself might involve something // that needs to be transformed. This only tends to occur with default // template arguments of template template parameters. TemporaryBase Rebase(*this, TL.getNameLoc(), DeclarationName()); QualType Replacement = getDerived().TransformType(T->getReplacementType()); if (Replacement.isNull()) return QualType(); // Always canonicalize the replacement type. 
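  // E.g. (illustrative): a replacement spelled through a typedef such as
  // 'MyInt' is canonicalized to 'int' here, since the AST uniques
  // SubstTemplateTypeParmType nodes on the canonical replacement type.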
Replacement = SemaRef.Context.getCanonicalType(Replacement); QualType Result = SemaRef.Context.getSubstTemplateTypeParmType(T->getReplacedParameter(), Replacement); // Propagate type-source information. SubstTemplateTypeParmTypeLoc NewTL = TLB.push<SubstTemplateTypeParmTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType( TypeLocBuilder &TLB, SubstTemplateTypeParmPackTypeLoc TL) { return TransformTypeSpecType(TLB, TL); } template<typename Derived> QualType TreeTransform<Derived>::TransformTemplateSpecializationType( TypeLocBuilder &TLB, TemplateSpecializationTypeLoc TL) { const TemplateSpecializationType *T = TL.getTypePtr(); // The nested-name-specifier never matters in a TemplateSpecializationType, // because we can't have a dependent nested-name-specifier anyway. CXXScopeSpec SS; TemplateName Template = getDerived().TransformTemplateName(SS, T->getTemplateName(), TL.getTemplateNameLoc()); if (Template.isNull()) return QualType(); return getDerived().TransformTemplateSpecializationType(TLB, TL, Template); } template<typename Derived> QualType TreeTransform<Derived>::TransformAtomicType(TypeLocBuilder &TLB, AtomicTypeLoc TL) { QualType ValueType = getDerived().TransformType(TLB, TL.getValueLoc()); if (ValueType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || ValueType != TL.getValueLoc().getType()) { Result = getDerived().RebuildAtomicType(ValueType, TL.getKWLoc()); if (Result.isNull()) return QualType(); } AtomicTypeLoc NewTL = TLB.push<AtomicTypeLoc>(Result); NewTL.setKWLoc(TL.getKWLoc()); NewTL.setLParenLoc(TL.getLParenLoc()); NewTL.setRParenLoc(TL.getRParenLoc()); return Result; } /// \brief Simple iterator that traverses the template arguments in a /// container that provides a \c getArgLoc() member function. /// /// This iterator is intended to be used with the iterator form of /// \c TreeTransform<Derived>::TransformTemplateArguments(). 
template<typename ArgLocContainer> class TemplateArgumentLocContainerIterator { ArgLocContainer *Container; unsigned Index; public: typedef TemplateArgumentLoc value_type; typedef TemplateArgumentLoc reference; typedef int difference_type; typedef std::input_iterator_tag iterator_category; class pointer { TemplateArgumentLoc Arg; public: explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { } const TemplateArgumentLoc *operator->() const { return &Arg; } }; TemplateArgumentLocContainerIterator() {} TemplateArgumentLocContainerIterator(ArgLocContainer &Container, unsigned Index) : Container(&Container), Index(Index) { } TemplateArgumentLocContainerIterator &operator++() { ++Index; return *this; } TemplateArgumentLocContainerIterator operator++(int) { TemplateArgumentLocContainerIterator Old(*this); ++(*this); return Old; } TemplateArgumentLoc operator*() const { return Container->getArgLoc(Index); } pointer operator->() const { return pointer(Container->getArgLoc(Index)); } friend bool operator==(const TemplateArgumentLocContainerIterator &X, const TemplateArgumentLocContainerIterator &Y) { return X.Container == Y.Container && X.Index == Y.Index; } friend bool operator!=(const TemplateArgumentLocContainerIterator &X, const TemplateArgumentLocContainerIterator &Y) { return !(X == Y); } }; template <typename Derived> QualType TreeTransform<Derived>::TransformTemplateSpecializationType( TypeLocBuilder &TLB, TemplateSpecializationTypeLoc TL, TemplateName Template) { TemplateArgumentListInfo NewTemplateArgs; NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc()); NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc()); typedef TemplateArgumentLocContainerIterator<TemplateSpecializationTypeLoc> ArgIterator; if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0), ArgIterator(TL, TL.getNumArgs()), NewTemplateArgs)) return QualType(); // FIXME: maybe don't rebuild if all the template arguments are the same. QualType Result = getDerived().RebuildTemplateSpecializationType(Template, TL.getTemplateNameLoc(), NewTemplateArgs); if (!Result.isNull()) { // Specializations of template template parameters are represented as // TemplateSpecializationTypes, and substitution of type alias templates // within a dependent context can transform them into // DependentTemplateSpecializationTypes. 
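  // For example (illustrative): an alias such as
  //   template<class T> using Apply = typename T::template apply<int>;
  // can, while 'T' is still dependent, rebuild to a
  // DependentTemplateSpecializationType rather than another
  // TemplateSpecializationType, so both TypeLoc shapes are handled below.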
if (isa<DependentTemplateSpecializationType>(Result)) { DependentTemplateSpecializationTypeLoc NewTL = TLB.push<DependentTemplateSpecializationTypeLoc>(Result); NewTL.setElaboratedKeywordLoc(SourceLocation()); NewTL.setQualifierLoc(NestedNameSpecifierLoc()); NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc()); NewTL.setTemplateNameLoc(TL.getTemplateNameLoc()); NewTL.setLAngleLoc(TL.getLAngleLoc()); NewTL.setRAngleLoc(TL.getRAngleLoc()); for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i) NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo()); return Result; } TemplateSpecializationTypeLoc NewTL = TLB.push<TemplateSpecializationTypeLoc>(Result); NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc()); NewTL.setTemplateNameLoc(TL.getTemplateNameLoc()); NewTL.setLAngleLoc(TL.getLAngleLoc()); NewTL.setRAngleLoc(TL.getRAngleLoc()); for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i) NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo()); } return Result; } template <typename Derived> QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType( TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL, TemplateName Template, CXXScopeSpec &SS) { TemplateArgumentListInfo NewTemplateArgs; NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc()); NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc()); typedef TemplateArgumentLocContainerIterator< DependentTemplateSpecializationTypeLoc> ArgIterator; if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0), ArgIterator(TL, TL.getNumArgs()), NewTemplateArgs)) return QualType(); // FIXME: maybe don't rebuild if all the template arguments are the same. if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) { QualType Result = getSema().Context.getDependentTemplateSpecializationType( TL.getTypePtr()->getKeyword(), DTN->getQualifier(), DTN->getIdentifier(), NewTemplateArgs); DependentTemplateSpecializationTypeLoc NewTL = TLB.push<DependentTemplateSpecializationTypeLoc>(Result); NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc()); NewTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context)); NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc()); NewTL.setTemplateNameLoc(TL.getTemplateNameLoc()); NewTL.setLAngleLoc(TL.getLAngleLoc()); NewTL.setRAngleLoc(TL.getRAngleLoc()); for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i) NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo()); return Result; } QualType Result = getDerived().RebuildTemplateSpecializationType(Template, TL.getTemplateNameLoc(), NewTemplateArgs); if (!Result.isNull()) { /// FIXME: Wrap this in an elaborated-type-specifier? TemplateSpecializationTypeLoc NewTL = TLB.push<TemplateSpecializationTypeLoc>(Result); NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc()); NewTL.setTemplateNameLoc(TL.getTemplateNameLoc()); NewTL.setLAngleLoc(TL.getLAngleLoc()); NewTL.setRAngleLoc(TL.getRAngleLoc()); for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i) NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo()); } return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB, ElaboratedTypeLoc TL) { const ElaboratedType *T = TL.getTypePtr(); NestedNameSpecifierLoc QualifierLoc; // NOTE: the qualifier in an ElaboratedType is optional. 
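  // For example: both 'struct S' and 'struct ns::S' are elaborated type
  // specifiers, but only the latter carries a nested-name-specifier that
  // needs transforming here.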
if (TL.getQualifierLoc()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc()); if (!QualifierLoc) return QualType(); } QualType NamedT = getDerived().TransformType(TLB, TL.getNamedTypeLoc()); if (NamedT.isNull()) return QualType(); // C++0x [dcl.type.elab]p2: // If the identifier resolves to a typedef-name or the simple-template-id // resolves to an alias template specialization, the // elaborated-type-specifier is ill-formed. if (T->getKeyword() != ETK_None && T->getKeyword() != ETK_Typename) { if (const TemplateSpecializationType *TST = NamedT->getAs<TemplateSpecializationType>()) { TemplateName Template = TST->getTemplateName(); if (TypeAliasTemplateDecl *TAT = dyn_cast_or_null<TypeAliasTemplateDecl>( Template.getAsTemplateDecl())) { SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(), diag::err_tag_reference_non_tag) << 4; SemaRef.Diag(TAT->getLocation(), diag::note_declared_at); } } } QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || QualifierLoc != TL.getQualifierLoc() || NamedT != T->getNamedType()) { Result = getDerived().RebuildElaboratedType(TL.getElaboratedKeywordLoc(), T->getKeyword(), QualifierLoc, NamedT); if (Result.isNull()) return QualType(); } ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result); NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc()); NewTL.setQualifierLoc(QualifierLoc); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformAttributedType( TypeLocBuilder &TLB, AttributedTypeLoc TL) { const AttributedType *oldType = TL.getTypePtr(); QualType modifiedType = getDerived().TransformType(TLB, TL.getModifiedLoc()); if (modifiedType.isNull()) return QualType(); QualType result = TL.getType(); // FIXME: dependent operand expressions? if (getDerived().AlwaysRebuild() || modifiedType != oldType->getModifiedType()) { // TODO: this is really lame; we should really be rebuilding the // equivalent type from first principles. QualType equivalentType = getDerived().TransformType(oldType->getEquivalentType()); if (equivalentType.isNull()) return QualType(); // Check whether we can add nullability; it is only represented as // type sugar, and therefore cannot be diagnosed in any other way. 
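  // For instance (illustrative): substituting T = int into a parameter
  // written 'T _Nullable p' leaves the nullability attribute with nothing
  // to attach to, so the error must be issued here while the attribute's
  // source location is still available.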
if (auto nullability = oldType->getImmediateNullability()) { if (!modifiedType->canHaveNullability()) { SemaRef.Diag(TL.getAttrNameLoc(), diag::err_nullability_nonpointer) << DiagNullabilityKind(*nullability, false) << modifiedType; return QualType(); } } result = SemaRef.Context.getAttributedType(oldType->getAttrKind(), modifiedType, equivalentType); } AttributedTypeLoc newTL = TLB.push<AttributedTypeLoc>(result); newTL.setAttrNameLoc(TL.getAttrNameLoc()); if (TL.hasAttrOperand()) newTL.setAttrOperandParensRange(TL.getAttrOperandParensRange()); if (TL.hasAttrExprOperand()) newTL.setAttrExprOperand(TL.getAttrExprOperand()); else if (TL.hasAttrEnumOperand()) newTL.setAttrEnumOperandLoc(TL.getAttrEnumOperandLoc()); return result; } template<typename Derived> QualType TreeTransform<Derived>::TransformParenType(TypeLocBuilder &TLB, ParenTypeLoc TL) { QualType Inner = getDerived().TransformType(TLB, TL.getInnerLoc()); if (Inner.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || Inner != TL.getInnerLoc().getType()) { Result = getDerived().RebuildParenType(Inner); if (Result.isNull()) return QualType(); } ParenTypeLoc NewTL = TLB.push<ParenTypeLoc>(Result); NewTL.setLParenLoc(TL.getLParenLoc()); NewTL.setRParenLoc(TL.getRParenLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB, DependentNameTypeLoc TL) { const DependentNameType *T = TL.getTypePtr(); NestedNameSpecifierLoc QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc()); if (!QualifierLoc) return QualType(); QualType Result = getDerived().RebuildDependentNameType(T->getKeyword(), TL.getElaboratedKeywordLoc(), QualifierLoc, T->getIdentifier(), TL.getNameLoc()); if (Result.isNull()) return QualType(); if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) { QualType NamedT = ElabT->getNamedType(); TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc()); ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result); NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc()); NewTL.setQualifierLoc(QualifierLoc); } else { DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result); NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc()); NewTL.setQualifierLoc(QualifierLoc); NewTL.setNameLoc(TL.getNameLoc()); } return Result; } template<typename Derived> QualType TreeTransform<Derived>:: TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL) { NestedNameSpecifierLoc QualifierLoc; if (TL.getQualifierLoc()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc()); if (!QualifierLoc) return QualType(); } return getDerived() .TransformDependentTemplateSpecializationType(TLB, TL, QualifierLoc); } template<typename Derived> QualType TreeTransform<Derived>:: TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB, DependentTemplateSpecializationTypeLoc TL, NestedNameSpecifierLoc QualifierLoc) { const DependentTemplateSpecializationType *T = TL.getTypePtr(); TemplateArgumentListInfo NewTemplateArgs; NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc()); NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc()); typedef TemplateArgumentLocContainerIterator< DependentTemplateSpecializationTypeLoc> ArgIterator; if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0), ArgIterator(TL, TL.getNumArgs()), NewTemplateArgs)) return QualType(); QualType Result = 
getDerived().RebuildDependentTemplateSpecializationType(T->getKeyword(), QualifierLoc, T->getIdentifier(), TL.getTemplateNameLoc(), NewTemplateArgs); if (Result.isNull()) return QualType(); if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) { QualType NamedT = ElabT->getNamedType(); // Copy information relevant to the template specialization. TemplateSpecializationTypeLoc NamedTL = TLB.push<TemplateSpecializationTypeLoc>(NamedT); NamedTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc()); NamedTL.setTemplateNameLoc(TL.getTemplateNameLoc()); NamedTL.setLAngleLoc(TL.getLAngleLoc()); NamedTL.setRAngleLoc(TL.getRAngleLoc()); for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I) NamedTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo()); // Copy information relevant to the elaborated type. ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result); NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc()); NewTL.setQualifierLoc(QualifierLoc); } else if (isa<DependentTemplateSpecializationType>(Result)) { DependentTemplateSpecializationTypeLoc SpecTL = TLB.push<DependentTemplateSpecializationTypeLoc>(Result); SpecTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc()); SpecTL.setQualifierLoc(QualifierLoc); SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc()); SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc()); SpecTL.setLAngleLoc(TL.getLAngleLoc()); SpecTL.setRAngleLoc(TL.getRAngleLoc()); for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I) SpecTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo()); } else { TemplateSpecializationTypeLoc SpecTL = TLB.push<TemplateSpecializationTypeLoc>(Result); SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc()); SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc()); SpecTL.setLAngleLoc(TL.getLAngleLoc()); SpecTL.setRAngleLoc(TL.getRAngleLoc()); for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I) SpecTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo()); } return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformPackExpansionType(TypeLocBuilder &TLB, PackExpansionTypeLoc TL) { QualType Pattern = getDerived().TransformType(TLB, TL.getPatternLoc()); if (Pattern.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || Pattern != TL.getPatternLoc().getType()) { Result = getDerived().RebuildPackExpansionType(Pattern, TL.getPatternLoc().getSourceRange(), TL.getEllipsisLoc(), TL.getTypePtr()->getNumExpansions()); if (Result.isNull()) return QualType(); } PackExpansionTypeLoc NewT = TLB.push<PackExpansionTypeLoc>(Result); NewT.setEllipsisLoc(TL.getEllipsisLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB, ObjCInterfaceTypeLoc TL) { // ObjCInterfaceType is never dependent. TLB.pushFullCopy(TL); return TL.getType(); } template<typename Derived> QualType TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB, ObjCObjectTypeLoc TL) { // Transform base type. QualType BaseType = getDerived().TransformType(TLB, TL.getBaseLoc()); if (BaseType.isNull()) return QualType(); bool AnyChanged = BaseType != TL.getBaseLoc().getType(); // Transform type arguments. 
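  // For example (illustrative), for a use of a parameterized Objective-C
  // class such as NSArray<T *>, each written type argument (including any
  // pack expansions among them) is substituted individually below.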
SmallVector<TypeSourceInfo *, 4> NewTypeArgInfos; for (unsigned i = 0, n = TL.getNumTypeArgs(); i != n; ++i) { TypeSourceInfo *TypeArgInfo = TL.getTypeArgTInfo(i); TypeLoc TypeArgLoc = TypeArgInfo->getTypeLoc(); QualType TypeArg = TypeArgInfo->getType(); if (auto PackExpansionLoc = TypeArgLoc.getAs<PackExpansionTypeLoc>()) { AnyChanged = true; // We have a pack expansion. Instantiate it. const auto *PackExpansion = PackExpansionLoc.getType() ->castAs<PackExpansionType>(); SmallVector<UnexpandedParameterPack, 2> Unexpanded; SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(), Unexpanded); assert(!Unexpanded.empty() && "Pack expansion without parameter packs?"); // Determine whether the set of unexpanded parameter packs can // and should be expanded. TypeLoc PatternLoc = PackExpansionLoc.getPatternLoc(); bool Expand = false; bool RetainExpansion = false; Optional<unsigned> NumExpansions = PackExpansion->getNumExpansions(); if (getDerived().TryExpandParameterPacks( PackExpansionLoc.getEllipsisLoc(), PatternLoc.getSourceRange(), Unexpanded, Expand, RetainExpansion, NumExpansions)) return QualType(); if (!Expand) { // We can't expand this pack expansion into separate arguments yet; // just substitute into the pattern and create a new pack expansion // type. Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); TypeLocBuilder TypeArgBuilder; TypeArgBuilder.reserve(PatternLoc.getFullDataSize()); QualType NewPatternType = getDerived().TransformType(TypeArgBuilder, PatternLoc); if (NewPatternType.isNull()) return QualType(); QualType NewExpansionType = SemaRef.Context.getPackExpansionType( NewPatternType, NumExpansions); auto NewExpansionLoc = TLB.push<PackExpansionTypeLoc>(NewExpansionType); NewExpansionLoc.setEllipsisLoc(PackExpansionLoc.getEllipsisLoc()); NewTypeArgInfos.push_back( TypeArgBuilder.getTypeSourceInfo(SemaRef.Context, NewExpansionType)); continue; } // Substitute into the pack expansion pattern for each slice of the // pack. for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), ArgIdx); TypeLocBuilder TypeArgBuilder; TypeArgBuilder.reserve(PatternLoc.getFullDataSize()); QualType NewTypeArg = getDerived().TransformType(TypeArgBuilder, PatternLoc); if (NewTypeArg.isNull()) return QualType(); NewTypeArgInfos.push_back( TypeArgBuilder.getTypeSourceInfo(SemaRef.Context, NewTypeArg)); } continue; } TypeLocBuilder TypeArgBuilder; TypeArgBuilder.reserve(TypeArgLoc.getFullDataSize()); QualType NewTypeArg = getDerived().TransformType(TypeArgBuilder, TypeArgLoc); if (NewTypeArg.isNull()) return QualType(); // If nothing changed, just keep the old TypeSourceInfo. if (NewTypeArg == TypeArg) { NewTypeArgInfos.push_back(TypeArgInfo); continue; } NewTypeArgInfos.push_back( TypeArgBuilder.getTypeSourceInfo(SemaRef.Context, NewTypeArg)); AnyChanged = true; } QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || AnyChanged) { // Rebuild the type. 
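    // (AnyChanged was set above if the base type or any written type
    // argument differs after substitution; only then is the node rebuilt.)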
Result = getDerived().RebuildObjCObjectType( BaseType, TL.getLocStart(), TL.getTypeArgsLAngleLoc(), NewTypeArgInfos, TL.getTypeArgsRAngleLoc(), TL.getProtocolLAngleLoc(), llvm::makeArrayRef(TL.getTypePtr()->qual_begin(), TL.getNumProtocols()), TL.getProtocolLocs(), TL.getProtocolRAngleLoc()); if (Result.isNull()) return QualType(); } ObjCObjectTypeLoc NewT = TLB.push<ObjCObjectTypeLoc>(Result); assert(TL.hasBaseTypeAsWritten() && "Can't be dependent"); NewT.setHasBaseTypeAsWritten(true); NewT.setTypeArgsLAngleLoc(TL.getTypeArgsLAngleLoc()); for (unsigned i = 0, n = TL.getNumTypeArgs(); i != n; ++i) NewT.setTypeArgTInfo(i, NewTypeArgInfos[i]); NewT.setTypeArgsRAngleLoc(TL.getTypeArgsRAngleLoc()); NewT.setProtocolLAngleLoc(TL.getProtocolLAngleLoc()); for (unsigned i = 0, n = TL.getNumProtocols(); i != n; ++i) NewT.setProtocolLoc(i, TL.getProtocolLoc(i)); NewT.setProtocolRAngleLoc(TL.getProtocolRAngleLoc()); return Result; } template<typename Derived> QualType TreeTransform<Derived>::TransformObjCObjectPointerType(TypeLocBuilder &TLB, ObjCObjectPointerTypeLoc TL) { QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc()); if (PointeeType.isNull()) return QualType(); QualType Result = TL.getType(); if (getDerived().AlwaysRebuild() || PointeeType != TL.getPointeeLoc().getType()) { Result = getDerived().RebuildObjCObjectPointerType(PointeeType, TL.getStarLoc()); if (Result.isNull()) return QualType(); } ObjCObjectPointerTypeLoc NewT = TLB.push<ObjCObjectPointerTypeLoc>(Result); NewT.setStarLoc(TL.getStarLoc()); return Result; } //===----------------------------------------------------------------------===// // Statement transformation //===----------------------------------------------------------------------===// template<typename Derived> StmtResult TreeTransform<Derived>::TransformNullStmt(NullStmt *S) { return S; } // HLSL Change Starts - adding hlsl discard stmt support template<typename Derived> StmtResult TreeTransform<Derived>::TransformDiscardStmt(DiscardStmt *S) { return S; } // HLSL Change Ends template<typename Derived> StmtResult TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S) { return getDerived().TransformCompoundStmt(S, false); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr) { Sema::CompoundScopeRAII CompoundScope(getSema()); bool SubStmtInvalid = false; bool SubStmtChanged = false; SmallVector<Stmt*, 8> Statements; for (auto *B : S->body()) { StmtResult Result = getDerived().TransformStmt(B); if (Result.isInvalid()) { // Immediately fail if this was a DeclStmt, since it's very // likely that this will cause problems for future statements. if (isa<DeclStmt>(B)) return StmtError(); // Otherwise, just keep processing substatements and fail later. SubStmtInvalid = true; continue; } SubStmtChanged = SubStmtChanged || Result.get() != B; Statements.push_back(Result.getAs<Stmt>()); } if (SubStmtInvalid) return StmtError(); if (!getDerived().AlwaysRebuild() && !SubStmtChanged) return S; return getDerived().RebuildCompoundStmt(S->getLBracLoc(), Statements, S->getRBracLoc(), IsStmtExpr); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformCaseStmt(CaseStmt *S) { ExprResult LHS, RHS; { EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::ConstantEvaluated); // Transform the left-hand case value. 
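  // Case values are constant expressions, e.g. "case N + 1:" with a
  // non-type template parameter N, so each operand is transformed and then
  // folded via ActOnConstantExpression inside the ConstantEvaluated context
  // entered above.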
  LHS = getDerived().TransformExpr(S->getLHS());
  LHS = SemaRef.ActOnConstantExpression(LHS);
  if (LHS.isInvalid())
    return StmtError();

  // Transform the right-hand case value (for the GNU case-range extension).
  RHS = getDerived().TransformExpr(S->getRHS());
  RHS = SemaRef.ActOnConstantExpression(RHS);
  if (RHS.isInvalid())
    return StmtError();
  }

  // Build the case statement.
  // Case statements are always rebuilt so that they will be attached to
  // their transformed switch statement.
  StmtResult Case = getDerived().RebuildCaseStmt(S->getCaseLoc(),
                                                 LHS.get(),
                                                 S->getEllipsisLoc(),
                                                 RHS.get(),
                                                 S->getColonLoc());
  if (Case.isInvalid())
    return StmtError();

  // Transform the statement following the case.
  StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
  if (SubStmt.isInvalid())
    return StmtError();

  // Attach the body to the case statement.
  return getDerived().RebuildCaseStmtBody(Case.get(), SubStmt.get());
}

template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformDefaultStmt(DefaultStmt *S) {
  // Transform the statement following the default case.
  StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
  if (SubStmt.isInvalid())
    return StmtError();

  // Default statements are always rebuilt.
  return getDerived().RebuildDefaultStmt(S->getDefaultLoc(), S->getColonLoc(),
                                         SubStmt.get());
}

template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S) {
  StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
  if (SubStmt.isInvalid())
    return StmtError();

  Decl *LD = getDerived().TransformDecl(S->getDecl()->getLocation(),
                                        S->getDecl());
  if (!LD)
    return StmtError();

  // FIXME: Pass the real colon location in.
  return getDerived().RebuildLabelStmt(S->getIdentLoc(),
                                       cast<LabelDecl>(LD), SourceLocation(),
                                       SubStmt.get());
}

template <typename Derived>
const Attr *TreeTransform<Derived>::TransformAttr(const Attr *R) {
  if (!R)
    return R;

  switch (R->getKind()) {
// Transform attributes with a pragma spelling by calling TransformXXXAttr.
#define ATTR(X)
#define PRAGMA_SPELLING_ATTR(X)                                                \
  case attr::X:                                                                \
    return getDerived().Transform##X##Attr(cast<X##Attr>(R));
#include "clang/Basic/AttrList.inc"
  default:
    return R;
  }
}

template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S) {
  bool AttrsChanged = false;
  SmallVector<const Attr *, 1> Attrs;

  // Visit attributes and keep track if any are transformed.
  for (const auto *I : S->getAttrs()) {
    const Attr *R = getDerived().TransformAttr(I);
    AttrsChanged |= (I != R);
    Attrs.push_back(R);
  }

  StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
  if (SubStmt.isInvalid())
    return StmtError();

  if (SubStmt.get() == S->getSubStmt() && !AttrsChanged)
    return S;

  return getDerived().RebuildAttributedStmt(S->getAttrLoc(), Attrs,
                                            SubStmt.get());
}

template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
  // Transform the condition.
  ExprResult Cond;
  VarDecl *ConditionVar = nullptr;
  if (S->getConditionVariable()) {
    ConditionVar = cast_or_null<VarDecl>(
        getDerived().TransformDefinition(
            S->getConditionVariable()->getLocation(),
            S->getConditionVariable()));
    if (!ConditionVar)
      return StmtError();
  } else {
    Cond = getDerived().TransformExpr(S->getCond());

    if (Cond.isInvalid())
      return StmtError();

    // Convert the condition to a boolean value.
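    // For example (illustrative), "if (p)" with a pointer-typed condition
    // has the usual contextual conversion to bool re-applied to the
    // substituted expression.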
if (S->getCond()) { ExprResult CondE = getSema().ActOnBooleanCondition(nullptr, S->getIfLoc(), Cond.get()); if (CondE.isInvalid()) return StmtError(); Cond = CondE.get(); } } Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.get())); if (!S->getConditionVariable() && S->getCond() && !FullCond.get()) return StmtError(); // Transform the "then" branch. StmtResult Then = getDerived().TransformStmt(S->getThen()); if (Then.isInvalid()) return StmtError(); // Transform the "else" branch. StmtResult Else = getDerived().TransformStmt(S->getElse()); if (Else.isInvalid()) return StmtError(); if (!getDerived().AlwaysRebuild() && FullCond.get() == S->getCond() && ConditionVar == S->getConditionVariable() && Then.get() == S->getThen() && Else.get() == S->getElse()) return S; return getDerived().RebuildIfStmt(S->getIfLoc(), FullCond, ConditionVar, Then.get(), S->getElseLoc(), Else.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformSwitchStmt(SwitchStmt *S) { // Transform the condition. ExprResult Cond; VarDecl *ConditionVar = nullptr; if (S->getConditionVariable()) { ConditionVar = cast_or_null<VarDecl>( getDerived().TransformDefinition( S->getConditionVariable()->getLocation(), S->getConditionVariable())); if (!ConditionVar) return StmtError(); } else { Cond = getDerived().TransformExpr(S->getCond()); if (Cond.isInvalid()) return StmtError(); } // Rebuild the switch statement. StmtResult Switch = getDerived().RebuildSwitchStmtStart(S->getSwitchLoc(), Cond.get(), ConditionVar); if (Switch.isInvalid()) return StmtError(); // Transform the body of the switch statement. StmtResult Body = getDerived().TransformStmt(S->getBody()); if (Body.isInvalid()) return StmtError(); // Complete the switch statement. return getDerived().RebuildSwitchStmtBody(S->getSwitchLoc(), Switch.get(), Body.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) { // Transform the condition ExprResult Cond; VarDecl *ConditionVar = nullptr; if (S->getConditionVariable()) { ConditionVar = cast_or_null<VarDecl>( getDerived().TransformDefinition( S->getConditionVariable()->getLocation(), S->getConditionVariable())); if (!ConditionVar) return StmtError(); } else { Cond = getDerived().TransformExpr(S->getCond()); if (Cond.isInvalid()) return StmtError(); if (S->getCond()) { // Convert the condition to a boolean value. 
ExprResult CondE = getSema().ActOnBooleanCondition(nullptr, S->getWhileLoc(), Cond.get()); if (CondE.isInvalid()) return StmtError(); Cond = CondE; } } Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.get())); if (!S->getConditionVariable() && S->getCond() && !FullCond.get()) return StmtError(); // Transform the body StmtResult Body = getDerived().TransformStmt(S->getBody()); if (Body.isInvalid()) return StmtError(); if (!getDerived().AlwaysRebuild() && FullCond.get() == S->getCond() && ConditionVar == S->getConditionVariable() && Body.get() == S->getBody()) return Owned(S); return getDerived().RebuildWhileStmt(S->getWhileLoc(), FullCond, ConditionVar, Body.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformDoStmt(DoStmt *S) { // Transform the body StmtResult Body = getDerived().TransformStmt(S->getBody()); if (Body.isInvalid()) return StmtError(); // Transform the condition ExprResult Cond = getDerived().TransformExpr(S->getCond()); if (Cond.isInvalid()) return StmtError(); if (!getDerived().AlwaysRebuild() && Cond.get() == S->getCond() && Body.get() == S->getBody()) return S; return getDerived().RebuildDoStmt(S->getDoLoc(), Body.get(), S->getWhileLoc(), /*FIXME:*/S->getWhileLoc(), Cond.get(), S->getRParenLoc()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformForStmt(ForStmt *S) { // Transform the initialization statement StmtResult Init = getDerived().TransformStmt(S->getInit()); if (Init.isInvalid()) return StmtError(); // Transform the condition ExprResult Cond; VarDecl *ConditionVar = nullptr; if (S->getConditionVariable()) { ConditionVar = cast_or_null<VarDecl>( getDerived().TransformDefinition( S->getConditionVariable()->getLocation(), S->getConditionVariable())); if (!ConditionVar) return StmtError(); } else { Cond = getDerived().TransformExpr(S->getCond()); if (Cond.isInvalid()) return StmtError(); if (S->getCond()) { // Convert the condition to a boolean value. ExprResult CondE = getSema().ActOnBooleanCondition(nullptr, S->getForLoc(), Cond.get()); if (CondE.isInvalid()) return StmtError(); Cond = CondE.get(); } } Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.get())); if (!S->getConditionVariable() && S->getCond() && !FullCond.get()) return StmtError(); // Transform the increment ExprResult Inc = getDerived().TransformExpr(S->getInc()); if (Inc.isInvalid()) return StmtError(); Sema::FullExprArg FullInc(getSema().MakeFullDiscardedValueExpr(Inc.get())); if (S->getInc() && !FullInc.get()) return StmtError(); // Transform the body StmtResult Body = getDerived().TransformStmt(S->getBody()); if (Body.isInvalid()) return StmtError(); if (!getDerived().AlwaysRebuild() && Init.get() == S->getInit() && FullCond.get() == S->getCond() && Inc.get() == S->getInc() && Body.get() == S->getBody()) return S; return getDerived().RebuildForStmt(S->getForLoc(), S->getLParenLoc(), Init.get(), FullCond, ConditionVar, FullInc, S->getRParenLoc(), Body.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformGotoStmt(GotoStmt *S) { Decl *LD = getDerived().TransformDecl(S->getLabel()->getLocation(), S->getLabel()); if (!LD) return StmtError(); // Goto statements must always be rebuilt, to resolve the label. 
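  // For example (illustrative), when instantiating a function template
  // containing "goto done;", the rebuilt goto must bind to the LabelDecl
  // created for the instantiated 'done' label, which is what TransformDecl
  // produced above.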
return getDerived().RebuildGotoStmt(S->getGotoLoc(), S->getLabelLoc(), cast<LabelDecl>(LD)); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformIndirectGotoStmt(IndirectGotoStmt *S) { ExprResult Target = getDerived().TransformExpr(S->getTarget()); if (Target.isInvalid()) return StmtError(); Target = SemaRef.MaybeCreateExprWithCleanups(Target.get()); if (!getDerived().AlwaysRebuild() && Target.get() == S->getTarget()) return S; return getDerived().RebuildIndirectGotoStmt(S->getGotoLoc(), S->getStarLoc(), Target.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformContinueStmt(ContinueStmt *S) { return S; } template<typename Derived> StmtResult TreeTransform<Derived>::TransformBreakStmt(BreakStmt *S) { return S; } template<typename Derived> StmtResult TreeTransform<Derived>::TransformReturnStmt(ReturnStmt *S) { ExprResult Result = getDerived().TransformInitializer(S->getRetValue(), /*NotCopyInit*/false); if (Result.isInvalid()) return StmtError(); // FIXME: We always rebuild the return statement because there is no way // to tell whether the return type of the function has changed. return getDerived().RebuildReturnStmt(S->getReturnLoc(), Result.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) { bool DeclChanged = false; SmallVector<Decl *, 4> Decls; for (auto *D : S->decls()) { Decl *Transformed = getDerived().TransformDefinition(D->getLocation(), D); if (!Transformed) return StmtError(); if (Transformed != D) DeclChanged = true; Decls.push_back(Transformed); } if (!getDerived().AlwaysRebuild() && !DeclChanged) return S; return getDerived().RebuildDeclStmt(Decls, S->getStartLoc(), S->getEndLoc()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformGCCAsmStmt(GCCAsmStmt *S) { SmallVector<Expr*, 8> Constraints; SmallVector<Expr*, 8> Exprs; SmallVector<IdentifierInfo *, 4> Names; ExprResult AsmString; SmallVector<Expr*, 8> Clobbers; bool ExprsChanged = false; // Go through the outputs. for (unsigned I = 0, E = S->getNumOutputs(); I != E; ++I) { Names.push_back(S->getOutputIdentifier(I)); // No need to transform the constraint literal. Constraints.push_back(S->getOutputConstraintLiteral(I)); // Transform the output expr. Expr *OutputExpr = S->getOutputExpr(I); ExprResult Result = getDerived().TransformExpr(OutputExpr); if (Result.isInvalid()) return StmtError(); ExprsChanged |= Result.get() != OutputExpr; Exprs.push_back(Result.get()); } // Go through the inputs. for (unsigned I = 0, E = S->getNumInputs(); I != E; ++I) { Names.push_back(S->getInputIdentifier(I)); // No need to transform the constraint literal. Constraints.push_back(S->getInputConstraintLiteral(I)); // Transform the input expr. Expr *InputExpr = S->getInputExpr(I); ExprResult Result = getDerived().TransformExpr(InputExpr); if (Result.isInvalid()) return StmtError(); ExprsChanged |= Result.get() != InputExpr; Exprs.push_back(Result.get()); } if (!getDerived().AlwaysRebuild() && !ExprsChanged) return S; // Go through the clobbers. for (unsigned I = 0, E = S->getNumClobbers(); I != E; ++I) Clobbers.push_back(S->getClobberStringLiteral(I)); // No need to transform the asm string literal. 
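  // (The asm string is an ordinary string literal; it can never be type- or
  // value-dependent, so it is reused verbatim.)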
AsmString = S->getAsmString(); return getDerived().RebuildGCCAsmStmt(S->getAsmLoc(), S->isSimple(), S->isVolatile(), S->getNumOutputs(), S->getNumInputs(), Names.data(), Constraints, Exprs, AsmString.get(), Clobbers, S->getRParenLoc()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformMSAsmStmt(MSAsmStmt *S) { ArrayRef<Token> AsmToks = llvm::makeArrayRef(S->getAsmToks(), S->getNumAsmToks()); bool HadError = false, HadChange = false; ArrayRef<Expr*> SrcExprs = S->getAllExprs(); SmallVector<Expr*, 8> TransformedExprs; TransformedExprs.reserve(SrcExprs.size()); for (unsigned i = 0, e = SrcExprs.size(); i != e; ++i) { ExprResult Result = getDerived().TransformExpr(SrcExprs[i]); if (!Result.isUsable()) { HadError = true; } else { HadChange |= (Result.get() != SrcExprs[i]); TransformedExprs.push_back(Result.get()); } } if (HadError) return StmtError(); if (!HadChange && !getDerived().AlwaysRebuild()) return Owned(S); return getDerived().RebuildMSAsmStmt(S->getAsmLoc(), S->getLBraceLoc(), AsmToks, S->getAsmString(), S->getNumOutputs(), S->getNumInputs(), S->getAllConstraints(), S->getClobbers(), TransformedExprs, S->getEndLoc()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) { // Transform the body of the @try. StmtResult TryBody = getDerived().TransformStmt(S->getTryBody()); if (TryBody.isInvalid()) return StmtError(); // Transform the @catch statements (if present). bool AnyCatchChanged = false; SmallVector<Stmt*, 8> CatchStmts; for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) { StmtResult Catch = getDerived().TransformStmt(S->getCatchStmt(I)); if (Catch.isInvalid()) return StmtError(); if (Catch.get() != S->getCatchStmt(I)) AnyCatchChanged = true; CatchStmts.push_back(Catch.get()); } // Transform the @finally statement (if present). StmtResult Finally; if (S->getFinallyStmt()) { Finally = getDerived().TransformStmt(S->getFinallyStmt()); if (Finally.isInvalid()) return StmtError(); } // If nothing changed, just retain this statement. if (!getDerived().AlwaysRebuild() && TryBody.get() == S->getTryBody() && !AnyCatchChanged && Finally.get() == S->getFinallyStmt()) return S; // Build a new statement. return getDerived().RebuildObjCAtTryStmt(S->getAtTryLoc(), TryBody.get(), CatchStmts, Finally.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformObjCAtCatchStmt(ObjCAtCatchStmt *S) { // Transform the @catch parameter, if there is one. VarDecl *Var = nullptr; if (VarDecl *FromVar = S->getCatchParamDecl()) { TypeSourceInfo *TSInfo = nullptr; if (FromVar->getTypeSourceInfo()) { TSInfo = getDerived().TransformType(FromVar->getTypeSourceInfo()); if (!TSInfo) return StmtError(); } QualType T; if (TSInfo) T = TSInfo->getType(); else { T = getDerived().TransformType(FromVar->getType()); if (T.isNull()) return StmtError(); } Var = getDerived().RebuildObjCExceptionDecl(FromVar, TSInfo, T); if (!Var) return StmtError(); } StmtResult Body = getDerived().TransformStmt(S->getCatchBody()); if (Body.isInvalid()) return StmtError(); return getDerived().RebuildObjCAtCatchStmt(S->getAtCatchLoc(), S->getRParenLoc(), Var, Body.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformObjCAtFinallyStmt(ObjCAtFinallyStmt *S) { // Transform the body. StmtResult Body = getDerived().TransformStmt(S->getFinallyBody()); if (Body.isInvalid()) return StmtError(); // If nothing changed, just retain this statement. 
  if (!getDerived().AlwaysRebuild() &&
      Body.get() == S->getFinallyBody())
    return S;

  // Build a new statement.
  return getDerived().RebuildObjCAtFinallyStmt(S->getAtFinallyLoc(),
                                               Body.get());
}

template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformObjCAtThrowStmt(ObjCAtThrowStmt *S) {
  ExprResult Operand;
  if (S->getThrowExpr()) {
    Operand = getDerived().TransformExpr(S->getThrowExpr());
    if (Operand.isInvalid())
      return StmtError();
  }

  if (!getDerived().AlwaysRebuild() &&
      Operand.get() == S->getThrowExpr())
    return S;

  return getDerived().RebuildObjCAtThrowStmt(S->getThrowLoc(), Operand.get());
}

template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformObjCAtSynchronizedStmt(
                                              ObjCAtSynchronizedStmt *S) {
  // Transform the object we are locking.
  ExprResult Object = getDerived().TransformExpr(S->getSynchExpr());
  if (Object.isInvalid())
    return StmtError();
  Object =
    getDerived().RebuildObjCAtSynchronizedOperand(S->getAtSynchronizedLoc(),
                                                  Object.get());
  if (Object.isInvalid())
    return StmtError();

  // Transform the body.
  StmtResult Body = getDerived().TransformStmt(S->getSynchBody());
  if (Body.isInvalid())
    return StmtError();

  // If nothing changed, just retain the current statement.
  if (!getDerived().AlwaysRebuild() &&
      Object.get() == S->getSynchExpr() &&
      Body.get() == S->getSynchBody())
    return S;

  // Build a new statement.
  return getDerived().RebuildObjCAtSynchronizedStmt(S->getAtSynchronizedLoc(),
                                                    Object.get(), Body.get());
}

template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformObjCAutoreleasePoolStmt(
                                              ObjCAutoreleasePoolStmt *S) {
  // Transform the body.
  StmtResult Body = getDerived().TransformStmt(S->getSubStmt());
  if (Body.isInvalid())
    return StmtError();

  // If nothing changed, just retain this statement.
  if (!getDerived().AlwaysRebuild() &&
      Body.get() == S->getSubStmt())
    return S;

  // Build a new statement.
  return getDerived().RebuildObjCAutoreleasePoolStmt(
                        S->getAtLoc(), Body.get());
}

template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformObjCForCollectionStmt(
                                                  ObjCForCollectionStmt *S) {
  // Transform the element statement.
  StmtResult Element = getDerived().TransformStmt(S->getElement());
  if (Element.isInvalid())
    return StmtError();

  // Transform the collection expression.
  ExprResult Collection = getDerived().TransformExpr(S->getCollection());
  if (Collection.isInvalid())
    return StmtError();

  // Transform the body.
  StmtResult Body = getDerived().TransformStmt(S->getBody());
  if (Body.isInvalid())
    return StmtError();

  // If nothing changed, just retain this statement.
  if (!getDerived().AlwaysRebuild() &&
      Element.get() == S->getElement() &&
      Collection.get() == S->getCollection() &&
      Body.get() == S->getBody())
    return S;

  // Build a new statement.
  return getDerived().RebuildObjCForCollectionStmt(S->getForLoc(),
                                                   Element.get(),
                                                   Collection.get(),
                                                   S->getRParenLoc(),
                                                   Body.get());
}

template <typename Derived>
StmtResult TreeTransform<Derived>::TransformCXXCatchStmt(CXXCatchStmt *S) {
  // Transform the exception declaration, if any.
  VarDecl *Var = nullptr;
  if (VarDecl *ExceptionDecl = S->getExceptionDecl()) {
    TypeSourceInfo *T =
        getDerived().TransformType(ExceptionDecl->getTypeSourceInfo());
    if (!T)
      return StmtError();

    Var = getDerived().RebuildExceptionDecl(
        ExceptionDecl, T, ExceptionDecl->getInnerLocStart(),
        ExceptionDecl->getLocation(), ExceptionDecl->getIdentifier());
    if (!Var || Var->isInvalidDecl())
      return StmtError();
  }

  // Transform the actual exception handler.
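  // For example (illustrative), in
  //   template <typename T> void f() { try { ... } catch (T &ex) { ... } }
  // the exception declaration was rebuilt above with T substituted, and the
  // handler body is transformed like any other statement below.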
StmtResult Handler = getDerived().TransformStmt(S->getHandlerBlock()); if (Handler.isInvalid()) return StmtError(); if (!getDerived().AlwaysRebuild() && !Var && Handler.get() == S->getHandlerBlock()) return S; return getDerived().RebuildCXXCatchStmt(S->getCatchLoc(), Var, Handler.get()); } template <typename Derived> StmtResult TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) { // Transform the try block itself. StmtResult TryBlock = getDerived().TransformCompoundStmt(S->getTryBlock()); if (TryBlock.isInvalid()) return StmtError(); // Transform the handlers. bool HandlerChanged = false; SmallVector<Stmt *, 8> Handlers; for (unsigned I = 0, N = S->getNumHandlers(); I != N; ++I) { StmtResult Handler = getDerived().TransformCXXCatchStmt(S->getHandler(I)); if (Handler.isInvalid()) return StmtError(); HandlerChanged = HandlerChanged || Handler.get() != S->getHandler(I); Handlers.push_back(Handler.getAs<Stmt>()); } if (!getDerived().AlwaysRebuild() && TryBlock.get() == S->getTryBlock() && !HandlerChanged) return S; return getDerived().RebuildCXXTryStmt(S->getTryLoc(), TryBlock.get(), Handlers); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) { StmtResult Range = getDerived().TransformStmt(S->getRangeStmt()); if (Range.isInvalid()) return StmtError(); StmtResult BeginEnd = getDerived().TransformStmt(S->getBeginEndStmt()); if (BeginEnd.isInvalid()) return StmtError(); ExprResult Cond = getDerived().TransformExpr(S->getCond()); if (Cond.isInvalid()) return StmtError(); if (Cond.get()) Cond = SemaRef.CheckBooleanCondition(Cond.get(), S->getColonLoc()); if (Cond.isInvalid()) return StmtError(); if (Cond.get()) Cond = SemaRef.MaybeCreateExprWithCleanups(Cond.get()); ExprResult Inc = getDerived().TransformExpr(S->getInc()); if (Inc.isInvalid()) return StmtError(); if (Inc.get()) Inc = SemaRef.MaybeCreateExprWithCleanups(Inc.get()); StmtResult LoopVar = getDerived().TransformStmt(S->getLoopVarStmt()); if (LoopVar.isInvalid()) return StmtError(); StmtResult NewStmt = S; if (getDerived().AlwaysRebuild() || Range.get() != S->getRangeStmt() || BeginEnd.get() != S->getBeginEndStmt() || Cond.get() != S->getCond() || Inc.get() != S->getInc() || LoopVar.get() != S->getLoopVarStmt()) { NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(), S->getColonLoc(), Range.get(), BeginEnd.get(), Cond.get(), Inc.get(), LoopVar.get(), S->getRParenLoc()); if (NewStmt.isInvalid()) return StmtError(); } StmtResult Body = getDerived().TransformStmt(S->getBody()); if (Body.isInvalid()) return StmtError(); // Body has changed but we didn't rebuild the for-range statement. Rebuild // it now so we have a new statement to attach the body to. if (Body.get() != S->getBody() && NewStmt.get() == S) { NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(), S->getColonLoc(), Range.get(), BeginEnd.get(), Cond.get(), Inc.get(), LoopVar.get(), S->getRParenLoc()); if (NewStmt.isInvalid()) return StmtError(); } if (NewStmt.get() == S) return S; return FinishCXXForRangeStmt(NewStmt.get(), Body.get()); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformMSDependentExistsStmt( MSDependentExistsStmt *S) { // Transform the nested-name-specifier, if any. NestedNameSpecifierLoc QualifierLoc; if (S->getQualifierLoc()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(S->getQualifierLoc()); if (!QualifierLoc) return StmtError(); } // Transform the declaration name. 
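  // For __if_exists / __if_not_exists: once the name can be resolved, the
  // whole construct collapses to either its substatement or a null
  // statement; only a still-dependent name is rebuilt as another
  // MSDependentExistsStmt below.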
DeclarationNameInfo NameInfo = S->getNameInfo(); if (NameInfo.getName()) { NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo); if (!NameInfo.getName()) return StmtError(); } // Check whether anything changed. if (!getDerived().AlwaysRebuild() && QualifierLoc == S->getQualifierLoc() && NameInfo.getName() == S->getNameInfo().getName()) return S; // Determine whether this name exists, if we can. CXXScopeSpec SS; SS.Adopt(QualifierLoc); bool Dependent = false; switch (getSema().CheckMicrosoftIfExistsSymbol(/*S=*/nullptr, SS, NameInfo)) { case Sema::IER_Exists: if (S->isIfExists()) break; return new (getSema().Context) NullStmt(S->getKeywordLoc()); case Sema::IER_DoesNotExist: if (S->isIfNotExists()) break; return new (getSema().Context) NullStmt(S->getKeywordLoc()); case Sema::IER_Dependent: Dependent = true; break; case Sema::IER_Error: return StmtError(); } // We need to continue with the instantiation, so do so now. StmtResult SubStmt = getDerived().TransformCompoundStmt(S->getSubStmt()); if (SubStmt.isInvalid()) return StmtError(); // If we have resolved the name, just transform to the substatement. if (!Dependent) return SubStmt; // The name is still dependent, so build a dependent expression again. return getDerived().RebuildMSDependentExistsStmt(S->getKeywordLoc(), S->isIfExists(), QualifierLoc, NameInfo, SubStmt.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformMSPropertyRefExpr(MSPropertyRefExpr *E) { NestedNameSpecifierLoc QualifierLoc; if (E->getQualifierLoc()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc()); if (!QualifierLoc) return ExprError(); } MSPropertyDecl *PD = cast_or_null<MSPropertyDecl>( getDerived().TransformDecl(E->getMemberLoc(), E->getPropertyDecl())); if (!PD) return ExprError(); ExprResult Base = getDerived().TransformExpr(E->getBaseExpr()); if (Base.isInvalid()) return ExprError(); return new (SemaRef.getASTContext()) MSPropertyRefExpr(Base.get(), PD, E->isArrow(), SemaRef.getASTContext().PseudoObjectTy, VK_LValue, QualifierLoc, E->getMemberLoc()); } template <typename Derived> StmtResult TreeTransform<Derived>::TransformSEHTryStmt(SEHTryStmt *S) { StmtResult TryBlock = getDerived().TransformCompoundStmt(S->getTryBlock()); if (TryBlock.isInvalid()) return StmtError(); StmtResult Handler = getDerived().TransformSEHHandler(S->getHandler()); if (Handler.isInvalid()) return StmtError(); if (!getDerived().AlwaysRebuild() && TryBlock.get() == S->getTryBlock() && Handler.get() == S->getHandler()) return S; return getDerived().RebuildSEHTryStmt(S->getIsCXXTry(), S->getTryLoc(), TryBlock.get(), Handler.get()); } template <typename Derived> StmtResult TreeTransform<Derived>::TransformSEHFinallyStmt(SEHFinallyStmt *S) { StmtResult Block = getDerived().TransformCompoundStmt(S->getBlock()); if (Block.isInvalid()) return StmtError(); return getDerived().RebuildSEHFinallyStmt(S->getFinallyLoc(), Block.get()); } template <typename Derived> StmtResult TreeTransform<Derived>::TransformSEHExceptStmt(SEHExceptStmt *S) { ExprResult FilterExpr = getDerived().TransformExpr(S->getFilterExpr()); if (FilterExpr.isInvalid()) return StmtError(); StmtResult Block = getDerived().TransformCompoundStmt(S->getBlock()); if (Block.isInvalid()) return StmtError(); return getDerived().RebuildSEHExceptStmt(S->getExceptLoc(), FilterExpr.get(), Block.get()); } template <typename Derived> StmtResult TreeTransform<Derived>::TransformSEHHandler(Stmt *Handler) { if (isa<SEHFinallyStmt>(Handler)) return 
getDerived().TransformSEHFinallyStmt(cast<SEHFinallyStmt>(Handler)); else return getDerived().TransformSEHExceptStmt(cast<SEHExceptStmt>(Handler)); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformSEHLeaveStmt(SEHLeaveStmt *S) { return S; } //===----------------------------------------------------------------------===// // OpenMP directive transformation //===----------------------------------------------------------------------===// template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective( OMPExecutableDirective *D) { // Transform the clauses llvm::SmallVector<OMPClause *, 16> TClauses; ArrayRef<OMPClause *> Clauses = D->clauses(); TClauses.reserve(Clauses.size()); for (ArrayRef<OMPClause *>::iterator I = Clauses.begin(), E = Clauses.end(); I != E; ++I) { if (*I) { getDerived().getSema().StartOpenMPClause((*I)->getClauseKind()); OMPClause *Clause = getDerived().TransformOMPClause(*I); getDerived().getSema().EndOpenMPClause(); if (Clause) TClauses.push_back(Clause); } else { TClauses.push_back(nullptr); } } StmtResult AssociatedStmt; if (D->hasAssociatedStmt()) { if (!D->getAssociatedStmt()) { return StmtError(); } getDerived().getSema().ActOnOpenMPRegionStart(D->getDirectiveKind(), /*CurScope=*/nullptr); StmtResult Body; { Sema::CompoundScopeRAII CompoundScope(getSema()); Body = getDerived().TransformStmt( cast<CapturedStmt>(D->getAssociatedStmt())->getCapturedStmt()); } AssociatedStmt = getDerived().getSema().ActOnOpenMPRegionEnd(Body, TClauses); if (AssociatedStmt.isInvalid()) { return StmtError(); } } if (TClauses.size() != Clauses.size()) { return StmtError(); } // Transform directive name for 'omp critical' directive. DeclarationNameInfo DirName; if (D->getDirectiveKind() == OMPD_critical) { DirName = cast<OMPCriticalDirective>(D)->getDirectiveName(); DirName = getDerived().TransformDeclarationNameInfo(DirName); } OpenMPDirectiveKind CancelRegion = OMPD_unknown; if (D->getDirectiveKind() == OMPD_cancellation_point) { CancelRegion = cast<OMPCancellationPointDirective>(D)->getCancelRegion(); } else if (D->getDirectiveKind() == OMPD_cancel) { CancelRegion = cast<OMPCancelDirective>(D)->getCancelRegion(); } return getDerived().RebuildOMPExecutableDirective( D->getDirectiveKind(), DirName, CancelRegion, TClauses, AssociatedStmt.get(), D->getLocStart(), D->getLocEnd()); } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPParallelDirective(OMPParallelDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPSimdDirective(OMPSimdDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_simd, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPForDirective(OMPForDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_for, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult 
TreeTransform<Derived>::TransformOMPForSimdDirective(OMPForSimdDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_for_simd, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPSectionsDirective(OMPSectionsDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_sections, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPSectionDirective(OMPSectionDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_section, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPSingleDirective(OMPSingleDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_single, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPMasterDirective(OMPMasterDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_master, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPCriticalDirective(OMPCriticalDirective *D) { getDerived().getSema().StartOpenMPDSABlock( OMPD_critical, D->getDirectiveName(), nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPParallelForDirective( OMPParallelForDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPParallelForSimdDirective( OMPParallelForSimdDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_for_simd, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPParallelSectionsDirective( OMPParallelSectionsDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_sections, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPTaskDirective(OMPTaskDirective *D) { DeclarationNameInfo DirName; 
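  // All of these OpenMP directive transforms follow the same bracketing
  // pattern: StartOpenMPDSABlock, transform via
  // TransformOMPExecutableDirective, then EndOpenMPDSABlock on the result.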
getDerived().getSema().StartOpenMPDSABlock(OMPD_task, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPTaskyieldDirective( OMPTaskyieldDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_taskyield, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPBarrierDirective(OMPBarrierDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_barrier, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPTaskwaitDirective(OMPTaskwaitDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_taskwait, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPTaskgroupDirective( OMPTaskgroupDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_taskgroup, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPFlushDirective(OMPFlushDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_flush, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPOrderedDirective(OMPOrderedDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_ordered, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPAtomicDirective(OMPAtomicDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_atomic, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPTargetDirective(OMPTargetDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_target, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); getDerived().getSema().EndOpenMPDSABlock(Res.get()); return Res; } template <typename Derived> StmtResult TreeTransform<Derived>::TransformOMPTeamsDirective(OMPTeamsDirective *D) { DeclarationNameInfo DirName; getDerived().getSema().StartOpenMPDSABlock(OMPD_teams, DirName, nullptr, D->getLocStart()); StmtResult Res = getDerived().TransformOMPExecutableDirective(D); 
  getDerived().getSema().EndOpenMPDSABlock(Res.get());
  return Res;
}

template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPCancellationPointDirective(
    OMPCancellationPointDirective *D) {
  DeclarationNameInfo DirName;
  getDerived().getSema().StartOpenMPDSABlock(OMPD_cancellation_point, DirName,
                                             nullptr, D->getLocStart());
  StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
  getDerived().getSema().EndOpenMPDSABlock(Res.get());
  return Res;
}

template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPCancelDirective(OMPCancelDirective *D) {
  DeclarationNameInfo DirName;
  getDerived().getSema().StartOpenMPDSABlock(OMPD_cancel, DirName, nullptr,
                                             D->getLocStart());
  StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
  getDerived().getSema().EndOpenMPDSABlock(Res.get());
  return Res;
}

//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//
template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPIfClause(OMPIfClause *C) {
  ExprResult Cond = getDerived().TransformExpr(C->getCondition());
  if (Cond.isInvalid())
    return nullptr;
  return getDerived().RebuildOMPIfClause(Cond.get(), C->getLocStart(),
                                         C->getLParenLoc(), C->getLocEnd());
}

template <typename Derived>
OMPClause *TreeTransform<Derived>::TransformOMPFinalClause(OMPFinalClause *C) {
  ExprResult Cond = getDerived().TransformExpr(C->getCondition());
  if (Cond.isInvalid())
    return nullptr;
  return getDerived().RebuildOMPFinalClause(Cond.get(), C->getLocStart(),
                                            C->getLParenLoc(), C->getLocEnd());
}

template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPNumThreadsClause(OMPNumThreadsClause *C) {
  ExprResult NumThreads = getDerived().TransformExpr(C->getNumThreads());
  if (NumThreads.isInvalid())
    return nullptr;
  return getDerived().RebuildOMPNumThreadsClause(
      NumThreads.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
}

template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPSafelenClause(OMPSafelenClause *C) {
  ExprResult E = getDerived().TransformExpr(C->getSafelen());
  if (E.isInvalid())
    return nullptr;
  return getDerived().RebuildOMPSafelenClause(
      E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
}

template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPCollapseClause(OMPCollapseClause *C) {
  ExprResult E = getDerived().TransformExpr(C->getNumForLoops());
  if (E.isInvalid())
    return nullptr;
  return getDerived().RebuildOMPCollapseClause(
      E.get(), C->getLocStart(), C->getLParenLoc(), C->getLocEnd());
}

template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPDefaultClause(OMPDefaultClause *C) {
  return getDerived().RebuildOMPDefaultClause(
      C->getDefaultKind(), C->getDefaultKindKwLoc(), C->getLocStart(),
      C->getLParenLoc(), C->getLocEnd());
}

template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPProcBindClause(OMPProcBindClause *C) {
  return getDerived().RebuildOMPProcBindClause(
      C->getProcBindKind(), C->getProcBindKindKwLoc(), C->getLocStart(),
      C->getLParenLoc(), C->getLocEnd());
}

template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPScheduleClause(OMPScheduleClause *C) {
  ExprResult E = getDerived().TransformExpr(C->getChunkSize());
  if (E.isInvalid())
    return nullptr;
  return getDerived().RebuildOMPScheduleClause(
      C->getScheduleKind(), E.get(), C->getLocStart(), C->getLParenLoc(),
      C->getScheduleKindLoc(),
C->getCommaLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPOrderedClause(OMPOrderedClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPNowaitClause(OMPNowaitClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPUntiedClause(OMPUntiedClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPMergeableClause(OMPMergeableClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause *TreeTransform<Derived>::TransformOMPReadClause(OMPReadClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause *TreeTransform<Derived>::TransformOMPWriteClause(OMPWriteClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPUpdateClause(OMPUpdateClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPCaptureClause(OMPCaptureClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPSeqCstClause(OMPSeqCstClause *C) { // No need to rebuild this clause, no template-dependent parameters. return C; } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPPrivateClause(OMPPrivateClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPPrivateClause( Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } template <typename Derived> OMPClause *TreeTransform<Derived>::TransformOMPFirstprivateClause( OMPFirstprivateClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPFirstprivateClause( Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPLastprivateClause(OMPLastprivateClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPLastprivateClause( Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPSharedClause(OMPSharedClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPSharedClause(Vars, 
C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPReductionClause(OMPReductionClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } CXXScopeSpec ReductionIdScopeSpec; ReductionIdScopeSpec.Adopt(C->getQualifierLoc()); DeclarationNameInfo NameInfo = C->getNameInfo(); if (NameInfo.getName()) { NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo); if (!NameInfo.getName()) return nullptr; } return getDerived().RebuildOMPReductionClause( Vars, C->getLocStart(), C->getLParenLoc(), C->getColonLoc(), C->getLocEnd(), ReductionIdScopeSpec, NameInfo); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPLinearClause(OMPLinearClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } ExprResult Step = getDerived().TransformExpr(C->getStep()); if (Step.isInvalid()) return nullptr; return getDerived().RebuildOMPLinearClause(Vars, Step.get(), C->getLocStart(), C->getLParenLoc(), C->getColonLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPAlignedClause(OMPAlignedClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } ExprResult Alignment = getDerived().TransformExpr(C->getAlignment()); if (Alignment.isInvalid()) return nullptr; return getDerived().RebuildOMPAlignedClause( Vars, Alignment.get(), C->getLocStart(), C->getLParenLoc(), C->getColonLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPCopyinClause(OMPCopyinClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPCopyinClause(Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPCopyprivateClause(OMPCopyprivateClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPCopyprivateClause( Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } template <typename Derived> OMPClause *TreeTransform<Derived>::TransformOMPFlushClause(OMPFlushClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto *VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPFlushClause(Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } template <typename Derived> OMPClause * TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) { llvm::SmallVector<Expr *, 16> Vars; Vars.reserve(C->varlist_size()); for (auto 
*VE : C->varlists()) { ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE)); if (EVar.isInvalid()) return nullptr; Vars.push_back(EVar.get()); } return getDerived().RebuildOMPDependClause( C->getDependencyKind(), C->getDependencyLoc(), C->getColonLoc(), Vars, C->getLocStart(), C->getLParenLoc(), C->getLocEnd()); } //===----------------------------------------------------------------------===// // Expression transformation //===----------------------------------------------------------------------===// template<typename Derived> ExprResult TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) { if (!E->isTypeDependent()) return E; return getDerived().RebuildPredefinedExpr(E->getLocation(), E->getIdentType()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) { NestedNameSpecifierLoc QualifierLoc; if (E->getQualifierLoc()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc()); if (!QualifierLoc) return ExprError(); } ValueDecl *ND = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getLocation(), E->getDecl())); if (!ND) return ExprError(); DeclarationNameInfo NameInfo = E->getNameInfo(); if (NameInfo.getName()) { NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo); if (!NameInfo.getName()) return ExprError(); } if (!getDerived().AlwaysRebuild() && QualifierLoc == E->getQualifierLoc() && ND == E->getDecl() && NameInfo.getName() == E->getDecl()->getDeclName() && !E->hasExplicitTemplateArgs()) { // Mark it referenced in the new context regardless. // FIXME: this is a bit instantiation-specific. SemaRef.MarkDeclRefReferenced(E); return E; } TemplateArgumentListInfo TransArgs, *TemplateArgs = nullptr; if (E->hasExplicitTemplateArgs()) { TemplateArgs = &TransArgs; TransArgs.setLAngleLoc(E->getLAngleLoc()); TransArgs.setRAngleLoc(E->getRAngleLoc()); if (getDerived().TransformTemplateArguments(E->getTemplateArgs(), E->getNumTemplateArgs(), TransArgs)) return ExprError(); } return getDerived().RebuildDeclRefExpr(QualifierLoc, ND, NameInfo, TemplateArgs); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformIntegerLiteral(IntegerLiteral *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformFloatingLiteral(FloatingLiteral *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformImaginaryLiteral(ImaginaryLiteral *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformStringLiteral(StringLiteral *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCharacterLiteral(CharacterLiteral *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) { if (FunctionDecl *FD = E->getDirectCallee()) SemaRef.MarkFunctionReferenced(E->getLocStart(), FD); return SemaRef.MaybeBindToTemporary(E); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) { ExprResult ControllingExpr = getDerived().TransformExpr(E->getControllingExpr()); if (ControllingExpr.isInvalid()) return ExprError(); SmallVector<Expr *, 4> AssocExprs; SmallVector<TypeSourceInfo *, 4> AssocTypes; for (unsigned i = 0; i != E->getNumAssocs(); ++i) { TypeSourceInfo *TS = E->getAssocTypeSourceInfo(i); if (TS) { TypeSourceInfo *AssocType = getDerived().TransformType(TS); if (!AssocType) return ExprError(); AssocTypes.push_back(AssocType); } else { 
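      // A null TypeSourceInfo denotes the 'default' association of the
      // _Generic selection; preserve it as a null entry.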
AssocTypes.push_back(nullptr); } ExprResult AssocExpr = getDerived().TransformExpr(E->getAssocExpr(i)); if (AssocExpr.isInvalid()) return ExprError(); AssocExprs.push_back(AssocExpr.get()); } return getDerived().RebuildGenericSelectionExpr(E->getGenericLoc(), E->getDefaultLoc(), E->getRParenLoc(), ControllingExpr.get(), AssocTypes, AssocExprs); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformParenExpr(ParenExpr *E) { ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildParenExpr(SubExpr.get(), E->getLParen(), E->getRParen()); } /// \brief The operand of a unary address-of operator has special rules: it's /// allowed to refer to a non-static member of a class even if there's no 'this' /// object available. template<typename Derived> ExprResult TreeTransform<Derived>::TransformAddressOfOperand(Expr *E) { if (DependentScopeDeclRefExpr *DRE = dyn_cast<DependentScopeDeclRefExpr>(E)) return getDerived().TransformDependentScopeDeclRefExpr(DRE, true, nullptr); else return getDerived().TransformExpr(E); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformUnaryOperator(UnaryOperator *E) { ExprResult SubExpr; if (E->getOpcode() == UO_AddrOf) SubExpr = TransformAddressOfOperand(E->getSubExpr()); else SubExpr = TransformExpr(E->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildUnaryOperator(E->getOperatorLoc(), E->getOpcode(), SubExpr.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) { // Transform the type. TypeSourceInfo *Type = getDerived().TransformType(E->getTypeSourceInfo()); if (!Type) return ExprError(); // Transform all of the components into components similar to what the // parser uses. // FIXME: It would be slightly more efficient in the non-dependent case to // just map FieldDecls, rather than requiring the rebuilder to look for // the fields again. However, __builtin_offsetof is rare enough in // template code that we don't care. bool ExprChanged = false; typedef Sema::OffsetOfComponent Component; typedef OffsetOfExpr::OffsetOfNode Node; SmallVector<Component, 4> Components; for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) { const Node &ON = E->getComponent(I); Component Comp; Comp.isBrackets = true; Comp.LocStart = ON.getSourceRange().getBegin(); Comp.LocEnd = ON.getSourceRange().getEnd(); switch (ON.getKind()) { case Node::Array: { Expr *FromIndex = E->getIndexExpr(ON.getArrayExprIndex()); ExprResult Index = getDerived().TransformExpr(FromIndex); if (Index.isInvalid()) return ExprError(); ExprChanged = ExprChanged || Index.get() != FromIndex; Comp.isBrackets = true; Comp.U.E = Index.get(); break; } case Node::Field: case Node::Identifier: Comp.isBrackets = false; Comp.U.IdentInfo = ON.getFieldName(); if (!Comp.U.IdentInfo) continue; break; case Node::Base: // Will be recomputed during the rebuild. continue; } Components.push_back(Comp); } // If nothing changed, retain the existing expression. if (!getDerived().AlwaysRebuild() && Type == E->getTypeSourceInfo() && !ExprChanged) return E; // Build a new offsetof expression. 
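  // e.g. __builtin_offsetof(T, a[i].b) is rebuilt from the components
  // 'a' (field), '[i]' (array, using the transformed index), 'b' (field).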
return getDerived().RebuildOffsetOfExpr(E->getOperatorLoc(), Type, Components.data(), Components.size(), E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformOpaqueValueExpr(OpaqueValueExpr *E) { assert(getDerived().AlreadyTransformed(E->getType()) && "opaque value expression requires transformation"); return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformTypoExpr(TypoExpr *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) { // Rebuild the syntactic form. The original syntactic form has // opaque-value expressions in it, so strip those away and rebuild // the result. This is a really awful way of doing this, but the // better solution (rebuilding the semantic expressions and // rebinding OVEs as necessary) doesn't work; we'd need // TreeTransform to not strip away implicit conversions. Expr *newSyntacticForm = SemaRef.recreateSyntacticForm(E); ExprResult result = getDerived().TransformExpr(newSyntacticForm); if (result.isInvalid()) return ExprError(); // If that gives us a pseudo-object result back, the pseudo-object // expression must have been an lvalue-to-rvalue conversion which we // should reapply. if (result.get()->hasPlaceholderType(BuiltinType::PseudoObject)) result = SemaRef.checkPseudoObjectRValue(result.get()); return result; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformUnaryExprOrTypeTraitExpr( UnaryExprOrTypeTraitExpr *E) { if (E->isArgumentType()) { TypeSourceInfo *OldT = E->getArgumentTypeInfo(); TypeSourceInfo *NewT = getDerived().TransformType(OldT); if (!NewT) return ExprError(); if (!getDerived().AlwaysRebuild() && OldT == NewT) return E; return getDerived().RebuildUnaryExprOrTypeTrait(NewT, E->getOperatorLoc(), E->getKind(), E->getSourceRange()); } // C++0x [expr.sizeof]p1: // The operand is either an expression, which is an unevaluated operand // [...] EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, Sema::ReuseLambdaContextDecl); // Try to recover if we have something like sizeof(T::X) where X is a type. // Notably, there must be *exactly* one set of parens if X is a type. TypeSourceInfo *RecoveryTSI = nullptr; ExprResult SubExpr; auto *PE = dyn_cast<ParenExpr>(E->getArgumentExpr()); if (auto *DRE = PE ? 
dyn_cast<DependentScopeDeclRefExpr>(PE->getSubExpr()) : nullptr) SubExpr = getDerived().TransformParenDependentScopeDeclRefExpr( PE, DRE, false, &RecoveryTSI); else SubExpr = getDerived().TransformExpr(E->getArgumentExpr()); if (RecoveryTSI) { return getDerived().RebuildUnaryExprOrTypeTrait( RecoveryTSI, E->getOperatorLoc(), E->getKind(), E->getSourceRange()); } else if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getArgumentExpr()) return E; return getDerived().RebuildUnaryExprOrTypeTrait(SubExpr.get(), E->getOperatorLoc(), E->getKind(), E->getSourceRange()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) { ExprResult LHS = getDerived().TransformExpr(E->getLHS()); if (LHS.isInvalid()) return ExprError(); ExprResult RHS = getDerived().TransformExpr(E->getRHS()); if (RHS.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && LHS.get() == E->getLHS() && RHS.get() == E->getRHS()) return E; return getDerived().RebuildArraySubscriptExpr(LHS.get(), /*FIXME:*/E->getLHS()->getLocStart(), RHS.get(), E->getRBracketLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCallExpr(CallExpr *E) { // Transform the callee. ExprResult Callee = getDerived().TransformExpr(E->getCallee()); if (Callee.isInvalid()) return ExprError(); // Transform arguments. bool ArgChanged = false; SmallVector<Expr*, 8> Args; if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args, &ArgChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && Callee.get() == E->getCallee() && !ArgChanged) return SemaRef.MaybeBindToTemporary(E); // FIXME: Wrong source location information for the '('. SourceLocation FakeLParenLoc = ((Expr *)Callee.get())->getSourceRange().getBegin(); return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc, Args, E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) { ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); NestedNameSpecifierLoc QualifierLoc; if (E->hasQualifier()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc()); if (!QualifierLoc) return ExprError(); } SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc(); ValueDecl *Member = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getMemberLoc(), E->getMemberDecl())); if (!Member) return ExprError(); NamedDecl *FoundDecl = E->getFoundDecl(); if (FoundDecl == E->getMemberDecl()) { FoundDecl = Member; } else { FoundDecl = cast_or_null<NamedDecl>( getDerived().TransformDecl(E->getMemberLoc(), FoundDecl)); if (!FoundDecl) return ExprError(); } if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase() && QualifierLoc == E->getQualifierLoc() && Member == E->getMemberDecl() && FoundDecl == E->getFoundDecl() && !E->hasExplicitTemplateArgs()) { // Mark it referenced in the new context regardless. // FIXME: this is a bit instantiation-specific. 
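    // (This mirrors the MarkDeclRefReferenced call in the no-rebuild path of
    // TransformDeclRefExpr above.)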
SemaRef.MarkMemberReferenced(E); return E; } TemplateArgumentListInfo TransArgs; if (E->hasExplicitTemplateArgs()) { TransArgs.setLAngleLoc(E->getLAngleLoc()); TransArgs.setRAngleLoc(E->getRAngleLoc()); if (getDerived().TransformTemplateArguments(E->getTemplateArgs(), E->getNumTemplateArgs(), TransArgs)) return ExprError(); } // FIXME: Bogus source location for the operator SourceLocation FakeOperatorLoc = SemaRef.getLocForEndOfToken(E->getBase()->getSourceRange().getEnd()); // FIXME: to do this check properly, we will need to preserve the // first-qualifier-in-scope here, just in case we had a dependent // base (and therefore couldn't do the check) and a // nested-name-qualifier (and therefore could do the lookup). NamedDecl *FirstQualifierInScope = nullptr; return getDerived().RebuildMemberExpr(Base.get(), FakeOperatorLoc, E->isArrow(), QualifierLoc, TemplateKWLoc, E->getMemberNameInfo(), Member, FoundDecl, (E->hasExplicitTemplateArgs() ? &TransArgs : nullptr), FirstQualifierInScope); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) { ExprResult LHS = getDerived().TransformExpr(E->getLHS()); if (LHS.isInvalid()) return ExprError(); ExprResult RHS = getDerived().TransformExpr(E->getRHS()); if (RHS.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && LHS.get() == E->getLHS() && RHS.get() == E->getRHS()) return E; Sema::FPContractStateRAII FPContractState(getSema()); getSema().FPFeatures.fp_contract = E->isFPContractable(); return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(), LHS.get(), RHS.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCompoundAssignOperator( CompoundAssignOperator *E) { return getDerived().TransformBinaryOperator(E); } template<typename Derived> ExprResult TreeTransform<Derived>:: TransformBinaryConditionalOperator(BinaryConditionalOperator *e) { // Just rebuild the common and RHS expressions and see whether we // get any changes. ExprResult commonExpr = getDerived().TransformExpr(e->getCommon()); if (commonExpr.isInvalid()) return ExprError(); ExprResult rhs = getDerived().TransformExpr(e->getFalseExpr()); if (rhs.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && commonExpr.get() == e->getCommon() && rhs.get() == e->getFalseExpr()) return e; return getDerived().RebuildConditionalOperator(commonExpr.get(), e->getQuestionLoc(), nullptr, e->getColonLoc(), rhs.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformConditionalOperator(ConditionalOperator *E) { ExprResult Cond = getDerived().TransformExpr(E->getCond()); if (Cond.isInvalid()) return ExprError(); ExprResult LHS = getDerived().TransformExpr(E->getLHS()); if (LHS.isInvalid()) return ExprError(); ExprResult RHS = getDerived().TransformExpr(E->getRHS()); if (RHS.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Cond.get() == E->getCond() && LHS.get() == E->getLHS() && RHS.get() == E->getRHS()) return E; return getDerived().RebuildConditionalOperator(Cond.get(), E->getQuestionLoc(), LHS.get(), E->getColonLoc(), RHS.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformImplicitCastExpr(ImplicitCastExpr *E) { // Implicit casts are eliminated during transformation, since they // will be recomputed by semantic analysis after transformation. 
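  // Transforming getSubExprAsWritten() drops the cast node itself, so the
  // rebuilt context can reapply whatever conversions it actually needs.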
return getDerived().TransformExpr(E->getSubExprAsWritten()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCStyleCastExpr(CStyleCastExpr *E) { TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten()); if (!Type) return ExprError(); ExprResult SubExpr = getDerived().TransformExpr(E->getSubExprAsWritten()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Type == E->getTypeInfoAsWritten() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildCStyleCastExpr(E->getLParenLoc(), Type, E->getRParenLoc(), SubExpr.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCompoundLiteralExpr(CompoundLiteralExpr *E) { TypeSourceInfo *OldT = E->getTypeSourceInfo(); TypeSourceInfo *NewT = getDerived().TransformType(OldT); if (!NewT) return ExprError(); ExprResult Init = getDerived().TransformExpr(E->getInitializer()); if (Init.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && OldT == NewT && Init.get() == E->getInitializer()) return SemaRef.MaybeBindToTemporary(E); // Note: the expression type doesn't necessarily match the // type-as-written, but that's okay, because it should always be // derivable from the initializer. return getDerived().RebuildCompoundLiteralExpr(E->getLParenLoc(), NewT, /*FIXME:*/E->getInitializer()->getLocEnd(), Init.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformExtVectorElementExpr(ExtVectorElementExpr *E) { ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase()) return E; // FIXME: Bad source location SourceLocation FakeOperatorLoc = SemaRef.getLocForEndOfToken(E->getBase()->getLocEnd()); return getDerived().RebuildExtVectorElementExpr(Base.get(), FakeOperatorLoc, E->getAccessorLoc(), E->getAccessor()); } // HLSL Change Starts template<typename Derived> ExprResult TreeTransform<Derived>::TransformExtMatrixElementExpr(ExtMatrixElementExpr *E) { ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase()) return E; // FIXME: Bad source location SourceLocation FakeOperatorLoc = SemaRef.getLocForEndOfToken(E->getBase()->getLocEnd()); return getDerived().RebuildExtMatrixElementExpr(Base.get(), FakeOperatorLoc, E->getAccessorLoc(), E->getAccessor()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformHLSLVectorElementExpr(HLSLVectorElementExpr *E) { ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase()) return E; // FIXME: Bad source location SourceLocation FakeOperatorLoc = SemaRef.getLocForEndOfToken(E->getBase()->getLocEnd()); return getDerived().RebuildHLSLVectorElementExpr(Base.get(), FakeOperatorLoc, E->getAccessorLoc(), E->getAccessor()); } // HLSL Change Ends template<typename Derived> ExprResult TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) { if (InitListExpr *Syntactic = E->getSyntacticForm()) E = Syntactic; bool InitChanged = false; SmallVector<Expr*, 4> Inits; if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false, Inits, &InitChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && !InitChanged) { // FIXME: Attempt to reuse the existing syntactic form of the InitListExpr // in some cases. 
    // We can't reuse it in general, because the syntactic and
    // semantic forms are linked, and we can't know that semantic form will
    // match even if the syntactic form does.
  }

  return getDerived().RebuildInitList(E->getLBraceLoc(), Inits,
                                      E->getRBraceLoc(), E->getType());
}

template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
  Designation Desig;

  // transform the initializer value
  ExprResult Init = getDerived().TransformExpr(E->getInit());
  if (Init.isInvalid())
    return ExprError();

  // transform the designators.
  SmallVector<Expr*, 4> ArrayExprs;
  bool ExprChanged = false;
  for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
                                                DEnd = E->designators_end();
       D != DEnd; ++D) {
    if (D->isFieldDesignator()) {
      Desig.AddDesignator(Designator::getField(D->getFieldName(),
                                               D->getDotLoc(),
                                               D->getFieldLoc()));
      continue;
    }

    if (D->isArrayDesignator()) {
      ExprResult Index = getDerived().TransformExpr(E->getArrayIndex(*D));
      if (Index.isInvalid())
        return ExprError();

      Desig.AddDesignator(Designator::getArray(Index.get(),
                                               D->getLBracketLoc()));

      ExprChanged = ExprChanged || Index.get() != E->getArrayIndex(*D);
      ArrayExprs.push_back(Index.get());
      continue;
    }

    assert(D->isArrayRangeDesignator() && "New kind of designator?");
    ExprResult Start = getDerived().TransformExpr(E->getArrayRangeStart(*D));
    if (Start.isInvalid())
      return ExprError();

    ExprResult End = getDerived().TransformExpr(E->getArrayRangeEnd(*D));
    if (End.isInvalid())
      return ExprError();

    Desig.AddDesignator(Designator::getArrayRange(Start.get(),
                                                  End.get(),
                                                  D->getLBracketLoc(),
                                                  D->getEllipsisLoc()));

    ExprChanged = ExprChanged || Start.get() != E->getArrayRangeStart(*D) ||
                  End.get() != E->getArrayRangeEnd(*D);

    ArrayExprs.push_back(Start.get());
    ArrayExprs.push_back(End.get());
  }

  if (!getDerived().AlwaysRebuild() &&
      Init.get() == E->getInit() &&
      !ExprChanged)
    return E;

  return getDerived().RebuildDesignatedInitExpr(Desig, ArrayExprs,
                                                E->getEqualOrColonLoc(),
                                                E->usesGNUSyntax(), Init.get());
}

// If TransformInitListExpr() only operates on the syntactic form of an
// InitListExpr, then a DesignatedInitUpdateExpr should never be encountered
// here.
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformDesignatedInitUpdateExpr(
    DesignatedInitUpdateExpr *E) {
  llvm_unreachable("Unexpected DesignatedInitUpdateExpr in syntactic form of "
                   "initializer");
  return ExprError();
}

template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformNoInitExpr(NoInitExpr *E) {
  llvm_unreachable("Unexpected NoInitExpr in syntactic form of initializer");
  return ExprError();
}

template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformImplicitValueInitExpr(
    ImplicitValueInitExpr *E) {
  TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());

  // FIXME: Will we ever have proper type location here? Will we actually
  // need to transform the type?
QualType T = getDerived().TransformType(E->getType()); if (T.isNull()) return ExprError(); if (!getDerived().AlwaysRebuild() && T == E->getType()) return E; return getDerived().RebuildImplicitValueInitExpr(T); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformVAArgExpr(VAArgExpr *E) { TypeSourceInfo *TInfo = getDerived().TransformType(E->getWrittenTypeInfo()); if (!TInfo) return ExprError(); ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && TInfo == E->getWrittenTypeInfo() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildVAArgExpr(E->getBuiltinLoc(), SubExpr.get(), TInfo, E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) { bool ArgumentChanged = false; SmallVector<Expr*, 4> Inits; if (TransformExprs(E->getExprs(), E->getNumExprs(), true, Inits, &ArgumentChanged)) return ExprError(); return getDerived().RebuildParenListExpr(E->getLParenLoc(), Inits, E->getRParenLoc()); } /// \brief Transform an address-of-label expression. /// /// By default, the transformation of an address-of-label expression always /// rebuilds the expression, so that the label identifier can be resolved to /// the corresponding label statement by semantic analysis. template<typename Derived> ExprResult TreeTransform<Derived>::TransformAddrLabelExpr(AddrLabelExpr *E) { Decl *LD = getDerived().TransformDecl(E->getLabel()->getLocation(), E->getLabel()); if (!LD) return ExprError(); return getDerived().RebuildAddrLabelExpr(E->getAmpAmpLoc(), E->getLabelLoc(), cast<LabelDecl>(LD)); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) { SemaRef.ActOnStartStmtExpr(); StmtResult SubStmt = getDerived().TransformCompoundStmt(E->getSubStmt(), true); if (SubStmt.isInvalid()) { SemaRef.ActOnStmtExprError(); return ExprError(); } if (!getDerived().AlwaysRebuild() && SubStmt.get() == E->getSubStmt()) { // Calling this an 'error' is unintuitive, but it does the right thing. SemaRef.ActOnStmtExprError(); return SemaRef.MaybeBindToTemporary(E); } return getDerived().RebuildStmtExpr(E->getLParenLoc(), SubStmt.get(), E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformChooseExpr(ChooseExpr *E) { ExprResult Cond = getDerived().TransformExpr(E->getCond()); if (Cond.isInvalid()) return ExprError(); ExprResult LHS = getDerived().TransformExpr(E->getLHS()); if (LHS.isInvalid()) return ExprError(); ExprResult RHS = getDerived().TransformExpr(E->getRHS()); if (RHS.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Cond.get() == E->getCond() && LHS.get() == E->getLHS() && RHS.get() == E->getRHS()) return E; return getDerived().RebuildChooseExpr(E->getBuiltinLoc(), Cond.get(), LHS.get(), RHS.get(), E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformGNUNullExpr(GNUNullExpr *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) { switch (E->getOperator()) { case OO_New: case OO_Delete: case OO_Array_New: case OO_Array_Delete: llvm_unreachable("new and delete operators cannot use CXXOperatorCallExpr"); case OO_Call: { // This is a call to an object's operator(). assert(E->getNumArgs() >= 1 && "Object call is missing arguments"); // Transform the object itself. 
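    // (Arg(0) is the callee object itself; the remaining arguments are the
    // actual call arguments.)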
    ExprResult Object = getDerived().TransformExpr(E->getArg(0));
    if (Object.isInvalid())
      return ExprError();

    // FIXME: Poor location information
    SourceLocation FakeLParenLoc = SemaRef.getLocForEndOfToken(
        static_cast<Expr *>(Object.get())->getLocEnd());

    // Transform the call arguments.
    SmallVector<Expr*, 8> Args;
    if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true,
                                    Args))
      return ExprError();

    return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc, Args,
                                        E->getLocEnd());
  }

#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
  case OO_##Name:
#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
#include "clang/Basic/OperatorKinds.def"
  case OO_Subscript:
    // Handled below.
    break;

  case OO_Conditional:
    llvm_unreachable("conditional operator is not actually overloadable");

  case OO_None:
  case NUM_OVERLOADED_OPERATORS:
    llvm_unreachable("not an overloaded operator?");
  }

  ExprResult Callee = getDerived().TransformExpr(E->getCallee());
  if (Callee.isInvalid())
    return ExprError();

  ExprResult First;
  if (E->getOperator() == OO_Amp)
    First = getDerived().TransformAddressOfOperand(E->getArg(0));
  else
    First = getDerived().TransformExpr(E->getArg(0));
  if (First.isInvalid())
    return ExprError();

  ExprResult Second;
  if (E->getNumArgs() == 2) {
    Second = getDerived().TransformExpr(E->getArg(1));
    if (Second.isInvalid())
      return ExprError();
  }

  if (!getDerived().AlwaysRebuild() &&
      Callee.get() == E->getCallee() &&
      First.get() == E->getArg(0) &&
      (E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
    return SemaRef.MaybeBindToTemporary(E);

  Sema::FPContractStateRAII FPContractState(getSema());
  getSema().FPFeatures.fp_contract = E->isFPContractable();

  return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
                                                 E->getOperatorLoc(),
                                                 Callee.get(),
                                                 First.get(),
                                                 Second.get());
}

template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXMemberCallExpr(CXXMemberCallExpr *E) {
  return getDerived().TransformCallExpr(E);
}

template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
  // Transform the callee.
  ExprResult Callee = getDerived().TransformExpr(E->getCallee());
  if (Callee.isInvalid())
    return ExprError();

  // Transform exec config.
  ExprResult EC = getDerived().TransformCallExpr(E->getConfig());
  if (EC.isInvalid())
    return ExprError();

  // Transform arguments.
  bool ArgChanged = false;
  SmallVector<Expr*, 8> Args;
  if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
                                  &ArgChanged))
    return ExprError();

  if (!getDerived().AlwaysRebuild() &&
      Callee.get() == E->getCallee() &&
      !ArgChanged)
    return SemaRef.MaybeBindToTemporary(E);

  // FIXME: Wrong source location information for the '('.
SourceLocation FakeLParenLoc = ((Expr *)Callee.get())->getSourceRange().getBegin(); return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc, Args, E->getRParenLoc(), EC.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) { TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten()); if (!Type) return ExprError(); ExprResult SubExpr = getDerived().TransformExpr(E->getSubExprAsWritten()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Type == E->getTypeInfoAsWritten() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildCXXNamedCastExpr( E->getOperatorLoc(), E->getStmtClass(), E->getAngleBrackets().getBegin(), Type, E->getAngleBrackets().getEnd(), // FIXME. this should be '(' location E->getAngleBrackets().getEnd(), SubExpr.get(), E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXStaticCastExpr(CXXStaticCastExpr *E) { return getDerived().TransformCXXNamedCastExpr(E); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXDynamicCastExpr(CXXDynamicCastExpr *E) { return getDerived().TransformCXXNamedCastExpr(E); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXReinterpretCastExpr( CXXReinterpretCastExpr *E) { return getDerived().TransformCXXNamedCastExpr(E); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXConstCastExpr(CXXConstCastExpr *E) { return getDerived().TransformCXXNamedCastExpr(E); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXFunctionalCastExpr( CXXFunctionalCastExpr *E) { TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten()); if (!Type) return ExprError(); ExprResult SubExpr = getDerived().TransformExpr(E->getSubExprAsWritten()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Type == E->getTypeInfoAsWritten() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildCXXFunctionalCastExpr(Type, E->getLParenLoc(), SubExpr.get(), E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) { if (E->isTypeOperand()) { TypeSourceInfo *TInfo = getDerived().TransformType(E->getTypeOperandSourceInfo()); if (!TInfo) return ExprError(); if (!getDerived().AlwaysRebuild() && TInfo == E->getTypeOperandSourceInfo()) return E; return getDerived().RebuildCXXTypeidExpr(E->getType(), E->getLocStart(), TInfo, E->getLocEnd()); } // We don't know whether the subexpression is potentially evaluated until // after we perform semantic analysis. We speculatively assume it is // unevaluated; it will get fixed later if the subexpression is in fact // potentially evaluated. 
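  // (typeid(*p), where *p is a glvalue of polymorphic class type, is the one
  // case where the operand really is potentially evaluated.)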
EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, Sema::ReuseLambdaContextDecl); ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getExprOperand()) return E; return getDerived().RebuildCXXTypeidExpr(E->getType(), E->getLocStart(), SubExpr.get(), E->getLocEnd()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXUuidofExpr(CXXUuidofExpr *E) { if (E->isTypeOperand()) { TypeSourceInfo *TInfo = getDerived().TransformType(E->getTypeOperandSourceInfo()); if (!TInfo) return ExprError(); if (!getDerived().AlwaysRebuild() && TInfo == E->getTypeOperandSourceInfo()) return E; return getDerived().RebuildCXXUuidofExpr(E->getType(), E->getLocStart(), TInfo, E->getLocEnd()); } EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getExprOperand()) return E; return getDerived().RebuildCXXUuidofExpr(E->getType(), E->getLocStart(), SubExpr.get(), E->getLocEnd()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXNullPtrLiteralExpr( CXXNullPtrLiteralExpr *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) { QualType T = getSema().getCurrentThisType(); if (!getDerived().AlwaysRebuild() && T == E->getType()) { // Make sure that we capture 'this'. getSema().CheckCXXThisCapture(E->getLocStart()); return E; } return getDerived().RebuildCXXThisExpr(E->getLocStart(), T, E->isImplicit()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXThrowExpr(CXXThrowExpr *E) { ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildCXXThrowExpr(E->getThrowLoc(), SubExpr.get(), E->isThrownVariableInScope()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) { ParmVarDecl *Param = cast_or_null<ParmVarDecl>(getDerived().TransformDecl(E->getLocStart(), E->getParam())); if (!Param) return ExprError(); if (!getDerived().AlwaysRebuild() && Param == E->getParam()) return E; return getDerived().RebuildCXXDefaultArgExpr(E->getUsedLocation(), Param); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXDefaultInitExpr(CXXDefaultInitExpr *E) { FieldDecl *Field = cast_or_null<FieldDecl>(getDerived().TransformDecl(E->getLocStart(), E->getField())); if (!Field) return ExprError(); if (!getDerived().AlwaysRebuild() && Field == E->getField()) return E; return getDerived().RebuildCXXDefaultInitExpr(E->getExprLoc(), Field); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXScalarValueInitExpr( CXXScalarValueInitExpr *E) { TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo()); if (!T) return ExprError(); if (!getDerived().AlwaysRebuild() && T == E->getTypeSourceInfo()) return E; return getDerived().RebuildCXXScalarValueInitExpr(T, /*FIXME:*/T->getTypeLoc().getEndLoc(), E->getRParenLoc()); } template<typename Derived> ExprResult 
TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) { // Transform the type that we're allocating TypeSourceInfo *AllocTypeInfo = getDerived().TransformType(E->getAllocatedTypeSourceInfo()); if (!AllocTypeInfo) return ExprError(); // Transform the size of the array we're allocating (if any). ExprResult ArraySize = getDerived().TransformExpr(E->getArraySize()); if (ArraySize.isInvalid()) return ExprError(); // Transform the placement arguments (if any). bool ArgumentChanged = false; SmallVector<Expr*, 8> PlacementArgs; if (getDerived().TransformExprs(E->getPlacementArgs(), E->getNumPlacementArgs(), true, PlacementArgs, &ArgumentChanged)) return ExprError(); // Transform the initializer (if any). Expr *OldInit = E->getInitializer(); ExprResult NewInit; if (OldInit) NewInit = getDerived().TransformInitializer(OldInit, true); if (NewInit.isInvalid()) return ExprError(); // Transform new operator and delete operator. FunctionDecl *OperatorNew = nullptr; if (E->getOperatorNew()) { OperatorNew = cast_or_null<FunctionDecl>( getDerived().TransformDecl(E->getLocStart(), E->getOperatorNew())); if (!OperatorNew) return ExprError(); } FunctionDecl *OperatorDelete = nullptr; if (E->getOperatorDelete()) { OperatorDelete = cast_or_null<FunctionDecl>( getDerived().TransformDecl(E->getLocStart(), E->getOperatorDelete())); if (!OperatorDelete) return ExprError(); } if (!getDerived().AlwaysRebuild() && AllocTypeInfo == E->getAllocatedTypeSourceInfo() && ArraySize.get() == E->getArraySize() && NewInit.get() == OldInit && OperatorNew == E->getOperatorNew() && OperatorDelete == E->getOperatorDelete() && !ArgumentChanged) { // Mark any declarations we need as referenced. // FIXME: instantiation-specific. if (OperatorNew) SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorNew); if (OperatorDelete) SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete); if (E->isArray() && !E->getAllocatedType()->isDependentType()) { QualType ElementType = SemaRef.Context.getBaseElementType(E->getAllocatedType()); if (const RecordType *RecordT = ElementType->getAs<RecordType>()) { CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getDecl()); if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record)) { SemaRef.MarkFunctionReferenced(E->getLocStart(), Destructor); } } } return E; } QualType AllocType = AllocTypeInfo->getType(); if (!ArraySize.get()) { // If no array size was specified, but the new expression was // instantiated with an array type (e.g., "new T" where T is // instantiated with "int[4]"), extract the outer bound from the // array type as our array size. We do this with constant and // dependently-sized array types. 
const ArrayType *ArrayT = SemaRef.Context.getAsArrayType(AllocType); if (!ArrayT) { // Do nothing } else if (const ConstantArrayType *ConsArrayT = dyn_cast<ConstantArrayType>(ArrayT)) { ArraySize = IntegerLiteral::Create(SemaRef.Context, ConsArrayT->getSize(), SemaRef.Context.getSizeType(), /*FIXME:*/ E->getLocStart()); AllocType = ConsArrayT->getElementType(); } else if (const DependentSizedArrayType *DepArrayT = dyn_cast<DependentSizedArrayType>(ArrayT)) { if (DepArrayT->getSizeExpr()) { ArraySize = DepArrayT->getSizeExpr(); AllocType = DepArrayT->getElementType(); } } } return getDerived().RebuildCXXNewExpr(E->getLocStart(), E->isGlobalNew(), /*FIXME:*/E->getLocStart(), PlacementArgs, /*FIXME:*/E->getLocStart(), E->getTypeIdParens(), AllocType, AllocTypeInfo, ArraySize.get(), E->getDirectInitRange(), NewInit.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) { ExprResult Operand = getDerived().TransformExpr(E->getArgument()); if (Operand.isInvalid()) return ExprError(); // Transform the delete operator, if known. FunctionDecl *OperatorDelete = nullptr; if (E->getOperatorDelete()) { OperatorDelete = cast_or_null<FunctionDecl>( getDerived().TransformDecl(E->getLocStart(), E->getOperatorDelete())); if (!OperatorDelete) return ExprError(); } if (!getDerived().AlwaysRebuild() && Operand.get() == E->getArgument() && OperatorDelete == E->getOperatorDelete()) { // Mark any declarations we need as referenced. // FIXME: instantiation-specific. if (OperatorDelete) SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete); if (!E->getArgument()->isTypeDependent()) { QualType Destroyed = SemaRef.Context.getBaseElementType( E->getDestroyedType()); if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) { CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl()); SemaRef.MarkFunctionReferenced(E->getLocStart(), SemaRef.LookupDestructor(Record)); } } return E; } return getDerived().RebuildCXXDeleteExpr(E->getLocStart(), E->isGlobalDelete(), E->isArrayForm(), Operand.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXPseudoDestructorExpr( CXXPseudoDestructorExpr *E) { ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); ParsedType ObjectTypePtr; bool MayBePseudoDestructor = false; Base = SemaRef.ActOnStartCXXMemberReference(nullptr, Base.get(), E->getOperatorLoc(), E->isArrow()? tok::arrow : tok::period, ObjectTypePtr, MayBePseudoDestructor); if (Base.isInvalid()) return ExprError(); QualType ObjectType = ObjectTypePtr.get(); NestedNameSpecifierLoc QualifierLoc = E->getQualifierLoc(); if (QualifierLoc) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc, ObjectType); if (!QualifierLoc) return ExprError(); } CXXScopeSpec SS; SS.Adopt(QualifierLoc); PseudoDestructorTypeStorage Destroyed; if (E->getDestroyedTypeInfo()) { TypeSourceInfo *DestroyedTypeInfo = getDerived().TransformTypeInObjectScope(E->getDestroyedTypeInfo(), ObjectType, nullptr, SS); if (!DestroyedTypeInfo) return ExprError(); Destroyed = DestroyedTypeInfo; } else if (!ObjectType.isNull() && ObjectType->isDependentType()) { // We aren't likely to be able to resolve the identifier down to a type // now anyway, so just retain the identifier. Destroyed = PseudoDestructorTypeStorage(E->getDestroyedTypeIdentifier(), E->getDestroyedTypeLoc()); } else { // Look for a destructor known with the given name. 
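      // e.g. the 'T' in 'p->~T()' once the object type is known.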
ParsedType T = SemaRef.getDestructorName(E->getTildeLoc(), *E->getDestroyedTypeIdentifier(), E->getDestroyedTypeLoc(), /*Scope=*/nullptr, SS, ObjectTypePtr, false); if (!T) return ExprError(); Destroyed = SemaRef.Context.getTrivialTypeSourceInfo(SemaRef.GetTypeFromParser(T), E->getDestroyedTypeLoc()); } TypeSourceInfo *ScopeTypeInfo = nullptr; if (E->getScopeTypeInfo()) { CXXScopeSpec EmptySS; ScopeTypeInfo = getDerived().TransformTypeInObjectScope( E->getScopeTypeInfo(), ObjectType, nullptr, EmptySS); if (!ScopeTypeInfo) return ExprError(); } return getDerived().RebuildCXXPseudoDestructorExpr(Base.get(), E->getOperatorLoc(), E->isArrow(), SS, ScopeTypeInfo, E->getColonColonLoc(), E->getTildeLoc(), Destroyed); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformUnresolvedLookupExpr( UnresolvedLookupExpr *Old) { LookupResult R(SemaRef, Old->getName(), Old->getNameLoc(), Sema::LookupOrdinaryName); // Transform all the decls. for (UnresolvedLookupExpr::decls_iterator I = Old->decls_begin(), E = Old->decls_end(); I != E; ++I) { NamedDecl *InstD = static_cast<NamedDecl*>( getDerived().TransformDecl(Old->getNameLoc(), *I)); if (!InstD) { // Silently ignore these if a UsingShadowDecl instantiated to nothing. // This can happen because of dependent hiding. if (isa<UsingShadowDecl>(*I)) continue; else { R.clear(); return ExprError(); } } // Expand using declarations. if (isa<UsingDecl>(InstD)) { UsingDecl *UD = cast<UsingDecl>(InstD); for (auto *I : UD->shadows()) R.addDecl(I); continue; } R.addDecl(InstD); } // Resolve a kind, but don't do any further analysis. If it's // ambiguous, the callee needs to deal with it. R.resolveKind(); // Rebuild the nested-name qualifier, if present. CXXScopeSpec SS; if (Old->getQualifierLoc()) { NestedNameSpecifierLoc QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc()); if (!QualifierLoc) return ExprError(); SS.Adopt(QualifierLoc); } if (Old->getNamingClass()) { CXXRecordDecl *NamingClass = cast_or_null<CXXRecordDecl>(getDerived().TransformDecl( Old->getNameLoc(), Old->getNamingClass())); if (!NamingClass) { R.clear(); return ExprError(); } R.setNamingClass(NamingClass); } SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc(); // If we have neither explicit template arguments, nor the template keyword, // it's a normal declaration name. if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid()) return getDerived().RebuildDeclarationNameExpr(SS, R, Old->requiresADL()); // If we have template arguments, rebuild them, then rebuild the // templateid expression. 
TemplateArgumentListInfo TransArgs(Old->getLAngleLoc(), Old->getRAngleLoc()); if (Old->hasExplicitTemplateArgs() && getDerived().TransformTemplateArguments(Old->getTemplateArgs(), Old->getNumTemplateArgs(), TransArgs)) { R.clear(); return ExprError(); } return getDerived().RebuildTemplateIdExpr(SS, TemplateKWLoc, R, Old->requiresADL(), &TransArgs); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) { bool ArgChanged = false; SmallVector<TypeSourceInfo *, 4> Args; for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) { TypeSourceInfo *From = E->getArg(I); TypeLoc FromTL = From->getTypeLoc(); if (!FromTL.getAs<PackExpansionTypeLoc>()) { TypeLocBuilder TLB; TLB.reserve(FromTL.getFullDataSize()); QualType To = getDerived().TransformType(TLB, FromTL); if (To.isNull()) return ExprError(); if (To == From->getType()) Args.push_back(From); else { Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To)); ArgChanged = true; } continue; } ArgChanged = true; // We have a pack expansion. Instantiate it. PackExpansionTypeLoc ExpansionTL = FromTL.castAs<PackExpansionTypeLoc>(); TypeLoc PatternTL = ExpansionTL.getPatternLoc(); SmallVector<UnexpandedParameterPack, 2> Unexpanded; SemaRef.collectUnexpandedParameterPacks(PatternTL, Unexpanded); // Determine whether the set of unexpanded parameter packs can and should // be expanded. bool Expand = true; bool RetainExpansion = false; Optional<unsigned> OrigNumExpansions = ExpansionTL.getTypePtr()->getNumExpansions(); Optional<unsigned> NumExpansions = OrigNumExpansions; if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(), PatternTL.getSourceRange(), Unexpanded, Expand, RetainExpansion, NumExpansions)) return ExprError(); if (!Expand) { // The transform has determined that we should perform a simple // transformation on the pack expansion, producing another pack // expansion. Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); TypeLocBuilder TLB; TLB.reserve(From->getTypeLoc().getFullDataSize()); QualType To = getDerived().TransformType(TLB, PatternTL); if (To.isNull()) return ExprError(); To = getDerived().RebuildPackExpansionType(To, PatternTL.getSourceRange(), ExpansionTL.getEllipsisLoc(), NumExpansions); if (To.isNull()) return ExprError(); PackExpansionTypeLoc ToExpansionTL = TLB.push<PackExpansionTypeLoc>(To); ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc()); Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To)); continue; } // Expand the pack expansion by substituting for each argument in the // pack(s). for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I); TypeLocBuilder TLB; TLB.reserve(PatternTL.getFullDataSize()); QualType To = getDerived().TransformType(TLB, PatternTL); if (To.isNull()) return ExprError(); if (To->containsUnexpandedParameterPack()) { To = getDerived().RebuildPackExpansionType(To, PatternTL.getSourceRange(), ExpansionTL.getEllipsisLoc(), NumExpansions); if (To.isNull()) return ExprError(); PackExpansionTypeLoc ToExpansionTL = TLB.push<PackExpansionTypeLoc>(To); ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc()); } Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To)); } if (!RetainExpansion) continue; // If we're supposed to retain a pack expansion, do so by temporarily // forgetting the partially-substituted parameter pack. 
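    // (The RAII object constructed below restores the pack when it goes out
    // of scope.)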
ForgetPartiallySubstitutedPackRAII Forget(getDerived()); TypeLocBuilder TLB; TLB.reserve(From->getTypeLoc().getFullDataSize()); QualType To = getDerived().TransformType(TLB, PatternTL); if (To.isNull()) return ExprError(); To = getDerived().RebuildPackExpansionType(To, PatternTL.getSourceRange(), ExpansionTL.getEllipsisLoc(), NumExpansions); if (To.isNull()) return ExprError(); PackExpansionTypeLoc ToExpansionTL = TLB.push<PackExpansionTypeLoc>(To); ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc()); Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To)); } if (!getDerived().AlwaysRebuild() && !ArgChanged) return E; return getDerived().RebuildTypeTrait(E->getTrait(), E->getLocStart(), Args, E->getLocEnd()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformArrayTypeTraitExpr(ArrayTypeTraitExpr *E) { TypeSourceInfo *T = getDerived().TransformType(E->getQueriedTypeSourceInfo()); if (!T) return ExprError(); if (!getDerived().AlwaysRebuild() && T == E->getQueriedTypeSourceInfo()) return E; ExprResult SubExpr; { EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); SubExpr = getDerived().TransformExpr(E->getDimensionExpression()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getDimensionExpression()) return E; } return getDerived().RebuildArrayTypeTrait(E->getTrait(), E->getLocStart(), T, SubExpr.get(), E->getLocEnd()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformExpressionTraitExpr(ExpressionTraitExpr *E) { ExprResult SubExpr; { EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); SubExpr = getDerived().TransformExpr(E->getQueriedExpression()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getQueriedExpression()) return E; } return getDerived().RebuildExpressionTrait( E->getTrait(), E->getLocStart(), SubExpr.get(), E->getLocEnd()); } template <typename Derived> ExprResult TreeTransform<Derived>::TransformParenDependentScopeDeclRefExpr( ParenExpr *PE, DependentScopeDeclRefExpr *DRE, bool AddrTaken, TypeSourceInfo **RecoveryTSI) { ExprResult NewDRE = getDerived().TransformDependentScopeDeclRefExpr( DRE, AddrTaken, RecoveryTSI); // Propagate both errors and recovered types, which return ExprEmpty. if (!NewDRE.isUsable()) return NewDRE; // We got an expr, wrap it up in parens. if (!getDerived().AlwaysRebuild() && NewDRE.get() == DRE) return PE; return getDerived().RebuildParenExpr(NewDRE.get(), PE->getLParen(), PE->getRParen()); } template <typename Derived> ExprResult TreeTransform<Derived>::TransformDependentScopeDeclRefExpr( DependentScopeDeclRefExpr *E) { return TransformDependentScopeDeclRefExpr(E, /*IsAddressOfOperand=*/false, nullptr); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformDependentScopeDeclRefExpr( DependentScopeDeclRefExpr *E, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI) { assert(E->getQualifierLoc()); NestedNameSpecifierLoc QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc()); if (!QualifierLoc) return ExprError(); SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc(); // TODO: If this is a conversion-function-id, verify that the // destination type name (if present) resolves the same way after // instantiation as it did in the local scope. 
DeclarationNameInfo NameInfo = getDerived().TransformDeclarationNameInfo(E->getNameInfo()); if (!NameInfo.getName()) return ExprError(); if (!E->hasExplicitTemplateArgs()) { if (!getDerived().AlwaysRebuild() && QualifierLoc == E->getQualifierLoc() && // Note: it is sufficient to compare the Name component of NameInfo: // if name has not changed, DNLoc has not changed either. NameInfo.getName() == E->getDeclName()) return E; return getDerived().RebuildDependentScopeDeclRefExpr( QualifierLoc, TemplateKWLoc, NameInfo, /*TemplateArgs=*/nullptr, IsAddressOfOperand, RecoveryTSI); } TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc()); if (getDerived().TransformTemplateArguments(E->getTemplateArgs(), E->getNumTemplateArgs(), TransArgs)) return ExprError(); return getDerived().RebuildDependentScopeDeclRefExpr( QualifierLoc, TemplateKWLoc, NameInfo, &TransArgs, IsAddressOfOperand, RecoveryTSI); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) { // CXXConstructExprs other than for list-initialization and // CXXTemporaryObjectExpr are always implicit, so when we have // a 1-argument construction we just transform that argument. if ((E->getNumArgs() == 1 || (E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1)))) && (!getDerived().DropCallArgument(E->getArg(0))) && !E->isListInitialization()) return getDerived().TransformExpr(E->getArg(0)); TemporaryBase Rebase(*this, /*FIXME*/E->getLocStart(), DeclarationName()); QualType T = getDerived().TransformType(E->getType()); if (T.isNull()) return ExprError(); CXXConstructorDecl *Constructor = cast_or_null<CXXConstructorDecl>( getDerived().TransformDecl(E->getLocStart(), E->getConstructor())); if (!Constructor) return ExprError(); bool ArgumentChanged = false; SmallVector<Expr*, 8> Args; if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args, &ArgumentChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && T == E->getType() && Constructor == E->getConstructor() && !ArgumentChanged) { // Mark the constructor as referenced. // FIXME: Instantiation-specific SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor); return E; } return getDerived().RebuildCXXConstructExpr(T, /*FIXME:*/E->getLocStart(), Constructor, E->isElidable(), Args, E->hadMultipleCandidates(), E->isListInitialization(), E->isStdInitListInitialization(), E->requiresZeroInitialization(), E->getConstructionKind(), E->getParenOrBraceRange()); } /// \brief Transform a C++ temporary-binding expression. /// /// Since CXXBindTemporaryExpr nodes are implicitly generated, we just /// transform the subexpression and return that. template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { return getDerived().TransformExpr(E->getSubExpr()); } /// \brief Transform a C++ expression that contains cleanups that should /// be run after the expression is evaluated. /// /// Since ExprWithCleanups nodes are implicitly generated, we /// just transform the subexpression and return that. 
template<typename Derived> ExprResult TreeTransform<Derived>::TransformExprWithCleanups(ExprWithCleanups *E) { return getDerived().TransformExpr(E->getSubExpr()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXTemporaryObjectExpr( CXXTemporaryObjectExpr *E) { TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo()); if (!T) return ExprError(); CXXConstructorDecl *Constructor = cast_or_null<CXXConstructorDecl>( getDerived().TransformDecl(E->getLocStart(), E->getConstructor())); if (!Constructor) return ExprError(); bool ArgumentChanged = false; SmallVector<Expr*, 8> Args; Args.reserve(E->getNumArgs()); if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args, &ArgumentChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && T == E->getTypeSourceInfo() && Constructor == E->getConstructor() && !ArgumentChanged) { // FIXME: Instantiation-specific SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor); return SemaRef.MaybeBindToTemporary(E); } // FIXME: Pass in E->isListInitialization(). return getDerived().RebuildCXXTemporaryObjectExpr(T, /*FIXME:*/T->getTypeLoc().getEndLoc(), Args, E->getLocEnd()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) { // Transform any init-capture expressions before entering the scope of the // lambda body, because they are not semantically within that scope. typedef std::pair<ExprResult, QualType> InitCaptureInfoTy; SmallVector<InitCaptureInfoTy, 8> InitCaptureExprsAndTypes; InitCaptureExprsAndTypes.resize(E->explicit_capture_end() - E->explicit_capture_begin()); for (LambdaExpr::capture_iterator C = E->capture_begin(), CEnd = E->capture_end(); C != CEnd; ++C) { if (!E->isInitCapture(C)) continue; EnterExpressionEvaluationContext EEEC(getSema(), Sema::PotentiallyEvaluated); ExprResult NewExprInitResult = getDerived().TransformInitializer( C->getCapturedVar()->getInit(), C->getCapturedVar()->getInitStyle() == VarDecl::CallInit); if (NewExprInitResult.isInvalid()) return ExprError(); Expr *NewExprInit = NewExprInitResult.get(); VarDecl *OldVD = C->getCapturedVar(); QualType NewInitCaptureType = getSema().performLambdaInitCaptureInitialization(C->getLocation(), OldVD->getType()->isReferenceType(), OldVD->getIdentifier(), NewExprInit); NewExprInitResult = NewExprInit; InitCaptureExprsAndTypes[C - E->capture_begin()] = std::make_pair(NewExprInitResult, NewInitCaptureType); } // Transform the template parameters, and add them to the current // instantiation scope. The null case is handled correctly. auto TPL = getDerived().TransformTemplateParameterList( E->getTemplateParameterList()); // Transform the type of the original lambda's call operator. // The transformation MUST be done in the CurrentInstantiationScope since // it introduces a mapping of the original to the newly created // transformed parameters. TypeSourceInfo *NewCallOpTSI = nullptr; { TypeSourceInfo *OldCallOpTSI = E->getCallOperator()->getTypeSourceInfo(); FunctionProtoTypeLoc OldCallOpFPTL = OldCallOpTSI->getTypeLoc().getAs<FunctionProtoTypeLoc>(); TypeLocBuilder NewCallOpTLBuilder; SmallVector<QualType, 4> ExceptionStorage; TreeTransform *This = this; // Work around gcc.gnu.org/PR56135. 
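// The lambda argument passed to TransformFunctionProtoType below intentionally names the local 'This' pointer rather than capturing 'this' directly; that is the substance of the workaround noted above.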
QualType NewCallOpType = TransformFunctionProtoType( NewCallOpTLBuilder, OldCallOpFPTL, nullptr, 0, [&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) { return This->TransformExceptionSpec(OldCallOpFPTL.getBeginLoc(), ESI, ExceptionStorage, Changed); }); if (NewCallOpType.isNull()) return ExprError(); NewCallOpTSI = NewCallOpTLBuilder.getTypeSourceInfo(getSema().Context, NewCallOpType); } LambdaScopeInfo *LSI = getSema().PushLambdaScope(); Sema::FunctionScopeRAII FuncScopeCleanup(getSema()); LSI->GLTemplateParameterList = TPL; // Create the local class that will describe the lambda. CXXRecordDecl *Class = getSema().createLambdaClosureType(E->getIntroducerRange(), NewCallOpTSI, /*KnownDependent=*/false, E->getCaptureDefault()); getDerived().transformedLocalDecl(E->getLambdaClass(), Class); // Build the call operator. CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition( Class, E->getIntroducerRange(), NewCallOpTSI, E->getCallOperator()->getLocEnd(), NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams()); LSI->CallOperator = NewCallOperator; getDerived().transformAttrs(E->getCallOperator(), NewCallOperator); getDerived().transformedLocalDecl(E->getCallOperator(), NewCallOperator); // Introduce the context of the call operator. Sema::ContextRAII SavedContext(getSema(), NewCallOperator, /*NewThisContext*/false); // Enter the scope of the lambda. getSema().buildLambdaScope(LSI, NewCallOperator, E->getIntroducerRange(), E->getCaptureDefault(), E->getCaptureDefaultLoc(), E->hasExplicitParameters(), E->hasExplicitResultType(), E->isMutable()); bool Invalid = false; // Transform captures. bool FinishedExplicitCaptures = false; for (LambdaExpr::capture_iterator C = E->capture_begin(), CEnd = E->capture_end(); C != CEnd; ++C) { // When we hit the first implicit capture, tell Sema that we've finished // the list of explicit captures. if (!FinishedExplicitCaptures && C->isImplicit()) { getSema().finishLambdaExplicitCaptures(LSI); FinishedExplicitCaptures = true; } // Capturing 'this' is trivial. if (C->capturesThis()) { getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit()); continue; } // Captured expression will be recaptured during captured variables // rebuilding. if (C->capturesVLAType()) continue; // Rebuild init-captures, including the implied field declaration. if (E->isInitCapture(C)) { InitCaptureInfoTy InitExprTypePair = InitCaptureExprsAndTypes[C - E->capture_begin()]; ExprResult Init = InitExprTypePair.first; QualType InitQualType = InitExprTypePair.second; if (Init.isInvalid() || InitQualType.isNull()) { Invalid = true; continue; } VarDecl *OldVD = C->getCapturedVar(); VarDecl *NewVD = getSema().createLambdaInitCaptureVarDecl( OldVD->getLocation(), InitExprTypePair.second, OldVD->getIdentifier(), Init.get()); if (!NewVD) Invalid = true; else { getDerived().transformedLocalDecl(OldVD, NewVD); } getSema().buildInitCaptureField(LSI, NewVD); continue; } assert(C->capturesVariable() && "unexpected kind of lambda capture"); // Determine the capture kind for Sema. Sema::TryCaptureKind Kind = C->isImplicit()? Sema::TryCapture_Implicit : C->getCaptureKind() == LCK_ByCopy ? 
Sema::TryCapture_ExplicitByVal : Sema::TryCapture_ExplicitByRef; SourceLocation EllipsisLoc; if (C->isPackExpansion()) { UnexpandedParameterPack Unexpanded(C->getCapturedVar(), C->getLocation()); bool ShouldExpand = false; bool RetainExpansion = false; Optional<unsigned> NumExpansions; if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(), C->getLocation(), Unexpanded, ShouldExpand, RetainExpansion, NumExpansions)) { Invalid = true; continue; } if (ShouldExpand) { // The transform has determined that we should perform an expansion; // transform and capture each of the arguments. VarDecl *Pack = C->getCapturedVar(); for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I); VarDecl *CapturedVar = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(), Pack)); if (!CapturedVar) { Invalid = true; continue; } // Capture the transformed variable. getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind); } // FIXME: Retain a pack expansion if RetainExpansion is true. continue; } EllipsisLoc = C->getEllipsisLoc(); } // Transform the captured variable. VarDecl *CapturedVar = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(), C->getCapturedVar())); if (!CapturedVar || CapturedVar->isInvalidDecl()) { Invalid = true; continue; } // Capture the transformed variable. getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind, EllipsisLoc); } if (!FinishedExplicitCaptures) getSema().finishLambdaExplicitCaptures(LSI); // Enter a new evaluation context to insulate the lambda from any // cleanups from the enclosing full-expression. getSema().PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); // Instantiate the body of the lambda expression. StmtResult Body = Invalid ? StmtError() : getDerived().TransformStmt(E->getBody()); // ActOnLambda* will pop the function scope for us. FuncScopeCleanup.disable(); if (Body.isInvalid()) { SavedContext.pop(); getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/nullptr, /*IsInstantiation=*/true); return ExprError(); } // Copy the LSI before ActOnFinishFunctionBody removes it. // FIXME: This is dumb. Store the lambda information somewhere that outlives // the call operator. auto LSICopy = *LSI; getSema().ActOnFinishFunctionBody(NewCallOperator, Body.get(), /*IsInstantiation*/ true); SavedContext.pop(); return getSema().BuildLambdaExpr(E->getLocStart(), Body.get()->getLocEnd(), &LSICopy); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr( CXXUnresolvedConstructExpr *E) { TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo()); if (!T) return ExprError(); bool ArgumentChanged = false; SmallVector<Expr*, 8> Args; Args.reserve(E->arg_size()); if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args, &ArgumentChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && T == E->getTypeSourceInfo() && !ArgumentChanged) return E; // FIXME: we're faking the locations of the commas return getDerived().RebuildCXXUnresolvedConstructExpr(T, E->getLParenLoc(), Args, E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr( CXXDependentScopeMemberExpr *E) { // Transform the base of the expression.
ExprResult Base((Expr*) nullptr); Expr *OldBase; QualType BaseType; QualType ObjectType; if (!E->isImplicitAccess()) { OldBase = E->getBase(); Base = getDerived().TransformExpr(OldBase); if (Base.isInvalid()) return ExprError(); // Start the member reference and compute the object's type. ParsedType ObjectTy; bool MayBePseudoDestructor = false; Base = SemaRef.ActOnStartCXXMemberReference(nullptr, Base.get(), E->getOperatorLoc(), E->isArrow()? tok::arrow : tok::period, ObjectTy, MayBePseudoDestructor); if (Base.isInvalid()) return ExprError(); ObjectType = ObjectTy.get(); BaseType = ((Expr*) Base.get())->getType(); } else { OldBase = nullptr; BaseType = getDerived().TransformType(E->getBaseType()); ObjectType = BaseType->getPointeeType(); } // Transform the first part of the nested-name-specifier that qualifies // the member name. NamedDecl *FirstQualifierInScope = getDerived().TransformFirstQualifierInScope( E->getFirstQualifierFoundInScope(), E->getQualifierLoc().getBeginLoc()); NestedNameSpecifierLoc QualifierLoc; if (E->getQualifier()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc(), ObjectType, FirstQualifierInScope); if (!QualifierLoc) return ExprError(); } SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc(); // TODO: If this is a conversion-function-id, verify that the // destination type name (if present) resolves the same way after // instantiation as it did in the local scope. DeclarationNameInfo NameInfo = getDerived().TransformDeclarationNameInfo(E->getMemberNameInfo()); if (!NameInfo.getName()) return ExprError(); if (!E->hasExplicitTemplateArgs()) { // This is a reference to a member without an explicitly-specified // template argument list. Optimize for this common case. if (!getDerived().AlwaysRebuild() && Base.get() == OldBase && BaseType == E->getBaseType() && QualifierLoc == E->getQualifierLoc() && NameInfo.getName() == E->getMember() && FirstQualifierInScope == E->getFirstQualifierFoundInScope()) return E; return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(), BaseType, E->isArrow(), E->getOperatorLoc(), QualifierLoc, TemplateKWLoc, FirstQualifierInScope, NameInfo, /*TemplateArgs*/nullptr); } TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc()); if (getDerived().TransformTemplateArguments(E->getTemplateArgs(), E->getNumTemplateArgs(), TransArgs)) return ExprError(); return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(), BaseType, E->isArrow(), E->getOperatorLoc(), QualifierLoc, TemplateKWLoc, FirstQualifierInScope, NameInfo, &TransArgs); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old) { // Transform the base of the expression. ExprResult Base((Expr*) nullptr); QualType BaseType; if (!Old->isImplicitAccess()) { Base = getDerived().TransformExpr(Old->getBase()); if (Base.isInvalid()) return ExprError(); Base = getSema().PerformMemberExprBaseConversion(Base.get(), Old->isArrow()); if (Base.isInvalid()) return ExprError(); BaseType = Base.get()->getType(); } else { BaseType = getDerived().TransformType(Old->getBaseType()); } NestedNameSpecifierLoc QualifierLoc; if (Old->getQualifierLoc()) { QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc()); if (!QualifierLoc) return ExprError(); } SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc(); LookupResult R(SemaRef, Old->getMemberNameInfo(), Sema::LookupOrdinaryName); // Transform all the decls. 
for (UnresolvedMemberExpr::decls_iterator I = Old->decls_begin(), E = Old->decls_end(); I != E; ++I) { NamedDecl *InstD = static_cast<NamedDecl*>( getDerived().TransformDecl(Old->getMemberLoc(), *I)); if (!InstD) { // Silently ignore these if a UsingShadowDecl instantiated to nothing. // This can happen because of dependent hiding. if (isa<UsingShadowDecl>(*I)) continue; else { R.clear(); return ExprError(); } } // Expand using declarations. if (isa<UsingDecl>(InstD)) { UsingDecl *UD = cast<UsingDecl>(InstD); for (auto *I : UD->shadows()) R.addDecl(I); continue; } R.addDecl(InstD); } R.resolveKind(); // Determine the naming class. if (Old->getNamingClass()) { CXXRecordDecl *NamingClass = cast_or_null<CXXRecordDecl>(getDerived().TransformDecl( Old->getMemberLoc(), Old->getNamingClass())); if (!NamingClass) return ExprError(); R.setNamingClass(NamingClass); } TemplateArgumentListInfo TransArgs; if (Old->hasExplicitTemplateArgs()) { TransArgs.setLAngleLoc(Old->getLAngleLoc()); TransArgs.setRAngleLoc(Old->getRAngleLoc()); if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(), Old->getNumTemplateArgs(), TransArgs)) return ExprError(); } // FIXME: to do this check properly, we will need to preserve the // first-qualifier-in-scope here, just in case we had a dependent // base (and therefore couldn't do the check) and a // nested-name-qualifier (and therefore could do the lookup). NamedDecl *FirstQualifierInScope = nullptr; return getDerived().RebuildUnresolvedMemberExpr(Base.get(), BaseType, Old->getOperatorLoc(), Old->isArrow(), QualifierLoc, TemplateKWLoc, FirstQualifierInScope, R, (Old->hasExplicitTemplateArgs() ? &TransArgs : nullptr)); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXNoexceptExpr(CXXNoexceptExpr *E) { EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); ExprResult SubExpr = getDerived().TransformExpr(E->getOperand()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getOperand()) return E; return getDerived().RebuildCXXNoexceptExpr(E->getSourceRange(),SubExpr.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformPackExpansionExpr(PackExpansionExpr *E) { ExprResult Pattern = getDerived().TransformExpr(E->getPattern()); if (Pattern.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && Pattern.get() == E->getPattern()) return E; return getDerived().RebuildPackExpansion(Pattern.get(), E->getEllipsisLoc(), E->getNumExpansions()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) { // If E is not value-dependent, then nothing will change when we transform it. // Note: This is an instantiation-centric view. 
if (!E->isValueDependent()) return E; // Note: None of the implementations of TryExpandParameterPacks can ever // produce a diagnostic when given only a single unexpanded parameter pack, // so this call cannot fail with a diagnostic here. UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc()); bool ShouldExpand = false; bool RetainExpansion = false; Optional<unsigned> NumExpansions; if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(), Unexpanded, ShouldExpand, RetainExpansion, NumExpansions)) return ExprError(); if (RetainExpansion) return E; NamedDecl *Pack = E->getPack(); if (!ShouldExpand) { Pack = cast_or_null<NamedDecl>(getDerived().TransformDecl(E->getPackLoc(), Pack)); if (!Pack) return ExprError(); } // We now know the length of the parameter pack, so build a new expression // that stores that length. return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack, E->getPackLoc(), E->getRParenLoc(), NumExpansions); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformSubstNonTypeTemplateParmPackExpr( SubstNonTypeTemplateParmPackExpr *E) { // Default behavior is to do nothing with this transformation. return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformSubstNonTypeTemplateParmExpr( SubstNonTypeTemplateParmExpr *E) { // Default behavior is to do nothing with this transformation. return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformFunctionParmPackExpr(FunctionParmPackExpr *E) { // Default behavior is to do nothing with this transformation. return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformMaterializeTemporaryExpr( MaterializeTemporaryExpr *E) { return getDerived().TransformExpr(E->GetTemporaryExpr()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXFoldExpr(CXXFoldExpr *E) { Expr *Pattern = E->getPattern(); SmallVector<UnexpandedParameterPack, 2> Unexpanded; getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded); assert(!Unexpanded.empty() && "Pack expansion without parameter packs?"); // Determine whether the set of unexpanded parameter packs can and should // be expanded. bool Expand = true; bool RetainExpansion = false; Optional<unsigned> NumExpansions; if (getDerived().TryExpandParameterPacks(E->getEllipsisLoc(), Pattern->getSourceRange(), Unexpanded, Expand, RetainExpansion, NumExpansions)) return true; if (!Expand) { // Do not expand any packs here, just transform and rebuild a fold // expression. Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); ExprResult LHS = E->getLHS() ? getDerived().TransformExpr(E->getLHS()) : ExprResult(); if (LHS.isInvalid()) return true; ExprResult RHS = E->getRHS() ? getDerived().TransformExpr(E->getRHS()) : ExprResult(); if (RHS.isInvalid()) return true; if (!getDerived().AlwaysRebuild() && LHS.get() == E->getLHS() && RHS.get() == E->getRHS()) return E; return getDerived().RebuildCXXFoldExpr( E->getLocStart(), LHS.get(), E->getOperator(), E->getEllipsisLoc(), RHS.get(), E->getLocEnd()); } // The transform has determined that we should perform an elementwise // expansion of the pattern. Do so. ExprResult Result = getDerived().TransformExpr(E->getInit()); if (Result.isInvalid()) return true; bool LeftFold = E->isLeftFold(); // If we're retaining an expansion for a right fold, it is the innermost // component and takes the init (if any).
if (!LeftFold && RetainExpansion) { ForgetPartiallySubstitutedPackRAII Forget(getDerived()); ExprResult Out = getDerived().TransformExpr(Pattern); if (Out.isInvalid()) return true; Result = getDerived().RebuildCXXFoldExpr( E->getLocStart(), Out.get(), E->getOperator(), E->getEllipsisLoc(), Result.get(), E->getLocEnd()); if (Result.isInvalid()) return true; } for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex( getSema(), LeftFold ? I : *NumExpansions - I - 1); ExprResult Out = getDerived().TransformExpr(Pattern); if (Out.isInvalid()) return true; if (Out.get()->containsUnexpandedParameterPack()) { // We still have a pack; retain a pack expansion for this slice. Result = getDerived().RebuildCXXFoldExpr( E->getLocStart(), LeftFold ? Result.get() : Out.get(), E->getOperator(), E->getEllipsisLoc(), LeftFold ? Out.get() : Result.get(), E->getLocEnd()); } else if (Result.isUsable()) { // We've got down to a single element; build a binary operator. Result = getDerived().RebuildBinaryOperator( E->getEllipsisLoc(), E->getOperator(), LeftFold ? Result.get() : Out.get(), LeftFold ? Out.get() : Result.get()); } else Result = Out; if (Result.isInvalid()) return true; } // If we're retaining an expansion for a left fold, it is the outermost // component and takes the complete expansion so far as its init (if any). if (LeftFold && RetainExpansion) { ForgetPartiallySubstitutedPackRAII Forget(getDerived()); ExprResult Out = getDerived().TransformExpr(Pattern); if (Out.isInvalid()) return true; Result = getDerived().RebuildCXXFoldExpr( E->getLocStart(), Result.get(), E->getOperator(), E->getEllipsisLoc(), Out.get(), E->getLocEnd()); if (Result.isInvalid()) return true; } // If we had no init and an empty pack, and we're not retaining an expansion, // then produce a fallback value or error. if (Result.isUnset()) return getDerived().RebuildEmptyCXXFoldExpr(E->getEllipsisLoc(), E->getOperator()); return Result; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformCXXStdInitializerListExpr( CXXStdInitializerListExpr *E) { return getDerived().TransformExpr(E->getSubExpr()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCStringLiteral(ObjCStringLiteral *E) { return SemaRef.MaybeBindToTemporary(E); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCBoxedExpr(ObjCBoxedExpr *E) { ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr()) return E; return getDerived().RebuildObjCBoxedExpr(E->getSourceRange(), SubExpr.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCArrayLiteral(ObjCArrayLiteral *E) { // Transform each of the elements. SmallVector<Expr *, 8> Elements; bool ArgChanged = false; if (getDerived().TransformExprs(E->getElements(), E->getNumElements(), /*IsCall=*/false, Elements, &ArgChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && !ArgChanged) return SemaRef.MaybeBindToTemporary(E); return getDerived().RebuildObjCArrayLiteral(E->getSourceRange(), Elements.data(), Elements.size()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCDictionaryLiteral( ObjCDictionaryLiteral *E) { // Transform each of the elements. 
SmallVector<ObjCDictionaryElement, 8> Elements; bool ArgChanged = false; for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) { ObjCDictionaryElement OrigElement = E->getKeyValueElement(I); if (OrigElement.isPackExpansion()) { // This key/value element is a pack expansion. SmallVector<UnexpandedParameterPack, 2> Unexpanded; getSema().collectUnexpandedParameterPacks(OrigElement.Key, Unexpanded); getSema().collectUnexpandedParameterPacks(OrigElement.Value, Unexpanded); assert(!Unexpanded.empty() && "Pack expansion without parameter packs?"); // Determine whether the set of unexpanded parameter packs can // and should be expanded. bool Expand = true; bool RetainExpansion = false; Optional<unsigned> OrigNumExpansions = OrigElement.NumExpansions; Optional<unsigned> NumExpansions = OrigNumExpansions; SourceRange PatternRange(OrigElement.Key->getLocStart(), OrigElement.Value->getLocEnd()); if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc, PatternRange, Unexpanded, Expand, RetainExpansion, NumExpansions)) return ExprError(); if (!Expand) { // The transform has determined that we should perform a simple // transformation on the pack expansion, producing another pack // expansion. Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1); ExprResult Key = getDerived().TransformExpr(OrigElement.Key); if (Key.isInvalid()) return ExprError(); if (Key.get() != OrigElement.Key) ArgChanged = true; ExprResult Value = getDerived().TransformExpr(OrigElement.Value); if (Value.isInvalid()) return ExprError(); if (Value.get() != OrigElement.Value) ArgChanged = true; ObjCDictionaryElement Expansion = { Key.get(), Value.get(), OrigElement.EllipsisLoc, NumExpansions }; Elements.push_back(Expansion); continue; } // Record right away that the argument was changed. This needs // to happen even if the array expands to nothing. ArgChanged = true; // The transform has determined that we should perform an elementwise // expansion of the pattern. Do so. for (unsigned I = 0; I != *NumExpansions; ++I) { Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I); ExprResult Key = getDerived().TransformExpr(OrigElement.Key); if (Key.isInvalid()) return ExprError(); ExprResult Value = getDerived().TransformExpr(OrigElement.Value); if (Value.isInvalid()) return ExprError(); ObjCDictionaryElement Element = { Key.get(), Value.get(), SourceLocation(), NumExpansions }; // If any unexpanded parameter packs remain, we still have a // pack expansion. // FIXME: Can this really happen? if (Key.get()->containsUnexpandedParameterPack() || Value.get()->containsUnexpandedParameterPack()) Element.EllipsisLoc = OrigElement.EllipsisLoc; Elements.push_back(Element); } // FIXME: Retain a pack expansion if RetainExpansion is true. // We've finished with this pack expansion. continue; } // Transform and check key. ExprResult Key = getDerived().TransformExpr(OrigElement.Key); if (Key.isInvalid()) return ExprError(); if (Key.get() != OrigElement.Key) ArgChanged = true; // Transform and check value. 
ExprResult Value = getDerived().TransformExpr(OrigElement.Value); if (Value.isInvalid()) return ExprError(); if (Value.get() != OrigElement.Value) ArgChanged = true; ObjCDictionaryElement Element = { Key.get(), Value.get(), SourceLocation(), None }; Elements.push_back(Element); } if (!getDerived().AlwaysRebuild() && !ArgChanged) return SemaRef.MaybeBindToTemporary(E); return getDerived().RebuildObjCDictionaryLiteral(E->getSourceRange(), Elements.data(), Elements.size()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCEncodeExpr(ObjCEncodeExpr *E) { TypeSourceInfo *EncodedTypeInfo = getDerived().TransformType(E->getEncodedTypeSourceInfo()); if (!EncodedTypeInfo) return ExprError(); if (!getDerived().AlwaysRebuild() && EncodedTypeInfo == E->getEncodedTypeSourceInfo()) return E; return getDerived().RebuildObjCEncodeExpr(E->getAtLoc(), EncodedTypeInfo, E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>:: TransformObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) { // This is a kind of implicit conversion, and it needs to get dropped // and recomputed for the same general reasons that ImplicitCastExprs // do, as well as a more specific one: this expression is only valid when // it appears *immediately* as an argument expression. return getDerived().TransformExpr(E->getSubExpr()); } template<typename Derived> ExprResult TreeTransform<Derived>:: TransformObjCBridgedCastExpr(ObjCBridgedCastExpr *E) { TypeSourceInfo *TSInfo = getDerived().TransformType(E->getTypeInfoAsWritten()); if (!TSInfo) return ExprError(); ExprResult Result = getDerived().TransformExpr(E->getSubExpr()); if (Result.isInvalid()) return ExprError(); if (!getDerived().AlwaysRebuild() && TSInfo == E->getTypeInfoAsWritten() && Result.get() == E->getSubExpr()) return E; return SemaRef.BuildObjCBridgedCast(E->getLParenLoc(), E->getBridgeKind(), E->getBridgeKeywordLoc(), TSInfo, Result.get()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) { // Transform arguments. bool ArgChanged = false; SmallVector<Expr*, 8> Args; Args.reserve(E->getNumArgs()); if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args, &ArgChanged)) return ExprError(); if (E->getReceiverKind() == ObjCMessageExpr::Class) { // Class message: transform the receiver type. TypeSourceInfo *ReceiverTypeInfo = getDerived().TransformType(E->getClassReceiverTypeInfo()); if (!ReceiverTypeInfo) return ExprError(); // If nothing changed, just retain the existing message send. if (!getDerived().AlwaysRebuild() && ReceiverTypeInfo == E->getClassReceiverTypeInfo() && !ArgChanged) return SemaRef.MaybeBindToTemporary(E); // Build a new class message send. SmallVector<SourceLocation, 16> SelLocs; E->getSelectorLocs(SelLocs); return getDerived().RebuildObjCMessageExpr(ReceiverTypeInfo, E->getSelector(), SelLocs, E->getMethodDecl(), E->getLeftLoc(), Args, E->getRightLoc()); } else if (E->getReceiverKind() == ObjCMessageExpr::SuperClass || E->getReceiverKind() == ObjCMessageExpr::SuperInstance) { // Build a new class message send to 'super'.
SmallVector<SourceLocation, 16> SelLocs; E->getSelectorLocs(SelLocs); return getDerived().RebuildObjCMessageExpr(E->getSuperLoc(), E->getSelector(), SelLocs, E->getMethodDecl(), E->getLeftLoc(), Args, E->getRightLoc()); } // Instance message: transform the receiver assert(E->getReceiverKind() == ObjCMessageExpr::Instance && "Only class and instance messages may be instantiated"); ExprResult Receiver = getDerived().TransformExpr(E->getInstanceReceiver()); if (Receiver.isInvalid()) return ExprError(); // If nothing changed, just retain the existing message send. if (!getDerived().AlwaysRebuild() && Receiver.get() == E->getInstanceReceiver() && !ArgChanged) return SemaRef.MaybeBindToTemporary(E); // Build a new instance message send. SmallVector<SourceLocation, 16> SelLocs; E->getSelectorLocs(SelLocs); return getDerived().RebuildObjCMessageExpr(Receiver.get(), E->getSelector(), SelLocs, E->getMethodDecl(), E->getLeftLoc(), Args, E->getRightLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCSelectorExpr(ObjCSelectorExpr *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCProtocolExpr(ObjCProtocolExpr *E) { return E; } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCIvarRefExpr(ObjCIvarRefExpr *E) { // Transform the base expression. ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); // We don't need to transform the ivar; it will never change. // If nothing changed, just retain the existing expression. if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase()) return E; return getDerived().RebuildObjCIvarRefExpr(Base.get(), E->getDecl(), E->getLocation(), E->isArrow(), E->isFreeIvar()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { // 'super' and types never change. Property never changes. Just // retain the existing expression. if (!E->isObjectReceiver()) return E; // Transform the base expression. ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); // We don't need to transform the property; it will never change. // If nothing changed, just retain the existing expression. if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase()) return E; if (E->isExplicitProperty()) return getDerived().RebuildObjCPropertyRefExpr(Base.get(), E->getExplicitProperty(), E->getLocation()); return getDerived().RebuildObjCPropertyRefExpr(Base.get(), SemaRef.Context.PseudoObjectTy, E->getImplicitPropertyGetter(), E->getImplicitPropertySetter(), E->getLocation()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) { // Transform the base expression. ExprResult Base = getDerived().TransformExpr(E->getBaseExpr()); if (Base.isInvalid()) return ExprError(); // Transform the key expression. ExprResult Key = getDerived().TransformExpr(E->getKeyExpr()); if (Key.isInvalid()) return ExprError(); // If nothing changed, just retain the existing expression. if (!getDerived().AlwaysRebuild() && Key.get() == E->getKeyExpr() && Base.get() == E->getBaseExpr()) return E; return getDerived().RebuildObjCSubscriptRefExpr(E->getRBracket(), Base.get(), Key.get(), E->getAtIndexMethodDecl(), E->setAtIndexMethodDecl()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformObjCIsaExpr(ObjCIsaExpr *E) { // Transform the base expression. 
ExprResult Base = getDerived().TransformExpr(E->getBase()); if (Base.isInvalid()) return ExprError(); // If nothing changed, just retain the existing expression. if (!getDerived().AlwaysRebuild() && Base.get() == E->getBase()) return E; return getDerived().RebuildObjCIsaExpr(Base.get(), E->getIsaMemberLoc(), E->getOpLoc(), E->isArrow()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) { bool ArgumentChanged = false; SmallVector<Expr*, 8> SubExprs; SubExprs.reserve(E->getNumSubExprs()); if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false, SubExprs, &ArgumentChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && !ArgumentChanged) return E; return getDerived().RebuildShuffleVectorExpr(E->getBuiltinLoc(), SubExprs, E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformConvertVectorExpr(ConvertVectorExpr *E) { ExprResult SrcExpr = getDerived().TransformExpr(E->getSrcExpr()); if (SrcExpr.isInvalid()) return ExprError(); TypeSourceInfo *Type = getDerived().TransformType(E->getTypeSourceInfo()); if (!Type) return ExprError(); if (!getDerived().AlwaysRebuild() && Type == E->getTypeSourceInfo() && SrcExpr.get() == E->getSrcExpr()) return E; return getDerived().RebuildConvertVectorExpr(E->getBuiltinLoc(), SrcExpr.get(), Type, E->getRParenLoc()); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) { BlockDecl *oldBlock = E->getBlockDecl(); SemaRef.ActOnBlockStart(E->getCaretLocation(), /*Scope=*/nullptr); BlockScopeInfo *blockScope = SemaRef.getCurBlock(); blockScope->TheDecl->setIsVariadic(oldBlock->isVariadic()); blockScope->TheDecl->setBlockMissingReturnType( oldBlock->blockMissingReturnType()); SmallVector<ParmVarDecl*, 4> params; SmallVector<QualType, 4> paramTypes; // Parameter substitution. if (getDerived().TransformFunctionTypeParams(E->getCaretLocation(), oldBlock->param_begin(), oldBlock->param_size(), nullptr, paramTypes, &params)) { getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/nullptr); return ExprError(); } const FunctionProtoType *exprFunctionType = E->getFunctionType(); QualType exprResultType = getDerived().TransformType(exprFunctionType->getReturnType()); // HLSL Change - FIX - We should move param mods to parameter QualTypes QualType functionType = getDerived().RebuildFunctionProtoType( exprResultType, paramTypes, exprFunctionType->getParamMods(), exprFunctionType->getExtProtoInfo()); // HLSL Change - End blockScope->FunctionType = functionType; // Set the parameters on the block decl. if (!params.empty()) blockScope->TheDecl->setParams(params); if (!oldBlock->blockMissingReturnType()) { blockScope->HasImplicitReturnType = false; blockScope->ReturnType = exprResultType; } // Transform the body StmtResult body = getDerived().TransformStmt(E->getBody()); if (body.isInvalid()) { getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/nullptr); return ExprError(); } #ifndef NDEBUG // In builds with assertions, make sure that we captured everything we // captured before. if (!SemaRef.getDiagnostics().hasErrorOccurred()) { for (const auto &I : oldBlock->captures()) { VarDecl *oldCapture = I.getVariable(); // Ignore parameter packs. 
if (isa<ParmVarDecl>(oldCapture) && cast<ParmVarDecl>(oldCapture)->isParameterPack()) continue; VarDecl *newCapture = cast<VarDecl>(getDerived().TransformDecl(E->getCaretLocation(), oldCapture)); assert(blockScope->CaptureMap.count(newCapture)); } assert(oldBlock->capturesCXXThis() == blockScope->isCXXThisCaptured()); } #endif return SemaRef.ActOnBlockStmtExpr(E->getCaretLocation(), body.get(), /*Scope=*/nullptr); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformAsTypeExpr(AsTypeExpr *E) { llvm_unreachable("Cannot transform asType expressions yet"); } template<typename Derived> ExprResult TreeTransform<Derived>::TransformAtomicExpr(AtomicExpr *E) { QualType RetTy = getDerived().TransformType(E->getType()); bool ArgumentChanged = false; SmallVector<Expr*, 8> SubExprs; SubExprs.reserve(E->getNumSubExprs()); if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false, SubExprs, &ArgumentChanged)) return ExprError(); if (!getDerived().AlwaysRebuild() && !ArgumentChanged) return E; return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), SubExprs, RetTy, E->getOp(), E->getRParenLoc()); } //===----------------------------------------------------------------------===// // Type reconstruction // // /////////////////////////////////////////////////////////////////////////////// template<typename Derived> QualType TreeTransform<Derived>::RebuildPointerType(QualType PointeeType, SourceLocation Star) { return SemaRef.BuildPointerType(PointeeType, Star, getDerived().getBaseEntity()); } template<typename Derived> QualType TreeTransform<Derived>::RebuildBlockPointerType(QualType PointeeType, SourceLocation Star) { return SemaRef.BuildBlockPointerType(PointeeType, Star, getDerived().getBaseEntity()); } template<typename Derived> QualType TreeTransform<Derived>::RebuildReferenceType(QualType ReferentType, bool WrittenAsLValue, SourceLocation Sigil) { return SemaRef.BuildReferenceType(ReferentType, WrittenAsLValue, Sigil, getDerived().getBaseEntity()); } template<typename Derived> QualType TreeTransform<Derived>::RebuildMemberPointerType(QualType PointeeType, QualType ClassType, SourceLocation Sigil) { return SemaRef.BuildMemberPointerType(PointeeType, ClassType, Sigil, getDerived().getBaseEntity()); } template<typename Derived> QualType TreeTransform<Derived>::RebuildObjCObjectType( QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc) { return SemaRef.BuildObjCObjectType(BaseType, Loc, TypeArgsLAngleLoc, TypeArgs, TypeArgsRAngleLoc, ProtocolLAngleLoc, Protocols, ProtocolLocs, ProtocolRAngleLoc, /*FailOnError=*/true); } template<typename Derived> QualType TreeTransform<Derived>::RebuildObjCObjectPointerType( QualType PointeeType, SourceLocation Star) { return SemaRef.Context.getObjCObjectPointerType(PointeeType); } template<typename Derived> QualType TreeTransform<Derived>::RebuildArrayType(QualType ElementType, ArrayType::ArraySizeModifier SizeMod, const llvm::APInt *Size, Expr *SizeExpr, unsigned IndexTypeQuals, SourceRange BracketsRange) { if (SizeExpr || !Size) return SemaRef.BuildArrayType(ElementType, SizeMod, SizeExpr, IndexTypeQuals, BracketsRange, getDerived().getBaseEntity()); QualType Types[] = { SemaRef.Context.UnsignedCharTy, SemaRef.Context.UnsignedShortTy, SemaRef.Context.UnsignedIntTy, SemaRef.Context.UnsignedLongTy, 
SemaRef.Context.UnsignedLongLongTy, SemaRef.Context.UnsignedInt128Ty }; const unsigned NumTypes = llvm::array_lengthof(Types); QualType SizeType; for (unsigned I = 0; I != NumTypes; ++I) if (Size->getBitWidth() == SemaRef.Context.getIntWidth(Types[I])) { SizeType = Types[I]; break; } // Note that we can return a VariableArrayType here in the case where // the element type was a dependent VariableArrayType. IntegerLiteral *ArraySize = IntegerLiteral::Create(SemaRef.Context, *Size, SizeType, /*FIXME*/BracketsRange.getBegin()); return SemaRef.BuildArrayType(ElementType, SizeMod, ArraySize, IndexTypeQuals, BracketsRange, getDerived().getBaseEntity()); } template<typename Derived> QualType TreeTransform<Derived>::RebuildConstantArrayType(QualType ElementType, ArrayType::ArraySizeModifier SizeMod, const llvm::APInt &Size, unsigned IndexTypeQuals, SourceRange BracketsRange) { return getDerived().RebuildArrayType(ElementType, SizeMod, &Size, nullptr, IndexTypeQuals, BracketsRange); } template<typename Derived> QualType TreeTransform<Derived>::RebuildIncompleteArrayType(QualType ElementType, ArrayType::ArraySizeModifier SizeMod, unsigned IndexTypeQuals, SourceRange BracketsRange) { return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr, nullptr, IndexTypeQuals, BracketsRange); } template<typename Derived> QualType TreeTransform<Derived>::RebuildVariableArrayType(QualType ElementType, ArrayType::ArraySizeModifier SizeMod, Expr *SizeExpr, unsigned IndexTypeQuals, SourceRange BracketsRange) { return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr, SizeExpr, IndexTypeQuals, BracketsRange); } template<typename Derived> QualType TreeTransform<Derived>::RebuildDependentSizedArrayType(QualType ElementType, ArrayType::ArraySizeModifier SizeMod, Expr *SizeExpr, unsigned IndexTypeQuals, SourceRange BracketsRange) { return getDerived().RebuildArrayType(ElementType, SizeMod, nullptr, SizeExpr, IndexTypeQuals, BracketsRange); } template<typename Derived> QualType TreeTransform<Derived>::RebuildVectorType(QualType ElementType, unsigned NumElements, VectorType::VectorKind VecKind) { // FIXME: semantic checking! 
return SemaRef.Context.getVectorType(ElementType, NumElements, VecKind); } template<typename Derived> QualType TreeTransform<Derived>::RebuildExtVectorType(QualType ElementType, unsigned NumElements, SourceLocation AttributeLoc) { llvm::APInt numElements(SemaRef.Context.getIntWidth(SemaRef.Context.IntTy), NumElements, true); IntegerLiteral *VectorSize = IntegerLiteral::Create(SemaRef.Context, numElements, SemaRef.Context.IntTy, AttributeLoc); return SemaRef.BuildExtVectorType(ElementType, VectorSize, AttributeLoc); } template<typename Derived> QualType TreeTransform<Derived>::RebuildDependentSizedExtVectorType(QualType ElementType, Expr *SizeExpr, SourceLocation AttributeLoc) { return SemaRef.BuildExtVectorType(ElementType, SizeExpr, AttributeLoc); } // HLSL Change - FIX - We should move param mods to parameter QualTypes template<typename Derived> QualType TreeTransform<Derived>::RebuildFunctionProtoType( QualType T, MutableArrayRef<QualType> ParamTypes, ArrayRef<hlsl::ParameterModifier> ParamMods, const FunctionProtoType::ExtProtoInfo &EPI) { return SemaRef.BuildFunctionType(T, ParamTypes, getDerived().getBaseLocation(), getDerived().getBaseEntity(), EPI, ParamMods); } // HLSL Change - End template<typename Derived> QualType TreeTransform<Derived>::RebuildFunctionNoProtoType(QualType T) { return SemaRef.Context.getFunctionNoProtoType(T); } template<typename Derived> QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(Decl *D) { assert(D && "no decl found"); if (D->isInvalidDecl()) return QualType(); // FIXME: Doesn't account for ObjCInterfaceDecl! TypeDecl *Ty; if (isa<UsingDecl>(D)) { UsingDecl *Using = cast<UsingDecl>(D); assert(Using->hasTypename() && "UnresolvedUsingTypenameDecl transformed to non-typename using"); // A valid resolved using typename decl points to exactly one type decl. 
assert(++Using->shadow_begin() == Using->shadow_end()); Ty = cast<TypeDecl>((*Using->shadow_begin())->getTargetDecl()); } else { assert(isa<UnresolvedUsingTypenameDecl>(D) && "UnresolvedUsingTypenameDecl transformed to non-using decl"); Ty = cast<UnresolvedUsingTypenameDecl>(D); } return SemaRef.Context.getTypeDeclType(Ty); } template<typename Derived> QualType TreeTransform<Derived>::RebuildTypeOfExprType(Expr *E, SourceLocation Loc) { return SemaRef.BuildTypeofExprType(E, Loc); } template<typename Derived> QualType TreeTransform<Derived>::RebuildTypeOfType(QualType Underlying) { return SemaRef.Context.getTypeOfType(Underlying); } template<typename Derived> QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E, SourceLocation Loc) { return SemaRef.BuildDecltypeType(E, Loc); } template<typename Derived> QualType TreeTransform<Derived>::RebuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc) { return SemaRef.BuildUnaryTransformType(BaseType, UKind, Loc); } template<typename Derived> QualType TreeTransform<Derived>::RebuildTemplateSpecializationType( TemplateName Template, SourceLocation TemplateNameLoc, TemplateArgumentListInfo &TemplateArgs) { return SemaRef.CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs); } template<typename Derived> QualType TreeTransform<Derived>::RebuildAtomicType(QualType ValueType, SourceLocation KWLoc) { return SemaRef.BuildAtomicType(ValueType, KWLoc); } template<typename Derived> TemplateName TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS, bool TemplateKW, TemplateDecl *Template) { return SemaRef.Context.getQualifiedTemplateName(SS.getScopeRep(), TemplateKW, Template); } template<typename Derived> TemplateName TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS, const IdentifierInfo &Name, SourceLocation NameLoc, QualType ObjectType, NamedDecl *FirstQualifierInScope) { UnqualifiedId TemplateName; TemplateName.setIdentifier(&Name, NameLoc); Sema::TemplateTy Template; SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller. getSema().ActOnDependentTemplateName(/*Scope=*/nullptr, SS, TemplateKWLoc, TemplateName, ParsedType::make(ObjectType), /*EnteringContext=*/false, Template); return Template.get(); } template<typename Derived> TemplateName TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS, OverloadedOperatorKind Operator, SourceLocation NameLoc, QualType ObjectType) { UnqualifiedId Name; // FIXME: Bogus location information. SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc }; Name.setOperatorFunctionId(NameLoc, Operator, SymbolLocations); SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller. 
Sema::TemplateTy Template; getSema().ActOnDependentTemplateName(/*Scope=*/nullptr, SS, TemplateKWLoc, Name, ParsedType::make(ObjectType), /*EnteringContext=*/false, Template); return Template.get(); } template<typename Derived> ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op, SourceLocation OpLoc, Expr *OrigCallee, Expr *First, Expr *Second) { Expr *Callee = OrigCallee->IgnoreParenCasts(); bool isPostIncDec = Second && (Op == OO_PlusPlus || Op == OO_MinusMinus); if (First->getObjectKind() == OK_ObjCProperty) { BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op); if (BinaryOperator::isAssignmentOp(Opc)) return SemaRef.checkPseudoObjectAssignment(/*Scope=*/nullptr, OpLoc, Opc, First, Second); ExprResult Result = SemaRef.CheckPlaceholderExpr(First); if (Result.isInvalid()) return ExprError(); First = Result.get(); } if (Second && Second->getObjectKind() == OK_ObjCProperty) { ExprResult Result = SemaRef.CheckPlaceholderExpr(Second); if (Result.isInvalid()) return ExprError(); Second = Result.get(); } // Determine whether this should be a builtin operation. if (Op == OO_Subscript) { if (!First->getType()->isOverloadableType() && !Second->getType()->isOverloadableType()) return getSema().CreateBuiltinArraySubscriptExpr(First, Callee->getLocStart(), Second, OpLoc); } else if (Op == OO_Arrow) { // -> is never a builtin operation. return SemaRef.BuildOverloadedArrowExpr(nullptr, First, OpLoc); } else if (Second == nullptr || isPostIncDec) { if (!First->getType()->isOverloadableType()) { // The argument is not of overloadable type, so try to create a // built-in unary operation. UnaryOperatorKind Opc = UnaryOperator::getOverloadedOpcode(Op, isPostIncDec); return getSema().CreateBuiltinUnaryOp(OpLoc, Opc, First); } } else { if (!First->getType()->isOverloadableType() && !Second->getType()->isOverloadableType()) { // Neither of the arguments is an overloadable type, so try to // create a built-in binary operation. BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op); ExprResult Result = SemaRef.CreateBuiltinBinOp(OpLoc, Opc, First, Second); if (Result.isInvalid()) return ExprError(); return Result; } } // Compute the transformed set of functions (and function templates) to be // used during overload resolution. UnresolvedSet<16> Functions; if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Callee)) { assert(ULE->requiresADL()); Functions.append(ULE->decls_begin(), ULE->decls_end()); } else { // If we've resolved this to a particular non-member function, just call // that function. If we resolved it to a member function, // CreateOverloaded* will find that function for us. NamedDecl *ND = cast<DeclRefExpr>(Callee)->getDecl(); if (!isa<CXXMethodDecl>(ND)) Functions.addDecl(ND); } // Add any functions found via argument-dependent lookup. Expr *Args[2] = { First, Second }; unsigned NumArgs = 1 + (Second != nullptr); // Create the overloaded operator invocation for unary operators. 
if (NumArgs == 1 || isPostIncDec) { UnaryOperatorKind Opc = UnaryOperator::getOverloadedOpcode(Op, isPostIncDec); return SemaRef.CreateOverloadedUnaryOp(OpLoc, Opc, Functions, First); } if (Op == OO_Subscript) { SourceLocation LBrace; SourceLocation RBrace; if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) { DeclarationNameLoc NameLoc = DRE->getNameInfo().getInfo(); LBrace = SourceLocation::getFromRawEncoding( NameLoc.CXXOperatorName.BeginOpNameLoc); RBrace = SourceLocation::getFromRawEncoding( NameLoc.CXXOperatorName.EndOpNameLoc); } else { LBrace = Callee->getLocStart(); RBrace = OpLoc; } return SemaRef.CreateOverloadedArraySubscriptExpr(LBrace, RBrace, First, Second); } // Create the overloaded operator invocation for binary operators. BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op); ExprResult Result = SemaRef.CreateOverloadedBinOp(OpLoc, Opc, Functions, Args[0], Args[1]); if (Result.isInvalid()) return ExprError(); return Result; } template<typename Derived> ExprResult TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base, SourceLocation OperatorLoc, bool isArrow, CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage Destroyed) { QualType BaseType = Base->getType(); if (Base->isTypeDependent() || Destroyed.getIdentifier() || (!isArrow && !BaseType->getAs<RecordType>()) || (isArrow && BaseType->getAs<PointerType>() && !BaseType->getAs<PointerType>()->getPointeeType() ->template getAs<RecordType>())){ // This pseudo-destructor expression is still a pseudo-destructor. return SemaRef.BuildPseudoDestructorExpr( Base, OperatorLoc, isArrow ? tok::arrow : tok::period, SS, ScopeType, CCLoc, TildeLoc, Destroyed); } TypeSourceInfo *DestroyedType = Destroyed.getTypeSourceInfo(); DeclarationName Name(SemaRef.Context.DeclarationNames.getCXXDestructorName( SemaRef.Context.getCanonicalType(DestroyedType->getType()))); DeclarationNameInfo NameInfo(Name, Destroyed.getLocation()); NameInfo.setNamedTypeInfo(DestroyedType); // The scope type is now known to be a valid nested name specifier // component. Tack it on to the end of the nested name specifier. if (ScopeType) { if (!ScopeType->getType()->getAs<TagType>()) { getSema().Diag(ScopeType->getTypeLoc().getBeginLoc(), diag::err_expected_class_or_namespace) << ScopeType->getType() << getSema().getLangOpts().CPlusPlus; return ExprError(); } SS.Extend(SemaRef.Context, SourceLocation(), ScopeType->getTypeLoc(), CCLoc); } SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller. 
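// At this point the destroyed type is known, so the pseudo-destructor is rebuilt as an ordinary member reference; the NameInfo built above carries the destructor name, which member lookup will resolve.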
return getSema().BuildMemberReferenceExpr(Base, BaseType, OperatorLoc, isArrow, SS, TemplateKWLoc, /*FIXME: FirstQualifier*/ nullptr, NameInfo, /*TemplateArgs*/ nullptr); } template<typename Derived> StmtResult TreeTransform<Derived>::TransformCapturedStmt(CapturedStmt *S) { SourceLocation Loc = S->getLocStart(); CapturedDecl *CD = S->getCapturedDecl(); unsigned NumParams = CD->getNumParams(); unsigned ContextParamPos = CD->getContextParamPosition(); SmallVector<Sema::CapturedParamNameType, 4> Params; for (unsigned I = 0; I < NumParams; ++I) { if (I != ContextParamPos) { Params.push_back( std::make_pair( CD->getParam(I)->getName(), getDerived().TransformType(CD->getParam(I)->getType()))); } else { Params.push_back(std::make_pair(StringRef(), QualType())); } } getSema().ActOnCapturedRegionStart(Loc, /*CurScope*/nullptr, S->getCapturedRegionKind(), Params); StmtResult Body; { Sema::CompoundScopeRAII CompoundScope(getSema()); Body = getDerived().TransformStmt(S->getCapturedStmt()); } if (Body.isInvalid()) { getSema().ActOnCapturedRegionError(); return StmtError(); } return getSema().ActOnCapturedRegionEnd(Body.get()); } } // end namespace clang #endif
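// ---------------------------------------------------------------------------
// Usage sketch (illustrative, kept as a comment): TreeTransform is a CRTP
// base, so a client derives from it and overrides only the hooks it needs,
// much as Sema's TemplateInstantiator does. The derived class and driver
// below are hypothetical names, not part of clang's API.
//
//   class RebuildingTransform : public TreeTransform<RebuildingTransform> {
//   public:
//     explicit RebuildingTransform(Sema &S)
//         : TreeTransform<RebuildingTransform>(S) {}
//
//     // Rebuild every node, even when no subexpression changed.
//     bool AlwaysRebuild() { return true; }
//   };
//
//   // Hypothetical driver: rebuilds an expression in the current context.
//   ExprResult rebuildExpr(Sema &S, Expr *E) {
//     return RebuildingTransform(S).TransformExpr(E);
//   }
// ---------------------------------------------------------------------------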
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/Sema.cpp
//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the actions class which performs semantic analysis and // builds an AST out of a parse stream. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTDiagnostic.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/DiagnosticOptions.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/CXXFieldCollector.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/MultiplexExternalSemaSource.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/PrettyDeclStackTrace.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaConsumer.h" #include "clang/Sema/TemplateDeduction.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallSet.h" #include "llvm/Support/CrashRecoveryContext.h" #include "llvm/Support/TimeProfiler.h" using namespace clang; using namespace sema; SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) { return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts); } ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); } PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context, const Preprocessor &PP) { PrintingPolicy Policy = Context.getPrintingPolicy(); Policy.Bool = Context.getLangOpts().Bool; if (!Policy.Bool) { if (const MacroInfo * BoolMacro = PP.getMacroInfo(&Context.Idents.get("bool"))) { Policy.Bool = BoolMacro->isObjectLike() && BoolMacro->getNumTokens() == 1 && BoolMacro->getReplacementToken(0).is(tok::kw__Bool); } } return Policy; } void Sema::ActOnTranslationUnitScope(Scope *S) { TUScope = S; PushDeclContext(S, Context.getTranslationUnitDecl()); } Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind, CodeCompleteConsumer *CodeCompleter) : ExternalSource(nullptr), isMultiplexExternalSource(false), FPFeatures(pp.getLangOpts()), LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer), Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()), CollectStats(false), CodeCompleter(CodeCompleter), CurContext(nullptr), OriginalLexicalContext(nullptr), PackContext(nullptr), MSStructPragmaOn(false), MSPointerToMemberRepresentationMethod( LangOpts.getMSPointerToMemberRepresentationMethod()), VtorDispModeStack(1, MSVtorDispAttr::Mode(LangOpts.VtorDispMode)), DataSegStack(nullptr), BSSSegStack(nullptr), ConstSegStack(nullptr), CodeSegStack(nullptr), CurInitSeg(nullptr), VisContext(nullptr), IsBuildingRecoveryCallExpr(false), ExprNeedsCleanups(false), LateTemplateParser(nullptr), LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp), StdInitializerList(nullptr), CXXTypeInfoDecl(nullptr), MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), 
NSValueDecl(nullptr), NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr), ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr), ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr), DictionaryWithObjectsMethod(nullptr), MSAsmLabelNameCounter(0), GlobalNewDeleteDeclared(false), TUKind(TUKind), NumSFINAEErrors(0), CachedFakeTopLevelModule(nullptr), AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr), DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr), CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) { TUScope = nullptr; LoadedExternalKnownNamespaces = false; #if 0 // HLSL Change Starts for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I) NSNumberLiteralMethods[I] = nullptr; if (getLangOpts().ObjC1) NSAPIObj.reset(new NSAPI(Context)); #endif // HLSL Change Ends if (getLangOpts().CPlusPlus) FieldCollector.reset(new CXXFieldCollector()); // Tell diagnostics how to render things from the AST library. PP.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context); ExprEvalContexts.emplace_back(PotentiallyEvaluated, 0, false, nullptr, false); FunctionScopes.push_back(new FunctionScopeInfo(Diags)); // Initialization of data sharing attributes stack for OpenMP // InitDataSharingAttributesStack(); // HLSL Change - no support for OpenMP } void Sema::addImplicitTypedef(StringRef Name, QualType T) { DeclarationName DN = &Context.Idents.get(Name); if (IdResolver.begin(DN) == IdResolver.end()) PushOnScopeChains(Context.buildImplicitTypedef(T, Name), TUScope); } void Sema::Initialize() { // Tell the AST consumer about this Sema object. Consumer.Initialize(Context); // FIXME: Isn't this redundant with the initialization above? if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) SC->InitializeSema(*this); // Tell the external Sema source about this Sema object. if (ExternalSemaSource *ExternalSema = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) ExternalSema->InitializeSema(*this); // This needs to happen after ExternalSemaSource::InitializeSema(this) or we // will not be able to merge any duplicate __va_list_tag decls correctly. VAListTagName = PP.getIdentifierInfo("__va_list_tag"); // Initialize predefined 128-bit integer types, if needed. if (Context.getTargetInfo().hasInt128Type()) { // If either of the 128-bit integer types are unavailable to name lookup, // define them now. DeclarationName Int128 = &Context.Idents.get("__int128_t"); if (IdResolver.begin(Int128) == IdResolver.end()) PushOnScopeChains(Context.getInt128Decl(), TUScope); DeclarationName UInt128 = &Context.Idents.get("__uint128_t"); if (IdResolver.begin(UInt128) == IdResolver.end()) PushOnScopeChains(Context.getUInt128Decl(), TUScope); } // Initialize predefined Objective-C types: if (PP.getLangOpts().ObjC1) { // If 'SEL' does not yet refer to any declarations, make it refer to the // predefined 'SEL'. DeclarationName SEL = &Context.Idents.get("SEL"); if (IdResolver.begin(SEL) == IdResolver.end()) PushOnScopeChains(Context.getObjCSelDecl(), TUScope); // If 'id' does not yet refer to any declarations, make it refer to the // predefined 'id'. DeclarationName Id = &Context.Idents.get("id"); if (IdResolver.begin(Id) == IdResolver.end()) PushOnScopeChains(Context.getObjCIdDecl(), TUScope); // Create the built-in typedef for 'Class'.
DeclarationName Class = &Context.Idents.get("Class"); if (IdResolver.begin(Class) == IdResolver.end()) PushOnScopeChains(Context.getObjCClassDecl(), TUScope); // Create the built-in forward declaration for 'Protocol'. DeclarationName Protocol = &Context.Idents.get("Protocol"); if (IdResolver.begin(Protocol) == IdResolver.end()) PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope); } // Initialize Microsoft "predefined C++ types". if (PP.getLangOpts().MSVCCompat) { if (PP.getLangOpts().CPlusPlus && IdResolver.begin(&Context.Idents.get("type_info")) == IdResolver.end()) PushOnScopeChains(Context.buildImplicitRecord("type_info", TTK_Class), TUScope); addImplicitTypedef("size_t", Context.getSizeType()); } // Initialize predefined OpenCL types. if (PP.getLangOpts().OpenCL) { addImplicitTypedef("image1d_t", Context.OCLImage1dTy); addImplicitTypedef("image1d_array_t", Context.OCLImage1dArrayTy); addImplicitTypedef("image1d_buffer_t", Context.OCLImage1dBufferTy); addImplicitTypedef("image2d_t", Context.OCLImage2dTy); addImplicitTypedef("image2d_array_t", Context.OCLImage2dArrayTy); addImplicitTypedef("image3d_t", Context.OCLImage3dTy); addImplicitTypedef("sampler_t", Context.OCLSamplerTy); addImplicitTypedef("event_t", Context.OCLEventTy); if (getLangOpts().OpenCLVersion >= 200) { addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy)); addImplicitTypedef("atomic_uint", Context.getAtomicType(Context.UnsignedIntTy)); addImplicitTypedef("atomic_long", Context.getAtomicType(Context.LongTy)); addImplicitTypedef("atomic_ulong", Context.getAtomicType(Context.UnsignedLongTy)); addImplicitTypedef("atomic_float", Context.getAtomicType(Context.FloatTy)); addImplicitTypedef("atomic_double", Context.getAtomicType(Context.DoubleTy)); // OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as // 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide. addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy)); addImplicitTypedef("atomic_intptr_t", Context.getAtomicType(Context.getIntPtrType())); addImplicitTypedef("atomic_uintptr_t", Context.getAtomicType(Context.getUIntPtrType())); addImplicitTypedef("atomic_size_t", Context.getAtomicType(Context.getSizeType())); addImplicitTypedef("atomic_ptrdiff_t", Context.getAtomicType(Context.getPointerDiffType())); } } DeclarationName BuiltinVaList = &Context.Idents.get("__builtin_va_list"); if (IdResolver.begin(BuiltinVaList) == IdResolver.end()) PushOnScopeChains(Context.getBuiltinVaListDecl(), TUScope); } Sema::~Sema() { llvm::DeleteContainerSeconds(LateParsedTemplateMap); if (PackContext) FreePackedContext(); if (VisContext) FreeVisContext(); // Kill all the active scopes. for (unsigned I = 1, E = FunctionScopes.size(); I != E; ++I) delete FunctionScopes[I]; if (FunctionScopes.size() == 1) delete FunctionScopes[0]; // Tell the SemaConsumer to forget about us; we're going out of scope. if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer)) SC->ForgetSema(); // Detach from the external Sema source. if (ExternalSemaSource *ExternalSema = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource())) ExternalSema->ForgetSema(); // If Sema's ExternalSource is the multiplexer - we own it.
if (isMultiplexExternalSource) delete ExternalSource; threadSafety::threadSafetyCleanup(ThreadSafetyDeclCache); // Destroys data sharing attributes stack for OpenMP // DestroyDataSharingAttributesStack(); // HLSL Change - no support for OpenMP assert(DelayedTypos.empty() && "Uncorrected typos!"); } /// makeUnavailableInSystemHeader - There is an error in the current /// context. If we're still in a system header, and we can plausibly /// make the relevant declaration unavailable instead of erroring, do /// so and return true. bool Sema::makeUnavailableInSystemHeader(SourceLocation loc, StringRef msg) { // If we're not in a function, it's an error. FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext); if (!fn) return false; // If we're in template instantiation, it's an error. if (!ActiveTemplateInstantiations.empty()) return false; // If that function's not in a system header, it's an error. if (!Context.getSourceManager().isInSystemHeader(loc)) return false; // If the function is already unavailable, it's not an error. if (fn->hasAttr<UnavailableAttr>()) return true; fn->addAttr(UnavailableAttr::CreateImplicit(Context, msg, loc)); return true; } ASTMutationListener *Sema::getASTMutationListener() const { return getASTConsumer().GetASTMutationListener(); } ///\brief Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void Sema::addExternalSource(ExternalSemaSource *E) { assert(E && "Cannot use with NULL ptr"); if (!ExternalSource) { ExternalSource = E; return; } if (isMultiplexExternalSource) static_cast<MultiplexExternalSemaSource*>(ExternalSource)->addSource(*E); else { ExternalSource = new MultiplexExternalSemaSource(*ExternalSource, *E); isMultiplexExternalSource = true; } } /// \brief Print out statistics about the semantic analysis. void Sema::PrintStats() const { llvm::errs() << "\n*** Semantic Analysis Stats:\n"; llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n"; BumpAlloc.PrintStats(); AnalysisWarnings.PrintStats(); } /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast. /// If there is already an implicit cast, merge into the existing one. /// The result is of the given category. ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty, CastKind Kind, ExprValueKind VK, const CXXCastPath *BasePath, CheckedConversionKind CCK) { #ifndef NDEBUG if (VK == VK_RValue && !E->isRValue()) { switch (Kind) { default: llvm_unreachable("can't implicitly cast lvalue to rvalue with this cast " "kind"); case CK_LValueToRValue: case CK_ArrayToPointerDecay: case CK_FunctionToPointerDecay: case CK_ToVoid: break; } } assert((VK == VK_RValue || !E->isRValue()) && "can't cast rvalue to lvalue"); #endif if (VK == VK_LValue) { if (Kind == CastKind::CK_HLSLVectorTruncationCast || Kind == CastKind::CK_HLSLMatrixTruncationCast) { Diag(E->getLocStart(), diag::err_hlsl_unsupported_lvalue_cast_op); } } // Check whether we're implicitly casting from a nullable type to a nonnull // type. 
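// For example, initializing an 'int * _Nonnull' from an expression of type // 'int * _Nullable' silently drops the nullability guarantee, which is what // the warning below flags.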
if (auto exprNullability = E->getType()->getNullability(Context)) { if (*exprNullability == NullabilityKind::Nullable) { if (auto typeNullability = Ty->getNullability(Context)) { if (*typeNullability == NullabilityKind::NonNull) { Diag(E->getLocStart(), diag::warn_nullability_lost) << E->getType() << Ty; } } } } QualType ExprTy = Context.getCanonicalType(E->getType()); QualType TypeTy = Context.getCanonicalType(Ty); if (ExprTy == TypeTy) return E; if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) { if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) { ImpCast->setType(Ty); ImpCast->setValueKind(VK); return E; } } return ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK); } /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) { switch (ScalarTy->getScalarTypeKind()) { case Type::STK_Bool: return CK_NoOp; case Type::STK_CPointer: return CK_PointerToBoolean; case Type::STK_BlockPointer: return CK_PointerToBoolean; case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean; case Type::STK_MemberPointer: return CK_MemberPointerToBoolean; case Type::STK_Integral: return CK_IntegralToBoolean; case Type::STK_Floating: return CK_FloatingToBoolean; case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean; case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean; } return CK_Invalid; } /// \brief Used to prune the decls of Sema's UnusedFileScopedDecls vector. static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) { if (D->getMostRecentDecl()->isUsed()) return true; if (D->isExternallyVisible()) return true; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { // UnusedFileScopedDecls stores the first declaration. // The declaration may have become a definition so check again. const FunctionDecl *DeclToCheck; if (FD->hasBody(DeclToCheck)) return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); // Later redecls may add new information resulting in not having to warn, // so check again. DeclToCheck = FD->getMostRecentDecl(); if (DeclToCheck != FD) return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); } if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { // If a variable usable in constant expressions is referenced, // don't warn if it isn't used: if the value of a variable is required // for the computation of a constant expression, it doesn't make sense to // warn even if the variable isn't odr-used. (isReferenced doesn't // precisely reflect that, but it's a decent approximation.) if (VD->isReferenced() && VD->isUsableInConstantExpressions(SemaRef->Context)) return true; // UnusedFileScopedDecls stores the first declaration. // The declaration may have become a definition so check again. const VarDecl *DeclToCheck = VD->getDefinition(); if (DeclToCheck) return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); // Later redecls may add new information resulting in not having to warn, // so check again. DeclToCheck = VD->getMostRecentDecl(); if (DeclToCheck != VD) return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck); } return false; } /// Obtains a sorted list of functions that are undefined but ODR-used.
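/// A typical entry is an internal-linkage or inline function that is called /// in this translation unit but whose body never appears in it.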
void Sema::getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined) { for (llvm::DenseMap<NamedDecl *, SourceLocation>::iterator I = UndefinedButUsed.begin(), E = UndefinedButUsed.end(); I != E; ++I) { NamedDecl *ND = I->first; // Ignore declarations that have become invalid. if (ND->isInvalidDecl()) continue; // __attribute__((weakref)) is basically a definition. if (ND->hasAttr<WeakRefAttr>()) continue; if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { if (FD->isDefined()) continue; if (FD->isExternallyVisible() && !FD->getMostRecentDecl()->isInlined()) continue; } else { if (cast<VarDecl>(ND)->hasDefinition() != VarDecl::DeclarationOnly) continue; if (ND->isExternallyVisible()) continue; } Undefined.push_back(std::make_pair(ND, I->second)); } // Sort (in order of use site) so that we're not dependent on the iteration // order through an llvm::DenseMap. SourceManager &SM = Context.getSourceManager(); std::sort(Undefined.begin(), Undefined.end(), [&SM](const std::pair<NamedDecl *, SourceLocation> &l, const std::pair<NamedDecl *, SourceLocation> &r) { if (l.second.isValid() && !r.second.isValid()) return true; if (!l.second.isValid() && r.second.isValid()) return false; if (l.second != r.second) return SM.isBeforeInTranslationUnit(l.second, r.second); return SM.isBeforeInTranslationUnit(l.first->getLocation(), r.first->getLocation()); }); } /// checkUndefinedButUsed - Check for undefined objects with internal linkage /// or that are inline. static void checkUndefinedButUsed(Sema &S) { if (S.UndefinedButUsed.empty()) return; // Collect all the still-undefined entities with internal linkage. SmallVector<std::pair<NamedDecl *, SourceLocation>, 16> Undefined; S.getUndefinedButUsed(Undefined); if (Undefined.empty()) return; for (SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> >::iterator I = Undefined.begin(), E = Undefined.end(); I != E; ++I) { NamedDecl *ND = I->first; if (ND->hasAttr<DLLImportAttr>() || ND->hasAttr<DLLExportAttr>()) { // An exported function will always be emitted when defined, so even if // the function is inline, it doesn't have to be emitted in this TU. An // imported function implies that it has been exported somewhere else. continue; } if (!ND->isExternallyVisible()) { S.Diag(ND->getLocation(), diag::warn_undefined_internal) << isa<VarDecl>(ND) << ND; } else { assert(cast<FunctionDecl>(ND)->getMostRecentDecl()->isInlined() && "used object requires definition but isn't inline or internal?"); S.Diag(ND->getLocation(), diag::warn_undefined_inline) << ND; } if (I->second.isValid()) S.Diag(I->second, diag::note_used_here); } } void Sema::LoadExternalWeakUndeclaredIdentifiers() { if (!ExternalSource) return; SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs; ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs); for (auto &WeakID : WeakIDs) WeakUndeclaredIdentifiers.insert(WeakID); } typedef llvm::DenseMap<const CXXRecordDecl*, bool> RecordCompleteMap; /// \brief Returns true if all methods and nested classes of the given /// CXXRecordDecl are defined in this translation unit. /// /// Should only be called from ActOnEndOfTranslationUnit so that all /// definitions are actually read.
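/// Calling it earlier could treat a method whose definition simply appears /// later in the translation unit as missing.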
static bool MethodsAndNestedClassesComplete(const CXXRecordDecl *RD, RecordCompleteMap &MNCComplete) { RecordCompleteMap::iterator Cache = MNCComplete.find(RD); if (Cache != MNCComplete.end()) return Cache->second; if (!RD->isCompleteDefinition()) return false; bool Complete = true; for (DeclContext::decl_iterator I = RD->decls_begin(), E = RD->decls_end(); I != E && Complete; ++I) { if (const CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(*I)) Complete = M->isDefined() || (M->isPure() && !isa<CXXDestructorDecl>(M)); else if (const FunctionTemplateDecl *F = dyn_cast<FunctionTemplateDecl>(*I)) // If the template function is marked as late template parsed at this // point, it has not been instantiated and therefore we have not // performed semantic analysis on it yet, so we cannot know if the type // can be considered complete. Complete = !F->getTemplatedDecl()->isLateTemplateParsed() && F->getTemplatedDecl()->isDefined(); else if (const CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(*I)) { if (R->isInjectedClassName()) continue; if (R->hasDefinition()) Complete = MethodsAndNestedClassesComplete(R->getDefinition(), MNCComplete); else Complete = false; } } MNCComplete[RD] = Complete; return Complete; } /// \brief Returns true if the given CXXRecordDecl is fully defined in this /// translation unit, i.e. all methods are defined or pure virtual and all /// friends, friend functions and nested classes are fully defined in this /// translation unit. /// /// Should only be called from ActOnEndOfTranslationUnit so that all /// definitions are actually read. static bool IsRecordFullyDefined(const CXXRecordDecl *RD, RecordCompleteMap &RecordsComplete, RecordCompleteMap &MNCComplete) { RecordCompleteMap::iterator Cache = RecordsComplete.find(RD); if (Cache != RecordsComplete.end()) return Cache->second; bool Complete = MethodsAndNestedClassesComplete(RD, MNCComplete); for (CXXRecordDecl::friend_iterator I = RD->friend_begin(), E = RD->friend_end(); I != E && Complete; ++I) { // Check if friend classes and methods are complete. if (TypeSourceInfo *TSI = (*I)->getFriendType()) { // Friend classes are available as the TypeSourceInfo of the FriendDecl. if (CXXRecordDecl *FriendD = TSI->getType()->getAsCXXRecordDecl()) Complete = MethodsAndNestedClassesComplete(FriendD, MNCComplete); else Complete = false; } else { // Friend functions are available through the NamedDecl of FriendDecl. if (const FunctionDecl *FD = dyn_cast<FunctionDecl>((*I)->getFriendDecl())) Complete = FD->isDefined(); else // This is a template friend, give up. Complete = false; } } RecordsComplete[RD] = Complete; return Complete; } void Sema::emitAndClearUnusedLocalTypedefWarnings() { if (ExternalSource) ExternalSource->ReadUnusedLocalTypedefNameCandidates( UnusedLocalTypedefNameCandidates); for (const TypedefNameDecl *TD : UnusedLocalTypedefNameCandidates) { if (TD->isReferenced()) continue; Diag(TD->getLocation(), diag::warn_unused_local_typedef) << isa<TypeAliasDecl>(TD) << TD->getDeclName(); } UnusedLocalTypedefNameCandidates.clear(); } /// ActOnEndOfTranslationUnit - This is called at the very end of the /// translation unit when EOF is reached and all but the top-level scope is /// popped. void Sema::ActOnEndOfTranslationUnit() { assert(DelayedDiagnostics.getCurrentPool() == nullptr && "reached end of translation unit with a pool attached?"); // If code completion is enabled, don't perform any end-of-translation-unit // work.
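// (Code completion only needs the AST state up to the completion point, so // the whole-TU checks below would be wasted effort.)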
if (PP.isCodeCompletionEnabled()) return; // Complete translation units and modules define vtables and perform implicit // instantiations. PCH files do not. if (TUKind != TU_Prefix) { DiagnoseUseOfUnimplementedSelectors(); // If DefinedUsedVTables ends up marking any virtual member functions it // might lead to more pending template instantiations, which we then need // to instantiate. DefineUsedVTables(); // C++: Perform implicit template instantiations. // // FIXME: When we perform these implicit instantiations, we do not // carefully keep track of the point of instantiation (C++ [temp.point]). // This means that name lookup that occurs within the template // instantiation will always happen at the end of the translation unit, // so it will find some names that are not required to be found. This is // valid, but we could do better by diagnosing if an instantiation uses a // name that was not visible at its first point of instantiation. if (ExternalSource) { // Load pending instantiations from the external source. SmallVector<PendingImplicitInstantiation, 4> Pending; ExternalSource->ReadPendingInstantiations(Pending); PendingInstantiations.insert(PendingInstantiations.begin(), Pending.begin(), Pending.end()); } // HLSL Change Begin - Support hierarchical time tracing. { llvm::TimeTraceScope TimeScope("PerformPendingInstantiations", StringRef("")); PerformPendingInstantiations(); } // HLSL Change Ends - Support hierarchical time tracing. if (LateTemplateParserCleanup) LateTemplateParserCleanup(OpaqueParser); CheckDelayedMemberExceptionSpecs(); } // All delayed member exception specs should be checked or we end up accepting // incompatible declarations. // FIXME: This is wrong for TUKind == TU_Prefix. In that case, we need to // write out the lists to the AST file (if any). assert(DelayedDefaultedMemberExceptionSpecs.empty()); assert(DelayedExceptionSpecChecks.empty()); // Remove file scoped decls that turned out to be used. UnusedFileScopedDecls.erase( std::remove_if(UnusedFileScopedDecls.begin(nullptr, true), UnusedFileScopedDecls.end(), [this](const DeclaratorDecl *DD) { return ShouldRemoveFromUnused(this, DD); }), UnusedFileScopedDecls.end()); if (TUKind == TU_Prefix) { // Translation unit prefixes don't need any of the checking below. TUScope = nullptr; return; } // Check for #pragma weak identifiers that were never declared LoadExternalWeakUndeclaredIdentifiers(); for (auto WeakID : WeakUndeclaredIdentifiers) { if (WeakID.second.getUsed()) continue; Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared) << WeakID.first; } if (LangOpts.CPlusPlus11 && !Diags.isIgnored(diag::warn_delegating_ctor_cycle, SourceLocation())) CheckDelegatingCtorCycles(); if (TUKind == TU_Module) { // If we are building a module, resolve all of the exported declarations // now. if (Module *CurrentModule = PP.getCurrentModule()) { ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap(); SmallVector<Module *, 2> Stack; Stack.push_back(CurrentModule); while (!Stack.empty()) { Module *Mod = Stack.pop_back_val(); // Resolve the exported declarations and conflicts. // FIXME: Actually complain, once we figure out how to teach the // diagnostic client to deal with complaints in the module map at this // point. ModMap.resolveExports(Mod, /*Complain=*/false); ModMap.resolveUses(Mod, /*Complain=*/false); ModMap.resolveConflicts(Mod, /*Complain=*/false); // Queue the submodules, so their exports will also be resolved.
Stack.append(Mod->submodule_begin(), Mod->submodule_end()); } } // Warnings emitted in ActOnEndOfTranslationUnit() should be emitted for // modules when they are built, not every time they are used. emitAndClearUnusedLocalTypedefWarnings(); // Modules don't need any of the checking below. TUScope = nullptr; return; } // C99 6.9.2p2: // A declaration of an identifier for an object that has file // scope without an initializer, and without a storage-class // specifier or with the storage-class specifier static, // constitutes a tentative definition. If a translation unit // contains one or more tentative definitions for an identifier, // and the translation unit contains no external definition for // that identifier, then the behavior is exactly as if the // translation unit contains a file scope declaration of that // identifier, with the composite type as of the end of the // translation unit, with an initializer equal to 0. llvm::SmallSet<VarDecl *, 32> Seen; for (TentativeDefinitionsType::iterator T = TentativeDefinitions.begin(ExternalSource), TEnd = TentativeDefinitions.end(); T != TEnd; ++T) { VarDecl *VD = (*T)->getActingDefinition(); // If the tentative definition was completed, getActingDefinition() returns // null. If we've already seen this variable before, insert()'s second // return value is false. if (!VD || VD->isInvalidDecl() || !Seen.insert(VD).second) continue; if (const IncompleteArrayType *ArrayT = Context.getAsIncompleteArrayType(VD->getType())) { // Set the length of the array to 1 (C99 6.9.2p5). Diag(VD->getLocation(), diag::warn_tentative_incomplete_array); llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true); QualType T = Context.getConstantArrayType(ArrayT->getElementType(), One, ArrayType::Normal, 0); VD->setType(T); } else if (RequireCompleteType(VD->getLocation(), VD->getType(), diag::err_tentative_def_incomplete_type)) VD->setInvalidDecl(); CheckCompleteVariableDeclaration(VD); // Notify the consumer that we've completed a tentative definition. if (!VD->isInvalidDecl()) Consumer.CompleteTentativeDefinition(VD); } // If there were errors, disable 'unused' warnings since they will mostly be // noise. if (!Diags.hasErrorOccurred()) { // Output warning for unused file scoped decls. for (UnusedFileScopedDeclsType::iterator I = UnusedFileScopedDecls.begin(ExternalSource), E = UnusedFileScopedDecls.end(); I != E; ++I) { if (ShouldRemoveFromUnused(this, *I)) continue; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) { const FunctionDecl *DiagD; if (!FD->hasBody(DiagD)) DiagD = FD; if (DiagD->isDeleted()) continue; // Deleted functions are supposed to be unused. if (DiagD->isReferenced()) { if (isa<CXXMethodDecl>(DiagD)) Diag(DiagD->getLocation(), diag::warn_unneeded_member_function) << DiagD->getDeclName(); else { if (FD->getStorageClass() == SC_Static && !FD->isInlineSpecified() && !SourceMgr.isInMainFile( SourceMgr.getExpansionLoc(FD->getLocation()))) Diag(DiagD->getLocation(), diag::warn_unneeded_static_internal_decl) << DiagD->getDeclName(); else Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) << /*function*/0 << DiagD->getDeclName(); } } else { Diag(DiagD->getLocation(), isa<CXXMethodDecl>(DiagD) ? 
diag::warn_unused_member_function : diag::warn_unused_function) << DiagD->getDeclName(); } } else { const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition(); if (!DiagD) DiagD = cast<VarDecl>(*I); if (DiagD->isReferenced()) { Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl) << /*variable*/1 << DiagD->getDeclName(); } else if (DiagD->getType().isConstQualified()) { Diag(DiagD->getLocation(), diag::warn_unused_const_variable) << DiagD->getDeclName(); } else { Diag(DiagD->getLocation(), diag::warn_unused_variable) << DiagD->getDeclName(); } } } if (ExternalSource) ExternalSource->ReadUndefinedButUsed(UndefinedButUsed); checkUndefinedButUsed(*this); emitAndClearUnusedLocalTypedefWarnings(); } if (!Diags.isIgnored(diag::warn_unused_private_field, SourceLocation())) { RecordCompleteMap RecordsComplete; RecordCompleteMap MNCComplete; for (NamedDeclSetType::iterator I = UnusedPrivateFields.begin(), E = UnusedPrivateFields.end(); I != E; ++I) { const NamedDecl *D = *I; const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext()); if (RD && !RD->isUnion() && IsRecordFullyDefined(RD, RecordsComplete, MNCComplete)) { Diag(D->getLocation(), diag::warn_unused_private_field) << D->getDeclName(); } } } if (!Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) { if (ExternalSource) ExternalSource->ReadMismatchingDeleteExpressions(DeleteExprs); for (const auto &DeletedFieldInfo : DeleteExprs) { for (const auto &DeleteExprLoc : DeletedFieldInfo.second) { AnalyzeDeleteExprMismatch(DeletedFieldInfo.first, DeleteExprLoc.first, DeleteExprLoc.second); } } } // Check we've noticed that we're no longer parsing the initializer for every // variable. If we miss cases, then at best we have a performance issue and // at worst a rejects-valid bug. assert(ParsingInitForAutoVars.empty() && "Didn't unmark var as having its initializer parsed"); TUScope = nullptr; } //===----------------------------------------------------------------------===// // Helper functions. //===----------------------------------------------------------------------===// DeclContext *Sema::getFunctionLevelDeclContext() { DeclContext *DC = CurContext; while (true) { if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC)) { DC = DC->getParent(); } else if (isa<CXXMethodDecl>(DC) && cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call && cast<CXXRecordDecl>(DC->getParent())->isLambda()) { DC = DC->getParent()->getParent(); } else break; } return DC; } /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *Sema::getCurFunctionDecl() { DeclContext *DC = getFunctionLevelDeclContext(); return dyn_cast<FunctionDecl>(DC); } ObjCMethodDecl *Sema::getCurMethodDecl() { DeclContext *DC = getFunctionLevelDeclContext(); while (isa<RecordDecl>(DC)) DC = DC->getParent(); return dyn_cast<ObjCMethodDecl>(DC); } NamedDecl *Sema::getCurFunctionOrMethodDecl() { DeclContext *DC = getFunctionLevelDeclContext(); if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC)) return cast<NamedDecl>(DC); return nullptr; } void Sema::EmitCurrentDiagnostic(unsigned DiagID) { // FIXME: It doesn't make sense to me that DiagID is an incoming argument here // and yet we also use the current diag ID on the DiagnosticsEngine. 
This has // been made more painfully obvious by the refactor that introduced this // function, but it is possible that the incoming argument can be // eliminated. If it truly cannot be (for example, there is some reentrancy // issue I am not seeing yet), then there should at least be a clarifying // comment somewhere. if (Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) { switch (DiagnosticIDs::getDiagnosticSFINAEResponse( Diags.getCurrentDiagID())) { case DiagnosticIDs::SFINAE_Report: // We'll report the diagnostic below. break; case DiagnosticIDs::SFINAE_SubstitutionFailure: // Count this failure so that we know that template argument deduction // has failed. ++NumSFINAEErrors; // Make a copy of this suppressed diagnostic and store it with the // template-deduction information. if (*Info && !(*Info)->hasSFINAEDiagnostic()) { Diagnostic DiagInfo(&Diags); (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); } Diags.setLastDiagnosticIgnored(); Diags.Clear(); return; case DiagnosticIDs::SFINAE_AccessControl: { // Per C++ Core Issue 1170, access control is part of SFINAE. // Additionally, the AccessCheckingSFINAE flag can be used to temporarily // make access control a part of SFINAE for the purposes of checking // type traits. if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus11) break; SourceLocation Loc = Diags.getCurrentDiagLoc(); // Suppress this diagnostic. ++NumSFINAEErrors; // Make a copy of this suppressed diagnostic and store it with the // template-deduction information. if (*Info && !(*Info)->hasSFINAEDiagnostic()) { Diagnostic DiagInfo(&Diags); (*Info)->addSFINAEDiagnostic(DiagInfo.getLocation(), PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); } Diags.setLastDiagnosticIgnored(); Diags.Clear(); // Now the diagnostic state is clear, produce a C++98 compatibility // warning. Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control); // The last diagnostic which Sema produced was ignored. Suppress any // notes attached to it. Diags.setLastDiagnosticIgnored(); return; } case DiagnosticIDs::SFINAE_Suppress: // Make a copy of this suppressed diagnostic and store it with the // template-deduction information. if (*Info) { Diagnostic DiagInfo(&Diags); (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(), PartialDiagnostic(DiagInfo, Context.getDiagAllocator())); } // Suppress this diagnostic. Diags.setLastDiagnosticIgnored(); Diags.Clear(); return; } } // Set up the context's printing policy based on our current state. Context.setPrintingPolicy(getPrintingPolicy()); // Emit the diagnostic. if (!Diags.EmitCurrentDiagnostic()) return; // If this is not a note, and we're in a template instantiation // that is different from the last template instantiation where // we emitted an error, print a template instantiation // backtrace. if (!DiagnosticIDs::isBuiltinNote(DiagID) && !ActiveTemplateInstantiations.empty() && ActiveTemplateInstantiations.back() != LastTemplateInstantiationErrorContext) { PrintInstantiationStack(); LastTemplateInstantiationErrorContext = ActiveTemplateInstantiations.back(); } } Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) { SemaDiagnosticBuilder Builder(Diag(Loc, PD.getDiagID())); PD.Emit(Builder); return Builder; } /// \brief Looks through the macro-expansion chain for the given /// location, looking for a macro expansion with the given name. /// If one is found, returns true and sets the location to that /// expansion loc.
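/// For example, a caller looking for "NULL" can use this to point a /// diagnostic at the place where 'NULL' itself was written rather than at /// its expansion.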
bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) { SourceLocation loc = locref; if (!loc.isMacroID()) return false; // There's no good way right now to look at the intermediate // expansions, so just jump to the expansion location. loc = getSourceManager().getExpansionLoc(loc); // If that's written with the name, stop here. SmallVector<char, 16> buffer; if (getPreprocessor().getSpelling(loc, buffer) == name) { locref = loc; return true; } return false; } /// \brief Determines the active Scope associated with the given declaration /// context. /// /// This routine maps a declaration context to the active Scope object that /// represents that declaration context in the parser. It is typically used /// from "scope-less" code (e.g., template instantiation, lazy creation of /// declarations) that injects a name for name-lookup purposes and, therefore, /// must update the Scope. /// /// \returns The scope corresponding to the given declaration context, or NULL /// if no such scope is open. Scope *Sema::getScopeForContext(DeclContext *Ctx) { if (!Ctx) return nullptr; Ctx = Ctx->getPrimaryContext(); for (Scope *S = getCurScope(); S; S = S->getParent()) { // Ignore scopes that cannot have declarations. This is important for // out-of-line definitions of static class members. if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) if (DeclContext *Entity = S->getEntity()) if (Ctx == Entity->getPrimaryContext()) return S; } return nullptr; } /// \brief Enter a new function scope void Sema::PushFunctionScope() { if (FunctionScopes.size() == 1) { // Use the "top" function scope rather than having to allocate // memory for a new scope. FunctionScopes.back()->Clear(); FunctionScopes.push_back(FunctionScopes.back()); return; } FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics())); } void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) { FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(), BlockScope, Block)); } LambdaScopeInfo *Sema::PushLambdaScope() { LambdaScopeInfo *const LSI = new LambdaScopeInfo(getDiagnostics()); FunctionScopes.push_back(LSI); return LSI; } void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) { if (LambdaScopeInfo *const LSI = getCurLambda()) { LSI->AutoTemplateParameterDepth = Depth; return; } llvm_unreachable( "Remove assertion if intentionally called in a non-lambda context."); } void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP, const Decl *D, const BlockExpr *blkExpr) { FunctionScopeInfo *Scope = FunctionScopes.pop_back_val(); assert(!FunctionScopes.empty() && "mismatched push/pop!"); // Issue any analysis-based warnings. if (WP && D) AnalysisWarnings.IssueWarnings(*WP, Scope, D, blkExpr); else for (const auto &PUD : Scope->PossiblyUnreachableDiags) Diag(PUD.Loc, PUD.PD); if (FunctionScopes.back() != Scope) delete Scope; } void Sema::PushCompoundScope() { getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo()); } void Sema::PopCompoundScope() { FunctionScopeInfo *CurFunction = getCurFunction(); assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop"); CurFunction->CompoundScopes.pop_back(); } /// \brief Determine whether any errors occurred within this function/method/ /// block.
bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const { return getCurFunction()->ErrorTrap.hasUnrecoverableErrorOccurred(); } BlockScopeInfo *Sema::getCurBlock() { if (FunctionScopes.empty()) return nullptr; auto CurBSI = dyn_cast<BlockScopeInfo>(FunctionScopes.back()); if (CurBSI && CurBSI->TheDecl && !CurBSI->TheDecl->Encloses(CurContext)) { // We have switched contexts due to template instantiation. assert(!ActiveTemplateInstantiations.empty()); return nullptr; } return CurBSI; } LambdaScopeInfo *Sema::getCurLambda() { if (FunctionScopes.empty()) return nullptr; auto CurLSI = dyn_cast<LambdaScopeInfo>(FunctionScopes.back()); if (CurLSI && CurLSI->Lambda && !CurLSI->Lambda->Encloses(CurContext)) { // We have switched contexts due to template instantiation. assert(!ActiveTemplateInstantiations.empty()); return nullptr; } return CurLSI; } // We have a generic lambda if we parsed auto parameters, or we have // an associated template parameter list. LambdaScopeInfo *Sema::getCurGenericLambda() { if (LambdaScopeInfo *LSI = getCurLambda()) { return (LSI->AutoTemplateParams.size() || LSI->GLTemplateParameterList) ? LSI : nullptr; } return nullptr; } void Sema::ActOnComment(SourceRange Comment) { if (!LangOpts.RetainCommentsFromSystemHeaders && SourceMgr.isInSystemHeader(Comment.getBegin())) return; RawComment RC(SourceMgr, Comment, false, LangOpts.CommentOpts.ParseAllComments); if (RC.isAlmostTrailingComment()) { SourceRange MagicMarkerRange(Comment.getBegin(), Comment.getBegin().getLocWithOffset(3)); StringRef MagicMarkerText; switch (RC.getKind()) { case RawComment::RCK_OrdinaryBCPL: MagicMarkerText = "///<"; break; case RawComment::RCK_OrdinaryC: MagicMarkerText = "/**<"; break; default: llvm_unreachable("if this is an almost Doxygen comment, " "it should be ordinary"); } Diag(Comment.getBegin(), diag::warn_not_a_doxygen_trailing_member_comment) << FixItHint::CreateReplacement(MagicMarkerRange, MagicMarkerText); } Context.addComment(RC); } // Pin this vtable to this file. ExternalSemaSource::~ExternalSemaSource() {} void ExternalSemaSource::ReadMethodPool(Selector Sel) { } void ExternalSemaSource::ReadKnownNamespaces( SmallVectorImpl<NamespaceDecl *> &Namespaces) { } void ExternalSemaSource::ReadUndefinedButUsed( llvm::DenseMap<NamedDecl *, SourceLocation> &Undefined) { } void ExternalSemaSource::ReadMismatchingDeleteExpressions(llvm::MapVector< FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> &) {} void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const { SourceLocation Loc = this->Loc; if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation(); if (Loc.isValid()) { Loc.print(OS, S.getSourceManager()); OS << ": "; } OS << Message; if (TheDecl && isa<NamedDecl>(TheDecl)) { std::string Name = cast<NamedDecl>(TheDecl)->getNameAsString(); if (!Name.empty()) OS << " '" << Name << '\''; } OS << '\n'; } /// \brief Figure out if an expression could be turned into a call. /// /// Use this when trying to recover from an error where the programmer may have /// written just the name of a function instead of actually calling it. /// /// \param E - The expression to examine. /// \param ZeroArgCallReturnTy - If the expression can be turned into a call /// with no arguments, this parameter is set to the type returned by such a /// call; otherwise, it is set to an empty QualType. /// \param OverloadSet - If the expression is an overloaded function /// name, this parameter is populated with the decls of the various overloads. 
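/// /// A hypothetical use site this is meant to recover from: /// \code /// bool isEnabled(); /// if (isEnabled) { ... } // likely meant: if (isEnabled()) /// \endcode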
bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &OverloadSet) { ZeroArgCallReturnTy = QualType(); OverloadSet.clear(); const OverloadExpr *Overloads = nullptr; bool IsMemExpr = false; if (E.getType() == Context.OverloadTy) { OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E)); // Ignore overloads that are pointer-to-member constants. if (FR.HasFormOfMemberPointer) return false; Overloads = FR.Expression; } else if (E.getType() == Context.BoundMemberTy) { Overloads = dyn_cast<UnresolvedMemberExpr>(E.IgnoreParens()); IsMemExpr = true; } bool Ambiguous = false; if (Overloads) { for (OverloadExpr::decls_iterator it = Overloads->decls_begin(), DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) { OverloadSet.addDecl(*it); // Check whether the function is a non-template, non-member which takes no // arguments. if (IsMemExpr) continue; if (const FunctionDecl *OverloadDecl = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) { if (OverloadDecl->getMinRequiredArguments() == 0) { if (!ZeroArgCallReturnTy.isNull() && !Ambiguous) { ZeroArgCallReturnTy = QualType(); Ambiguous = true; } else ZeroArgCallReturnTy = OverloadDecl->getReturnType(); } } } // If it's not a member, use better machinery to try to resolve the call if (!IsMemExpr) return !ZeroArgCallReturnTy.isNull(); } // Attempt to call the member with no arguments - this will correctly handle // member templates with defaults/deduction of template arguments, overloads // with default arguments, etc. if (IsMemExpr && !E.isTypeDependent()) { bool Suppress = getDiagnostics().getSuppressAllDiagnostics(); getDiagnostics().setSuppressAllDiagnostics(true); ExprResult R = BuildCallToMemberFunction(nullptr, &E, SourceLocation(), None, SourceLocation()); getDiagnostics().setSuppressAllDiagnostics(Suppress); if (R.isUsable()) { ZeroArgCallReturnTy = R.get()->getType(); return true; } return false; } if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) { if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) { if (Fun->getMinRequiredArguments() == 0) ZeroArgCallReturnTy = Fun->getReturnType(); return true; } } // We don't have an expression that's convenient to get a FunctionDecl from, // but we can at least check if the type is "function of 0 arguments". QualType ExprTy = E.getType(); const FunctionType *FunTy = nullptr; QualType PointeeTy = ExprTy->getPointeeType(); if (!PointeeTy.isNull()) FunTy = PointeeTy->getAs<FunctionType>(); if (!FunTy) FunTy = ExprTy->getAs<FunctionType>(); if (const FunctionProtoType *FPT = dyn_cast_or_null<FunctionProtoType>(FunTy)) { if (FPT->getNumParams() == 0) ZeroArgCallReturnTy = FunTy->getReturnType(); return true; } return false; } /// \brief Give notes for a set of overloads. /// /// A companion to tryExprAsCall. In cases when the name that the programmer /// wrote was an overloaded function, we may be able to make some guesses about /// plausible overloads based on their return types; such guesses can be handed /// off to this method to be emitted as notes. /// /// \param Overloads - The overloads to note. /// \param FinalNoteLoc - If we've suppressed printing some overloads due to /// -fshow-overloads=best, this is the location to attach to the note about too /// many candidates. Typically this will be the location of the original /// ill-formed expression. 
static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads, const SourceLocation FinalNoteLoc) { int ShownOverloads = 0; int SuppressedOverloads = 0; for (UnresolvedSetImpl::iterator It = Overloads.begin(), DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { // FIXME: Magic number for max shown overloads stolen from // OverloadCandidateSet::NoteCandidates. if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) { ++SuppressedOverloads; continue; } NamedDecl *Fn = (*It)->getUnderlyingDecl(); if (!Fn->getLocation().isValid()) continue; // HLSL Change: skip built-in locations S.Diag(Fn->getLocation(), diag::note_possible_target_of_call); ++ShownOverloads; } if (SuppressedOverloads) S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates) << SuppressedOverloads; } static void notePlausibleOverloads(Sema &S, SourceLocation Loc, const UnresolvedSetImpl &Overloads, bool (*IsPlausibleResult)(QualType)) { if (!IsPlausibleResult) return noteOverloads(S, Overloads, Loc); UnresolvedSet<2> PlausibleOverloads; for (OverloadExpr::decls_iterator It = Overloads.begin(), DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) { const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It); QualType OverloadResultTy = OverloadDecl->getReturnType(); if (IsPlausibleResult(OverloadResultTy)) PlausibleOverloads.addDecl(It.getDecl()); } noteOverloads(S, PlausibleOverloads, Loc); } /// Determine whether the given expression can be called by just /// putting parentheses after it. Notably, expressions with unary /// operators can't be because the unary operator will start parsing /// outside the call. static bool IsCallableWithAppend(Expr *E) { E = E->IgnoreImplicit(); return (!isa<CStyleCastExpr>(E) && !isa<UnaryOperator>(E) && !isa<BinaryOperator>(E) && !isa<CXXOperatorCallExpr>(E)); } bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain, bool (*IsPlausibleResult)(QualType)) { SourceLocation Loc = E.get()->getExprLoc(); SourceRange Range = E.get()->getSourceRange(); QualType ZeroArgCallTy; UnresolvedSet<4> Overloads; if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) && !ZeroArgCallTy.isNull() && (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) { // At this point, we know E is potentially callable with 0 // arguments and that it returns something of a reasonable type, // so we can emit a fixit and carry on pretending that E was // actually a CallExpr. SourceLocation ParenInsertionLoc = PP.getLocForEndOfToken(Range.getEnd()); Diag(Loc, PD) << /*zero-arg*/ 1 << Range << (IsCallableWithAppend(E.get()) ? FixItHint::CreateInsertion(ParenInsertionLoc, "()") : FixItHint()); notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); // FIXME: Try this before emitting the fixit, and suppress diagnostics // while doing so. 
E = ActOnCallExpr(nullptr, E.get(), Range.getEnd(), None, Range.getEnd().getLocWithOffset(1)); return true; } if (!ForceComplain) return false; Diag(Loc, PD) << /*not zero-arg*/ 0 << Range; notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult); E = ExprError(); return true; } IdentifierInfo *Sema::getSuperIdentifier() const { if (!Ident_super) Ident_super = &Context.Idents.get("super"); return Ident_super; } IdentifierInfo *Sema::getFloat128Identifier() const { if (!Ident___float128) Ident___float128 = &Context.Idents.get("__float128"); return Ident___float128; } void Sema::PushCapturedRegionScope(Scope *S, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K) { CapturingScopeInfo *CSI = new CapturedRegionScopeInfo( getDiagnostics(), S, CD, RD, CD->getContextParam(), K); CSI->ReturnType = Context.VoidTy; FunctionScopes.push_back(CSI); } CapturedRegionScopeInfo *Sema::getCurCapturedRegion() { if (FunctionScopes.empty()) return nullptr; return dyn_cast<CapturedRegionScopeInfo>(FunctionScopes.back()); } const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> & Sema::getMismatchingDeleteExpressions() const { return DeleteExprs; }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaExprObjC.cpp
//===--- SemaExprObjC.cpp - Semantic Analysis for ObjC Expressions --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for Objective-C expressions. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/StmtVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Analysis/DomainSpecific/CocoaConventions.h" #include "clang/Edit/Commit.h" #include "clang/Edit/Rewriters.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "llvm/ADT/SmallString.h" using namespace clang; using namespace sema; using llvm::makeArrayRef; // HLSL Change Starts // No ObjC parse/sema support, so simply skip all of this compilation. // Here are enough stubs to link the current targets. #if 1 ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **strings, unsigned NumStrings) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType ty, SourceLocation RParenLoc) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc) { llvm_unreachable("HLSL does not support ObjC constructs"); return ExprError(); } /// Try to capture an implicit reference to 'self'. 
ObjCMethodDecl *Sema::tryCaptureObjCSelf(SourceLocation Loc) { llvm_unreachable("HLSL does not support ObjC constructs"); return nullptr; } QualType Sema::getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage) { llvm_unreachable("HLSL does not support ObjC constructs"); return QualType(); } void Sema::EmitRelatedResultTypeNoteForReturn(QualType destType) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::EmitRelatedResultTypeNote(const Expr *E) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK) { llvm_unreachable("HLSL does not support ObjC constructs"); return false; } bool Sema::isSelfExpr(Expr *RExpr) { llvm_unreachable("HLSL does not support ObjC constructs"); return false; } bool Sema::isSelfExpr(Expr *receiver, const ObjCMethodDecl *method) { llvm_unreachable("HLSL does not support ObjC constructs"); return false; } ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type, bool isInstance) { llvm_unreachable("HLSL does not support ObjC constructs"); return nullptr; } ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool Instance) { llvm_unreachable("HLSL does not support ObjC constructs"); return nullptr; } ExprResult Sema:: HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super) { llvm_unreachable("HLSL does not support ObjC constructs"); return ExprError(); } ExprResult Sema:: ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc) { llvm_unreachable("HLSL does not support ObjC constructs"); return ExprError(); } Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args) { llvm_unreachable("HLSL does not support ObjC constructs"); return ExprError(); } ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args) { llvm_unreachable("HLSL does not support ObjC constructs"); return ExprError(); } ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg ArgsIn, bool isImplicit) { llvm_unreachable("HLSL does not support ObjC constructs"); return ExprError(); } ExprResult Sema::ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult 
Sema::BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg ArgsIn, bool isImplicit) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::isKnownName(StringRef name) { if (name.empty()) return false; LookupResult R(*this, &Context.Idents.get(name), SourceLocation(), Sema::LookupOrdinaryName); return LookupName(R, TUScope, false); } void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr) { llvm_unreachable("HLSL does not support ObjC constructs"); } Sema::ARCConversionResult Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&castExpr, CheckedConversionKind CCK, bool DiagnoseCFAudited, BinaryOperatorKind Opc) { llvm_unreachable("HLSL does not support ObjC constructs"); } /// Given that we saw an expression with the ARCUnbridgedCastTy /// placeholder type, complain bitterly. void Sema::diagnoseARCUnbridgedCast(Expr *e) { llvm_unreachable("HLSL does not support ObjC constructs"); } /// stripARCUnbridgedCast - Given an expression of ARCUnbridgedCast /// type, remove the placeholder cast. Expr *Sema::stripARCUnbridgedCast(Expr *e) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType, QualType exprType) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr) { llvm_unreachable("HLSL does not support ObjC constructs"); } #else // HLSL Change Ends ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **strings, unsigned NumStrings) { StringLiteral **Strings = reinterpret_cast<StringLiteral**>(strings); // Most ObjC strings are formed out of a single piece. 
However, we *can* // have strings formed out of multiple @ strings with multiple pptokens in // each one, e.g. @"foo" "bar" @"baz" "qux" which need to be turned into one // StringLiteral for ObjCStringLiteral to hold onto. StringLiteral *S = Strings[0]; // If we have a multi-part string, merge it all together. if (NumStrings != 1) { // Concatenate objc strings. SmallString<128> StrBuf; SmallVector<SourceLocation, 8> StrLocs; for (unsigned i = 0; i != NumStrings; ++i) { S = Strings[i]; // ObjC strings can't be wide or UTF. if (!S->isAscii()) { Diag(S->getLocStart(), diag::err_cfstring_literal_not_string_constant) << S->getSourceRange(); return true; } // Append the string. StrBuf += S->getString(); // Get the locations of the string tokens. StrLocs.append(S->tokloc_begin(), S->tokloc_end()); } // Create the aggregate string with the appropriate content and location // information. const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType()); assert(CAT && "String literal not of constant array type!"); QualType StrTy = Context.getConstantArrayType( CAT->getElementType(), llvm::APInt(32, StrBuf.size() + 1), CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers()); S = StringLiteral::Create(Context, StrBuf, StringLiteral::Ascii, /*Pascal=*/false, StrTy, &StrLocs[0], StrLocs.size()); } return BuildObjCStringLiteral(AtLocs[0], S); } ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){ // Verify that this composite string is acceptable for ObjC strings. if (CheckObjCString(S)) return true; // Initialize the constant string interface lazily. This assumes // the NSString interface is seen in this translation unit. Note: We // don't use NSConstantString, since the runtime team considers this // interface private (even though it appears in the header files). QualType Ty = Context.getObjCConstantStringInterface(); if (!Ty.isNull()) { Ty = Context.getObjCObjectPointerType(Ty); } else if (getLangOpts().NoConstantCFStrings) { IdentifierInfo *NSIdent=nullptr; std::string StringClass(getLangOpts().ObjCConstantStringClass); if (StringClass.empty()) NSIdent = &Context.Idents.get("NSConstantString"); else NSIdent = &Context.Idents.get(StringClass); NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc, LookupOrdinaryName); if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) { Context.setObjCConstantStringInterface(StrIF); Ty = Context.getObjCConstantStringInterface(); Ty = Context.getObjCObjectPointerType(Ty); } else { // If there is no NSConstantString interface defined then treat this // as an error and recover from it. Diag(S->getLocStart(), diag::err_no_nsconstant_string_class) << NSIdent << S->getSourceRange(); Ty = Context.getObjCIdType(); } } else { IdentifierInfo *NSIdent = NSAPIObj->getNSClassId(NSAPI::ClassId_NSString); NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc, LookupOrdinaryName); if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) { Context.setObjCConstantStringInterface(StrIF); Ty = Context.getObjCConstantStringInterface(); Ty = Context.getObjCObjectPointerType(Ty); } else { // If there is no NSString interface defined, implicitly declare // a @class NSString; and use that instead. This is to make sure the // type of an NSString literal is represented correctly, instead of // being an 'id' type.
Ty = Context.getObjCNSStringType(); if (Ty.isNull()) { ObjCInterfaceDecl *NSStringIDecl = ObjCInterfaceDecl::Create (Context, Context.getTranslationUnitDecl(), SourceLocation(), NSIdent, nullptr, nullptr, SourceLocation()); Ty = Context.getObjCInterfaceType(NSStringIDecl); Context.setObjCNSStringType(Ty); } Ty = Context.getObjCObjectPointerType(Ty); } } return new (Context) ObjCStringLiteral(S, Ty, AtLoc); } /// \brief Emits an error if the given method does not exist, or if the return /// type is not an Objective-C object. static bool validateBoxingMethod(Sema &S, SourceLocation Loc, const ObjCInterfaceDecl *Class, Selector Sel, const ObjCMethodDecl *Method) { if (!Method) { // FIXME: Is there a better way to avoid quotes than using getName()? S.Diag(Loc, diag::err_undeclared_boxing_method) << Sel << Class->getName(); return false; } // Make sure the return type is reasonable. QualType ReturnType = Method->getReturnType(); if (!ReturnType->isObjCObjectPointerType()) { S.Diag(Loc, diag::err_objc_literal_method_sig) << Sel; S.Diag(Method->getLocation(), diag::note_objc_literal_method_return) << ReturnType; return false; } return true; } /// \brief Retrieve the NSNumber factory method that should be used to create /// an Objective-C literal for the given type. static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc, QualType NumberType, bool isLiteral = false, SourceRange R = SourceRange()) { Optional<NSAPI::NSNumberLiteralMethodKind> Kind = S.NSAPIObj->getNSNumberFactoryMethodKind(NumberType); if (!Kind) { if (isLiteral) { S.Diag(Loc, diag::err_invalid_nsnumber_type) << NumberType << R; } return nullptr; } // If we already looked up this method, we're done. if (S.NSNumberLiteralMethods[*Kind]) return S.NSNumberLiteralMethods[*Kind]; Selector Sel = S.NSAPIObj->getNSNumberLiteralSelector(*Kind, /*Instance=*/false); ASTContext &CX = S.Context; // Look up the NSNumber class, if we haven't done so already. It's cached // in the Sema instance. if (!S.NSNumberDecl) { IdentifierInfo *NSNumberId = S.NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber); NamedDecl *IF = S.LookupSingleName(S.TUScope, NSNumberId, Loc, Sema::LookupOrdinaryName); S.NSNumberDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); if (!S.NSNumberDecl) { if (S.getLangOpts().DebuggerObjCLiteral) { // Create a stub definition of NSNumber. S.NSNumberDecl = ObjCInterfaceDecl::Create(CX, CX.getTranslationUnitDecl(), SourceLocation(), NSNumberId, nullptr, nullptr, SourceLocation()); } else { // Otherwise, require a declaration of NSNumber. S.Diag(Loc, diag::err_undeclared_nsnumber); return nullptr; } } else if (!S.NSNumberDecl->hasDefinition()) { S.Diag(Loc, diag::err_undeclared_nsnumber); return nullptr; } } if (S.NSNumberPointer.isNull()) { // Generate the pointer to NSNumber type. QualType NSNumberObject = CX.getObjCInterfaceType(S.NSNumberDecl); S.NSNumberPointer = CX.getObjCObjectPointerType(NSNumberObject); } // Look for the appropriate method within NSNumber. ObjCMethodDecl *Method = S.NSNumberDecl->lookupClassMethod(Sel); if (!Method && S.getLangOpts().DebuggerObjCLiteral) { // Create a stub definition of this NSNumber factory method.
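// Illustrative note: for an 'int' payload, the stub synthesized below is
// typically equivalent to declaring
//   + (NSNumber *)numberWithInt:(int)value;
// (the exact selector comes from getNSNumberLiteralSelector above).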
TypeSourceInfo *ReturnTInfo = nullptr; Method = ObjCMethodDecl::Create(CX, SourceLocation(), SourceLocation(), Sel, S.NSNumberPointer, ReturnTInfo, S.NSNumberDecl, /*isInstance=*/false, /*isVariadic=*/false, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, /*HasRelatedResultType=*/false); ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method, SourceLocation(), SourceLocation(), &CX.Idents.get("value"), NumberType, /*TInfo=*/nullptr, SC_None, nullptr); Method->setMethodParams(S.Context, value, None); } if (!validateBoxingMethod(S, Loc, S.NSNumberDecl, Sel, Method)) return nullptr; // Note: if the parameter type is out-of-line, we'll catch it later in the // implicit conversion. S.NSNumberLiteralMethods[*Kind] = Method; return Method; } /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *". ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) { // Determine the type of the literal. QualType NumberType = Number->getType(); if (CharacterLiteral *Char = dyn_cast<CharacterLiteral>(Number)) { // In C, character literals have type 'int'. That's not the type we want // to use to determine the Objective-c literal kind. switch (Char->getKind()) { case CharacterLiteral::Ascii: NumberType = Context.CharTy; break; case CharacterLiteral::Wide: NumberType = Context.getWideCharType(); break; case CharacterLiteral::UTF16: NumberType = Context.Char16Ty; break; case CharacterLiteral::UTF32: NumberType = Context.Char32Ty; break; } } // Look for the appropriate method within NSNumber. // Construct the literal. SourceRange NR(Number->getSourceRange()); ObjCMethodDecl *Method = getNSNumberFactoryMethod(*this, AtLoc, NumberType, true, NR); if (!Method) return ExprError(); // Convert the number to the type that the parameter expects. ParmVarDecl *ParamDecl = Method->parameters()[0]; InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, ParamDecl); ExprResult ConvertedNumber = PerformCopyInitialization(Entity, SourceLocation(), Number); if (ConvertedNumber.isInvalid()) return ExprError(); Number = ConvertedNumber.get(); // Use the effective source range of the literal, including the leading '@'. return MaybeBindToTemporary( new (Context) ObjCBoxedExpr(Number, NSNumberPointer, Method, SourceRange(AtLoc, NR.getEnd()))); } ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value) { ExprResult Inner; if (getLangOpts().CPlusPlus) { Inner = ActOnCXXBoolLiteral(ValueLoc, Value? tok::kw_true : tok::kw_false); } else { // C doesn't actually have a way to represent literal values of type // _Bool. So, we'll use 0/1 and implicit cast to _Bool. Inner = ActOnIntegerConstant(ValueLoc, Value? 1 : 0); Inner = ImpCastExprToType(Inner.get(), Context.BoolTy, CK_IntegralToBoolean); } return BuildObjCNumericLiteral(AtLoc, Inner.get()); } /// \brief Check that the given expression is a valid element of an Objective-C /// collection literal. static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element, QualType T, bool ArrayLiteral = false) { // If the expression is type-dependent, there's nothing for us to do. if (Element->isTypeDependent()) return Element; ExprResult Result = S.CheckPlaceholderExpr(Element); if (Result.isInvalid()) return ExprError(); Element = Result.get(); // In C++, check for an implicit conversion to an Objective-C object pointer // type. 
if (S.getLangOpts().CPlusPlus && Element->getType()->isRecordType()) { InitializedEntity Entity = InitializedEntity::InitializeParameter(S.Context, T, /*Consumed=*/false); InitializationKind Kind = InitializationKind::CreateCopy(Element->getLocStart(), SourceLocation()); InitializationSequence Seq(S, Entity, Kind, Element); if (!Seq.Failed()) return Seq.Perform(S, Entity, Kind, Element); } Expr *OrigElement = Element; // Perform lvalue-to-rvalue conversion. Result = S.DefaultLvalueConversion(Element); if (Result.isInvalid()) return ExprError(); Element = Result.get(); // Make sure that we have an Objective-C pointer type or block. if (!Element->getType()->isObjCObjectPointerType() && !Element->getType()->isBlockPointerType()) { bool Recovered = false; // If this is potentially an Objective-C numeric literal, add the '@'. if (isa<IntegerLiteral>(OrigElement) || isa<CharacterLiteral>(OrigElement) || isa<FloatingLiteral>(OrigElement) || isa<ObjCBoolLiteralExpr>(OrigElement) || isa<CXXBoolLiteralExpr>(OrigElement)) { if (S.NSAPIObj->getNSNumberFactoryMethodKind(OrigElement->getType())) { int Which = isa<CharacterLiteral>(OrigElement) ? 1 : (isa<CXXBoolLiteralExpr>(OrigElement) || isa<ObjCBoolLiteralExpr>(OrigElement)) ? 2 : 3; S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection) << Which << OrigElement->getSourceRange() << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@"); Result = S.BuildObjCNumericLiteral(OrigElement->getLocStart(), OrigElement); if (Result.isInvalid()) return ExprError(); Element = Result.get(); Recovered = true; } } // If this is potentially an Objective-C string literal, add the '@'. else if (StringLiteral *String = dyn_cast<StringLiteral>(OrigElement)) { if (String->isAscii()) { S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection) << 0 << OrigElement->getSourceRange() << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@"); Result = S.BuildObjCStringLiteral(OrigElement->getLocStart(), String); if (Result.isInvalid()) return ExprError(); Element = Result.get(); Recovered = true; } } if (!Recovered) { S.Diag(Element->getLocStart(), diag::err_invalid_collection_element) << Element->getType(); return ExprError(); } } if (ArrayLiteral) if (ObjCStringLiteral *getString = dyn_cast<ObjCStringLiteral>(OrigElement)) { if (StringLiteral *SL = getString->getString()) { unsigned numConcat = SL->getNumConcatenated(); if (numConcat > 1) { // Only warn if the concatenated string doesn't come from a macro. bool hasMacro = false; for (unsigned i = 0; i < numConcat ; ++i) if (SL->getStrTokenLoc(i).isMacroID()) { hasMacro = true; break; } if (!hasMacro) S.Diag(Element->getLocStart(), diag::warn_concatenated_nsarray_literal) << Element->getType(); } } } // Make sure that the element has the type that the container factory // function expects. return S.PerformCopyInitialization( InitializedEntity::InitializeParameter(S.Context, T, /*Consumed=*/false), Element->getLocStart(), Element); } ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) { if (ValueExpr->isTypeDependent()) { ObjCBoxedExpr *BoxedExpr = new (Context) ObjCBoxedExpr(ValueExpr, Context.DependentTy, nullptr, SR); return BoxedExpr; } ObjCMethodDecl *BoxingMethod = nullptr; QualType BoxedType; // Convert the expression to an RValue, so we can check for pointer types... 
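// For example (illustrative): @("abc"), whose operand is 'const char *',
// is boxed via +stringWithUTF8String:, while @(42) is boxed via an NSNumber
// factory method; the dispatch below is driven by the converted rvalue type.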
ExprResult RValue = DefaultFunctionArrayLvalueConversion(ValueExpr); if (RValue.isInvalid()) { return ExprError(); } ValueExpr = RValue.get(); QualType ValueType(ValueExpr->getType()); if (const PointerType *PT = ValueType->getAs<PointerType>()) { QualType PointeeType = PT->getPointeeType(); if (Context.hasSameUnqualifiedType(PointeeType, Context.CharTy)) { if (!NSStringDecl) { IdentifierInfo *NSStringId = NSAPIObj->getNSClassId(NSAPI::ClassId_NSString); NamedDecl *Decl = LookupSingleName(TUScope, NSStringId, SR.getBegin(), LookupOrdinaryName); NSStringDecl = dyn_cast_or_null<ObjCInterfaceDecl>(Decl); if (!NSStringDecl) { if (getLangOpts().DebuggerObjCLiteral) { // Support boxed expressions in the debugger w/o NSString declaration. DeclContext *TU = Context.getTranslationUnitDecl(); NSStringDecl = ObjCInterfaceDecl::Create(Context, TU, SourceLocation(), NSStringId, nullptr, nullptr, SourceLocation()); } else { Diag(SR.getBegin(), diag::err_undeclared_nsstring); return ExprError(); } } else if (!NSStringDecl->hasDefinition()) { Diag(SR.getBegin(), diag::err_undeclared_nsstring); return ExprError(); } assert(NSStringDecl && "NSStringDecl should not be NULL"); QualType NSStringObject = Context.getObjCInterfaceType(NSStringDecl); NSStringPointer = Context.getObjCObjectPointerType(NSStringObject); } if (!StringWithUTF8StringMethod) { IdentifierInfo *II = &Context.Idents.get("stringWithUTF8String"); Selector stringWithUTF8String = Context.Selectors.getUnarySelector(II); // Look for the appropriate method within NSString. BoxingMethod = NSStringDecl->lookupClassMethod(stringWithUTF8String); if (!BoxingMethod && getLangOpts().DebuggerObjCLiteral) { // Debugger needs to work even if NSString hasn't been defined. TypeSourceInfo *ReturnTInfo = nullptr; ObjCMethodDecl *M = ObjCMethodDecl::Create( Context, SourceLocation(), SourceLocation(), stringWithUTF8String, NSStringPointer, ReturnTInfo, NSStringDecl, /*isInstance=*/false, /*isVariadic=*/false, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, /*HasRelatedResultType=*/false); QualType ConstCharType = Context.CharTy.withConst(); ParmVarDecl *value = ParmVarDecl::Create(Context, M, SourceLocation(), SourceLocation(), &Context.Idents.get("value"), Context.getPointerType(ConstCharType), /*TInfo=*/nullptr, SC_None, nullptr); M->setMethodParams(Context, value, None); BoxingMethod = M; } if (!validateBoxingMethod(*this, SR.getBegin(), NSStringDecl, stringWithUTF8String, BoxingMethod)) return ExprError(); StringWithUTF8StringMethod = BoxingMethod; } BoxingMethod = StringWithUTF8StringMethod; BoxedType = NSStringPointer; } } else if (ValueType->isBuiltinType()) { // The other types we support are numeric, char and BOOL/bool. We could also // provide limited support for structure types, such as NSRange, NSRect, and // NSSize. See NSValue (NSValueGeometryExtensions) in <Foundation/NSGeometry.h> // for more details. // Check for a top-level character literal. if (const CharacterLiteral *Char = dyn_cast<CharacterLiteral>(ValueExpr->IgnoreParens())) { // In C, character literals have type 'int'. That's not the type we want // to use to determine the Objective-c literal kind. 
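// Illustrative consequence: in C, @('a') should box as a 'char'
// (typically +numberWithChar:), not as the 'int' that the character
// literal nominally has.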
switch (Char->getKind()) { case CharacterLiteral::Ascii: ValueType = Context.CharTy; break; case CharacterLiteral::Wide: ValueType = Context.getWideCharType(); break; case CharacterLiteral::UTF16: ValueType = Context.Char16Ty; break; case CharacterLiteral::UTF32: ValueType = Context.Char32Ty; break; } } CheckForIntOverflow(ValueExpr); // FIXME: Do I need to do anything special with BoolTy expressions? // Look for the appropriate method within NSNumber. BoxingMethod = getNSNumberFactoryMethod(*this, SR.getBegin(), ValueType); BoxedType = NSNumberPointer; } else if (const EnumType *ET = ValueType->getAs<EnumType>()) { if (!ET->getDecl()->isComplete()) { Diag(SR.getBegin(), diag::err_objc_incomplete_boxed_expression_type) << ValueType << ValueExpr->getSourceRange(); return ExprError(); } BoxingMethod = getNSNumberFactoryMethod(*this, SR.getBegin(), ET->getDecl()->getIntegerType()); BoxedType = NSNumberPointer; } else if (ValueType->isObjCBoxableRecordType()) { // Support for structure types that are marked objc_boxable, e.g.: // struct __attribute__((objc_boxable)) s { ... }; // Look up the NSValue class, if we haven't done so already. It's cached // in the Sema instance. if (!NSValueDecl) { IdentifierInfo *NSValueId = NSAPIObj->getNSClassId(NSAPI::ClassId_NSValue); NamedDecl *IF = LookupSingleName(TUScope, NSValueId, SR.getBegin(), Sema::LookupOrdinaryName); NSValueDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); if (!NSValueDecl) { if (getLangOpts().DebuggerObjCLiteral) { // Create a stub definition of NSValue. DeclContext *TU = Context.getTranslationUnitDecl(); NSValueDecl = ObjCInterfaceDecl::Create(Context, TU, SourceLocation(), NSValueId, nullptr, nullptr, SourceLocation()); } else { // Otherwise, require a declaration of NSValue. Diag(SR.getBegin(), diag::err_undeclared_nsvalue); return ExprError(); } } else if (!NSValueDecl->hasDefinition()) { Diag(SR.getBegin(), diag::err_undeclared_nsvalue); return ExprError(); } // Generate the pointer to NSValue type. QualType NSValueObject = Context.getObjCInterfaceType(NSValueDecl); NSValuePointer = Context.getObjCObjectPointerType(NSValueObject); } if (!ValueWithBytesObjCTypeMethod) { IdentifierInfo *II[] = { &Context.Idents.get("valueWithBytes"), &Context.Idents.get("objCType") }; Selector ValueWithBytesObjCType = Context.Selectors.getSelector(2, II); // Look for the appropriate method within NSValue. BoxingMethod = NSValueDecl->lookupClassMethod(ValueWithBytesObjCType); if (!BoxingMethod && getLangOpts().DebuggerObjCLiteral) { // Debugger needs to work even if NSValue hasn't been defined.
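// The stub synthesized below corresponds to a declaration of the form
//   + (NSValue *)valueWithBytes:(const void *)bytes objCType:(const char *)type;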
TypeSourceInfo *ReturnTInfo = nullptr; ObjCMethodDecl *M = ObjCMethodDecl::Create( Context, SourceLocation(), SourceLocation(), ValueWithBytesObjCType, NSValuePointer, ReturnTInfo, NSValueDecl, /*isInstance=*/false, /*isVariadic=*/false, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, /*HasRelatedResultType=*/false); SmallVector<ParmVarDecl *, 2> Params; ParmVarDecl *bytes = ParmVarDecl::Create(Context, M, SourceLocation(), SourceLocation(), &Context.Idents.get("bytes"), Context.VoidPtrTy.withConst(), /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(bytes); QualType ConstCharType = Context.CharTy.withConst(); ParmVarDecl *type = ParmVarDecl::Create(Context, M, SourceLocation(), SourceLocation(), &Context.Idents.get("type"), Context.getPointerType(ConstCharType), /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(type); M->setMethodParams(Context, Params, None); BoxingMethod = M; } if (!validateBoxingMethod(*this, SR.getBegin(), NSValueDecl, ValueWithBytesObjCType, BoxingMethod)) return ExprError(); ValueWithBytesObjCTypeMethod = BoxingMethod; } if (!ValueType.isTriviallyCopyableType(Context)) { Diag(SR.getBegin(), diag::err_objc_non_trivially_copyable_boxed_expression_type) << ValueType << ValueExpr->getSourceRange(); return ExprError(); } BoxingMethod = ValueWithBytesObjCTypeMethod; BoxedType = NSValuePointer; } if (!BoxingMethod) { Diag(SR.getBegin(), diag::err_objc_illegal_boxed_expression_type) << ValueType << ValueExpr->getSourceRange(); return ExprError(); } DiagnoseUseOfDecl(BoxingMethod, SR.getBegin()); ExprResult ConvertedValueExpr; if (ValueType->isObjCBoxableRecordType()) { InitializedEntity IE = InitializedEntity::InitializeTemporary(ValueType); ConvertedValueExpr = PerformCopyInitialization(IE, ValueExpr->getExprLoc(), ValueExpr); } else { // Convert the expression to the type that the parameter requires. ParmVarDecl *ParamDecl = BoxingMethod->parameters()[0]; InitializedEntity IE = InitializedEntity::InitializeParameter(Context, ParamDecl); ConvertedValueExpr = PerformCopyInitialization(IE, SourceLocation(), ValueExpr); } if (ConvertedValueExpr.isInvalid()) return ExprError(); ValueExpr = ConvertedValueExpr.get(); ObjCBoxedExpr *BoxedExpr = new (Context) ObjCBoxedExpr(ValueExpr, BoxedType, BoxingMethod, SR); return MaybeBindToTemporary(BoxedExpr); } /// Build an ObjC subscript pseudo-object expression, given that /// that's supported by the runtime. ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod) { assert(!LangOpts.isSubscriptPointerArithmetic()); // We can't get dependent types here; our callers should have // filtered them out. assert((!BaseExpr->isTypeDependent() && !IndexExpr->isTypeDependent()) && "base or index cannot have dependent type here"); // Filter out placeholders in the index. In theory, overloads could // be preserved here, although that might not actually work correctly. ExprResult Result = CheckPlaceholderExpr(IndexExpr); if (Result.isInvalid()) return ExprError(); IndexExpr = Result.get(); // Perform lvalue-to-rvalue conversion on the base. Result = DefaultLvalueConversion(BaseExpr); if (Result.isInvalid()) return ExprError(); BaseExpr = Result.get(); // Build the pseudo-object expression. 
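// The resulting pseudo-object is rewritten later in Sema into real message
// sends using the getterMethod/setterMethod supplied by the caller
// (e.g. objectAtIndexedSubscript: for a load of 'obj[i]').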
return ObjCSubscriptRefExpr::Create(Context, BaseExpr, IndexExpr, Context.PseudoObjectTy, getterMethod, setterMethod, RB); } ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) { // Look up the NSArray class, if we haven't done so already. if (!NSArrayDecl) { NamedDecl *IF = LookupSingleName(TUScope, NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray), SR.getBegin(), LookupOrdinaryName); NSArrayDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); if (!NSArrayDecl && getLangOpts().DebuggerObjCLiteral) NSArrayDecl = ObjCInterfaceDecl::Create (Context, Context.getTranslationUnitDecl(), SourceLocation(), NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray), nullptr, nullptr, SourceLocation()); if (!NSArrayDecl) { Diag(SR.getBegin(), diag::err_undeclared_nsarray); return ExprError(); } } // Find the arrayWithObjects:count: method, if we haven't done so already. QualType IdT = Context.getObjCIdType(); if (!ArrayWithObjectsMethod) { Selector Sel = NSAPIObj->getNSArraySelector(NSAPI::NSArr_arrayWithObjectsCount); ObjCMethodDecl *Method = NSArrayDecl->lookupClassMethod(Sel); if (!Method && getLangOpts().DebuggerObjCLiteral) { TypeSourceInfo *ReturnTInfo = nullptr; Method = ObjCMethodDecl::Create( Context, SourceLocation(), SourceLocation(), Sel, IdT, ReturnTInfo, Context.getTranslationUnitDecl(), false /*Instance*/, false /*isVariadic*/, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); SmallVector<ParmVarDecl *, 2> Params; ParmVarDecl *objects = ParmVarDecl::Create(Context, Method, SourceLocation(), SourceLocation(), &Context.Idents.get("objects"), Context.getPointerType(IdT), /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(objects); ParmVarDecl *cnt = ParmVarDecl::Create(Context, Method, SourceLocation(), SourceLocation(), &Context.Idents.get("cnt"), Context.UnsignedLongTy, /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(cnt); Method->setMethodParams(Context, Params, None); } if (!validateBoxingMethod(*this, SR.getBegin(), NSArrayDecl, Sel, Method)) return ExprError(); // Dig out the type that all elements should be converted to. QualType T = Method->parameters()[0]->getType(); const PointerType *PtrT = T->getAs<PointerType>(); if (!PtrT || !Context.hasSameUnqualifiedType(PtrT->getPointeeType(), IdT)) { Diag(SR.getBegin(), diag::err_objc_literal_method_sig) << Sel; Diag(Method->parameters()[0]->getLocation(), diag::note_objc_literal_method_param) << 0 << T << Context.getPointerType(IdT.withConst()); return ExprError(); } // Check that the 'count' parameter is integral. if (!Method->parameters()[1]->getType()->isIntegerType()) { Diag(SR.getBegin(), diag::err_objc_literal_method_sig) << Sel; Diag(Method->parameters()[1]->getLocation(), diag::note_objc_literal_method_param) << 1 << Method->parameters()[1]->getType() << "integral"; return ExprError(); } // We've found a good +arrayWithObjects:count: method. Save it! ArrayWithObjectsMethod = Method; } QualType ObjectsType = ArrayWithObjectsMethod->parameters()[0]->getType(); QualType RequiredType = ObjectsType->castAs<PointerType>()->getPointeeType(); // Check that each of the elements provided is valid in a collection literal, // performing conversions as necessary. 
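// For example (illustrative): in @[ @1, s ], each element must convert to
// the 'id' element type of +arrayWithObjects:count:; a bare 'int' element
// is rejected, with a fix-it suggesting a leading '@'.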
Expr **ElementsBuffer = Elements.data(); for (unsigned I = 0, N = Elements.size(); I != N; ++I) { ExprResult Converted = CheckObjCCollectionLiteralElement(*this, ElementsBuffer[I], RequiredType, true); if (Converted.isInvalid()) return ExprError(); ElementsBuffer[I] = Converted.get(); } QualType Ty = Context.getObjCObjectPointerType( Context.getObjCInterfaceType(NSArrayDecl)); return MaybeBindToTemporary( ObjCArrayLiteral::Create(Context, Elements, Ty, ArrayWithObjectsMethod, SR)); } ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements) { // Look up the NSDictionary class, if we haven't done so already. if (!NSDictionaryDecl) { NamedDecl *IF = LookupSingleName(TUScope, NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary), SR.getBegin(), LookupOrdinaryName); NSDictionaryDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); if (!NSDictionaryDecl && getLangOpts().DebuggerObjCLiteral) NSDictionaryDecl = ObjCInterfaceDecl::Create (Context, Context.getTranslationUnitDecl(), SourceLocation(), NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary), nullptr, nullptr, SourceLocation()); if (!NSDictionaryDecl) { Diag(SR.getBegin(), diag::err_undeclared_nsdictionary); return ExprError(); } } // Find the dictionaryWithObjects:forKeys:count: method, if we haven't done // so already. QualType IdT = Context.getObjCIdType(); if (!DictionaryWithObjectsMethod) { Selector Sel = NSAPIObj->getNSDictionarySelector( NSAPI::NSDict_dictionaryWithObjectsForKeysCount); ObjCMethodDecl *Method = NSDictionaryDecl->lookupClassMethod(Sel); if (!Method && getLangOpts().DebuggerObjCLiteral) { Method = ObjCMethodDecl::Create(Context, SourceLocation(), SourceLocation(), Sel, IdT, nullptr /*TypeSourceInfo */, Context.getTranslationUnitDecl(), false /*Instance*/, false/*isVariadic*/, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); SmallVector<ParmVarDecl *, 3> Params; ParmVarDecl *objects = ParmVarDecl::Create(Context, Method, SourceLocation(), SourceLocation(), &Context.Idents.get("objects"), Context.getPointerType(IdT), /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(objects); ParmVarDecl *keys = ParmVarDecl::Create(Context, Method, SourceLocation(), SourceLocation(), &Context.Idents.get("keys"), Context.getPointerType(IdT), /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(keys); ParmVarDecl *cnt = ParmVarDecl::Create(Context, Method, SourceLocation(), SourceLocation(), &Context.Idents.get("cnt"), Context.UnsignedLongTy, /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(cnt); Method->setMethodParams(Context, Params, None); } if (!validateBoxingMethod(*this, SR.getBegin(), NSDictionaryDecl, Sel, Method)) return ExprError(); // Dig out the type that all values should be converted to. QualType ValueT = Method->parameters()[0]->getType(); const PointerType *PtrValue = ValueT->getAs<PointerType>(); if (!PtrValue || !Context.hasSameUnqualifiedType(PtrValue->getPointeeType(), IdT)) { Diag(SR.getBegin(), diag::err_objc_literal_method_sig) << Sel; Diag(Method->parameters()[0]->getLocation(), diag::note_objc_literal_method_param) << 0 << ValueT << Context.getPointerType(IdT.withConst()); return ExprError(); } // Dig out the type that all keys should be converted to. 
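// Note: the keys parameter of +dictionaryWithObjects:forKeys:count: is
// commonly declared 'const id<NSCopying> *'; the check below accepts that
// form in addition to plain 'const id *'.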
QualType KeyT = Method->parameters()[1]->getType(); const PointerType *PtrKey = KeyT->getAs<PointerType>(); if (!PtrKey || !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(), IdT)) { bool err = true; if (PtrKey) { if (QIDNSCopying.isNull()) { // key argument of selector is id<NSCopying>? if (ObjCProtocolDecl *NSCopyingPDecl = LookupProtocol(&Context.Idents.get("NSCopying"), SR.getBegin())) { ObjCProtocolDecl *PQ[] = {NSCopyingPDecl}; QIDNSCopying = Context.getObjCObjectType(Context.ObjCBuiltinIdTy, { }, llvm::makeArrayRef( (ObjCProtocolDecl**) PQ, 1), false); QIDNSCopying = Context.getObjCObjectPointerType(QIDNSCopying); } } if (!QIDNSCopying.isNull()) err = !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(), QIDNSCopying); } if (err) { Diag(SR.getBegin(), diag::err_objc_literal_method_sig) << Sel; Diag(Method->parameters()[1]->getLocation(), diag::note_objc_literal_method_param) << 1 << KeyT << Context.getPointerType(IdT.withConst()); return ExprError(); } } // Check that the 'count' parameter is integral. QualType CountType = Method->parameters()[2]->getType(); if (!CountType->isIntegerType()) { Diag(SR.getBegin(), diag::err_objc_literal_method_sig) << Sel; Diag(Method->parameters()[2]->getLocation(), diag::note_objc_literal_method_param) << 2 << CountType << "integral"; return ExprError(); } // We've found a good +dictionaryWithObjects:keys:count: method; save it! DictionaryWithObjectsMethod = Method; } QualType ValuesT = DictionaryWithObjectsMethod->parameters()[0]->getType(); QualType ValueT = ValuesT->castAs<PointerType>()->getPointeeType(); QualType KeysT = DictionaryWithObjectsMethod->parameters()[1]->getType(); QualType KeyT = KeysT->castAs<PointerType>()->getPointeeType(); // Check that each of the keys and values provided is valid in a collection // literal, performing conversions as necessary. bool HasPackExpansions = false; for (unsigned I = 0, N = NumElements; I != N; ++I) { // Check the key. ExprResult Key = CheckObjCCollectionLiteralElement(*this, Elements[I].Key, KeyT); if (Key.isInvalid()) return ExprError(); // Check the value. ExprResult Value = CheckObjCCollectionLiteralElement(*this, Elements[I].Value, ValueT); if (Value.isInvalid()) return ExprError(); Elements[I].Key = Key.get(); Elements[I].Value = Value.get(); if (Elements[I].EllipsisLoc.isInvalid()) continue; if (!Elements[I].Key->containsUnexpandedParameterPack() && !Elements[I].Value->containsUnexpandedParameterPack()) { Diag(Elements[I].EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << SourceRange(Elements[I].Key->getLocStart(), Elements[I].Value->getLocEnd()); return ExprError(); } HasPackExpansions = true; } QualType Ty = Context.getObjCObjectPointerType( Context.getObjCInterfaceType(NSDictionaryDecl)); return MaybeBindToTemporary(ObjCDictionaryLiteral::Create( Context, makeArrayRef(Elements, NumElements), HasPackExpansions, Ty, DictionaryWithObjectsMethod, SR)); } ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc) { QualType EncodedType = EncodedTypeInfo->getType(); QualType StrTy; if (EncodedType->isDependentType()) StrTy = Context.DependentTy; else { if (!EncodedType->getAsArrayTypeUnsafe() && //// Incomplete array is handled. !EncodedType->isVoidType()) // void is handled too. 
if (RequireCompleteType(AtLoc, EncodedType, diag::err_incomplete_type_objc_at_encode, EncodedTypeInfo->getTypeLoc())) return ExprError(); std::string Str; QualType NotEncodedT; Context.getObjCEncodingForType(EncodedType, Str, nullptr, &NotEncodedT); if (!NotEncodedT.isNull()) Diag(AtLoc, diag::warn_incomplete_encoded_type) << EncodedType << NotEncodedT; // The type of @encode is the same as the type of the corresponding string, // which is an array type. StrTy = Context.CharTy; // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) StrTy.addConst(); StrTy = Context.getConstantArrayType(StrTy, llvm::APInt(32, Str.size()+1), ArrayType::Normal, 0); } return new (Context) ObjCEncodeExpr(StrTy, EncodedTypeInfo, AtLoc, RParenLoc); } ExprResult Sema::ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType ty, SourceLocation RParenLoc) { // FIXME: Preserve type source info ? TypeSourceInfo *TInfo; QualType EncodedType = GetTypeFromParser(ty, &TInfo); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(EncodedType, PP.getLocForEndOfToken(LParenLoc)); return BuildObjCEncodeExpression(AtLoc, TInfo, RParenLoc); } static bool HelperToDiagnoseMismatchedMethodsInGlobalPool(Sema &S, SourceLocation AtLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, ObjCMethodDecl *Method, ObjCMethodList &MethList) { ObjCMethodList *M = &MethList; bool Warned = false; for (M = M->getNext(); M; M=M->getNext()) { ObjCMethodDecl *MatchingMethodDecl = M->getMethod(); if (MatchingMethodDecl == Method || isa<ObjCImplDecl>(MatchingMethodDecl->getDeclContext()) || MatchingMethodDecl->getSelector() != Method->getSelector()) continue; if (!S.MatchTwoMethodDeclarations(Method, MatchingMethodDecl, Sema::MMS_loose)) { if (!Warned) { Warned = true; S.Diag(AtLoc, diag::warning_multiple_selectors) << Method->getSelector() << FixItHint::CreateInsertion(LParenLoc, "(") << FixItHint::CreateInsertion(RParenLoc, ")"); S.Diag(Method->getLocation(), diag::note_method_declared_at) << Method->getDeclName(); } S.Diag(MatchingMethodDecl->getLocation(), diag::note_method_declared_at) << MatchingMethodDecl->getDeclName(); } } return Warned; } static void DiagnoseMismatchedSelectors(Sema &S, SourceLocation AtLoc, ObjCMethodDecl *Method, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors) { if (!WarnMultipleSelectors || S.Diags.isIgnored(diag::warning_multiple_selectors, SourceLocation())) return; bool Warned = false; for (Sema::GlobalMethodPool::iterator b = S.MethodPool.begin(), e = S.MethodPool.end(); b != e; b++) { // first, instance methods ObjCMethodList &InstMethList = b->second.first; if (HelperToDiagnoseMismatchedMethodsInGlobalPool(S, AtLoc, LParenLoc, RParenLoc, Method, InstMethList)) Warned = true; // second, class methods ObjCMethodList &ClsMethList = b->second.second; if (HelperToDiagnoseMismatchedMethodsInGlobalPool(S, AtLoc, LParenLoc, RParenLoc, Method, ClsMethList) || Warned) return; } } ExprResult Sema::ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors) { ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(Sel, SourceRange(LParenLoc, RParenLoc)); if (!Method) Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(LParenLoc, RParenLoc)); if (!Method) { if (const ObjCMethodDecl *OM = SelectorsForTypoCorrection(Sel)) { Selector MatchedSel = 
OM->getSelector(); SourceRange SelectorRange(LParenLoc.getLocWithOffset(1), RParenLoc.getLocWithOffset(-1)); Diag(SelLoc, diag::warn_undeclared_selector_with_typo) << Sel << MatchedSel << FixItHint::CreateReplacement(SelectorRange, MatchedSel.getAsString()); } else Diag(SelLoc, diag::warn_undeclared_selector) << Sel; } else DiagnoseMismatchedSelectors(*this, AtLoc, Method, LParenLoc, RParenLoc, WarnMultipleSelectors); if (Method && Method->getImplementationControl() != ObjCMethodDecl::Optional && !getSourceManager().isInSystemHeader(Method->getLocation())) ReferencedSelectors.insert(std::make_pair(Sel, AtLoc)); // In ARC, forbid the user from using @selector for // retain/release/autorelease/dealloc/retainCount. if (getLangOpts().ObjCAutoRefCount) { switch (Sel.getMethodFamily()) { case OMF_retain: case OMF_release: case OMF_autorelease: case OMF_retainCount: case OMF_dealloc: Diag(AtLoc, diag::err_arc_illegal_selector) << Sel << SourceRange(LParenLoc, RParenLoc); break; case OMF_None: case OMF_alloc: case OMF_copy: case OMF_finalize: case OMF_init: case OMF_mutableCopy: case OMF_new: case OMF_self: case OMF_initialize: case OMF_performSelector: break; } } QualType Ty = Context.getObjCSelType(); return new (Context) ObjCSelectorExpr(Ty, Sel, AtLoc, RParenLoc); } ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc) { ObjCProtocolDecl* PDecl = LookupProtocol(ProtocolId, ProtoIdLoc); if (!PDecl) { Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId; return true; } if (PDecl->hasDefinition()) PDecl = PDecl->getDefinition(); QualType Ty = Context.getObjCProtoType(); if (Ty.isNull()) return true; Ty = Context.getObjCObjectPointerType(Ty); return new (Context) ObjCProtocolExpr(Ty, PDecl, AtLoc, ProtoIdLoc, RParenLoc); } /// Try to capture an implicit reference to 'self'. ObjCMethodDecl *Sema::tryCaptureObjCSelf(SourceLocation Loc) { DeclContext *DC = getFunctionLevelDeclContext(); // If we're not in an ObjC method, error out. Note that, unlike the // C++ case, we don't require an instance method --- class methods // still have a 'self', and we really do still need to capture it! ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(DC); if (!method) return nullptr; tryCaptureVariable(method->getSelfDecl(), Loc); return method; } static QualType stripObjCInstanceType(ASTContext &Context, QualType T) { QualType origType = T; if (auto nullability = AttributedType::stripOuterNullability(T)) { if (T == Context.getObjCInstanceType()) { return Context.getAttributedType( AttributedType::getNullabilityAttrKind(*nullability), Context.getObjCIdType(), Context.getObjCIdType()); } return origType; } if (T == Context.getObjCInstanceType()) return Context.getObjCIdType(); return origType; } /// Determine the result type of a message send based on the receiver type, /// method, and the kind of message send. /// /// This is the "base" result type, which will still need to be adjusted /// to account for nullability. static QualType getBaseMessageSendResultType(Sema &S, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage) { assert(Method && "Must have a method"); if (!Method->hasRelatedResultType()) return Method->getSendResultType(ReceiverType); ASTContext &Context = S.Context; // Local function that transfers the nullability of the method's // result type to the returned result. 
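// Illustrative example (MyClass is hypothetical): if the method returns
// '_Nonnull instancetype' and the related result type is computed as
// 'MyClass *', the final result type becomes 'MyClass * _Nonnull'.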
auto transferNullability = [&](QualType type) -> QualType { // If the method's result type has nullability, extract it. if (auto nullability = Method->getSendResultType(ReceiverType) ->getNullability(Context)){ // Strip off any outer nullability sugar from the provided type. (void)AttributedType::stripOuterNullability(type); // Form a new attributed type using the method result type's nullability. return Context.getAttributedType( AttributedType::getNullabilityAttrKind(*nullability), type, type); } return type; }; // If a method has a related return type: // - if the method found is an instance method, but the message send // was a class message send, T is the declared return type of the method // found if (Method->isInstanceMethod() && isClassMessage) return stripObjCInstanceType(Context, Method->getSendResultType(ReceiverType)); // - if the receiver is super, T is a pointer to the class of the // enclosing method definition if (isSuperMessage) { if (ObjCMethodDecl *CurMethod = S.getCurMethodDecl()) if (ObjCInterfaceDecl *Class = CurMethod->getClassInterface()) { return transferNullability( Context.getObjCObjectPointerType( Context.getObjCInterfaceType(Class))); } } // - if the receiver is the name of a class U, T is a pointer to U if (ReceiverType->getAsObjCInterfaceType()) return transferNullability(Context.getObjCObjectPointerType(ReceiverType)); // - if the receiver is of type Class or qualified Class type, // T is the declared return type of the method. if (ReceiverType->isObjCClassType() || ReceiverType->isObjCQualifiedClassType()) return stripObjCInstanceType(Context, Method->getSendResultType(ReceiverType)); // - if the receiver is id, qualified id, Class, or qualified Class, T // is the receiver type, otherwise // - T is the type of the receiver expression. return transferNullability(ReceiverType); } QualType Sema::getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage) { // Produce the result type. QualType resultType = getBaseMessageSendResultType(*this, ReceiverType, Method, isClassMessage, isSuperMessage); // If this is a class message, ignore the nullability of the receiver. if (isClassMessage) return resultType; // Map the nullability of the result into a table index. unsigned receiverNullabilityIdx = 0; if (auto nullability = ReceiverType->getNullability(Context)) receiverNullabilityIdx = 1 + static_cast<unsigned>(*nullability); unsigned resultNullabilityIdx = 0; if (auto nullability = resultType->getNullability(Context)) resultNullabilityIdx = 1 + static_cast<unsigned>(*nullability); // The table of nullability mappings, indexed by the receiver's nullability // and then the result type's nullability. static const uint8_t None = 0; static const uint8_t NonNull = 1; static const uint8_t Nullable = 2; static const uint8_t Unspecified = 3; static const uint8_t nullabilityMap[4][4] = { // None NonNull Nullable Unspecified /* None */ { None, None, Nullable, None }, /* NonNull */ { None, NonNull, Nullable, Unspecified }, /* Nullable */ { Nullable, Nullable, Nullable, Nullable }, /* Unspecified */ { None, Unspecified, Nullable, Unspecified } }; unsigned newResultNullabilityIdx = nullabilityMap[receiverNullabilityIdx][resultNullabilityIdx]; if (newResultNullabilityIdx == resultNullabilityIdx) return resultType; // Strip off the existing nullability. This removes as little type sugar as // possible. 
do { if (auto attributed = dyn_cast<AttributedType>(resultType.getTypePtr())) { resultType = attributed->getModifiedType(); } else { resultType = resultType.getDesugaredType(Context); } } while (resultType->getNullability(Context)); // Add nullability back if needed. if (newResultNullabilityIdx > 0) { auto newNullability = static_cast<NullabilityKind>(newResultNullabilityIdx-1); return Context.getAttributedType( AttributedType::getNullabilityAttrKind(newNullability), resultType, resultType); } return resultType; } /// Look for an ObjC method whose result type exactly matches the given type. static const ObjCMethodDecl * findExplicitInstancetypeDeclarer(const ObjCMethodDecl *MD, QualType instancetype) { if (MD->getReturnType() == instancetype) return MD; // For these purposes, a method in an @implementation overrides a // declaration in the @interface. if (const ObjCImplDecl *impl = dyn_cast<ObjCImplDecl>(MD->getDeclContext())) { const ObjCContainerDecl *iface; if (const ObjCCategoryImplDecl *catImpl = dyn_cast<ObjCCategoryImplDecl>(impl)) { iface = catImpl->getCategoryDecl(); } else { iface = impl->getClassInterface(); } const ObjCMethodDecl *ifaceMD = iface->getMethod(MD->getSelector(), MD->isInstanceMethod()); if (ifaceMD) return findExplicitInstancetypeDeclarer(ifaceMD, instancetype); } SmallVector<const ObjCMethodDecl *, 4> overrides; MD->getOverriddenMethods(overrides); for (unsigned i = 0, e = overrides.size(); i != e; ++i) { if (const ObjCMethodDecl *result = findExplicitInstancetypeDeclarer(overrides[i], instancetype)) return result; } return nullptr; } void Sema::EmitRelatedResultTypeNoteForReturn(QualType destType) { // Only complain if we're in an ObjC method and the required return // type doesn't match the method's declared return type. ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurContext); if (!MD || !MD->hasRelatedResultType() || Context.hasSameUnqualifiedType(destType, MD->getReturnType())) return; // Look for a method overridden by this method which explicitly uses // 'instancetype'. if (const ObjCMethodDecl *overridden = findExplicitInstancetypeDeclarer(MD, Context.getObjCInstanceType())) { SourceRange range = overridden->getReturnTypeSourceRange(); SourceLocation loc = range.getBegin(); if (loc.isInvalid()) loc = overridden->getLocation(); Diag(loc, diag::note_related_result_type_explicit) << /*current method*/ 1 << range; return; } // Otherwise, if we have an interesting method family, note that. // This should always trigger if the above didn't. 
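// For example (illustrative): a method named 'initWithFrame:' falls into
// the 'init' family, so its result type is treated as related even without
// an explicit 'instancetype'.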
if (ObjCMethodFamily family = MD->getMethodFamily()) Diag(MD->getLocation(), diag::note_related_result_type_family) << /*current method*/ 1 << family; } void Sema::EmitRelatedResultTypeNote(const Expr *E) { E = E->IgnoreParenImpCasts(); const ObjCMessageExpr *MsgSend = dyn_cast<ObjCMessageExpr>(E); if (!MsgSend) return; const ObjCMethodDecl *Method = MsgSend->getMethodDecl(); if (!Method) return; if (!Method->hasRelatedResultType()) return; if (Context.hasSameUnqualifiedType( Method->getReturnType().getNonReferenceType(), MsgSend->getType())) return; if (!Context.hasSameUnqualifiedType(Method->getReturnType(), Context.getObjCInstanceType())) return; Diag(Method->getLocation(), diag::note_related_result_type_inferred) << Method->isInstanceMethod() << Method->getSelector() << MsgSend->getType(); } bool Sema::CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK) { SourceLocation SelLoc; if (!SelectorLocs.empty() && SelectorLocs.front().isValid()) SelLoc = SelectorLocs.front(); else SelLoc = lbrac; if (!Method) { // Apply default argument promotion as for (C99 6.5.2.2p6). for (unsigned i = 0, e = Args.size(); i != e; i++) { if (Args[i]->isTypeDependent()) continue; ExprResult result; if (getLangOpts().DebuggerSupport) { QualType paramTy; // ignored result = checkUnknownAnyArg(SelLoc, Args[i], paramTy); } else { result = DefaultArgumentPromotion(Args[i]); } if (result.isInvalid()) return true; Args[i] = result.get(); } unsigned DiagID; if (getLangOpts().ObjCAutoRefCount) DiagID = diag::err_arc_method_not_found; else DiagID = isClassMessage ? diag::warn_class_method_not_found : diag::warn_inst_method_not_found; if (!getLangOpts().DebuggerSupport) { const ObjCMethodDecl *OMD = SelectorsForTypoCorrection(Sel, ReceiverType); if (OMD && !OMD->isInvalidDecl()) { if (getLangOpts().ObjCAutoRefCount) DiagID = diag::error_method_not_found_with_typo; else DiagID = isClassMessage ? diag::warn_class_method_not_found_with_typo : diag::warn_instance_method_not_found_with_typo; Selector MatchedSel = OMD->getSelector(); SourceRange SelectorRange(SelectorLocs.front(), SelectorLocs.back()); if (MatchedSel.isUnarySelector()) Diag(SelLoc, DiagID) << Sel<< isClassMessage << MatchedSel << FixItHint::CreateReplacement(SelectorRange, MatchedSel.getAsString()); else Diag(SelLoc, DiagID) << Sel<< isClassMessage << MatchedSel; } else Diag(SelLoc, DiagID) << Sel << isClassMessage << SourceRange(SelectorLocs.front(), SelectorLocs.back()); // Find the class to which we are sending this message. if (ReceiverType->isObjCObjectPointerType()) { if (ObjCInterfaceDecl *ThisClass = ReceiverType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()) { Diag(ThisClass->getLocation(), diag::note_receiver_class_declared); if (!RecRange.isInvalid()) if (ThisClass->lookupClassMethod(Sel)) Diag(RecRange.getBegin(),diag::note_receiver_expr_here) << FixItHint::CreateReplacement(RecRange, ThisClass->getNameAsString()); } } } // In debuggers, we want to use __unknown_anytype for these // results so that clients can cast them. 
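// For example (illustrative): under the debugger, a send with an unresolved
// selector still type-checks, and the expression evaluator can cast the
// __unknown_anytype result to whatever type the user expects.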
if (getLangOpts().DebuggerSupport) { ReturnType = Context.UnknownAnyTy; } else { ReturnType = Context.getObjCIdType(); } VK = VK_RValue; return false; } ReturnType = getMessageSendResultType(ReceiverType, Method, isClassMessage, isSuperMessage); VK = Expr::getValueKindForType(Method->getReturnType()); unsigned NumNamedArgs = Sel.getNumArgs(); // The method might have more arguments than the selector indicates. This is // due to the addition of C-style arguments in the method. if (Method->param_size() > Sel.getNumArgs()) NumNamedArgs = Method->param_size(); // FIXME: This needs to be cleaned up. if (Args.size() < NumNamedArgs) { Diag(SelLoc, diag::err_typecheck_call_too_few_args) << 2 << NumNamedArgs << static_cast<unsigned>(Args.size()); return false; } // Compute the set of type arguments to be substituted into each parameter // type. Optional<ArrayRef<QualType>> typeArgs = ReceiverType->getObjCSubstitutions(Method->getDeclContext()); bool IsError = false; for (unsigned i = 0; i < NumNamedArgs; i++) { // We can't do any type-checking on a type-dependent argument. if (Args[i]->isTypeDependent()) continue; Expr *argExpr = Args[i]; ParmVarDecl *param = Method->parameters()[i]; assert(argExpr && "CheckMessageArgumentTypes(): missing expression"); // Strip the unbridged-cast placeholder expression off unless it's // a consumed argument. if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) && !param->hasAttr<CFConsumedAttr>()) argExpr = stripARCUnbridgedCast(argExpr); // If the parameter is __unknown_anytype, infer its type // from the argument. if (param->getType() == Context.UnknownAnyTy) { QualType paramType; ExprResult argE = checkUnknownAnyArg(SelLoc, argExpr, paramType); if (argE.isInvalid()) { IsError = true; } else { Args[i] = argE.get(); // Update the parameter type in-place. param->setType(paramType); } continue; } QualType origParamType = param->getType(); QualType paramType = param->getType(); if (typeArgs) paramType = paramType.substObjCTypeArgs( Context, *typeArgs, ObjCSubstitutionContext::Parameter); if (RequireCompleteType(argExpr->getSourceRange().getBegin(), paramType, diag::err_call_incomplete_argument, argExpr)) return true; InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, param, paramType); ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), argExpr); if (ArgE.isInvalid()) IsError = true; else { Args[i] = ArgE.getAs<Expr>(); // If we are type-erasing a block to a block-compatible // Objective-C pointer type, we may need to extend the lifetime // of the block object. if (typeArgs && Args[i]->isRValue() && paramType->isBlockPointerType() && origParamType->isBlockCompatibleObjCPointerType(Context)) { ExprResult arg = Args[i]; maybeExtendBlockObject(arg); Args[i] = arg.get(); } } } // Promote additional arguments to variadic methods. if (Method->isVariadic()) { for (unsigned i = NumNamedArgs, e = Args.size(); i < e; ++i) { if (Args[i]->isTypeDependent()) continue; ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, nullptr); IsError |= Arg.isInvalid(); Args[i] = Arg.get(); } } else { // Check for extra arguments to non-variadic methods. if (Args.size() != NumNamedArgs) { Diag(Args[NumNamedArgs]->getLocStart(), diag::err_typecheck_call_too_many_args) << 2 /*method*/ << NumNamedArgs << static_cast<unsigned>(Args.size()) << Method->getSourceRange() << SourceRange(Args[NumNamedArgs]->getLocStart(), Args.back()->getLocEnd()); } } DiagnoseSentinelCalls(Method, SelLoc, Args); // Do additional checks on the method.
IsError |= CheckObjCMethodCall( Method, SelLoc, makeArrayRef(Args.data(), Args.size())); return IsError; } bool Sema::isSelfExpr(Expr *RExpr) { // 'self' is ObjC 'self' in an ObjC method only. ObjCMethodDecl *Method = dyn_cast_or_null<ObjCMethodDecl>(CurContext->getNonClosureAncestor()); return isSelfExpr(RExpr, Method); } bool Sema::isSelfExpr(Expr *receiver, const ObjCMethodDecl *method) { if (!method) return false; receiver = receiver->IgnoreParenLValueCasts(); if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(receiver)) if (DRE->getDecl() == method->getSelfDecl()) return true; return false; } /// LookupMethodInObjectType - Look up a method in an ObjCObjectType. ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type, bool isInstance) { const ObjCObjectType *objType = type->castAs<ObjCObjectType>(); if (ObjCInterfaceDecl *iface = objType->getInterface()) { // Look it up in the main interface (and categories, etc.) if (ObjCMethodDecl *method = iface->lookupMethod(sel, isInstance)) return method; // Okay, look for "private" methods declared in any // @implementations we've seen. if (ObjCMethodDecl *method = iface->lookupPrivateMethod(sel, isInstance)) return method; } // Check qualifiers. for (const auto *I : objType->quals()) if (ObjCMethodDecl *method = I->lookupMethod(sel, isInstance)) return method; return nullptr; } /// LookupMethodInQualifiedType - Looks up a method in the protocol qualifier /// list of a qualified Objective-C pointer type. ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool Instance) { ObjCMethodDecl *MD = nullptr; for (const auto *PROTO : OPT->quals()) { if ((MD = PROTO->lookupMethod(Sel, Instance))) { return MD; } } return nullptr; } /// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an /// Objective-C interface. This is a property reference expression. ExprResult Sema:: HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super) { const ObjCInterfaceType *IFaceT = OPT->getInterfaceType(); ObjCInterfaceDecl *IFace = IFaceT->getDecl(); if (!MemberName.isIdentifier()) { Diag(MemberLoc, diag::err_invalid_property_name) << MemberName << QualType(OPT, 0); return ExprError(); } IdentifierInfo *Member = MemberName.getAsIdentifierInfo(); SourceRange BaseRange = Super? SourceRange(SuperLoc) : BaseExpr->getSourceRange(); if (RequireCompleteType(MemberLoc, OPT->getPointeeType(), diag::err_property_not_found_forward_class, MemberName, BaseRange)) return ExprError(); // Search for a declared property first. if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(Member)) { // Check whether we can reference this property. if (DiagnoseUseOfDecl(PD, MemberLoc)) return ExprError(); if (Super) return new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, SuperLoc, SuperType); else return new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, BaseExpr); } // Check protocols on qualified interfaces. for (const auto *I : OPT->quals()) if (ObjCPropertyDecl *PD = I->FindPropertyDeclaration(Member)) { // Check whether we can reference this property.
if (DiagnoseUseOfDecl(PD, MemberLoc)) return ExprError(); if (Super) return new (Context) ObjCPropertyRefExpr( PD, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, SuperLoc, SuperType); else return new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, BaseExpr); } // If that failed, look for an "implicit" property by seeing if the nullary // selector is implemented. // FIXME: The logic for looking up nullary and unary selectors should be // shared with the code in ActOnInstanceMessage. Selector Sel = PP.getSelectorTable().getNullarySelector(Member); ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel); // May be found in the property's qualified list. if (!Getter) Getter = LookupMethodInQualifiedType(Sel, OPT, true); // If this reference is in an @implementation, check for 'private' methods. if (!Getter) Getter = IFace->lookupPrivateMethod(Sel); if (Getter) { // Check if we can reference this property. if (DiagnoseUseOfDecl(Getter, MemberLoc)) return ExprError(); } // If we found a getter then this may be a valid dot-reference; we // will look for the matching setter, in case it is needed. Selector SetterSel = SelectorTable::constructSetterSelector(PP.getIdentifierTable(), PP.getSelectorTable(), Member); ObjCMethodDecl *Setter = IFace->lookupInstanceMethod(SetterSel); // May be found in the property's qualified list. if (!Setter) Setter = LookupMethodInQualifiedType(SetterSel, OPT, true); if (!Setter) { // If this reference is in an @implementation, also check for 'private' // methods. Setter = IFace->lookupPrivateMethod(SetterSel); } if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc)) return ExprError(); // Special warning if the member name used in a property-dot for a setter // accessor does not use a property with the same name; e.g. obj.X = ... for a // property with name 'x'. if (Setter && Setter->isImplicit() && Setter->isPropertyAccessor() && !IFace->FindPropertyDeclaration(Member)) { if (const ObjCPropertyDecl *PDecl = Setter->findPropertyDecl()) { // Do not warn if the user is using property-dot syntax to call a // user-named setter. if (!(PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter)) Diag(MemberLoc, diag::warn_property_access_suggest) << MemberName << QualType(OPT, 0) << PDecl->getName() << FixItHint::CreateReplacement(MemberLoc, PDecl->getName()); } } if (Getter || Setter) { if (Super) return new (Context) ObjCPropertyRefExpr(Getter, Setter, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, SuperLoc, SuperType); else return new (Context) ObjCPropertyRefExpr(Getter, Setter, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, BaseExpr); } // Attempt to correct for typos in property names.
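// For example (illustrative): 'obj.lenght' can be corrected to a declared
// property named 'length'; on success we emit a fix-it and re-run property
// lookup with the corrected name.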
if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(MemberName, MemberLoc), LookupOrdinaryName, nullptr, nullptr, llvm::make_unique<DeclFilterCCC<ObjCPropertyDecl>>(), CTK_ErrorRecovery, IFace, false, OPT)) { diagnoseTypo(Corrected, PDiag(diag::err_property_not_found_suggest) << MemberName << QualType(OPT, 0)); DeclarationName TypoResult = Corrected.getCorrection(); return HandleExprPropertyRefExpr(OPT, BaseExpr, OpLoc, TypoResult, MemberLoc, SuperLoc, SuperType, Super); } ObjCInterfaceDecl *ClassDeclared; if (ObjCIvarDecl *Ivar = IFace->lookupInstanceVariable(Member, ClassDeclared)) { QualType T = Ivar->getType(); if (const ObjCObjectPointerType * OBJPT = T->getAsObjCInterfacePointerType()) { if (RequireCompleteType(MemberLoc, OBJPT->getPointeeType(), diag::err_property_not_as_forward_class, MemberName, BaseExpr)) return ExprError(); } Diag(MemberLoc, diag::err_ivar_access_using_property_syntax_suggest) << MemberName << QualType(OPT, 0) << Ivar->getDeclName() << FixItHint::CreateReplacement(OpLoc, "->"); return ExprError(); } Diag(MemberLoc, diag::err_property_not_found) << MemberName << QualType(OPT, 0); if (Setter) Diag(Setter->getLocation(), diag::note_getter_unavailable) << MemberName << BaseExpr->getSourceRange(); return ExprError(); } ExprResult Sema:: ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc) { IdentifierInfo *receiverNamePtr = &receiverName; ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(receiverNamePtr, receiverNameLoc); QualType SuperType; if (!IFace) { // If the "receiver" is 'super' in a method, handle it as an expression-like // property reference. if (receiverNamePtr->isStr("super")) { if (ObjCMethodDecl *CurMethod = tryCaptureObjCSelf(receiverNameLoc)) { if (auto classDecl = CurMethod->getClassInterface()) { SuperType = QualType(classDecl->getSuperClassType(), 0); if (CurMethod->isInstanceMethod()) { if (SuperType.isNull()) { // The current class does not have a superclass. Diag(receiverNameLoc, diag::error_root_class_cannot_use_super) << CurMethod->getClassInterface()->getIdentifier(); return ExprError(); } QualType T = Context.getObjCObjectPointerType(SuperType); return HandleExprPropertyRefExpr(T->castAs<ObjCObjectPointerType>(), /*BaseExpr*/nullptr, SourceLocation()/*OpLoc*/, &propertyName, propertyNameLoc, receiverNameLoc, T, true); } // Otherwise, if this is a class method, try dispatching to our // superclass. IFace = CurMethod->getClassInterface()->getSuperClass(); } } } if (!IFace) { Diag(receiverNameLoc, diag::err_expected_either) << tok::identifier << tok::l_paren; return ExprError(); } } // Search for a declared property first. Selector Sel = PP.getSelectorTable().getNullarySelector(&propertyName); ObjCMethodDecl *Getter = IFace->lookupClassMethod(Sel); // If this reference is in an @implementation, check for 'private' methods. if (!Getter) Getter = IFace->lookupPrivateClassMethod(Sel); if (Getter) { // FIXME: refactor/share with ActOnMemberReference(). // Check if we can reference this property. if (DiagnoseUseOfDecl(Getter, propertyNameLoc)) return ExprError(); } // Look for the matching setter, in case it is needed. Selector SetterSel = SelectorTable::constructSetterSelector(PP.getIdentifierTable(), PP.getSelectorTable(), &propertyName); ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel); if (!Setter) { // If this reference is in an @implementation, also check for 'private' // methods. 
Setter = IFace->lookupPrivateClassMethod(SetterSel); } // Look through local category implementations associated with the class. if (!Setter) Setter = IFace->getCategoryClassMethod(SetterSel); if (Setter && DiagnoseUseOfDecl(Setter, propertyNameLoc)) return ExprError(); if (Getter || Setter) { if (!SuperType.isNull()) return new (Context) ObjCPropertyRefExpr(Getter, Setter, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, propertyNameLoc, receiverNameLoc, SuperType); return new (Context) ObjCPropertyRefExpr( Getter, Setter, Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, propertyNameLoc, receiverNameLoc, IFace); } return ExprError(Diag(propertyNameLoc, diag::err_property_not_found) << &propertyName << Context.getObjCInterfaceType(IFace)); } namespace { class ObjCInterfaceOrSuperCCC : public CorrectionCandidateCallback { public: ObjCInterfaceOrSuperCCC(ObjCMethodDecl *Method) { // Determine whether "super" is acceptable in the current context. if (Method && Method->getClassInterface()) WantObjCSuper = Method->getClassInterface()->getSuperClass(); } bool ValidateCandidate(const TypoCorrection &candidate) override { return candidate.getCorrectionDeclAs<ObjCInterfaceDecl>() || candidate.isKeyword("super"); } }; } Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType) { ReceiverType = ParsedType(); // If the identifier is "super" and there is no trailing dot, we're // messaging super. If the identifier is "super" and there is a // trailing dot, it's an instance message. if (IsSuper && S->isInObjcMethodScope()) return HasTrailingDot? ObjCInstanceMessage : ObjCSuperMessage; LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName); LookupName(Result, S); switch (Result.getResultKind()) { case LookupResult::NotFound: // Normal name lookup didn't find anything. If we're in an // Objective-C method, look for ivars. If we find one, we're done! // FIXME: This is a hack. Ivar lookup should be part of normal // lookup. if (ObjCMethodDecl *Method = getCurMethodDecl()) { if (!Method->getClassInterface()) { // Fall back: let the parser try to parse it as an instance message. return ObjCInstanceMessage; } ObjCInterfaceDecl *ClassDeclared; if (Method->getClassInterface()->lookupInstanceVariable(Name, ClassDeclared)) return ObjCInstanceMessage; } // Break out; we'll perform typo correction below. break; case LookupResult::NotFoundInCurrentInstantiation: case LookupResult::FoundOverloaded: case LookupResult::FoundUnresolvedValue: case LookupResult::Ambiguous: Result.suppressDiagnostics(); return ObjCInstanceMessage; case LookupResult::Found: { // If the identifier is a class or not, and there is a trailing dot, // it's an instance message. if (HasTrailingDot) return ObjCInstanceMessage; // We found something. If it's a type, then we have a class // message. Otherwise, it's an instance message. NamedDecl *ND = Result.getFoundDecl(); QualType T; if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(ND)) T = Context.getObjCInterfaceType(Class); else if (TypeDecl *Type = dyn_cast<TypeDecl>(ND)) { T = Context.getTypeDeclType(Type); DiagnoseUseOfDecl(Type, NameLoc); } else return ObjCInstanceMessage; // We have a class message, and T is the type we're // messaging. Build source-location information for it. 
TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc); ReceiverType = CreateParsedType(T, TSInfo); return ObjCClassMessage; } } if (TypoCorrection Corrected = CorrectTypo( Result.getLookupNameInfo(), Result.getLookupKind(), S, nullptr, llvm::make_unique<ObjCInterfaceOrSuperCCC>(getCurMethodDecl()), CTK_ErrorRecovery, nullptr, false, nullptr, false)) { if (Corrected.isKeyword()) { // If we've found the keyword "super" (the only keyword that would be // returned by CorrectTypo), this is a send to super. diagnoseTypo(Corrected, PDiag(diag::err_unknown_receiver_suggest) << Name); return ObjCSuperMessage; } else if (ObjCInterfaceDecl *Class = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) { // If we found a declaration, correct when it refers to an Objective-C // class. diagnoseTypo(Corrected, PDiag(diag::err_unknown_receiver_suggest) << Name); QualType T = Context.getObjCInterfaceType(Class); TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc); ReceiverType = CreateParsedType(T, TSInfo); return ObjCClassMessage; } } // Fall back: let the parser try to parse it as an instance message. return ObjCInstanceMessage; } ExprResult Sema::ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args) { // Determine whether we are inside a method or not. ObjCMethodDecl *Method = tryCaptureObjCSelf(SuperLoc); if (!Method) { Diag(SuperLoc, diag::err_invalid_receiver_to_message_super); return ExprError(); } ObjCInterfaceDecl *Class = Method->getClassInterface(); if (!Class) { Diag(SuperLoc, diag::error_no_super_class_message) << Method->getDeclName(); return ExprError(); } QualType SuperTy(Class->getSuperClassType(), 0); if (SuperTy.isNull()) { // The current class does not have a superclass. Diag(SuperLoc, diag::error_root_class_cannot_use_super) << Class->getIdentifier(); return ExprError(); } // We are in a method whose class has a superclass, so 'super' // is acting as a keyword. if (Method->getSelector() == Sel) getCurFunction()->ObjCShouldCallSuper = false; if (Method->isInstanceMethod()) { // Since we are in an instance method, this is an instance // message to the superclass instance. SuperTy = Context.getObjCObjectPointerType(SuperTy); return BuildInstanceMessage(nullptr, SuperTy, SuperLoc, Sel, /*Method=*/nullptr, LBracLoc, SelectorLocs, RBracLoc, Args); } // Since we are in a class method, this is a class message to // the superclass. return BuildClassMessage(/*ReceiverTypeInfo=*/nullptr, SuperTy, SuperLoc, Sel, /*Method=*/nullptr, LBracLoc, SelectorLocs, RBracLoc, Args); } ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args) { TypeSourceInfo *receiverTypeInfo = nullptr; if (!ReceiverType.isNull()) receiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType); return BuildClassMessage(receiverTypeInfo, ReceiverType, /*SuperLoc=*/isSuperReceiver ? 
                                              Loc : SourceLocation(),
                           Sel, Method, Loc, Loc, Loc, Args,
                           /*isImplicit=*/true);
}

static void applyCocoaAPICheck(Sema &S, const ObjCMessageExpr *Msg,
                               unsigned DiagID,
                               bool (*refactor)(const ObjCMessageExpr *,
                                                const NSAPI &,
                                                edit::Commit &)) {
  SourceLocation MsgLoc = Msg->getExprLoc();
  if (S.Diags.isIgnored(DiagID, MsgLoc))
    return;

  SourceManager &SM = S.SourceMgr;
  edit::Commit ECommit(SM, S.LangOpts);
  if (refactor(Msg, *S.NSAPIObj, ECommit)) {
    DiagnosticBuilder Builder = S.Diag(MsgLoc, DiagID)
                                << Msg->getSelector()
                                << Msg->getSourceRange();
    // FIXME: Don't emit the diagnostic at all if the fix-its are
    // non-committable.
    if (!ECommit.isCommitable())
      return;
    for (edit::Commit::edit_iterator I = ECommit.edit_begin(),
                                     E = ECommit.edit_end();
         I != E; ++I) {
      const edit::Commit::Edit &Edit = *I;
      switch (Edit.Kind) {
      case edit::Commit::Act_Insert:
        Builder.AddFixItHint(FixItHint::CreateInsertion(Edit.OrigLoc,
                                                        Edit.Text,
                                                        Edit.BeforePrev));
        break;
      case edit::Commit::Act_InsertFromRange:
        Builder.AddFixItHint(
            FixItHint::CreateInsertionFromRange(Edit.OrigLoc,
                                                Edit.getInsertFromRange(SM),
                                                Edit.BeforePrev));
        break;
      case edit::Commit::Act_Remove:
        Builder.AddFixItHint(FixItHint::CreateRemoval(Edit.getFileRange(SM)));
        break;
      }
    }
  }
}

static void checkCocoaAPI(Sema &S, const ObjCMessageExpr *Msg) {
  applyCocoaAPICheck(S, Msg, diag::warn_objc_redundant_literal_use,
                     edit::rewriteObjCRedundantCallWithLiteral);
}

/// \brief Diagnose the use of a %s directive in an NSString that is being
/// passed as a format string to a formatting method.
static void DiagnoseCStringFormatDirectiveInObjCAPI(Sema &S,
                                                    ObjCMethodDecl *Method,
                                                    Selector Sel,
                                                    Expr **Args,
                                                    unsigned NumArgs) {
  unsigned Idx = 0;
  bool Format = false;
  ObjCStringFormatFamily SFFamily = Sel.getStringFormatFamily();
  if (SFFamily == ObjCStringFormatFamily::SFF_NSString) {
    Idx = 0;
    Format = true;
  }
  else if (Method) {
    for (const auto *I : Method->specific_attrs<FormatAttr>()) {
      if (S.GetFormatNSStringIdx(I, Idx)) {
        Format = true;
        break;
      }
    }
  }
  if (!Format || NumArgs <= Idx)
    return;

  Expr *FormatExpr = Args[Idx];
  if (ObjCStringLiteral *OSL =
          dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) {
    StringLiteral *FormatString = OSL->getString();
    if (S.FormatStringHasSArg(FormatString)) {
      S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string)
        << "%s" << 0 << 0;
      if (Method)
        S.Diag(Method->getLocation(), diag::note_method_declared_at)
          << Method->getDeclName();
    }
  }
}

/// \brief Build an Objective-C class message expression.
///
/// This routine takes care of both normal class messages and
/// class messages to the superclass.
///
/// \param ReceiverTypeInfo Type source information that describes the
/// receiver of this message. This may be NULL, in which case we are
/// sending to the superclass and \p SuperLoc must be a valid source
/// location.
///
/// \param ReceiverType The type of the object receiving the
/// message. When \p ReceiverTypeInfo is non-NULL, this is the same
/// type as that refers to. For a superclass send, this is the type of
/// the superclass.
///
/// \param SuperLoc The location of the "super" keyword in a
/// superclass message.
///
/// \param Sel The selector to which the message is being sent.
///
/// \param Method The method that this class message is invoking, if
/// already known.
///
/// \param LBracLoc The location of the opening square bracket '['.
///
/// \param RBracLoc The location of the closing square bracket ']'.
///
/// \param ArgsIn The message arguments.
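///
/// For illustration (editorial note, not from the original source): a class
/// message such as
/// \code
///   [NSString stringWithUTF8String:"hi"]
/// \endcode
/// arrives here with \p ReceiverTypeInfo describing 'NSString', \p Sel naming
/// 'stringWithUTF8String:', and a single argument in \p ArgsIn.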
ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg ArgsIn, bool isImplicit) { SourceLocation Loc = SuperLoc.isValid()? SuperLoc : ReceiverTypeInfo->getTypeLoc().getSourceRange().getBegin(); if (LBracLoc.isInvalid()) { Diag(Loc, diag::err_missing_open_square_message_send) << FixItHint::CreateInsertion(Loc, "["); LBracLoc = Loc; } SourceLocation SelLoc; if (!SelectorLocs.empty() && SelectorLocs.front().isValid()) SelLoc = SelectorLocs.front(); else SelLoc = Loc; if (ReceiverType->isDependentType()) { // If the receiver type is dependent, we can't type-check anything // at this point. Build a dependent expression. unsigned NumArgs = ArgsIn.size(); Expr **Args = ArgsIn.data(); assert(SuperLoc.isInvalid() && "Message to super with dependent type"); return ObjCMessageExpr::Create( Context, ReceiverType, VK_RValue, LBracLoc, ReceiverTypeInfo, Sel, SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs), RBracLoc, isImplicit); } // Find the class to which we are sending this message. ObjCInterfaceDecl *Class = nullptr; const ObjCObjectType *ClassType = ReceiverType->getAs<ObjCObjectType>(); if (!ClassType || !(Class = ClassType->getInterface())) { Diag(Loc, diag::err_invalid_receiver_class_message) << ReceiverType; return ExprError(); } assert(Class && "We don't know which class we're messaging?"); // objc++ diagnoses during typename annotation. if (!getLangOpts().CPlusPlus) (void)DiagnoseUseOfDecl(Class, SelLoc); // Find the method we are messaging. if (!Method) { SourceRange TypeRange = SuperLoc.isValid()? SourceRange(SuperLoc) : ReceiverTypeInfo->getTypeLoc().getSourceRange(); if (RequireCompleteType(Loc, Context.getObjCInterfaceType(Class), (getLangOpts().ObjCAutoRefCount ? diag::err_arc_receiver_forward_class : diag::warn_receiver_forward_class), TypeRange)) { // A forward class used in messaging is treated as a 'Class' Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(LBracLoc, RBracLoc)); if (Method && !getLangOpts().ObjCAutoRefCount) Diag(Method->getLocation(), diag::note_method_sent_forward_class) << Method->getDeclName(); } if (!Method) Method = Class->lookupClassMethod(Sel); // If we have an implementation in scope, check "private" methods. if (!Method) Method = Class->lookupPrivateClassMethod(Sel); if (Method && DiagnoseUseOfDecl(Method, SelLoc)) return ExprError(); } // Check the argument types and determine the result type. QualType ReturnType; ExprValueKind VK = VK_RValue; unsigned NumArgs = ArgsIn.size(); Expr **Args = ArgsIn.data(); if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs), Sel, SelectorLocs, Method, true, SuperLoc.isValid(), LBracLoc, RBracLoc, SourceRange(), ReturnType, VK)) return ExprError(); if (Method && !Method->getReturnType()->isVoidType() && RequireCompleteType(LBracLoc, Method->getReturnType(), diag::err_illegal_message_expr_incomplete_type)) return ExprError(); // Warn about explicit call of +initialize on its own class. But not on 'super'. 
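  // Editorial example (not from the original source): within MyClass, an
  // explicit '[MyClass initialize];' triggers warn_direct_initialize_call
  // below, while '[super initialize]' is accepted only when sent from inside
  // an +initialize implementation.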
  if (Method && Method->getMethodFamily() == OMF_initialize) {
    if (!SuperLoc.isValid()) {
      const ObjCInterfaceDecl *ID =
        dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext());
      if (ID == Class) {
        Diag(Loc, diag::warn_direct_initialize_call);
        Diag(Method->getLocation(), diag::note_method_declared_at)
          << Method->getDeclName();
      }
    }
    else if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
      // [super initialize] is allowed only within an +initialize
      // implementation.
      if (CurMeth->getMethodFamily() != OMF_initialize) {
        Diag(Loc, diag::warn_direct_super_initialize_call);
        Diag(Method->getLocation(), diag::note_method_declared_at)
          << Method->getDeclName();
        Diag(CurMeth->getLocation(), diag::note_method_declared_at)
          << CurMeth->getDeclName();
      }
    }
  }

  DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs);

  // Construct the appropriate ObjCMessageExpr.
  ObjCMessageExpr *Result;
  if (SuperLoc.isValid())
    Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
                                     SuperLoc, /*IsInstanceSuper=*/false,
                                     ReceiverType, Sel, SelectorLocs,
                                     Method, makeArrayRef(Args, NumArgs),
                                     RBracLoc, isImplicit);
  else {
    Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
                                     ReceiverTypeInfo, Sel, SelectorLocs,
                                     Method, makeArrayRef(Args, NumArgs),
                                     RBracLoc, isImplicit);
    if (!isImplicit)
      checkCocoaAPI(*this, Result);
  }
  return MaybeBindToTemporary(Result);
}

// ActOnClassMessage - used for both unary and keyword messages.
// ArgExprs is optional - if it is present, the number of expressions
// is obtained from Sel.getNumArgs().
ExprResult Sema::ActOnClassMessage(Scope *S,
                                   ParsedType Receiver,
                                   Selector Sel,
                                   SourceLocation LBracLoc,
                                   ArrayRef<SourceLocation> SelectorLocs,
                                   SourceLocation RBracLoc,
                                   MultiExprArg Args) {
  TypeSourceInfo *ReceiverTypeInfo;
  QualType ReceiverType = GetTypeFromParser(Receiver, &ReceiverTypeInfo);
  if (ReceiverType.isNull())
    return ExprError();

  if (!ReceiverTypeInfo)
    ReceiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType, LBracLoc);

  return BuildClassMessage(ReceiverTypeInfo, ReceiverType,
                           /*SuperLoc=*/SourceLocation(), Sel,
                           /*Method=*/nullptr, LBracLoc, SelectorLocs,
                           RBracLoc, Args);
}

ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
                                              QualType ReceiverType,
                                              SourceLocation Loc,
                                              Selector Sel,
                                              ObjCMethodDecl *Method,
                                              MultiExprArg Args) {
  return BuildInstanceMessage(Receiver, ReceiverType,
                              /*SuperLoc=*/!Receiver ? Loc : SourceLocation(),
                              Sel, Method, Loc, Loc, Loc, Args,
                              /*isImplicit=*/true);
}

/// \brief Build an Objective-C instance message expression.
///
/// This routine takes care of both normal instance messages and
/// instance messages to the superclass instance.
///
/// \param Receiver The expression that computes the object that will
/// receive this message. This may be empty, in which case we are
/// sending to the superclass instance and \p SuperLoc must be a valid
/// source location.
///
/// \param ReceiverType The (static) type of the object receiving the
/// message. When a \p Receiver expression is provided, this is the
/// same type as that expression. For a superclass instance send, this
/// is a pointer to the type of the superclass.
///
/// \param SuperLoc The location of the "super" keyword in a
/// superclass instance message.
///
/// \param Sel The selector to which the message is being sent.
///
/// \param Method The method that this instance message is invoking, if
/// already known.
///
/// \param LBracLoc The location of the opening square bracket '['.
///
/// \param RBracLoc The location of the closing square bracket ']'.
/// /// \param ArgsIn The message arguments. ExprResult Sema::BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg ArgsIn, bool isImplicit) { // The location of the receiver. SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart(); SourceRange RecRange = SuperLoc.isValid()? SuperLoc : Receiver->getSourceRange(); SourceLocation SelLoc; if (!SelectorLocs.empty() && SelectorLocs.front().isValid()) SelLoc = SelectorLocs.front(); else SelLoc = Loc; if (LBracLoc.isInvalid()) { Diag(Loc, diag::err_missing_open_square_message_send) << FixItHint::CreateInsertion(Loc, "["); LBracLoc = Loc; } // If we have a receiver expression, perform appropriate promotions // and determine receiver type. if (Receiver) { if (Receiver->hasPlaceholderType()) { ExprResult Result; if (Receiver->getType() == Context.UnknownAnyTy) Result = forceUnknownAnyToType(Receiver, Context.getObjCIdType()); else Result = CheckPlaceholderExpr(Receiver); if (Result.isInvalid()) return ExprError(); Receiver = Result.get(); } if (Receiver->isTypeDependent()) { // If the receiver is type-dependent, we can't type-check anything // at this point. Build a dependent expression. unsigned NumArgs = ArgsIn.size(); Expr **Args = ArgsIn.data(); assert(SuperLoc.isInvalid() && "Message to super with dependent type"); return ObjCMessageExpr::Create( Context, Context.DependentTy, VK_RValue, LBracLoc, Receiver, Sel, SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs), RBracLoc, isImplicit); } // If necessary, apply function/array conversion to the receiver. // C99 6.7.5.3p[7,8]. ExprResult Result = DefaultFunctionArrayLvalueConversion(Receiver); if (Result.isInvalid()) return ExprError(); Receiver = Result.get(); ReceiverType = Receiver->getType(); // If the receiver is an ObjC pointer, a block pointer, or an // __attribute__((NSObject)) pointer, we don't need to do any // special conversion in order to look up a receiver. if (ReceiverType->isObjCRetainableType()) { // do nothing } else if (!getLangOpts().ObjCAutoRefCount && !Context.getObjCIdType().isNull() && (ReceiverType->isPointerType() || ReceiverType->isIntegerType())) { // Implicitly convert integers and pointers to 'id' but emit a warning. // But not in ARC. Diag(Loc, diag::warn_bad_receiver_type) << ReceiverType << Receiver->getSourceRange(); if (ReceiverType->isPointerType()) { Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(), CK_CPointerToObjCPointerCast).get(); } else { // TODO: specialized warning on null receivers? bool IsNull = Receiver->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull); CastKind Kind = IsNull ? CK_NullToPointer : CK_IntegralToPointer; Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(), Kind).get(); } ReceiverType = Receiver->getType(); } else if (getLangOpts().CPlusPlus) { // The receiver must be a complete type. if (RequireCompleteType(Loc, Receiver->getType(), diag::err_incomplete_receiver_type)) return ExprError(); ExprResult result = PerformContextuallyConvertToObjCPointer(Receiver); if (result.isUsable()) { Receiver = result.get(); ReceiverType = Receiver->getType(); } } } // There's a somewhat weird interaction here where we assume that we // won't actually have a method unless we also don't need to do some // of the more detailed type-checking on the receiver. 
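  // Editorial example (not from the original source): for 'id obj;
  // [obj doSomething];' no method is known statically, so the code below
  // consults the global method pool; for a typed receiver such as
  // 'NSString *s', lookup proceeds through the interface, its protocol
  // qualifiers, and any visible @implementation instead. ('doSomething' is
  // a hypothetical selector used only for illustration.)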
if (!Method) { // Handle messages to id and __kindof types (where we use the // global method pool). // FIXME: The type bound is currently ignored by lookup in the // global pool. const ObjCObjectType *typeBound = nullptr; bool receiverIsIdLike = ReceiverType->isObjCIdOrObjectKindOfType(Context, typeBound); if (receiverIsIdLike || ReceiverType->isBlockPointerType() || (Receiver && Context.isObjCNSObjectType(Receiver->getType()))) { Method = LookupInstanceMethodInGlobalPool(Sel, SourceRange(LBracLoc, RBracLoc), receiverIsIdLike); if (!Method) Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(LBracLoc,RBracLoc), receiverIsIdLike); if (Method) { if (ObjCMethodDecl *BestMethod = SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod())) Method = BestMethod; if (!AreMultipleMethodsInGlobalPool(Sel, Method, SourceRange(LBracLoc, RBracLoc), receiverIsIdLike)) { DiagnoseUseOfDecl(Method, SelLoc); } } } else if (ReceiverType->isObjCClassOrClassKindOfType() || ReceiverType->isObjCQualifiedClassType()) { // Handle messages to Class. // We allow sending a message to a qualified Class ("Class<foo>"), which // is ok as long as one of the protocols implements the selector (if not, // warn). if (!ReceiverType->isObjCClassOrClassKindOfType()) { const ObjCObjectPointerType *QClassTy = ReceiverType->getAsObjCQualifiedClassType(); // Search protocols for class methods. Method = LookupMethodInQualifiedType(Sel, QClassTy, false); if (!Method) { Method = LookupMethodInQualifiedType(Sel, QClassTy, true); // warn if instance method found for a Class message. if (Method) { Diag(SelLoc, diag::warn_instance_method_on_class_found) << Method->getSelector() << Sel; Diag(Method->getLocation(), diag::note_method_declared_at) << Method->getDeclName(); } } } else { if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) { if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) { // First check the public methods in the class interface. Method = ClassDecl->lookupClassMethod(Sel); if (!Method) Method = ClassDecl->lookupPrivateClassMethod(Sel); } if (Method && DiagnoseUseOfDecl(Method, SelLoc)) return ExprError(); } if (!Method) { // If not messaging 'self', look for any factory method named 'Sel'. if (!Receiver || !isSelfExpr(Receiver)) { Method = LookupFactoryMethodInGlobalPool(Sel, SourceRange(LBracLoc, RBracLoc)); if (!Method) { // If no class (factory) method was found, check if an _instance_ // method of the same name exists in the root class only. Method = LookupInstanceMethodInGlobalPool(Sel, SourceRange(LBracLoc, RBracLoc)); if (Method) if (const ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext())) { if (ID->getSuperClass()) Diag(SelLoc, diag::warn_root_inst_method_not_found) << Sel << SourceRange(LBracLoc, RBracLoc); } } if (Method) if (ObjCMethodDecl *BestMethod = SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod())) Method = BestMethod; } } } } else { ObjCInterfaceDecl *ClassDecl = nullptr; // We allow sending a message to a qualified ID ("id<foo>"), which is ok as // long as one of the protocols implements the selector (if not, warn). // And as long as message is not deprecated/unavailable (warn if it is). if (const ObjCObjectPointerType *QIdTy = ReceiverType->getAsObjCQualifiedIdType()) { // Search protocols for instance methods. 
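      // Editorial example (not from the original source): for
      // 'id<NSCopying> obj; [obj copyWithZone:NULL];' the selector is
      // resolved against the protocols named in the qualified-id type
      // rather than against any concrete class.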
Method = LookupMethodInQualifiedType(Sel, QIdTy, true); if (!Method) Method = LookupMethodInQualifiedType(Sel, QIdTy, false); if (Method && DiagnoseUseOfDecl(Method, SelLoc)) return ExprError(); } else if (const ObjCObjectPointerType *OCIType = ReceiverType->getAsObjCInterfacePointerType()) { // We allow sending a message to a pointer to an interface (an object). ClassDecl = OCIType->getInterfaceDecl(); // Try to complete the type. Under ARC, this is a hard error from which // we don't try to recover. const ObjCInterfaceDecl *forwardClass = nullptr; if (RequireCompleteType(Loc, OCIType->getPointeeType(), getLangOpts().ObjCAutoRefCount ? diag::err_arc_receiver_forward_instance : diag::warn_receiver_forward_instance, Receiver? Receiver->getSourceRange() : SourceRange(SuperLoc))) { if (getLangOpts().ObjCAutoRefCount) return ExprError(); forwardClass = OCIType->getInterfaceDecl(); Diag(Receiver ? Receiver->getLocStart() : SuperLoc, diag::note_receiver_is_id); Method = nullptr; } else { Method = ClassDecl->lookupInstanceMethod(Sel); } if (!Method) // Search protocol qualifiers. Method = LookupMethodInQualifiedType(Sel, OCIType, true); if (!Method) { // If we have implementations in scope, check "private" methods. Method = ClassDecl->lookupPrivateMethod(Sel); if (!Method && getLangOpts().ObjCAutoRefCount) { Diag(SelLoc, diag::err_arc_may_not_respond) << OCIType->getPointeeType() << Sel << RecRange << SourceRange(SelectorLocs.front(), SelectorLocs.back()); return ExprError(); } if (!Method && (!Receiver || !isSelfExpr(Receiver))) { // If we still haven't found a method, look in the global pool. This // behavior isn't very desirable, however we need it for GCC // compatibility. FIXME: should we deviate?? if (OCIType->qual_empty()) { Method = LookupInstanceMethodInGlobalPool(Sel, SourceRange(LBracLoc, RBracLoc)); if (Method) { if (auto BestMethod = SelectBestMethod(Sel, ArgsIn, Method->isInstanceMethod())) Method = BestMethod; AreMultipleMethodsInGlobalPool(Sel, Method, SourceRange(LBracLoc, RBracLoc), true); } if (Method && !forwardClass) Diag(SelLoc, diag::warn_maynot_respond) << OCIType->getInterfaceDecl()->getIdentifier() << Sel << RecRange; } } } if (Method && DiagnoseUseOfDecl(Method, SelLoc, forwardClass)) return ExprError(); } else { // Reject other random receiver types (e.g. structs). Diag(Loc, diag::err_bad_receiver_type) << ReceiverType << Receiver->getSourceRange(); return ExprError(); } } } FunctionScopeInfo *DIFunctionScopeInfo = (Method && Method->getMethodFamily() == OMF_init) ? getEnclosingFunction() : nullptr; if (DIFunctionScopeInfo && DIFunctionScopeInfo->ObjCIsDesignatedInit && (SuperLoc.isValid() || isSelfExpr(Receiver))) { bool isDesignatedInitChain = false; if (SuperLoc.isValid()) { if (const ObjCObjectPointerType * OCIType = ReceiverType->getAsObjCInterfacePointerType()) { if (const ObjCInterfaceDecl *ID = OCIType->getInterfaceDecl()) { // Either we know this is a designated initializer or we // conservatively assume it because we don't know for sure. if (!ID->declaresOrInheritsDesignatedInitializers() || ID->isDesignatedInitializer(Sel)) { isDesignatedInitChain = true; DIFunctionScopeInfo->ObjCWarnForNoDesignatedInitChain = false; } } } } if (!isDesignatedInitChain) { const ObjCMethodDecl *InitMethod = nullptr; bool isDesignated = getCurMethodDecl()->isDesignatedInitializerForTheInterface(&InitMethod); assert(isDesignated && InitMethod); (void)isDesignated; Diag(SelLoc, SuperLoc.isValid() ? 
diag::warn_objc_designated_init_non_designated_init_call : diag::warn_objc_designated_init_non_super_designated_init_call); Diag(InitMethod->getLocation(), diag::note_objc_designated_init_marked_here); } } if (DIFunctionScopeInfo && DIFunctionScopeInfo->ObjCIsSecondaryInit && (SuperLoc.isValid() || isSelfExpr(Receiver))) { if (SuperLoc.isValid()) { Diag(SelLoc, diag::warn_objc_secondary_init_super_init_call); } else { DIFunctionScopeInfo->ObjCWarnForNoInitDelegation = false; } } // Check the message arguments. unsigned NumArgs = ArgsIn.size(); Expr **Args = ArgsIn.data(); QualType ReturnType; ExprValueKind VK = VK_RValue; bool ClassMessage = (ReceiverType->isObjCClassType() || ReceiverType->isObjCQualifiedClassType()); if (CheckMessageArgumentTypes(ReceiverType, MultiExprArg(Args, NumArgs), Sel, SelectorLocs, Method, ClassMessage, SuperLoc.isValid(), LBracLoc, RBracLoc, RecRange, ReturnType, VK)) return ExprError(); if (Method && !Method->getReturnType()->isVoidType() && RequireCompleteType(LBracLoc, Method->getReturnType(), diag::err_illegal_message_expr_incomplete_type)) return ExprError(); // In ARC, forbid the user from sending messages to // retain/release/autorelease/dealloc/retainCount explicitly. if (getLangOpts().ObjCAutoRefCount) { ObjCMethodFamily family = (Method ? Method->getMethodFamily() : Sel.getMethodFamily()); switch (family) { case OMF_init: if (Method) checkInitMethod(Method, ReceiverType); case OMF_None: case OMF_alloc: case OMF_copy: case OMF_finalize: case OMF_mutableCopy: case OMF_new: case OMF_self: case OMF_initialize: break; case OMF_dealloc: case OMF_retain: case OMF_release: case OMF_autorelease: case OMF_retainCount: Diag(SelLoc, diag::err_arc_illegal_explicit_message) << Sel << RecRange; break; case OMF_performSelector: if (Method && NumArgs >= 1) { if (ObjCSelectorExpr *SelExp = dyn_cast<ObjCSelectorExpr>(Args[0])) { Selector ArgSel = SelExp->getSelector(); ObjCMethodDecl *SelMethod = LookupInstanceMethodInGlobalPool(ArgSel, SelExp->getSourceRange()); if (!SelMethod) SelMethod = LookupFactoryMethodInGlobalPool(ArgSel, SelExp->getSourceRange()); if (SelMethod) { ObjCMethodFamily SelFamily = SelMethod->getMethodFamily(); switch (SelFamily) { case OMF_alloc: case OMF_copy: case OMF_mutableCopy: case OMF_new: case OMF_self: case OMF_init: // Issue error, unless ns_returns_not_retained. if (!SelMethod->hasAttr<NSReturnsNotRetainedAttr>()) { // selector names a +1 method Diag(SelLoc, diag::err_arc_perform_selector_retains); Diag(SelMethod->getLocation(), diag::note_method_declared_at) << SelMethod->getDeclName(); } break; default: // +0 call. OK. unless ns_returns_retained. if (SelMethod->hasAttr<NSReturnsRetainedAttr>()) { // selector names a +1 method Diag(SelLoc, diag::err_arc_perform_selector_retains); Diag(SelMethod->getLocation(), diag::note_method_declared_at) << SelMethod->getDeclName(); } break; } } } else { // error (may leak). Diag(SelLoc, diag::warn_arc_perform_selector_leaks); Diag(Args[0]->getExprLoc(), diag::note_used_here); } } break; } } DiagnoseCStringFormatDirectiveInObjCAPI(*this, Method, Sel, Args, NumArgs); // Construct the appropriate ObjCMessageExpr instance. 
  ObjCMessageExpr *Result;
  if (SuperLoc.isValid())
    Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
                                     SuperLoc, /*IsInstanceSuper=*/true,
                                     ReceiverType, Sel, SelectorLocs, Method,
                                     makeArrayRef(Args, NumArgs), RBracLoc,
                                     isImplicit);
  else {
    Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
                                     Receiver, Sel, SelectorLocs, Method,
                                     makeArrayRef(Args, NumArgs), RBracLoc,
                                     isImplicit);
    if (!isImplicit)
      checkCocoaAPI(*this, Result);
  }

  if (getLangOpts().ObjCAutoRefCount) {
    // In ARC, annotate delegate init calls.
    if (Result->getMethodFamily() == OMF_init &&
        (SuperLoc.isValid() || isSelfExpr(Receiver))) {
      // Only consider init calls *directly* in init implementations,
      // not within blocks.
      ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CurContext);
      if (method && method->getMethodFamily() == OMF_init) {
        // The implicit assignment to self means we also don't want to
        // consume the result.
        Result->setDelegateInitCall(true);
        return Result;
      }
    }

    // In ARC, check for message sends which are likely to introduce
    // retain cycles.
    checkRetainCycles(Result);

    if (!isImplicit && Method) {
      if (const ObjCPropertyDecl *Prop = Method->findPropertyDecl()) {
        bool IsWeak =
          Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak;
        if (!IsWeak && Sel.isUnarySelector())
          IsWeak = ReturnType.getObjCLifetime() & Qualifiers::OCL_Weak;
        if (IsWeak &&
            !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, LBracLoc))
          getCurFunction()->recordUseOfWeak(Result, Prop);
      }
    }
  }

  CheckObjCCircularContainer(Result);

  return MaybeBindToTemporary(Result);
}

static void RemoveSelectorFromWarningCache(Sema &S, Expr *Arg) {
  if (ObjCSelectorExpr *OSE =
      dyn_cast<ObjCSelectorExpr>(Arg->IgnoreParenCasts())) {
    Selector Sel = OSE->getSelector();
    SourceLocation Loc = OSE->getAtLoc();
    auto Pos = S.ReferencedSelectors.find(Sel);
    if (Pos != S.ReferencedSelectors.end() && Pos->second == Loc)
      S.ReferencedSelectors.erase(Pos);
  }
}

// ActOnInstanceMessage - used for both unary and keyword messages.
// ArgExprs is optional - if it is present, the number of expressions
// is obtained from Sel.getNumArgs().
ExprResult Sema::ActOnInstanceMessage(Scope *S,
                                      Expr *Receiver,
                                      Selector Sel,
                                      SourceLocation LBracLoc,
                                      ArrayRef<SourceLocation> SelectorLocs,
                                      SourceLocation RBracLoc,
                                      MultiExprArg Args) {
  if (!Receiver)
    return ExprError();

  // A ParenListExpr can show up while doing error recovery with invalid code.
  if (isa<ParenListExpr>(Receiver)) {
    ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Receiver);
    if (Result.isInvalid()) return ExprError();
    Receiver = Result.get();
  }

  if (RespondsToSelectorSel.isNull()) {
    IdentifierInfo *SelectorId = &Context.Idents.get("respondsToSelector");
    RespondsToSelectorSel = Context.Selectors.getUnarySelector(SelectorId);
  }
  if (Sel == RespondsToSelectorSel)
    RemoveSelectorFromWarningCache(*this, Args[0]);

  return BuildInstanceMessage(Receiver, Receiver->getType(),
                              /*SuperLoc=*/SourceLocation(), Sel,
                              /*Method=*/nullptr, LBracLoc, SelectorLocs,
                              RBracLoc, Args);
}

enum ARCConversionTypeClass {
  /// int, void, struct A
  ACTC_none,

  /// id, void (^)()
  ACTC_retainable,

  /// id*, id***, void (^*)(),
  ACTC_indirectRetainable,

  /// void* might be a normal C type, or it might be a CF type.
  ACTC_voidPtr,

  /// struct A*
  ACTC_coreFoundation
};

static bool isAnyRetainable(ARCConversionTypeClass ACTC) {
  return (ACTC == ACTC_retainable ||
          ACTC == ACTC_coreFoundation ||
          ACTC == ACTC_voidPtr);
}

static bool isAnyCLike(ARCConversionTypeClass ACTC) {
  return ACTC == ACTC_none ||
         ACTC == ACTC_voidPtr ||
         ACTC == ACTC_coreFoundation;
}

static ARCConversionTypeClass classifyTypeForARCConversion(QualType type) {
  bool isIndirect = false;

  // Ignore an outermost reference type.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    type = ref->getPointeeType();
    isIndirect = true;
  }

  // Drill through pointers and arrays recursively.
  while (true) {
    if (const PointerType *ptr = type->getAs<PointerType>()) {
      type = ptr->getPointeeType();

      // The first level of pointer may be the innermost pointer on a CF type.
      if (!isIndirect) {
        if (type->isVoidType()) return ACTC_voidPtr;
        if (type->isRecordType()) return ACTC_coreFoundation;
      }
    } else if (const ArrayType *array = type->getAsArrayTypeUnsafe()) {
      type = QualType(array->getElementType()->getBaseElementTypeUnsafe(), 0);
    } else {
      break;
    }
    isIndirect = true;
  }

  if (isIndirect) {
    if (type->isObjCARCBridgableType())
      return ACTC_indirectRetainable;
    return ACTC_none;
  }

  if (type->isObjCARCBridgableType())
    return ACTC_retainable;

  return ACTC_none;
}

namespace {
  /// A result from the cast checker.
  enum ACCResult {
    /// Cannot be cast.
    ACC_invalid,

    /// Can be safely retained or not retained.
    ACC_bottom,

    /// Can be cast at +0.
    ACC_plusZero,

    /// Can be cast at +1.
    ACC_plusOne
  };
  ACCResult merge(ACCResult left, ACCResult right) {
    if (left == right) return left;
    if (left == ACC_bottom) return right;
    if (right == ACC_bottom) return left;
    return ACC_invalid;
  }

  /// A checker which white-lists certain expressions whose conversion
  /// to or from a retainable type would otherwise be forbidden in ARC.
  class ARCCastChecker : public StmtVisitor<ARCCastChecker, ACCResult> {
    typedef StmtVisitor<ARCCastChecker, ACCResult> super;

    ASTContext &Context;
    ARCConversionTypeClass SourceClass;
    ARCConversionTypeClass TargetClass;
    bool Diagnose;

    static bool isCFType(QualType type) {
      // Someday this can use ns_bridged. For now, it has to do this.
      return type->isCARCBridgableType();
    }

  public:
    ARCCastChecker(ASTContext &Context, ARCConversionTypeClass source,
                   ARCConversionTypeClass target, bool diagnose)
      : Context(Context), SourceClass(source), TargetClass(target),
        Diagnose(diagnose) {}

    using super::Visit;
    ACCResult Visit(Expr *e) {
      return super::Visit(e->IgnoreParens());
    }

    ACCResult VisitStmt(Stmt *s) {
      return ACC_invalid;
    }

    /// Null pointer constants can be cast however you please.
    ACCResult VisitExpr(Expr *e) {
      if (e->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull))
        return ACC_bottom;
      return ACC_invalid;
    }

    /// Objective-C string literals can be safely cast.
    ACCResult VisitObjCStringLiteral(ObjCStringLiteral *e) {
      // If we're casting to any retainable type, go ahead. Global
      // strings are immune to retains, so this is bottom.
      if (isAnyRetainable(TargetClass)) return ACC_bottom;

      return ACC_invalid;
    }

    /// Look through certain implicit and explicit casts.
    ACCResult VisitCastExpr(CastExpr *e) {
      switch (e->getCastKind()) {
        case CK_NullToPointer:
          return ACC_bottom;

        case CK_NoOp:
        case CK_LValueToRValue:
        case CK_BitCast:
        case CK_CPointerToObjCPointerCast:
        case CK_BlockPointerToObjCPointerCast:
        case CK_AnyPointerToBlockPointerCast:
          return Visit(e->getSubExpr());

        default:
          return ACC_invalid;
      }
    }

    /// Look through unary extension.
ACCResult VisitUnaryExtension(UnaryOperator *e) { return Visit(e->getSubExpr()); } /// Ignore the LHS of a comma operator. ACCResult VisitBinComma(BinaryOperator *e) { return Visit(e->getRHS()); } /// Conditional operators are okay if both sides are okay. ACCResult VisitConditionalOperator(ConditionalOperator *e) { ACCResult left = Visit(e->getTrueExpr()); if (left == ACC_invalid) return ACC_invalid; return merge(left, Visit(e->getFalseExpr())); } /// Look through pseudo-objects. ACCResult VisitPseudoObjectExpr(PseudoObjectExpr *e) { // If we're getting here, we should always have a result. return Visit(e->getResultExpr()); } /// Statement expressions are okay if their result expression is okay. ACCResult VisitStmtExpr(StmtExpr *e) { return Visit(e->getSubStmt()->body_back()); } /// Some declaration references are okay. ACCResult VisitDeclRefExpr(DeclRefExpr *e) { VarDecl *var = dyn_cast<VarDecl>(e->getDecl()); // References to global constants are okay. if (isAnyRetainable(TargetClass) && isAnyRetainable(SourceClass) && var && var->getStorageClass() == SC_Extern && var->getType().isConstQualified()) { // In system headers, they can also be assumed to be immune to retains. // These are things like 'kCFStringTransformToLatin'. if (Context.getSourceManager().isInSystemHeader(var->getLocation())) return ACC_bottom; return ACC_plusZero; } // Nothing else. return ACC_invalid; } /// Some calls are okay. ACCResult VisitCallExpr(CallExpr *e) { if (FunctionDecl *fn = e->getDirectCallee()) if (ACCResult result = checkCallToFunction(fn)) return result; return super::VisitCallExpr(e); } ACCResult checkCallToFunction(FunctionDecl *fn) { // Require a CF*Ref return type. if (!isCFType(fn->getReturnType())) return ACC_invalid; if (!isAnyRetainable(TargetClass)) return ACC_invalid; // Honor an explicit 'not retained' attribute. if (fn->hasAttr<CFReturnsNotRetainedAttr>()) return ACC_plusZero; // Honor an explicit 'retained' attribute, except that for // now we're not going to permit implicit handling of +1 results, // because it's a bit frightening. if (fn->hasAttr<CFReturnsRetainedAttr>()) return Diagnose ? ACC_plusOne : ACC_invalid; // ACC_plusOne if we start accepting this // Recognize this specific builtin function, which is used by CFSTR. unsigned builtinID = fn->getBuiltinID(); if (builtinID == Builtin::BI__builtin___CFStringMakeConstantString) return ACC_bottom; // Otherwise, don't do anything implicit with an unaudited function. if (!fn->hasAttr<CFAuditedTransferAttr>()) return ACC_invalid; // Otherwise, it's +0 unless it follows the create convention. if (ento::coreFoundation::followsCreateRule(fn)) return Diagnose ? ACC_plusOne : ACC_invalid; // ACC_plusOne if we start accepting this return ACC_plusZero; } ACCResult VisitObjCMessageExpr(ObjCMessageExpr *e) { return checkCallToMethod(e->getMethodDecl()); } ACCResult VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *e) { ObjCMethodDecl *method; if (e->isExplicitProperty()) method = e->getExplicitProperty()->getGetterMethodDecl(); else method = e->getImplicitPropertyGetter(); return checkCallToMethod(method); } ACCResult checkCallToMethod(ObjCMethodDecl *method) { if (!method) return ACC_invalid; // Check for message sends to functions returning CF types. We // just obey the Cocoa conventions with these, even though the // return type is CF. if (!isAnyRetainable(TargetClass) || !isCFType(method->getReturnType())) return ACC_invalid; // If the method is explicitly marked not-retained, it's +0. 
if (method->hasAttr<CFReturnsNotRetainedAttr>()) return ACC_plusZero; // If the method is explicitly marked as returning retained, or its // selector follows a +1 Cocoa convention, treat it as +1. if (method->hasAttr<CFReturnsRetainedAttr>()) return ACC_plusOne; switch (method->getSelector().getMethodFamily()) { case OMF_alloc: case OMF_copy: case OMF_mutableCopy: case OMF_new: return ACC_plusOne; default: // Otherwise, treat it as +0. return ACC_plusZero; } } }; } bool Sema::isKnownName(StringRef name) { if (name.empty()) return false; LookupResult R(*this, &Context.Idents.get(name), SourceLocation(), Sema::LookupOrdinaryName); return LookupName(R, TUScope, false); } #if 0 // HLSL Change Start - No ObjC support static void addFixitForObjCARCConversion(Sema &S, DiagnosticBuilder &DiagB, Sema::CheckedConversionKind CCK, SourceLocation afterLParen, QualType castType, Expr *castExpr, Expr *realCast, const char *bridgeKeyword, const char *CFBridgeName) { // We handle C-style and implicit casts here. switch (CCK) { case Sema::CCK_ImplicitConversion: case Sema::CCK_CStyleCast: case Sema::CCK_OtherCast: break; case Sema::CCK_FunctionalCast: return; } if (CFBridgeName) { if (CCK == Sema::CCK_OtherCast) { if (const CXXNamedCastExpr *NCE = dyn_cast<CXXNamedCastExpr>(realCast)) { SourceRange range(NCE->getOperatorLoc(), NCE->getAngleBrackets().getEnd()); SmallString<32> BridgeCall; SourceManager &SM = S.getSourceManager(); char PrevChar = *SM.getCharacterData(range.getBegin().getLocWithOffset(-1)); if (Lexer::isIdentifierBodyChar(PrevChar, S.getLangOpts())) BridgeCall += ' '; BridgeCall += CFBridgeName; DiagB.AddFixItHint(FixItHint::CreateReplacement(range, BridgeCall)); } return; } Expr *castedE = castExpr; if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(castedE)) castedE = CCE->getSubExpr(); castedE = castedE->IgnoreImpCasts(); SourceRange range = castedE->getSourceRange(); SmallString<32> BridgeCall; SourceManager &SM = S.getSourceManager(); char PrevChar = *SM.getCharacterData(range.getBegin().getLocWithOffset(-1)); if (Lexer::isIdentifierBodyChar(PrevChar, S.getLangOpts())) BridgeCall += ' '; BridgeCall += CFBridgeName; if (isa<ParenExpr>(castedE)) { DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(), BridgeCall)); } else { BridgeCall += '('; DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(), BridgeCall)); DiagB.AddFixItHint(FixItHint::CreateInsertion( S.PP.getLocForEndOfToken(range.getEnd()), ")")); } return; } if (CCK == Sema::CCK_CStyleCast) { DiagB.AddFixItHint(FixItHint::CreateInsertion(afterLParen, bridgeKeyword)); } else if (CCK == Sema::CCK_OtherCast) { if (const CXXNamedCastExpr *NCE = dyn_cast<CXXNamedCastExpr>(realCast)) { std::string castCode = "("; castCode += bridgeKeyword; castCode += castType.getAsString(); castCode += ")"; SourceRange Range(NCE->getOperatorLoc(), NCE->getAngleBrackets().getEnd()); DiagB.AddFixItHint(FixItHint::CreateReplacement(Range, castCode)); } } else { std::string castCode = "("; castCode += bridgeKeyword; castCode += castType.getAsString(); castCode += ")"; Expr *castedE = castExpr->IgnoreImpCasts(); SourceRange range = castedE->getSourceRange(); if (isa<ParenExpr>(castedE)) { DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(), castCode)); } else { castCode += "("; DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(), castCode)); DiagB.AddFixItHint(FixItHint::CreateInsertion( S.PP.getLocForEndOfToken(range.getEnd()), ")")); } } } #endif // HLSL change End - No ObjC support template <typename T> static 
inline T *getObjCBridgeAttr(const TypedefType *TD) {
  TypedefNameDecl *TDNDecl = TD->getDecl();
  QualType QT = TDNDecl->getUnderlyingType();
  if (QT->isPointerType()) {
    QT = QT->getPointeeType();
    if (const RecordType *RT = QT->getAs<RecordType>())
      if (RecordDecl *RD = RT->getDecl()->getMostRecentDecl())
        return RD->getAttr<T>();
  }
  return nullptr;
}

static ObjCBridgeRelatedAttr *
ObjCBridgeRelatedAttrFromType(QualType T, TypedefNameDecl *&TDNDecl) {
  while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
    TDNDecl = TD->getDecl();
    if (ObjCBridgeRelatedAttr *ObjCBAttr =
        getObjCBridgeAttr<ObjCBridgeRelatedAttr>(TD))
      return ObjCBAttr;
    T = TDNDecl->getUnderlyingType();
  }
  return nullptr;
}

static void
diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
                          QualType castType, ARCConversionTypeClass castACTC,
                          Expr *castExpr, Expr *realCast,
                          ARCConversionTypeClass exprACTC,
                          Sema::CheckedConversionKind CCK) {
  SourceLocation loc =
    (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());

  if (S.makeUnavailableInSystemHeader(loc,
                "converts between Objective-C and C pointers in -fobjc-arc"))
    return;

  QualType castExprType = castExpr->getType();
  TypedefNameDecl *TDNDecl = nullptr;
  if ((castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable &&
       ObjCBridgeRelatedAttrFromType(castType, TDNDecl)) ||
      (exprACTC == ACTC_coreFoundation && castACTC == ACTC_retainable &&
       ObjCBridgeRelatedAttrFromType(castExprType, TDNDecl)))
    return;

  unsigned srcKind = 0;
  switch (exprACTC) {
  case ACTC_none:
  case ACTC_coreFoundation:
  case ACTC_voidPtr:
    srcKind = (castExprType->isPointerType() ? 1 : 0);
    break;
  case ACTC_retainable:
    srcKind = (castExprType->isBlockPointerType() ? 2 : 3);
    break;
  case ACTC_indirectRetainable:
    srcKind = 4;
    break;
  }

  // Check whether this could be fixed with a bridge cast.
  SourceLocation afterLParen = S.PP.getLocForEndOfToken(castRange.getBegin());
  SourceLocation noteLoc = afterLParen.isValid() ? afterLParen : loc;

  // Bridge from a CF type to an ARC type (the expression has C pointer type;
  // the cast target is retainable).
  if (castACTC == ACTC_retainable && isAnyRetainable(exprACTC)) {
    S.Diag(loc, diag::err_arc_cast_requires_bridge)
      << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
      << 2 // of C pointer type
      << castExprType
      << unsigned(castType->isBlockPointerType()) // to ObjC|block type
      << castType
      << castRange
      << castExpr->getSourceRange();
    bool br = S.isKnownName("CFBridgingRelease");
    ACCResult CreateRule =
      ARCCastChecker(S.Context, exprACTC, castACTC, true).Visit(castExpr);
    assert(CreateRule != ACC_bottom && "This cast should already be accepted.");
    if (CreateRule != ACC_plusOne) {
      DiagnosticBuilder DiagB =
        (CCK != Sema::CCK_OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge)
                              : S.Diag(noteLoc, diag::note_arc_cstyle_bridge);

      addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
                                   castType, castExpr, realCast, "__bridge ",
                                   nullptr);
    }
    if (CreateRule != ACC_plusZero) {
      DiagnosticBuilder DiagB =
        (CCK == Sema::CCK_OtherCast && !br) ?
          S.Diag(noteLoc, diag::note_arc_cstyle_bridge_transfer)
            << castExprType :
          S.Diag(br ? castExpr->getExprLoc() : noteLoc,
                 diag::note_arc_bridge_transfer)
            << castExprType << br;

      addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
                                   castType, castExpr, realCast,
                                   "__bridge_transfer ",
                                   br ? "CFBridgingRelease" : nullptr);
    }

    return;
  }

  // Bridge from an ARC type to a CF type.
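  // Editorial example (not from the original source): this arm covers casts
  // such as
  //   NSString *ns = ...;
  //   CFStringRef cf = (CFStringRef)ns;   // error under ARC
  // where the fix-its offer '__bridge' (no ownership transfer) or
  // '__bridge_retained'/'CFBridgingRetain' (produce a +1 CF reference).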
if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) { bool br = S.isKnownName("CFBridgingRetain"); S.Diag(loc, diag::err_arc_cast_requires_bridge) << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit << unsigned(castExprType->isBlockPointerType()) // of ObjC|block type << castExprType << 2 // to C pointer type << castType << castRange << castExpr->getSourceRange(); ACCResult CreateRule = ARCCastChecker(S.Context, exprACTC, castACTC, true).Visit(castExpr); assert(CreateRule != ACC_bottom && "This cast should already be accepted."); if (CreateRule != ACC_plusOne) { DiagnosticBuilder DiagB = (CCK != Sema::CCK_OtherCast) ? S.Diag(noteLoc, diag::note_arc_bridge) : S.Diag(noteLoc, diag::note_arc_cstyle_bridge); addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen, castType, castExpr, realCast, "__bridge ", nullptr); } if (CreateRule != ACC_plusZero) { DiagnosticBuilder DiagB = (CCK == Sema::CCK_OtherCast && !br) ? S.Diag(noteLoc, diag::note_arc_cstyle_bridge_retained) << castType : S.Diag(br ? castExpr->getExprLoc() : noteLoc, diag::note_arc_bridge_retained) << castType << br; addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen, castType, castExpr, realCast, "__bridge_retained ", br ? "CFBridgingRetain" : nullptr); } return; } S.Diag(loc, diag::err_arc_mismatched_cast) << (CCK != Sema::CCK_ImplicitConversion) << srcKind << castExprType << castType << castRange << castExpr->getSourceRange(); } template <typename TB> static bool CheckObjCBridgeNSCast(Sema &S, QualType castType, Expr *castExpr, bool &HadTheAttribute, bool warn) { QualType T = castExpr->getType(); HadTheAttribute = false; while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) { TypedefNameDecl *TDNDecl = TD->getDecl(); if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) { if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) { HadTheAttribute = true; if (Parm->isStr("id")) return true; NamedDecl *Target = nullptr; // Check for an existing type with this name. LookupResult R(S, DeclarationName(Parm), SourceLocation(), Sema::LookupOrdinaryName); if (S.LookupName(R, S.TUScope)) { Target = R.getFoundDecl(); if (Target && isa<ObjCInterfaceDecl>(Target)) { ObjCInterfaceDecl *ExprClass = cast<ObjCInterfaceDecl>(Target); if (const ObjCObjectPointerType *InterfacePointerType = castType->getAsObjCInterfacePointerType()) { ObjCInterfaceDecl *CastClass = InterfacePointerType->getObjectType()->getInterface(); if ((CastClass == ExprClass) || (CastClass && CastClass->isSuperClassOf(ExprClass))) return true; if (warn) S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge) << T << Target->getName() << castType->getPointeeType(); return false; } else if (castType->isObjCIdType() || (S.Context.ObjCObjectAdoptsQTypeProtocols( castType, ExprClass))) // ok to cast to 'id'. // casting to id<p-list> is ok if bridge type adopts all of // p-list protocols. 
              return true;
            else {
              if (warn) {
                S.Diag(castExpr->getLocStart(), diag::warn_objc_invalid_bridge)
                  << T << Target->getName() << castType;
                S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
                S.Diag(Target->getLocStart(), diag::note_declared_at);
              }
              return false;
            }
          }
        } else if (!castType->isObjCIdType()) {
          S.Diag(castExpr->getLocStart(),
                 diag::err_objc_cf_bridged_not_interface)
            << castExpr->getType() << Parm;
          S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
          if (Target)
            S.Diag(Target->getLocStart(), diag::note_declared_at);
        }
        return true;
      }
      return false;
    }
    T = TDNDecl->getUnderlyingType();
  }
  return true;
}

template <typename TB>
static bool CheckObjCBridgeCFCast(Sema &S, QualType castType, Expr *castExpr,
                                  bool &HadTheAttribute, bool warn) {
  QualType T = castType;
  HadTheAttribute = false;
  while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) {
    TypedefNameDecl *TDNDecl = TD->getDecl();
    if (TB *ObjCBAttr = getObjCBridgeAttr<TB>(TD)) {
      if (IdentifierInfo *Parm = ObjCBAttr->getBridgedType()) {
        HadTheAttribute = true;
        if (Parm->isStr("id"))
          return true;

        NamedDecl *Target = nullptr;
        // Check for an existing type with this name.
        LookupResult R(S, DeclarationName(Parm), SourceLocation(),
                       Sema::LookupOrdinaryName);
        if (S.LookupName(R, S.TUScope)) {
          Target = R.getFoundDecl();
          if (Target && isa<ObjCInterfaceDecl>(Target)) {
            ObjCInterfaceDecl *CastClass = cast<ObjCInterfaceDecl>(Target);
            if (const ObjCObjectPointerType *InterfacePointerType =
                  castExpr->getType()->getAsObjCInterfacePointerType()) {
              ObjCInterfaceDecl *ExprClass =
                InterfacePointerType->getObjectType()->getInterface();
              if ((CastClass == ExprClass) ||
                  (ExprClass && CastClass->isSuperClassOf(ExprClass)))
                return true;
              if (warn) {
                S.Diag(castExpr->getLocStart(),
                       diag::warn_objc_invalid_bridge_to_cf)
                  << castExpr->getType()->getPointeeType() << T;
                S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
              }
              return false;
            } else if (castExpr->getType()->isObjCIdType() ||
                       (S.Context.QIdProtocolsAdoptObjCObjectProtocols(
                          castExpr->getType(), CastClass)))
              // It is ok to cast an 'id' expression to a CF type.
              // It is ok to cast an 'id<plist>' expression to a CF type
              // provided plist adopts all of the protocols of the CF type's
              // corresponding Objective-C class.
              return true;
            else {
              if (warn) {
                S.Diag(castExpr->getLocStart(),
                       diag::warn_objc_invalid_bridge_to_cf)
                  << castExpr->getType() << castType;
                S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
                S.Diag(Target->getLocStart(), diag::note_declared_at);
              }
              return false;
            }
          }
        }
        S.Diag(castExpr->getLocStart(),
               diag::err_objc_ns_bridged_invalid_cfobject)
          << castExpr->getType() << castType;
        S.Diag(TDNDecl->getLocStart(), diag::note_declared_at);
        if (Target)
          S.Diag(Target->getLocStart(), diag::note_declared_at);
        return true;
      }
      return false;
    }
    T = TDNDecl->getUnderlyingType();
  }
  return true;
}

void Sema::CheckTollFreeBridgeCast(QualType castType, Expr *castExpr) {
  if (!getLangOpts().ObjC1)
    return;
  // Warn in the presence of __bridge casting to or from a toll-free-bridged
  // type.
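  // Editorial example (not from the original source): a toll-free-bridged
  // pair is typically declared through the objc_bridge attribute, e.g.
  //   typedef struct __attribute__((objc_bridge(NSError))) __CFError *CFErrorRef;
  // and the checks below verify that the classes on either side of such a
  // cast actually correspond.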
ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExpr->getType()); ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType); if (castACTC == ACTC_retainable && exprACTC == ACTC_coreFoundation) { bool HasObjCBridgeAttr; bool ObjCBridgeAttrWillNotWarn = CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr, false); if (ObjCBridgeAttrWillNotWarn && HasObjCBridgeAttr) return; bool HasObjCBridgeMutableAttr; bool ObjCBridgeMutableAttrWillNotWarn = CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr, HasObjCBridgeMutableAttr, false); if (ObjCBridgeMutableAttrWillNotWarn && HasObjCBridgeMutableAttr) return; if (HasObjCBridgeAttr) CheckObjCBridgeNSCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr, true); else if (HasObjCBridgeMutableAttr) CheckObjCBridgeNSCast<ObjCBridgeMutableAttr>(*this, castType, castExpr, HasObjCBridgeMutableAttr, true); } else if (castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable) { bool HasObjCBridgeAttr; bool ObjCBridgeAttrWillNotWarn = CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr, false); if (ObjCBridgeAttrWillNotWarn && HasObjCBridgeAttr) return; bool HasObjCBridgeMutableAttr; bool ObjCBridgeMutableAttrWillNotWarn = CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr, HasObjCBridgeMutableAttr, false); if (ObjCBridgeMutableAttrWillNotWarn && HasObjCBridgeMutableAttr) return; if (HasObjCBridgeAttr) CheckObjCBridgeCFCast<ObjCBridgeAttr>(*this, castType, castExpr, HasObjCBridgeAttr, true); else if (HasObjCBridgeMutableAttr) CheckObjCBridgeCFCast<ObjCBridgeMutableAttr>(*this, castType, castExpr, HasObjCBridgeMutableAttr, true); } } void Sema::CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr) { QualType SrcType = castExpr->getType(); if (ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(castExpr)) { if (PRE->isExplicitProperty()) { if (ObjCPropertyDecl *PDecl = PRE->getExplicitProperty()) SrcType = PDecl->getType(); } else if (PRE->isImplicitProperty()) { if (ObjCMethodDecl *Getter = PRE->getImplicitPropertyGetter()) SrcType = Getter->getReturnType(); } } ARCConversionTypeClass srcExprACTC = classifyTypeForARCConversion(SrcType); ARCConversionTypeClass castExprACTC = classifyTypeForARCConversion(castType); if (srcExprACTC != ACTC_retainable || castExprACTC != ACTC_coreFoundation) return; CheckObjCBridgeRelatedConversions(castExpr->getLocStart(), castType, SrcType, castExpr); return; } bool Sema::CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind) { if (!getLangOpts().ObjC1) return false; ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExpr->getType()); ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType); if ((castACTC == ACTC_retainable && exprACTC == ACTC_coreFoundation) || (castACTC == ACTC_coreFoundation && exprACTC == ACTC_retainable)) { CheckTollFreeBridgeCast(castType, castExpr); Kind = (castACTC == ACTC_coreFoundation) ? CK_BitCast : CK_CPointerToObjCPointerCast; return true; } return false; } bool Sema::checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs) { QualType T = CfToNs ? 
SrcType : DestType; ObjCBridgeRelatedAttr *ObjCBAttr = ObjCBridgeRelatedAttrFromType(T, TDNDecl); if (!ObjCBAttr) return false; IdentifierInfo *RCId = ObjCBAttr->getRelatedClass(); IdentifierInfo *CMId = ObjCBAttr->getClassMethod(); IdentifierInfo *IMId = ObjCBAttr->getInstanceMethod(); if (!RCId) return false; NamedDecl *Target = nullptr; // Check for an existing type with this name. LookupResult R(*this, DeclarationName(RCId), SourceLocation(), Sema::LookupOrdinaryName); if (!LookupName(R, TUScope)) { Diag(Loc, diag::err_objc_bridged_related_invalid_class) << RCId << SrcType << DestType; Diag(TDNDecl->getLocStart(), diag::note_declared_at); return false; } Target = R.getFoundDecl(); if (Target && isa<ObjCInterfaceDecl>(Target)) RelatedClass = cast<ObjCInterfaceDecl>(Target); else { Diag(Loc, diag::err_objc_bridged_related_invalid_class_name) << RCId << SrcType << DestType; Diag(TDNDecl->getLocStart(), diag::note_declared_at); if (Target) Diag(Target->getLocStart(), diag::note_declared_at); return false; } // Check for an existing class method with the given selector name. if (CfToNs && CMId) { Selector Sel = Context.Selectors.getUnarySelector(CMId); ClassMethod = RelatedClass->lookupMethod(Sel, false); if (!ClassMethod) { Diag(Loc, diag::err_objc_bridged_related_known_method) << SrcType << DestType << Sel << false; Diag(TDNDecl->getLocStart(), diag::note_declared_at); return false; } } // Check for an existing instance method with the given selector name. if (!CfToNs && IMId) { Selector Sel = Context.Selectors.getNullarySelector(IMId); InstanceMethod = RelatedClass->lookupMethod(Sel, true); if (!InstanceMethod) { Diag(Loc, diag::err_objc_bridged_related_known_method) << SrcType << DestType << Sel << true; Diag(TDNDecl->getLocStart(), diag::note_declared_at); return false; } } return true; } bool Sema::CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr) { ARCConversionTypeClass rhsExprACTC = classifyTypeForARCConversion(SrcType); ARCConversionTypeClass lhsExprACTC = classifyTypeForARCConversion(DestType); bool CfToNs = (rhsExprACTC == ACTC_coreFoundation && lhsExprACTC == ACTC_retainable); bool NsToCf = (rhsExprACTC == ACTC_retainable && lhsExprACTC == ACTC_coreFoundation); if (!CfToNs && !NsToCf) return false; ObjCInterfaceDecl *RelatedClass; ObjCMethodDecl *ClassMethod = nullptr; ObjCMethodDecl *InstanceMethod = nullptr; TypedefNameDecl *TDNDecl = nullptr; if (!checkObjCBridgeRelatedComponents(Loc, DestType, SrcType, RelatedClass, ClassMethod, InstanceMethod, TDNDecl, CfToNs)) return false; if (CfToNs) { // Implicit conversion from CF to ObjC object is needed. if (ClassMethod) { std::string ExpressionString = "["; ExpressionString += RelatedClass->getNameAsString(); ExpressionString += " "; ExpressionString += ClassMethod->getSelector().getAsString(); SourceLocation SrcExprEndLoc = PP.getLocForEndOfToken(SrcExpr->getLocEnd()); // Provide a fixit: [RelatedClass ClassMethod SrcExpr] Diag(Loc, diag::err_objc_bridged_related_known_method) << SrcType << DestType << ClassMethod->getSelector() << false << FixItHint::CreateInsertion(SrcExpr->getLocStart(), ExpressionString) << FixItHint::CreateInsertion(SrcExprEndLoc, "]"); Diag(RelatedClass->getLocStart(), diag::note_declared_at); Diag(TDNDecl->getLocStart(), diag::note_declared_at); QualType receiverType = Context.getObjCInterfaceType(RelatedClass); // Argument. 
Expr *args[] = { SrcExpr }; ExprResult msg = BuildClassMessageImplicit(receiverType, false, ClassMethod->getLocation(), ClassMethod->getSelector(), ClassMethod, MultiExprArg(args, 1)); SrcExpr = msg.get(); return true; } } else { // Implicit conversion from ObjC type to CF object is needed. if (InstanceMethod) { std::string ExpressionString; SourceLocation SrcExprEndLoc = PP.getLocForEndOfToken(SrcExpr->getLocEnd()); if (InstanceMethod->isPropertyAccessor()) if (const ObjCPropertyDecl *PDecl = InstanceMethod->findPropertyDecl()) { // fixit: ObjectExpr.propertyname when it is a property accessor. ExpressionString = "."; ExpressionString += PDecl->getNameAsString(); Diag(Loc, diag::err_objc_bridged_related_known_method) << SrcType << DestType << InstanceMethod->getSelector() << true << FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString); } if (ExpressionString.empty()) { // Provide a fixit: [ObjectExpr InstanceMethod] ExpressionString = " "; ExpressionString += InstanceMethod->getSelector().getAsString(); ExpressionString += "]"; Diag(Loc, diag::err_objc_bridged_related_known_method) << SrcType << DestType << InstanceMethod->getSelector() << true << FixItHint::CreateInsertion(SrcExpr->getLocStart(), "[") << FixItHint::CreateInsertion(SrcExprEndLoc, ExpressionString); } Diag(RelatedClass->getLocStart(), diag::note_declared_at); Diag(TDNDecl->getLocStart(), diag::note_declared_at); ExprResult msg = BuildInstanceMessageImplicit(SrcExpr, SrcType, InstanceMethod->getLocation(), InstanceMethod->getSelector(), InstanceMethod, None); SrcExpr = msg.get(); return true; } } return false; } Sema::ARCConversionResult Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&castExpr, CheckedConversionKind CCK, bool DiagnoseCFAudited, BinaryOperatorKind Opc) { QualType castExprType = castExpr->getType(); // For the purposes of the classification, we assume reference types // will bind to temporaries. QualType effCastType = castType; if (const ReferenceType *ref = castType->getAs<ReferenceType>()) effCastType = ref->getPointeeType(); ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExprType); ARCConversionTypeClass castACTC = classifyTypeForARCConversion(effCastType); if (exprACTC == castACTC) { // check for viability and report an error if casting an rvalue to a // lifetime-qualified type. if ((castACTC == ACTC_retainable) && (CCK == CCK_CStyleCast || CCK == CCK_OtherCast) && (castType != castExprType)) { const Type *DT = castType.getTypePtr(); QualType QDT = castType; // We desugar some types but not others. We ignore those // that cannot happen in a cast; i.e. auto, and those which // should not be de-sugared; i.e. typedef. if (const ParenType *PT = dyn_cast<ParenType>(DT)) QDT = PT->desugar(); else if (const TypeOfType *TP = dyn_cast<TypeOfType>(DT)) QDT = TP->desugar(); else if (const AttributedType *AT = dyn_cast<AttributedType>(DT)) QDT = AT->desugar(); if (QDT != castType && QDT.getObjCLifetime() != Qualifiers::OCL_None) { SourceLocation loc = (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc()); Diag(loc, diag::err_arc_nolifetime_behavior); } } return ACR_okay; } if (isAnyCLike(exprACTC) && isAnyCLike(castACTC)) return ACR_okay; // Allow all of these types to be cast to integer types (but not // vice-versa). if (castACTC == ACTC_none && castType->isIntegralType(Context)) return ACR_okay; // Allow casts between pointers to lifetime types (e.g., __strong id*) // and pointers to void (e.g., cv void *).
Casting from void* to lifetime* // must be explicit. if (exprACTC == ACTC_indirectRetainable && castACTC == ACTC_voidPtr) return ACR_okay; if (castACTC == ACTC_indirectRetainable && exprACTC == ACTC_voidPtr && CCK != CCK_ImplicitConversion) return ACR_okay; switch (ARCCastChecker(Context, exprACTC, castACTC, false).Visit(castExpr)) { // For invalid casts, fall through. case ACC_invalid: break; // Do nothing for both bottom and +0. case ACC_bottom: case ACC_plusZero: return ACR_okay; // If the result is +1, consume it here. case ACC_plusOne: castExpr = ImplicitCastExpr::Create(Context, castExpr->getType(), CK_ARCConsumeObject, castExpr, nullptr, VK_RValue); ExprNeedsCleanups = true; return ACR_okay; } // If this is a non-implicit cast from id or block type to a // CoreFoundation type, delay complaining in case the cast is used // in an acceptable context. if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) && CCK != CCK_ImplicitConversion) return ACR_unbridged; // Do not issue "bridge cast" diagnostic when implicit casting a cstring // to 'NSString *'. Let caller issue a normal mismatched diagnostic with // suitable fix-it. if (castACTC == ACTC_retainable && exprACTC == ACTC_none && ConversionToObjCStringLiteralCheck(castType, castExpr)) return ACR_okay; // Do not issue "bridge cast" diagnostic when implicit casting // a retainable object to a CF type parameter belonging to an audited // CF API function. Let caller issue a normal type mismatched diagnostic // instead. if (!DiagnoseCFAudited || exprACTC != ACTC_retainable || castACTC != ACTC_coreFoundation) if (!(exprACTC == ACTC_voidPtr && castACTC == ACTC_retainable && (Opc == BO_NE || Opc == BO_EQ))) diagnoseObjCARCConversion(*this, castRange, castType, castACTC, castExpr, castExpr, exprACTC, CCK); return ACR_okay; } /// Given that we saw an expression with the ARCUnbridgedCastTy /// placeholder type, complain bitterly. void Sema::diagnoseARCUnbridgedCast(Expr *e) { // We expect the spurious ImplicitCastExpr to already have been stripped. assert(!e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast)); CastExpr *realCast = cast<CastExpr>(e->IgnoreParens()); SourceRange castRange; QualType castType; CheckedConversionKind CCK; if (CStyleCastExpr *cast = dyn_cast<CStyleCastExpr>(realCast)) { castRange = SourceRange(cast->getLParenLoc(), cast->getRParenLoc()); castType = cast->getTypeAsWritten(); CCK = CCK_CStyleCast; } else if (ExplicitCastExpr *cast = dyn_cast<ExplicitCastExpr>(realCast)) { castRange = cast->getTypeInfoAsWritten()->getTypeLoc().getSourceRange(); castType = cast->getTypeAsWritten(); CCK = CCK_OtherCast; } else { castType = cast->getType(); CCK = CCK_ImplicitConversion; } ARCConversionTypeClass castACTC = classifyTypeForARCConversion(castType.getNonReferenceType()); Expr *castExpr = realCast->getSubExpr(); assert(classifyTypeForARCConversion(castExpr->getType()) == ACTC_retainable); diagnoseObjCARCConversion(*this, castRange, castType, castACTC, castExpr, realCast, ACTC_retainable, CCK); } /// stripARCUnbridgedCast - Given an expression of ARCUnbridgedCast /// type, remove the placeholder cast.
Expr *Sema::stripARCUnbridgedCast(Expr *e) { assert(e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast)); if (ParenExpr *pe = dyn_cast<ParenExpr>(e)) { Expr *sub = stripARCUnbridgedCast(pe->getSubExpr()); return new (Context) ParenExpr(pe->getLParen(), pe->getRParen(), sub); } else if (UnaryOperator *uo = dyn_cast<UnaryOperator>(e)) { assert(uo->getOpcode() == UO_Extension); Expr *sub = stripARCUnbridgedCast(uo->getSubExpr()); return new (Context) UnaryOperator(sub, UO_Extension, sub->getType(), sub->getValueKind(), sub->getObjectKind(), uo->getOperatorLoc()); } else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) { assert(!gse->isResultDependent()); unsigned n = gse->getNumAssocs(); SmallVector<Expr*, 4> subExprs(n); SmallVector<TypeSourceInfo*, 4> subTypes(n); for (unsigned i = 0; i != n; ++i) { subTypes[i] = gse->getAssocTypeSourceInfo(i); Expr *sub = gse->getAssocExpr(i); if (i == gse->getResultIndex()) sub = stripARCUnbridgedCast(sub); subExprs[i] = sub; } return new (Context) GenericSelectionExpr(Context, gse->getGenericLoc(), gse->getControllingExpr(), subTypes, subExprs, gse->getDefaultLoc(), gse->getRParenLoc(), gse->containsUnexpandedParameterPack(), gse->getResultIndex()); } else { assert(isa<ImplicitCastExpr>(e) && "bad form of unbridged cast!"); return cast<ImplicitCastExpr>(e)->getSubExpr(); } } bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType, QualType exprType) { QualType canCastType = Context.getCanonicalType(castType).getUnqualifiedType(); QualType canExprType = Context.getCanonicalType(exprType).getUnqualifiedType(); if (isa<ObjCObjectPointerType>(canCastType) && castType.getObjCLifetime() == Qualifiers::OCL_Weak && canExprType->isObjCObjectPointerType()) { if (const ObjCObjectPointerType *ObjT = canExprType->getAs<ObjCObjectPointerType>()) if (const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl()) return !ObjI->isArcWeakrefUnavailable(); } return true; } /// Look for an ObjCReclaimReturnedObject cast and destroy it. static Expr *maybeUndoReclaimObject(Expr *e) { // For now, we just undo operands that are *immediately* reclaim // expressions, which prevents the vast majority of potential // problems here. To catch them all, we'd need to rebuild arbitrary // value-propagating subexpressions --- we can't reliably rebuild // in-place because of expression sharing. if (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e)) if (ice->getCastKind() == CK_ARCReclaimReturnedObject) return ice->getSubExpr(); return e; } ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr) { ExprResult SubResult = UsualUnaryConversions(SubExpr); if (SubResult.isInvalid()) return ExprError(); SubExpr = SubResult.get(); QualType T = TSInfo->getType(); QualType FromType = SubExpr->getType(); CastKind CK; bool MustConsume = false; if (T->isDependentType() || SubExpr->isTypeDependent()) { // Okay: we'll build a dependent expression type. CK = CK_Dependent; } else if (T->isObjCARCBridgableType() && FromType->isCARCBridgableType()) { // Casting CF -> id CK = (T->isBlockPointerType() ? CK_AnyPointerToBlockPointerCast : CK_CPointerToObjCPointerCast); switch (Kind) { case OBC_Bridge: break; case OBC_BridgeRetained: { bool br = isKnownName("CFBridgingRelease"); Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind) << 2 << FromType << (T->isBlockPointerType()? 
1 : 0) << T << SubExpr->getSourceRange() << Kind; Diag(BridgeKeywordLoc, diag::note_arc_bridge) << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge"); Diag(BridgeKeywordLoc, diag::note_arc_bridge_transfer) << FromType << br << FixItHint::CreateReplacement(BridgeKeywordLoc, br ? "CFBridgingRelease " : "__bridge_transfer "); Kind = OBC_Bridge; break; } case OBC_BridgeTransfer: // We must consume the Objective-C object produced by the cast. MustConsume = true; break; } } else if (T->isCARCBridgableType() && FromType->isObjCARCBridgableType()) { // Okay: id -> CF CK = CK_BitCast; switch (Kind) { case OBC_Bridge: // Reclaiming a value that's going to be __bridge-casted to CF // is very dangerous, so we don't do it. SubExpr = maybeUndoReclaimObject(SubExpr); break; case OBC_BridgeRetained: // Produce the object before casting it. SubExpr = ImplicitCastExpr::Create(Context, FromType, CK_ARCProduceObject, SubExpr, nullptr, VK_RValue); break; case OBC_BridgeTransfer: { bool br = isKnownName("CFBridgingRetain"); Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind) << (FromType->isBlockPointerType()? 1 : 0) << FromType << 2 << T << SubExpr->getSourceRange() << Kind; Diag(BridgeKeywordLoc, diag::note_arc_bridge) << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge "); Diag(BridgeKeywordLoc, diag::note_arc_bridge_retained) << T << br << FixItHint::CreateReplacement(BridgeKeywordLoc, br ? "CFBridgingRetain " : "__bridge_retained"); Kind = OBC_Bridge; break; } } } else { Diag(LParenLoc, diag::err_arc_bridge_cast_incompatible) << FromType << T << Kind << SubExpr->getSourceRange() << TSInfo->getTypeLoc().getSourceRange(); return ExprError(); } Expr *Result = new (Context) ObjCBridgedCastExpr(LParenLoc, Kind, CK, BridgeKeywordLoc, TSInfo, SubExpr); if (MustConsume) { ExprNeedsCleanups = true; Result = ImplicitCastExpr::Create(Context, T, CK_ARCConsumeObject, Result, nullptr, VK_RValue); } return Result; } ExprResult Sema::ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr) { TypeSourceInfo *TSInfo = nullptr; QualType T = GetTypeFromParser(Type, &TSInfo); if (Kind == OBC_Bridge) CheckTollFreeBridgeCast(T, SubExpr); if (!TSInfo) TSInfo = Context.getTrivialTypeSourceInfo(T, LParenLoc); return BuildObjCBridgedCast(LParenLoc, Kind, BridgeKeywordLoc, TSInfo, SubExpr); } #endif // HLSL Change
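// A self-contained sketch of the bridged-cast lowering implemented by
// BuildObjCBridgedCast above. Illustrative only: the enum, struct, and
// function names below are invented for this example and are not part of
// Clang's API. It models how each __bridge spelling pairs a cast kind with
// a retain-count action (block-pointer targets, which the real code lowers
// with CK_AnyPointerToBlockPointerCast, are omitted for brevity).
#include <cassert>
#include <cstdio>

enum class BridgeKind { Bridge, BridgeRetained, BridgeTransfer };
enum class Direction { CFToObjC, ObjCToCF };

struct Lowering {
  const char *castKind;  // the CastKind the bridged cast lowers to
  bool producesRetained; // ARC wraps with CK_ARCProduceObject (+1 handed to CF)
  bool consumesRetained; // ARC wraps with CK_ARCConsumeObject (+1 taken from CF)
};

static Lowering lower(BridgeKind kind, Direction dir) {
  Lowering result{};
  // CF -> ObjC pointers use a C-pointer-to-ObjC-pointer cast; the reverse
  // direction is a plain bit cast, matching the two branches above.
  result.castKind = (dir == Direction::CFToObjC)
                        ? "CK_CPointerToObjCPointerCast"
                        : "CK_BitCast";
  // __bridge_transfer is meaningful only CF -> ObjC, and __bridge_retained
  // only ObjC -> CF; the real Sema diagnoses the mismatched spellings with
  // fix-its (err_arc_bridge_cast_wrong_kind) rather than honoring them.
  if (dir == Direction::CFToObjC)
    result.consumesRetained = (kind == BridgeKind::BridgeTransfer);
  else
    result.producesRetained = (kind == BridgeKind::BridgeRetained);
  return result;
}

int main() {
  Lowering transfer = lower(BridgeKind::BridgeTransfer, Direction::CFToObjC);
  assert(transfer.consumesRetained && !transfer.producesRetained);
  Lowering retained = lower(BridgeKind::BridgeRetained, Direction::ObjCToCF);
  assert(retained.producesRetained && !retained.consumesRetained);
  std::printf("__bridge_transfer lowers to %s and consumes a +1 value\n",
              transfer.castKind);
  return 0;
}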
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaOverload.cpp
//===--- SemaOverload.cpp - C++ Overloading -------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file provides Sema routines for C++ overloading. // //===----------------------------------------------------------------------===// #include "clang/Sema/Overload.h" #include "clang/AST/ASTContext.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/DiagnosticOptions.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" #include "clang/Sema/TemplateDeduction.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change #include "clang/AST/HlslTypes.h" // HLSL Change #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" #include <algorithm> #include <cstdlib> using namespace clang; using namespace sema; /// A convenience routine for creating a decayed reference to a function. static ExprResult CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl, bool HadMultipleCandidates, SourceLocation Loc = SourceLocation(), const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){ if (S.DiagnoseUseOfDecl(FoundDecl, Loc)) return ExprError(); // If FoundDecl is different from Fn (such as if one is a template // and the other a specialization), make sure DiagnoseUseOfDecl is // called on both. // FIXME: This would be more comprehensively addressed by modifying // DiagnoseUseOfDecl to accept both the FoundDecl and the decl // being used. 
if (FoundDecl != Fn && S.DiagnoseUseOfDecl(Fn, Loc)) return ExprError(); DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, false, Fn->getType(), VK_LValue, Loc, LocInfo); if (HadMultipleCandidates) DRE->setHadMultipleCandidates(true); S.MarkDeclRefReferenced(DRE); ExprResult E = DRE; E = S.DefaultFunctionArrayConversion(E.get()); if (E.isInvalid()) return ExprError(); return E; } static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType, bool InOverloadResolution, StandardConversionSequence &SCS, bool CStyle, bool AllowObjCWritebackConversion); static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From, QualType &ToType, bool InOverloadResolution, StandardConversionSequence &SCS, bool CStyle); static OverloadingResult IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, UserDefinedConversionSequence& User, OverloadCandidateSet& Conversions, bool AllowExplicit, bool AllowObjCConversionOnExplicit); static ImplicitConversionSequence::CompareKind CompareStandardConversionSequences(Sema &S, const StandardConversionSequence& SCS1, const StandardConversionSequence& SCS2); static ImplicitConversionSequence::CompareKind CompareQualificationConversions(Sema &S, const StandardConversionSequence& SCS1, const StandardConversionSequence& SCS2); static ImplicitConversionSequence::CompareKind CompareDerivedToBaseConversions(Sema &S, const StandardConversionSequence& SCS1, const StandardConversionSequence& SCS2); /// GetConversionRank - Retrieve the implicit conversion rank /// corresponding to the given implicit conversion kind. ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) { static const ImplicitConversionRank Rank[] = { // HLSL Change (remove explicit size to verify alignment with enum) ICR_Exact_Match, ICR_Exact_Match, ICR_Exact_Match, ICR_Exact_Match, ICR_Exact_Match, ICR_Exact_Match, ICR_Promotion, ICR_Promotion, ICR_Promotion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Complex_Real_Conversion, ICR_Conversion, ICR_Conversion, ICR_Writeback_Conversion // HLSL Change Starts: missing from original ,ICR_Conversion, // HLSL Change: new entries ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, ICR_Conversion, // HLSL Change Ends }; static_assert(_countof(Rank) == ICK_Num_Conversion_Kinds, "Otherwise, GetConversionRank is out of sync with ImplicitConversionKind"); // HLSL Change assert((int)Kind < (int)ICK_Num_Conversion_Kinds); // HLSL Change return Rank[(int)Kind]; } /// GetImplicitConversionName - Return the name of this kind of /// implicit conversion. 
static const char* GetImplicitConversionName(ImplicitConversionKind Kind) { static const char* const Name[] = { // HLSL Change (remove explicit size to verify alignment with enum) "No conversion", "Lvalue-to-rvalue", "Array-to-pointer", "Function-to-pointer", "Noreturn adjustment", "Qualification", "Integral promotion", "Floating point promotion", "Complex promotion", "Integral conversion", "Floating conversion", "Complex conversion", "Floating-integral conversion", "Pointer conversion", "Pointer-to-member conversion", "Boolean conversion", "Compatible-types conversion", "Derived-to-base conversion", "Vector conversion", "Vector splat", "Complex-real conversion", "Block Pointer conversion", "Transparent Union Conversion", "Writeback conversion" // HLSL Change Starts ,"Zero constant to event conversion", // HLSL Change: missing value // HLSL Change: new values "HLSLVector/Matrix to scalar", "HLSLVector/Matrix conversion", "Flat assignment conversion", "HLSLVector/Matrix splat", "HLSLVector/Matrix truncation", "HLSL derived to base", // HLSL Change Ends }; static_assert(_countof(Name) == ICK_Num_Conversion_Kinds, "Otherwise, GetImplicitConversionName is out of sync with ImplicitConversionKind"); // HLSL Change return Name[Kind]; } /// StandardConversionSequence - Set the standard conversion /// sequence to the identity conversion. void StandardConversionSequence::setAsIdentityConversion() { First = ICK_Identity; Second = ICK_Identity; Third = ICK_Identity; DeprecatedStringLiteralToCharPtr = false; QualificationIncludesObjCLifetime = false; ReferenceBinding = false; DirectBinding = false; IsLvalueReference = true; BindsToFunctionLvalue = false; BindsToRvalue = false; BindsImplicitObjectArgumentWithoutRefQualifier = false; ObjCLifetimeConversionBinding = false; CopyConstructor = nullptr; } /// getRank - Retrieve the rank of this standard conversion sequence /// (C++ 13.3.3.1.1p3). The rank is the largest rank of each of the /// implicit conversions. ImplicitConversionRank StandardConversionSequence::getRank() const { ImplicitConversionRank Rank = ICR_Exact_Match; if (GetConversionRank(First) > Rank) Rank = GetConversionRank(First); if (GetConversionRank(Second) > Rank) Rank = GetConversionRank(Second); if (GetConversionRank(ComponentConversion) > Rank) // HLSL Change Rank = GetConversionRank(ComponentConversion); if (GetConversionRank(Third) > Rank) Rank = GetConversionRank(Third); return Rank; } /// isPointerConversionToBool - Determines whether this conversion is /// a conversion of a pointer or pointer-to-member to bool. This is /// used as part of the ranking of standard conversion sequences /// (C++ 13.3.3.2p4). bool StandardConversionSequence::isPointerConversionToBool() const { // Note that FromType has not necessarily been transformed by the // array-to-pointer or function-to-pointer implicit conversions, so // check for their presence as well as checking whether FromType is // a pointer. if (getToType(1)->isBooleanType() && (getFromType()->isPointerType() || getFromType()->isObjCObjectPointerType() || getFromType()->isBlockPointerType() || getFromType()->isNullPtrType() || First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer)) return true; return false; } /// isPointerConversionToVoidPointer - Determines whether this /// conversion is a conversion of a pointer to a void pointer. This is /// used as part of the ranking of standard conversion sequences (C++ /// 13.3.3.2p4). 
bool StandardConversionSequence:: isPointerConversionToVoidPointer(ASTContext& Context) const { QualType FromType = getFromType(); QualType ToType = getToType(1); // Note that FromType has not necessarily been transformed by the // array-to-pointer implicit conversion, so check for its presence // and redo the conversion to get a pointer. if (First == ICK_Array_To_Pointer) FromType = Context.getArrayDecayedType(FromType); if (Second == ICK_Pointer_Conversion && FromType->isAnyPointerType()) if (const PointerType* ToPtrType = ToType->getAs<PointerType>()) return ToPtrType->getPointeeType()->isVoidType(); return false; } /// Skip any implicit casts which could be either part of a narrowing conversion /// or after one in an implicit conversion. static const Expr *IgnoreNarrowingConversion(const Expr *Converted) { while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Converted)) { switch (ICE->getCastKind()) { case CK_NoOp: case CK_IntegralCast: case CK_IntegralToBoolean: case CK_IntegralToFloating: case CK_FloatingToIntegral: case CK_FloatingToBoolean: case CK_FloatingCast: Converted = ICE->getSubExpr(); continue; default: return Converted; } } return Converted; } /// Check if this standard conversion sequence represents a narrowing /// conversion, according to C++11 [dcl.init.list]p7. /// /// \param Ctx The AST context. /// \param Converted The result of applying this standard conversion sequence. /// \param ConstantValue If this is an NK_Constant_Narrowing conversion, the /// value of the expression prior to the narrowing conversion. /// \param ConstantType If this is an NK_Constant_Narrowing conversion, the /// type of the expression prior to the narrowing conversion. NarrowingKind StandardConversionSequence::getNarrowingKind(ASTContext &Ctx, const Expr *Converted, APValue &ConstantValue, QualType &ConstantType) const { assert(Ctx.getLangOpts().CPlusPlus && "narrowing check outside C++"); // C++11 [dcl.init.list]p7: // A narrowing conversion is an implicit conversion ... QualType FromType = getToType(0); QualType ToType = getToType(1); switch (Second) { // 'bool' is an integral type; dispatch to the right place to handle it. case ICK_Boolean_Conversion: if (FromType->isRealFloatingType()) goto FloatingIntegralConversion; if (FromType->isIntegralOrUnscopedEnumerationType()) goto IntegralConversion; // Boolean conversions can be from pointers and pointers to members // [conv.bool], and those aren't considered narrowing conversions. return NK_Not_Narrowing; // -- from a floating-point type to an integer type, or // // -- from an integer type or unscoped enumeration type to a floating-point // type, except where the source is a constant expression and the actual // value after conversion will fit into the target type and will produce // the original value when converted back to the original type, or case ICK_Floating_Integral: FloatingIntegralConversion: if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) { return NK_Type_Narrowing; } else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) { llvm::APSInt IntConstantValue; const Expr *Initializer = IgnoreNarrowingConversion(Converted); if (Initializer && Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) { // Convert the integer to the floating type. llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType)); Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(), llvm::APFloat::rmNearestTiesToEven); // And back. 
llvm::APSInt ConvertedValue = IntConstantValue; bool ignored; Result.convertToInteger(ConvertedValue, llvm::APFloat::rmTowardZero, &ignored); // If the resulting value is different, this was a narrowing conversion. if (IntConstantValue != ConvertedValue) { ConstantValue = APValue(IntConstantValue); ConstantType = Initializer->getType(); return NK_Constant_Narrowing; } } else { // Variables are always narrowings. return NK_Variable_Narrowing; } } return NK_Not_Narrowing; // -- from long double to double or float, or from double to float, except // where the source is a constant expression and the actual value after // conversion is within the range of values that can be represented (even // if it cannot be represented exactly), or case ICK_Floating_Conversion: if (FromType->isRealFloatingType() && ToType->isRealFloatingType() && Ctx.getFloatingTypeOrder(FromType, ToType) == 1) { // FromType is larger than ToType. const Expr *Initializer = IgnoreNarrowingConversion(Converted); if (Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) { // Constant! assert(ConstantValue.isFloat()); llvm::APFloat FloatVal = ConstantValue.getFloat(); // Convert the source value into the target type. bool ignored; llvm::APFloat::opStatus ConvertStatus = FloatVal.convert( Ctx.getFloatTypeSemantics(ToType), llvm::APFloat::rmNearestTiesToEven, &ignored); // If there was no overflow, the source value is within the range of // values that can be represented. if (ConvertStatus & llvm::APFloat::opOverflow) { ConstantType = Initializer->getType(); return NK_Constant_Narrowing; } } else { return NK_Variable_Narrowing; } } return NK_Not_Narrowing; // -- from an integer type or unscoped enumeration type to an integer type // that cannot represent all the values of the original type, except where // the source is a constant expression and the actual value after // conversion will fit into the target type and will produce the original // value when converted back to the original type. case ICK_Integral_Conversion: IntegralConversion: { assert(FromType->isIntegralOrUnscopedEnumerationType()); assert(ToType->isIntegralOrUnscopedEnumerationType()); const bool FromSigned = FromType->isSignedIntegerOrEnumerationType(); const unsigned FromWidth = Ctx.getIntWidth(FromType); const bool ToSigned = ToType->isSignedIntegerOrEnumerationType(); const unsigned ToWidth = Ctx.getIntWidth(ToType); if (FromWidth > ToWidth || (FromWidth == ToWidth && FromSigned != ToSigned) || (FromSigned && !ToSigned)) { // Not all values of FromType can be represented in ToType. llvm::APSInt InitializerValue; const Expr *Initializer = IgnoreNarrowingConversion(Converted); if (!Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) { // Such conversions on variables are always narrowing. return NK_Variable_Narrowing; } bool Narrowing = false; if (FromWidth < ToWidth) { // Negative -> unsigned is narrowing. Otherwise, more bits is never // narrowing. if (InitializerValue.isSigned() && InitializerValue.isNegative()) Narrowing = true; } else { // Add a bit to the InitializerValue so we don't have to worry about // signed vs. unsigned comparisons. InitializerValue = InitializerValue.extend( InitializerValue.getBitWidth() + 1); // Convert the initializer to and from the target width and signed-ness. 
llvm::APSInt ConvertedValue = InitializerValue; ConvertedValue = ConvertedValue.trunc(ToWidth); ConvertedValue.setIsSigned(ToSigned); ConvertedValue = ConvertedValue.extend(InitializerValue.getBitWidth()); ConvertedValue.setIsSigned(InitializerValue.isSigned()); // If the result is different, this was a narrowing conversion. if (ConvertedValue != InitializerValue) Narrowing = true; } if (Narrowing) { ConstantType = Initializer->getType(); ConstantValue = APValue(InitializerValue); return NK_Constant_Narrowing; } } return NK_Not_Narrowing; } default: // Other kinds of conversions are not narrowings. return NK_Not_Narrowing; } } /// dump - Print this standard conversion sequence to standard /// error. Useful for debugging overloading issues. void StandardConversionSequence::dump() const { raw_ostream &OS = llvm::errs(); bool PrintedSomething = false; if (First != ICK_Identity) { OS << GetImplicitConversionName(First); PrintedSomething = true; } if (Second != ICK_Identity) { if (PrintedSomething) { OS << " -> "; } OS << GetImplicitConversionName(Second); if (CopyConstructor) { OS << " (by copy constructor)"; } else if (DirectBinding) { OS << " (direct reference binding)"; } else if (ReferenceBinding) { OS << " (reference binding)"; } PrintedSomething = true; } // HLSL Change Starts if (ComponentConversion != ICK_Identity) { if (PrintedSomething) { OS << " -> "; } OS << GetImplicitConversionName(ComponentConversion); PrintedSomething = true; } // HLSL Change Ends if (Third != ICK_Identity) { if (PrintedSomething) { OS << " -> "; } OS << GetImplicitConversionName(Third); PrintedSomething = true; } if (!PrintedSomething) { OS << "No conversions required"; } } /// dump - Print this user-defined conversion sequence to standard /// error. Useful for debugging overloading issues. void UserDefinedConversionSequence::dump() const { raw_ostream &OS = llvm::errs(); if (Before.First || Before.Second || Before.Third) { Before.dump(); OS << " -> "; } if (ConversionFunction) OS << '\'' << *ConversionFunction << '\''; else OS << "aggregate initialization"; if (After.First || After.Second || After.Third) { OS << " -> "; After.dump(); } } /// dump - Print this implicit conversion sequence to standard /// error. Useful for debugging overloading issues. void ImplicitConversionSequence::dump() const { raw_ostream &OS = llvm::errs(); if (isStdInitializerListElement()) OS << "Worst std::initializer_list element conversion: "; switch (ConversionKind) { case StandardConversion: OS << "Standard conversion: "; Standard.dump(); break; case UserDefinedConversion: OS << "User-defined conversion: "; UserDefined.dump(); break; case EllipsisConversion: OS << "Ellipsis conversion"; break; case AmbiguousConversion: OS << "Ambiguous conversion"; break; case BadConversion: OS << "Bad conversion"; break; } OS << "\n"; } void AmbiguousConversionSequence::construct() { new (&conversions()) ConversionSet(); } void AmbiguousConversionSequence::destruct() { conversions().~ConversionSet(); } void AmbiguousConversionSequence::copyFrom(const AmbiguousConversionSequence &O) { FromTypePtr = O.FromTypePtr; ToTypePtr = O.ToTypePtr; new (&conversions()) ConversionSet(O.conversions()); } namespace { // Structure used by DeductionFailureInfo to store // template argument information. struct DFIArguments { TemplateArgument FirstArg; TemplateArgument SecondArg; }; // Structure used by DeductionFailureInfo to store // template parameter and template argument information. 
struct DFIParamWithArguments : DFIArguments { TemplateParameter Param; }; } /// \brief Convert from Sema's representation of template deduction information /// to the form used in overload-candidate information. DeductionFailureInfo clang::MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, TemplateDeductionInfo &Info) { DeductionFailureInfo Result; Result.Result = static_cast<unsigned>(TDK); Result.HasDiagnostic = false; Result.Data = nullptr; switch (TDK) { case Sema::TDK_Success: case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: break; case Sema::TDK_Incomplete: case Sema::TDK_InvalidExplicitArguments: Result.Data = Info.Param.getOpaqueValue(); break; case Sema::TDK_NonDeducedMismatch: { // FIXME: Should allocate from normal heap so that we can free this later. DFIArguments *Saved = new (Context) DFIArguments; Saved->FirstArg = Info.FirstArg; Saved->SecondArg = Info.SecondArg; Result.Data = Saved; break; } case Sema::TDK_Inconsistent: case Sema::TDK_Underqualified: { // FIXME: Should allocate from normal heap so that we can free this later. DFIParamWithArguments *Saved = new (Context) DFIParamWithArguments; Saved->Param = Info.Param; Saved->FirstArg = Info.FirstArg; Saved->SecondArg = Info.SecondArg; Result.Data = Saved; break; } case Sema::TDK_SubstitutionFailure: Result.Data = Info.take(); if (Info.hasSFINAEDiagnostic()) { PartialDiagnosticAt *Diag = new (Result.Diagnostic) PartialDiagnosticAt( SourceLocation(), PartialDiagnostic::NullDiagnostic()); Info.takeSFINAEDiagnostic(*Diag); Result.HasDiagnostic = true; } break; case Sema::TDK_FailedOverloadResolution: Result.Data = Info.Expression; break; case Sema::TDK_MiscellaneousDeductionFailure: break; } return Result; } void DeductionFailureInfo::Destroy() { switch (static_cast<Sema::TemplateDeductionResult>(Result)) { case Sema::TDK_Success: case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_Incomplete: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: case Sema::TDK_InvalidExplicitArguments: case Sema::TDK_FailedOverloadResolution: break; case Sema::TDK_Inconsistent: case Sema::TDK_Underqualified: case Sema::TDK_NonDeducedMismatch: // FIXME: Destroy the data? Data = nullptr; break; case Sema::TDK_SubstitutionFailure: // FIXME: Destroy the template argument list? 
Data = nullptr; if (PartialDiagnosticAt *Diag = getSFINAEDiagnostic()) { Diag->~PartialDiagnosticAt(); HasDiagnostic = false; } break; // Unhandled case Sema::TDK_MiscellaneousDeductionFailure: break; } } PartialDiagnosticAt *DeductionFailureInfo::getSFINAEDiagnostic() { if (HasDiagnostic) return static_cast<PartialDiagnosticAt*>(static_cast<void*>(Diagnostic)); return nullptr; } TemplateParameter DeductionFailureInfo::getTemplateParameter() { switch (static_cast<Sema::TemplateDeductionResult>(Result)) { case Sema::TDK_Success: case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: case Sema::TDK_SubstitutionFailure: case Sema::TDK_NonDeducedMismatch: case Sema::TDK_FailedOverloadResolution: return TemplateParameter(); case Sema::TDK_Incomplete: case Sema::TDK_InvalidExplicitArguments: return TemplateParameter::getFromOpaqueValue(Data); case Sema::TDK_Inconsistent: case Sema::TDK_Underqualified: return static_cast<DFIParamWithArguments*>(Data)->Param; // Unhandled case Sema::TDK_MiscellaneousDeductionFailure: break; } return TemplateParameter(); } TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() { switch (static_cast<Sema::TemplateDeductionResult>(Result)) { case Sema::TDK_Success: case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: case Sema::TDK_Incomplete: case Sema::TDK_InvalidExplicitArguments: case Sema::TDK_Inconsistent: case Sema::TDK_Underqualified: case Sema::TDK_NonDeducedMismatch: case Sema::TDK_FailedOverloadResolution: return nullptr; case Sema::TDK_SubstitutionFailure: return static_cast<TemplateArgumentList*>(Data); // Unhandled case Sema::TDK_MiscellaneousDeductionFailure: break; } return nullptr; } const TemplateArgument *DeductionFailureInfo::getFirstArg() { switch (static_cast<Sema::TemplateDeductionResult>(Result)) { case Sema::TDK_Success: case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_Incomplete: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: case Sema::TDK_InvalidExplicitArguments: case Sema::TDK_SubstitutionFailure: case Sema::TDK_FailedOverloadResolution: return nullptr; case Sema::TDK_Inconsistent: case Sema::TDK_Underqualified: case Sema::TDK_NonDeducedMismatch: return &static_cast<DFIArguments*>(Data)->FirstArg; // Unhandled case Sema::TDK_MiscellaneousDeductionFailure: break; } return nullptr; } const TemplateArgument *DeductionFailureInfo::getSecondArg() { switch (static_cast<Sema::TemplateDeductionResult>(Result)) { case Sema::TDK_Success: case Sema::TDK_Invalid: case Sema::TDK_InstantiationDepth: case Sema::TDK_Incomplete: case Sema::TDK_TooManyArguments: case Sema::TDK_TooFewArguments: case Sema::TDK_InvalidExplicitArguments: case Sema::TDK_SubstitutionFailure: case Sema::TDK_FailedOverloadResolution: return nullptr; case Sema::TDK_Inconsistent: case Sema::TDK_Underqualified: case Sema::TDK_NonDeducedMismatch: return &static_cast<DFIArguments*>(Data)->SecondArg; // Unhandled case Sema::TDK_MiscellaneousDeductionFailure: break; } return nullptr; } Expr *DeductionFailureInfo::getExpr() { if (static_cast<Sema::TemplateDeductionResult>(Result) == Sema::TDK_FailedOverloadResolution) return static_cast<Expr*>(Data); return nullptr; } void OverloadCandidateSet::destroyCandidates() { for (iterator i = begin(), e = end(); i != e; ++i) { for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii) i->Conversions[ii].~ImplicitConversionSequence(); if (!i->Viable 
&& i->FailureKind == ovl_fail_bad_deduction) i->DeductionFailure.Destroy(); } } void OverloadCandidateSet::clear() { destroyCandidates(); NumInlineSequences = 0; Candidates.clear(); Functions.clear(); } namespace { class UnbridgedCastsSet { struct Entry { Expr **Addr; Expr *Saved; }; SmallVector<Entry, 2> Entries; public: void save(Sema &S, Expr *&E) { assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast)); Entry entry = { &E, E }; Entries.push_back(entry); E = S.stripARCUnbridgedCast(E); } void restore() { for (SmallVectorImpl<Entry>::iterator i = Entries.begin(), e = Entries.end(); i != e; ++i) *i->Addr = i->Saved; } }; } /// checkPlaceholderForOverload - Do any interesting placeholder-like /// preprocessing on the given expression. /// /// \param unbridgedCasts a collection to which to add unbridged casts; /// without this, they will be immediately diagnosed as errors /// /// Return true on unrecoverable error. static bool checkPlaceholderForOverload(Sema &S, Expr *&E, UnbridgedCastsSet *unbridgedCasts = nullptr) { if (const BuiltinType *placeholder = E->getType()->getAsPlaceholderType()) { // We can't handle overloaded expressions here because overload // resolution might reasonably tweak them. if (placeholder->getKind() == BuiltinType::Overload) return false; // If the context potentially accepts unbridged ARC casts, strip // the unbridged cast and add it to the collection for later restoration. if (placeholder->getKind() == BuiltinType::ARCUnbridgedCast && unbridgedCasts) { unbridgedCasts->save(S, E); return false; } // Go ahead and check everything else. ExprResult result = S.CheckPlaceholderExpr(E); if (result.isInvalid()) return true; E = result.get(); return false; } // Nothing to do. return false; } /// checkArgPlaceholdersForOverload - Check a set of call operands for /// placeholders. static bool checkArgPlaceholdersForOverload(Sema &S, MultiExprArg Args, UnbridgedCastsSet &unbridged) { for (unsigned i = 0, e = Args.size(); i != e; ++i) if (checkPlaceholderForOverload(S, Args[i], &unbridged)) return true; return false; } // IsOverload - Determine whether the given New declaration is an // overload of the declarations in Old. This routine returns false if // New and Old cannot be overloaded, e.g., if New has the same // signature as some function in Old (C++ 1.3.10) or if the Old // declarations aren't functions (or function templates) at all. When // it does return false, MatchedDecl will point to the decl that New // cannot be overloaded with. This decl may be a UsingShadowDecl on // top of the underlying declaration. // // Example: Given the following input: // // void f(int, float); // #1 // void f(int, int); // #2 // int f(int, int); // #3 // // When we process #1, there is no previous declaration of "f", // so IsOverload will not be used. // // When we process #2, Old contains only the FunctionDecl for #1. By // comparing the parameter types, we see that #1 and #2 are overloaded // (since they have different signatures), so this routine returns // false; MatchedDecl is unchanged. // // When we process #3, Old is an overload set containing #1 and #2. We // compare the signatures of #3 to #1 (they're overloaded, so we do // nothing) and then #3 to #2. Since the signatures of #3 and #2 are // identical (return types of functions are not part of the // signature), IsOverload returns false and MatchedDecl will be set to // point to the FunctionDecl for #2. // // 'NewIsUsingShadowDecl' indicates that 'New' is being introduced // into a class by a using declaration. 
The rules for whether to hide // shadow declarations ignore some properties which otherwise figure // into a function template's signature. Sema::OverloadKind Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old, NamedDecl *&Match, bool NewIsUsingDecl) { for (LookupResult::iterator I = Old.begin(), E = Old.end(); I != E; ++I) { NamedDecl *OldD = *I; bool OldIsUsingDecl = false; if (isa<UsingShadowDecl>(OldD)) { OldIsUsingDecl = true; // We can always introduce two using declarations into the same // context, even if they have identical signatures. if (NewIsUsingDecl) continue; OldD = cast<UsingShadowDecl>(OldD)->getTargetDecl(); } // If either declaration was introduced by a using declaration, // we'll need to use slightly different rules for matching. // Essentially, these rules are the normal rules, except that // function templates hide function templates with different // return types or template parameter lists. bool UseMemberUsingDeclRules = (OldIsUsingDecl || NewIsUsingDecl) && CurContext->isRecord() && !New->getFriendObjectKind(); if (FunctionDecl *OldF = OldD->getAsFunction()) { if (!IsOverload(New, OldF, UseMemberUsingDeclRules)) { if (UseMemberUsingDeclRules && OldIsUsingDecl) { HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I)); continue; } if (!isa<FunctionTemplateDecl>(OldD) && !shouldLinkPossiblyHiddenDecl(*I, New)) continue; Match = *I; return Ovl_Match; } } else if (isa<UsingDecl>(OldD)) { // We can overload with these, which can show up when doing // redeclaration checks for UsingDecls. assert(Old.getLookupKind() == LookupUsingDeclName); } else if (isa<TagDecl>(OldD)) { // We can always overload with tags by hiding them. } else if (isa<UnresolvedUsingValueDecl>(OldD)) { // Optimistically assume that an unresolved using decl will // overload; if it doesn't, we'll have to diagnose during // template instantiation. } else { // (C++ 13p1): // Only function declarations can be overloaded; object and type // declarations cannot be overloaded. Match = *I; return Ovl_NonFunction; } } return Ovl_Overload; } bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old, bool UseUsingDeclRules) { // C++ [basic.start.main]p2: This function shall not be overloaded. if (New->isMain()) return false; // MSVCRT user defined entry points cannot be overloaded. if (New->isMSVCRTEntryPoint()) return false; FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate(); FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate(); // C++ [temp.fct]p2: // A function template can be overloaded with other function templates // and with normal (non-template) functions. if ((OldTemplate == nullptr) != (NewTemplate == nullptr)) return true; // Is the function New an overload of the function Old? QualType OldQType = Context.getCanonicalType(Old->getType()); QualType NewQType = Context.getCanonicalType(New->getType()); // Compare the signatures (C++ 1.3.10) of the two functions to // determine whether they are overloads. If we find any mismatch // in the signature, they are overloads. // If either of these functions is a K&R-style function (no // prototype), then we consider them to have matching signatures. 
if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) || isa<FunctionNoProtoType>(NewQType.getTypePtr())) return false; const FunctionProtoType *OldType = cast<FunctionProtoType>(OldQType); const FunctionProtoType *NewType = cast<FunctionProtoType>(NewQType); // The signature of a function includes the types of its // parameters (C++ 1.3.10), which includes the presence or absence // of the ellipsis; see C++ DR 357). if (OldQType != NewQType && (OldType->getNumParams() != NewType->getNumParams() || OldType->isVariadic() != NewType->isVariadic() || !FunctionParamTypesAreEqual(OldType, NewType))) return true; // C++ [temp.over.link]p4: // The signature of a function template consists of its function // signature, its return type and its template parameter list. The names // of the template parameters are significant only for establishing the // relationship between the template parameters and the rest of the // signature. // // We check the return type and template parameter lists for function // templates first; the remaining checks follow. // // However, we don't consider either of these when deciding whether // a member introduced by a shadow declaration is hidden. if (!UseUsingDeclRules && NewTemplate && (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(), OldTemplate->getTemplateParameters(), false, TPL_TemplateMatch) || OldType->getReturnType() != NewType->getReturnType())) return true; // If the function is a class member, its signature includes the // cv-qualifiers (if any) and ref-qualifier (if any) on the function itself. // // As part of this, also check whether one of the member functions // is static, in which case they are not overloads (C++ // 13.1p2). While not part of the definition of the signature, // this check is important to determine whether these functions // can be overloaded. CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old); CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New); if (OldMethod && NewMethod && !OldMethod->isStatic() && !NewMethod->isStatic()) { if (OldMethod->getRefQualifier() != NewMethod->getRefQualifier()) { if (!UseUsingDeclRules && (OldMethod->getRefQualifier() == RQ_None || NewMethod->getRefQualifier() == RQ_None)) { // C++0x [over.load]p2: // - Member function declarations with the same name and the same // parameter-type-list as well as member function template // declarations with the same name, the same parameter-type-list, and // the same template parameter lists cannot be overloaded if any of // them, but not all, have a ref-qualifier (8.3.5). Diag(NewMethod->getLocation(), diag::err_ref_qualifier_overload) << NewMethod->getRefQualifier() << OldMethod->getRefQualifier(); Diag(OldMethod->getLocation(), diag::note_previous_declaration); } return true; } // We may not have applied the implicit const for a constexpr member // function yet (because we haven't yet resolved whether this is a static // or non-static member function). Add it now, on the assumption that this // is a redeclaration of OldMethod. unsigned OldQuals = OldMethod->getTypeQualifiers(); unsigned NewQuals = NewMethod->getTypeQualifiers(); if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() && !isa<CXXConstructorDecl>(NewMethod)) NewQuals |= Qualifiers::Const; // We do not allow overloading based off of '__restrict'. OldQuals &= ~Qualifiers::Restrict; NewQuals &= ~Qualifiers::Restrict; if (OldQuals != NewQuals) return true; } // enable_if attributes are an order-sensitive part of the signature. 
for (specific_attr_iterator<EnableIfAttr> NewI = New->specific_attr_begin<EnableIfAttr>(), NewE = New->specific_attr_end<EnableIfAttr>(), OldI = Old->specific_attr_begin<EnableIfAttr>(), OldE = Old->specific_attr_end<EnableIfAttr>(); NewI != NewE || OldI != OldE; ++NewI, ++OldI) { if (NewI == NewE || OldI == OldE) return true; llvm::FoldingSetNodeID NewID, OldID; NewI->getCond()->Profile(NewID, Context, true); OldI->getCond()->Profile(OldID, Context, true); if (NewID != OldID) return true; } // The signatures match; this is not an overload. return false; } /// \brief Checks availability of the function depending on the current /// function context. Inside an unavailable function, unavailability is ignored. /// /// \returns true if \arg FD is unavailable and current context is inside /// an available function, false otherwise. bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) { return FD->isUnavailable() && !cast<Decl>(CurContext)->isUnavailable(); } /// \brief Tries a user-defined conversion from From to ToType. /// /// Produces an implicit conversion sequence for when a standard conversion /// is not an option. See TryImplicitConversion for more information. static ImplicitConversionSequence TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion, bool AllowObjCConversionOnExplicit) { ImplicitConversionSequence ICS; if (SuppressUserConversions || S.getLangOpts().HLSL) { // HLSL Change - no user conversions // We're not in the case above, so there is no conversion that // we can perform. ICS.setBad(BadConversionSequence::no_conversion, From, ToType); return ICS; } // Attempt user-defined conversion. OverloadCandidateSet Conversions(From->getExprLoc(), OverloadCandidateSet::CSK_Normal); switch (IsUserDefinedConversion(S, From, ToType, ICS.UserDefined, Conversions, AllowExplicit, AllowObjCConversionOnExplicit)) { case OR_Success: case OR_Deleted: ICS.setUserDefined(); ICS.UserDefined.Before.setAsIdentityConversion(); // C++ [over.ics.user]p4: // A conversion of an expression of class type to the same class // type is given Exact Match rank, and a conversion of an // expression of class type to a base class of that type is // given Conversion rank, in spite of the fact that a copy // constructor (i.e., a user-defined conversion function) is // called for those cases. if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(ICS.UserDefined.ConversionFunction)) { QualType FromCanon = S.Context.getCanonicalType(From->getType().getUnqualifiedType()); QualType ToCanon = S.Context.getCanonicalType(ToType).getUnqualifiedType(); if (Constructor->isCopyConstructor() && (FromCanon == ToCanon || S.IsDerivedFrom(FromCanon, ToCanon))) { // Turn this into a "standard" conversion sequence, so that it // gets ranked with standard conversion sequences. ICS.setStandard(); ICS.Standard.setAsIdentityConversion(); ICS.Standard.setFromType(From->getType()); ICS.Standard.setAllToTypes(ToType); ICS.Standard.CopyConstructor = Constructor; if (ToCanon != FromCanon) ICS.Standard.Second = ICK_Derived_To_Base; } } break; case OR_Ambiguous: ICS.setAmbiguous(); ICS.Ambiguous.setFromType(From->getType()); ICS.Ambiguous.setToType(ToType); for (OverloadCandidateSet::iterator Cand = Conversions.begin(); Cand != Conversions.end(); ++Cand) if (Cand->Viable) ICS.Ambiguous.addConversion(Cand->Function); break; // Fall through. 
case OR_No_Viable_Function: ICS.setBad(BadConversionSequence::no_conversion, From, ToType); break; } return ICS; } /// TryImplicitConversion - Attempt to perform an implicit conversion /// from the given expression (Expr) to the given type (ToType). This /// function returns an implicit conversion sequence that can be used /// to perform the initialization. Given /// /// void f(float f); /// void g(int i) { f(i); } /// /// this routine would produce an implicit conversion sequence to /// describe the initialization of f from i, which will be a standard /// conversion sequence containing an lvalue-to-rvalue conversion (C++ /// 4.1) followed by a floating-integral conversion (C++ 4.9). // /// Note that this routine only determines how the conversion can be /// performed; it does not actually perform the conversion. As such, /// it will not produce any diagnostics if no conversion is available, /// but will instead return an implicit conversion sequence of kind /// "BadConversion". /// /// If @p SuppressUserConversions, then user-defined conversions are /// not permitted. /// If @p AllowExplicit, then explicit user-defined conversions are /// permitted. /// /// \param AllowObjCWritebackConversion Whether we allow the Objective-C /// writeback conversion, which allows __autoreleasing id* parameters to /// be initialized with __strong id* or __weak id* arguments. static ImplicitConversionSequence TryImplicitConversion(Sema &S, Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion, bool AllowObjCConversionOnExplicit) { ImplicitConversionSequence ICS; if (IsStandardConversion(S, From, ToType, InOverloadResolution, ICS.Standard, CStyle, AllowObjCWritebackConversion)){ ICS.setStandard(); return ICS; } if (!S.getLangOpts().CPlusPlus) { ICS.setBad(BadConversionSequence::no_conversion, From, ToType); return ICS; } // C++ [over.ics.user]p4: // A conversion of an expression of class type to the same class // type is given Exact Match rank, and a conversion of an // expression of class type to a base class of that type is // given Conversion rank, in spite of the fact that a copy/move // constructor (i.e., a user-defined conversion function) is // called for those cases. QualType FromType = From->getType(); if (ToType->getAs<RecordType>() && FromType->getAs<RecordType>() && (S.Context.hasSameUnqualifiedType(FromType, ToType) || S.IsDerivedFrom(FromType, ToType))) { ICS.setStandard(); ICS.Standard.setAsIdentityConversion(); ICS.Standard.setFromType(FromType); ICS.Standard.setAllToTypes(ToType); // We don't actually check at this point whether there is a valid // copy/move constructor, since overloading just assumes that it // exists. When we actually perform initialization, we'll find the // appropriate constructor to copy the returned object, if needed. ICS.Standard.CopyConstructor = nullptr; // Determine whether this is considered a derived-to-base conversion. 
if (!S.Context.hasSameUnqualifiedType(FromType, ToType)) ICS.Standard.Second = ICK_Derived_To_Base; return ICS; } return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions, AllowExplicit, InOverloadResolution, CStyle, AllowObjCWritebackConversion, AllowObjCConversionOnExplicit); } ImplicitConversionSequence Sema::TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion) { return ::TryImplicitConversion(*this, From, ToType, SuppressUserConversions, AllowExplicit, InOverloadResolution, CStyle, AllowObjCWritebackConversion, /*AllowObjCConversionOnExplicit=*/false); } /// PerformImplicitConversion - Perform an implicit conversion of the /// expression From to the type ToType. Returns the /// converted expression. Flavor is the kind of conversion we're /// performing, used in the error message. If @p AllowExplicit, /// explicit user-defined conversions are permitted. ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit) { ImplicitConversionSequence ICS; return PerformImplicitConversion(From, ToType, Action, AllowExplicit, ICS); } ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS) { if (checkPlaceholderForOverload(*this, From)) return ExprError(); // Objective-C ARC: Determine whether we will allow the writeback conversion. bool AllowObjCWritebackConversion = getLangOpts().ObjCAutoRefCount && (Action == AA_Passing || Action == AA_Sending); if (getLangOpts().ObjC1) CheckObjCBridgeRelatedConversions(From->getLocStart(), ToType, From->getType(), From); ICS = ::TryImplicitConversion(*this, From, ToType, /*SuppressUserConversions=*/false, AllowExplicit, /*InOverloadResolution=*/false, /*CStyle=*/false, AllowObjCWritebackConversion, /*AllowObjCConversionOnExplicit=*/false); return PerformImplicitConversion(From, ToType, ICS, Action); } /// \brief Determine whether the conversion from FromType to ToType is a valid /// conversion that strips "noreturn" off the nested function type. 
bool Sema::IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy) { if (Context.hasSameUnqualifiedType(FromType, ToType)) return false; // Permit the conversion F(t __attribute__((noreturn))) -> F(t) // where F adds one of the following at most once: // - a pointer // - a member pointer // - a block pointer CanQualType CanTo = Context.getCanonicalType(ToType); CanQualType CanFrom = Context.getCanonicalType(FromType); Type::TypeClass TyClass = CanTo->getTypeClass(); if (TyClass != CanFrom->getTypeClass()) return false; if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto) { if (TyClass == Type::Pointer) { CanTo = CanTo.getAs<PointerType>()->getPointeeType(); CanFrom = CanFrom.getAs<PointerType>()->getPointeeType(); } else if (TyClass == Type::BlockPointer) { CanTo = CanTo.getAs<BlockPointerType>()->getPointeeType(); CanFrom = CanFrom.getAs<BlockPointerType>()->getPointeeType(); } else if (TyClass == Type::MemberPointer) { CanTo = CanTo.getAs<MemberPointerType>()->getPointeeType(); CanFrom = CanFrom.getAs<MemberPointerType>()->getPointeeType(); } else { return false; } TyClass = CanTo->getTypeClass(); if (TyClass != CanFrom->getTypeClass()) return false; if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto) return false; } const FunctionType *FromFn = cast<FunctionType>(CanFrom); FunctionType::ExtInfo EInfo = FromFn->getExtInfo(); if (!EInfo.getNoReturn()) return false; FromFn = Context.adjustFunctionType(FromFn, EInfo.withNoReturn(false)); assert(QualType(FromFn, 0).isCanonical()); if (QualType(FromFn, 0) != CanTo) return false; ResultTy = ToType; return true; } /// \brief Determine whether the conversion from FromType to ToType is a valid /// vector conversion. /// /// \param ICK Will be set to the vector conversion kind, if this is a vector /// conversion. static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType, ImplicitConversionKind &ICK) { // We need at least one of these types to be a vector type to have a vector // conversion. if (!ToType->isVectorType() && !FromType->isVectorType()) return false; // Identical types require no conversions. if (S.Context.hasSameUnqualifiedType(FromType, ToType)) return false; // There are no conversions between extended vector types, only identity. if (ToType->isExtVectorType()) { // There are no conversions between extended vector types other than the // identity conversion. if (FromType->isExtVectorType()) return false; // Vector splat from any arithmetic type to a vector. if (FromType->isArithmeticType()) { ICK = ICK_Vector_Splat; return true; } } // We can perform the conversion between vector types in the following cases: // 1)vector types are equivalent AltiVec and GCC vector types // 2)lax vector conversions are permitted and the vector types are of the // same size if (ToType->isVectorType() && FromType->isVectorType()) { if (S.Context.areCompatibleVectorTypes(FromType, ToType) || S.isLaxVectorConversion(FromType, ToType)) { ICK = ICK_Vector_Conversion; return true; } } return false; } static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType, bool InOverloadResolution, StandardConversionSequence &SCS, bool CStyle); /// IsStandardConversion - Determines whether there is a standard /// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the /// expression From to the type ToType. Standard conversion sequences /// only consider non-class types; for conversions that involve class /// types, use TryImplicitConversion. 
If a conversion exists, SCS will /// contain the standard conversion sequence required to perform this /// conversion and this routine will return true. Otherwise, this /// routine will return false and the value of SCS is unspecified. static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType, bool InOverloadResolution, StandardConversionSequence &SCS, bool CStyle, bool AllowObjCWritebackConversion) { QualType FromType = From->getType(); // Standard conversions (C++ [conv]) SCS.setAsIdentityConversion(); SCS.IncompatibleObjC = false; SCS.setFromType(FromType); SCS.CopyConstructor = nullptr; // HLSL Change Begins if (S.getLangOpts().HLSL) { return hlsl::CanConvert(&S, SourceLocation(), From, ToType, /*explicitConversion=*/false, &SCS); } // HLSL Change Ends // There are no standard conversions for class types in C++, so // abort early. When overloading in C, however, we do permit if (FromType->isRecordType() || ToType->isRecordType()) { if (S.getLangOpts().CPlusPlus) return false; // When we're overloading in C, we allow, as standard conversions, } // The first conversion can be an lvalue-to-rvalue conversion, // array-to-pointer conversion, or function-to-pointer conversion // (C++ 4p1). if (FromType == S.Context.OverloadTy) { DeclAccessPair AccessPair; if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(From, ToType, false, AccessPair)) { // We were able to resolve the address of the overloaded function, // so we can convert to the type of that function. FromType = Fn->getType(); SCS.setFromType(FromType); // we can sometimes resolve &foo<int> regardless of ToType, so check // if the type matches (identity) or we are converting to bool if (!S.Context.hasSameUnqualifiedType( S.ExtractUnqualifiedFunctionType(ToType), FromType)) { QualType resultTy; // if the function type matches except for [[noreturn]], it's ok if (!S.IsNoReturnConversion(FromType, S.ExtractUnqualifiedFunctionType(ToType), resultTy)) // otherwise, only a boolean conversion is standard if (!ToType->isBooleanType()) return false; } // Check if the "from" expression is taking the address of an overloaded // function and recompute the FromType accordingly. Take advantage of the // fact that non-static member functions *must* have such an address-of // expression. CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn); if (Method && !Method->isStatic()) { assert(isa<UnaryOperator>(From->IgnoreParens()) && "Non-unary operator on non-static member address"); assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode() == UO_AddrOf && "Non-address-of operator on non-static member address"); const Type *ClassType = S.Context.getTypeDeclType(Method->getParent()).getTypePtr(); FromType = S.Context.getMemberPointerType(FromType, ClassType); } else if (isa<UnaryOperator>(From->IgnoreParens())) { assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode() == UO_AddrOf && "Non-address-of operator for overloaded function expression"); FromType = S.Context.getPointerType(FromType); } // Check that we've computed the proper type after overload resolution. assert(S.Context.hasSameType( FromType, S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType())); } else { return false; } } // Lvalue-to-rvalue conversion (C++11 4.1): // A glvalue (3.10) of a non-function, non-array type T can // be converted to a prvalue. 
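  // Illustrative example (not from the original source): given 'int x;',
  // the subexpression 'x' in 'x + 1' undergoes an lvalue-to-rvalue
  // conversion so that its value, rather than the object itself, is used.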
bool argIsLValue = From->isGLValue(); if (argIsLValue && !FromType->isFunctionType() && (!FromType->isArrayType() || S.getLangOpts().HLSL) && // HLSL Change - HLSL allows arrays S.Context.getCanonicalType(FromType) != S.Context.OverloadTy) { SCS.First = ICK_Lvalue_To_Rvalue; // C11 6.3.2.1p2: // ... if the lvalue has atomic type, the value has the non-atomic version // of the type of the lvalue ... if (const AtomicType *Atomic = FromType->getAs<AtomicType>()) FromType = Atomic->getValueType(); // If T is a non-class type, the type of the rvalue is the // cv-unqualified version of T. Otherwise, the type of the rvalue // is T (C++ 4.1p1). C++ can't get here with class types; in C, we // just strip the qualifiers because they don't matter. FromType = FromType.getUnqualifiedType(); } else if (FromType->isArrayType()) { // Array-to-pointer conversion (C++ 4.2) SCS.First = ICK_Array_To_Pointer; // An lvalue or rvalue of type "array of N T" or "array of unknown // bound of T" can be converted to an rvalue of type "pointer to // T" (C++ 4.2p1). FromType = S.Context.getArrayDecayedType(FromType); if (S.IsStringLiteralToNonConstPointerConversion(From, ToType)) { // This conversion is deprecated in C++03 (D.4) SCS.DeprecatedStringLiteralToCharPtr = true; // For the purpose of ranking in overload resolution // (13.3.3.1.1), this conversion is considered an // array-to-pointer conversion followed by a qualification // conversion (4.4). (C++ 4.2p2) SCS.Second = ICK_Identity; SCS.Third = ICK_Qualification; SCS.QualificationIncludesObjCLifetime = false; SCS.setAllToTypes(FromType); return true; } } else if (FromType->isFunctionType() && argIsLValue) { // Function-to-pointer conversion (C++ 4.3). SCS.First = ICK_Function_To_Pointer; // An lvalue of function type T can be converted to an rvalue of // type "pointer to T." The result is a pointer to the // function. (C++ 4.3p1). FromType = S.Context.getPointerType(FromType); } else { // We don't require any conversions for the first step. SCS.First = ICK_Identity; } SCS.setToType(0, FromType); // The second conversion can be an integral promotion, floating // point promotion, integral conversion, floating point conversion, // floating-integral conversion, pointer conversion, // pointer-to-member conversion, or boolean conversion (C++ 4p1). // For overloading in C, this can also be a "compatible-type" // conversion. bool IncompatibleObjC = false; ImplicitConversionKind SecondICK = ICK_Identity; if (S.Context.hasSameUnqualifiedType(FromType, ToType)) { // The unqualified versions of the types are the same: there's no // conversion to do. SCS.Second = ICK_Identity; } else if (S.IsIntegralPromotion(From, FromType, ToType)) { // Integral promotion (C++ 4.5). SCS.Second = ICK_Integral_Promotion; FromType = ToType.getUnqualifiedType(); } else if (S.IsFloatingPointPromotion(FromType, ToType)) { // Floating point promotion (C++ 4.6). SCS.Second = ICK_Floating_Promotion; FromType = ToType.getUnqualifiedType(); } else if (S.IsComplexPromotion(FromType, ToType)) { // Complex promotion (Clang extension) SCS.Second = ICK_Complex_Promotion; FromType = ToType.getUnqualifiedType(); } else if (ToType->isBooleanType() && (FromType->isArithmeticType() || FromType->isAnyPointerType() || FromType->isBlockPointerType() || FromType->isMemberPointerType() || FromType->isNullPtrType())) { // Boolean conversions (C++ 4.12). 
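    // Illustrative example (not from the original source): 'if (p)' for a
    // pointer 'T *p' relies on exactly this boolean conversion.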
SCS.Second = ICK_Boolean_Conversion; FromType = S.Context.BoolTy; } else if (FromType->isIntegralOrUnscopedEnumerationType() && ToType->isIntegralType(S.Context)) { // Integral conversions (C++ 4.7). SCS.Second = ICK_Integral_Conversion; FromType = ToType.getUnqualifiedType(); } else if (FromType->isAnyComplexType() && ToType->isAnyComplexType()) { // Complex conversions (C99 6.3.1.6) SCS.Second = ICK_Complex_Conversion; FromType = ToType.getUnqualifiedType(); } else if ((FromType->isAnyComplexType() && ToType->isArithmeticType()) || (ToType->isAnyComplexType() && FromType->isArithmeticType())) { // Complex-real conversions (C99 6.3.1.7) SCS.Second = ICK_Complex_Real; FromType = ToType.getUnqualifiedType(); } else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) { // Floating point conversions (C++ 4.8). SCS.Second = ICK_Floating_Conversion; FromType = ToType.getUnqualifiedType(); } else if ((FromType->isRealFloatingType() && ToType->isIntegralType(S.Context)) || (FromType->isIntegralOrUnscopedEnumerationType() && ToType->isRealFloatingType())) { // Floating-integral conversions (C++ 4.9). SCS.Second = ICK_Floating_Integral; FromType = ToType.getUnqualifiedType(); } else if (S.IsBlockPointerConversion(FromType, ToType, FromType)) { SCS.Second = ICK_Block_Pointer_Conversion; } else if (AllowObjCWritebackConversion && S.isObjCWritebackConversion(FromType, ToType, FromType)) { SCS.Second = ICK_Writeback_Conversion; } else if (S.IsPointerConversion(From, FromType, ToType, InOverloadResolution, FromType, IncompatibleObjC)) { // Pointer conversions (C++ 4.10). SCS.Second = ICK_Pointer_Conversion; SCS.IncompatibleObjC = IncompatibleObjC; FromType = FromType.getUnqualifiedType(); } else if (S.IsMemberPointerConversion(From, FromType, ToType, InOverloadResolution, FromType)) { // Pointer to member conversions (4.11). SCS.Second = ICK_Pointer_Member; } else if (IsVectorConversion(S, FromType, ToType, SecondICK)) { SCS.Second = SecondICK; FromType = ToType.getUnqualifiedType(); } else if (!S.getLangOpts().CPlusPlus && S.Context.typesAreCompatible(ToType, FromType)) { // Compatible conversions (Clang extension for C function overloading) SCS.Second = ICK_Compatible_Conversion; FromType = ToType.getUnqualifiedType(); } else if (S.IsNoReturnConversion(FromType, ToType, FromType)) { // Treat a conversion that strips "noreturn" as an identity conversion. SCS.Second = ICK_NoReturn_Adjustment; } else if (IsTransparentUnionStandardConversion(S, From, ToType, InOverloadResolution, SCS, CStyle)) { SCS.Second = ICK_TransparentUnionConversion; FromType = ToType; } else if (tryAtomicConversion(S, From, ToType, InOverloadResolution, SCS, CStyle)) { // tryAtomicConversion has updated the standard conversion sequence // appropriately. return true; } else if (ToType->isEventT() && From->isIntegerConstantExpr(S.getASTContext()) && (From->EvaluateKnownConstInt(S.getASTContext()) == 0)) { SCS.Second = ICK_Zero_Event_Conversion; FromType = ToType; } else { // No second conversion required. SCS.Second = ICK_Identity; } SCS.setToType(1, FromType); QualType CanonFrom; QualType CanonTo; // The third conversion can be a qualification conversion (C++ 4p1). 
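  // Illustrative example (not from the original source): initializing a
  // 'const int *' parameter from an 'int *' argument needs only this
  // qualification conversion as the third step.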
bool ObjCLifetimeConversion; if (S.IsQualificationConversion(FromType, ToType, CStyle, ObjCLifetimeConversion)) { SCS.Third = ICK_Qualification; SCS.QualificationIncludesObjCLifetime = ObjCLifetimeConversion; FromType = ToType; CanonFrom = S.Context.getCanonicalType(FromType); CanonTo = S.Context.getCanonicalType(ToType); } else { // No conversion required SCS.Third = ICK_Identity; // C++ [over.best.ics]p6: // [...] Any difference in top-level cv-qualification is // subsumed by the initialization itself and does not constitute // a conversion. [...] CanonFrom = S.Context.getCanonicalType(FromType); CanonTo = S.Context.getCanonicalType(ToType); if (CanonFrom.getLocalUnqualifiedType() == CanonTo.getLocalUnqualifiedType() && CanonFrom.getLocalQualifiers() != CanonTo.getLocalQualifiers()) { FromType = ToType; CanonFrom = CanonTo; } } SCS.setToType(2, FromType); // If we have not converted the argument type to the parameter type, // this is a bad conversion sequence. if (CanonFrom != CanonTo) return false; return true; } static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From, QualType &ToType, bool InOverloadResolution, StandardConversionSequence &SCS, bool CStyle) { const RecordType *UT = ToType->getAsUnionType(); if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>()) return false; // The field to initialize within the transparent union. RecordDecl *UD = UT->getDecl(); // It's compatible if the expression matches any of the fields. for (const auto *it : UD->fields()) { if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS, CStyle, /*ObjCWritebackConversion=*/false)) { ToType = it->getType(); return true; } } return false; } /// IsIntegralPromotion - Determines whether the conversion from the /// expression From (whose potentially-adjusted type is FromType) to /// ToType is an integral promotion (C++ 4.5). If so, returns true and /// sets PromotedType to the promoted type. bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) { const BuiltinType *To = ToType->getAs<BuiltinType>(); // All integers are built-in. if (!To) { return false; } // An rvalue of type char, signed char, unsigned char, short int, or // unsigned short int can be converted to an rvalue of type int if // int can represent all the values of the source type; otherwise, // the source rvalue can be converted to an rvalue of type unsigned // int (C++ 4.5p1). if (FromType->isPromotableIntegerType() && !FromType->isBooleanType() && !FromType->isEnumeralType()) { if (// We can promote any signed, promotable integer type to an int (FromType->isSignedIntegerType() || // We can promote any unsigned integer type whose size is // less than int to an int. (!FromType->isSignedIntegerType() && Context.getTypeSize(FromType) < Context.getTypeSize(ToType)))) { return To->getKind() == BuiltinType::Int; } return To->getKind() == BuiltinType::UInt; } // C++11 [conv.prom]p3: // A prvalue of an unscoped enumeration type whose underlying type is not // fixed (7.2) can be converted to a prvalue of the first of the // following types that can represent all the values of the enumeration // (i.e., the values in the range bmin to bmax as described in 7.2): int, // unsigned int, long int, unsigned long int, long long int, or unsigned // long long int.
If none of the types in that list can represent all the // values of the enumeration, a prvalue of an unscoped enumeration // type can be converted to a prvalue of the extended integer type // with lowest integer conversion rank (4.13) greater than the rank of long // long in which all the values of the enumeration can be represented. If // there are two such extended types, the signed one is chosen. // C++11 [conv.prom]p4: // A prvalue of an unscoped enumeration type whose underlying type is fixed // can be converted to a prvalue of its underlying type. Moreover, if // integral promotion can be applied to its underlying type, a prvalue of an // unscoped enumeration type whose underlying type is fixed can also be // converted to a prvalue of the promoted underlying type. if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) { // C++0x 7.2p9: Note that this implicit enum to int conversion is not // provided for a scoped enumeration. if (FromEnumType->getDecl()->isScoped()) return false; // We can perform an integral promotion to the underlying type of the enum, // even if that's not the promoted type. Note that the check for promoting // the underlying type is based on the type alone, and does not consider // the bitfield-ness of the actual source expression. if (FromEnumType->getDecl()->isFixed()) { QualType Underlying = FromEnumType->getDecl()->getIntegerType(); return Context.hasSameUnqualifiedType(Underlying, ToType) || IsIntegralPromotion(nullptr, Underlying, ToType); } // We have already pre-calculated the promotion type, so this is trivial. if (ToType->isIntegerType() && !RequireCompleteType(From->getLocStart(), FromType, 0)) return Context.hasSameUnqualifiedType( ToType, FromEnumType->getDecl()->getPromotionType()); } // C++0x [conv.prom]p2: // A prvalue of type char16_t, char32_t, or wchar_t (3.9.1) can be converted // to a prvalue of the first of the following types that can // represent all the values of its underlying type: int, unsigned int, // long int, unsigned long int, long long int, or unsigned long long int. // If none of the types in that list can represent all the values of its // underlying type, a prvalue of type char16_t, char32_t, // or wchar_t can be converted to a prvalue of its underlying // type. if (FromType->isAnyCharacterType() && !FromType->isCharType() && ToType->isIntegerType()) { // Determine whether the type we're converting from is signed or // unsigned. bool FromIsSigned = FromType->isSignedIntegerType(); uint64_t FromSize = Context.getTypeSize(FromType); // The types we'll try to promote to, in the appropriate // order. Try each of these types. QualType PromoteTypes[6] = { Context.IntTy, Context.UnsignedIntTy, Context.LongTy, Context.UnsignedLongTy, Context.LongLongTy, Context.UnsignedLongLongTy }; for (int Idx = 0; Idx < 6; ++Idx) { uint64_t ToSize = Context.getTypeSize(PromoteTypes[Idx]); if (FromSize < ToSize || (FromSize == ToSize && FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) { // We found the type that we can promote to. If this is the // type we wanted, we have a promotion. Otherwise, no // promotion. return Context.hasSameUnqualifiedType(ToType, PromoteTypes[Idx]); } } } // An rvalue for an integral bit-field (9.6) can be converted to an // rvalue of type int if int can represent all the values of the // bit-field; otherwise, it can be converted to unsigned int if // unsigned int can represent all the values of the bit-field.
If // the bit-field is larger yet, no integral promotion applies to // it. If the bit-field has an enumerated type, it is treated as any // other value of that type for promotion purposes (C++ 4.5p3). // FIXME: We should delay checking of bit-fields until we actually perform the // conversion. if (From) { if (FieldDecl *MemberDecl = From->getSourceBitField()) { llvm::APSInt BitWidth; if (FromType->isIntegralType(Context) && MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) { llvm::APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned()); ToSize = Context.getTypeSize(ToType); // Are we promoting to an int from a bitfield that fits in an int? if (BitWidth < ToSize || (FromType->isSignedIntegerType() && BitWidth <= ToSize)) { return To->getKind() == BuiltinType::Int; } // Are we promoting to an unsigned int from an unsigned bitfield // that fits into an unsigned int? if (FromType->isUnsignedIntegerType() && BitWidth <= ToSize) { return To->getKind() == BuiltinType::UInt; } return false; } } } // An rvalue of type bool can be converted to an rvalue of type int, // with false becoming zero and true becoming one (C++ 4.5p4). if (FromType->isBooleanType() && To->getKind() == BuiltinType::Int) { return true; } return false; } /// IsFloatingPointPromotion - Determines whether the conversion from /// FromType to ToType is a floating point promotion (C++ 4.6). If so, /// returns true and sets PromotedType to the promoted type. bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) { if (const BuiltinType *FromBuiltin = FromType->getAs<BuiltinType>()) if (const BuiltinType *ToBuiltin = ToType->getAs<BuiltinType>()) { /// An rvalue of type float can be converted to an rvalue of type /// double. (C++ 4.6p1). if (FromBuiltin->getKind() == BuiltinType::Float && ToBuiltin->getKind() == BuiltinType::Double) return true; // C99 6.3.1.5p1: // When a float is promoted to double or long double, or a // double is promoted to long double [...]. if (!getLangOpts().CPlusPlus && (FromBuiltin->getKind() == BuiltinType::Float || FromBuiltin->getKind() == BuiltinType::Double) && (ToBuiltin->getKind() == BuiltinType::LongDouble)) return true; // Half can be promoted to float. if (!getLangOpts().NativeHalfType && FromBuiltin->getKind() == BuiltinType::Half && ToBuiltin->getKind() == BuiltinType::Float) return true; // HLSL Change Starts // TODO: Update this for new builtin types min16float, min16int, min16uint. if (getLangOpts().HLSL) { if (FromBuiltin->getKind() == BuiltinType::LitFloat && (ToBuiltin->getKind() == BuiltinType::Min10Float || ToBuiltin->getKind() == BuiltinType::HalfFloat || ToBuiltin->getKind() == BuiltinType::Float || ToBuiltin->getKind() == BuiltinType::Half || ToBuiltin->getKind() == BuiltinType::Double)) return true; if (FromBuiltin->getKind() == BuiltinType::Min10Float && (ToBuiltin->getKind() == BuiltinType::Float || ToBuiltin->getKind() == BuiltinType::HalfFloat || ToBuiltin->getKind() == BuiltinType::Half || ToBuiltin->getKind() == BuiltinType::Double)) return true; } // HLSL Change Ends } return false; } /// \brief Determine if a conversion is a complex promotion. /// /// A complex promotion is defined as a complex -> complex conversion /// where the conversion between the underlying real types is a /// floating-point or integral promotion. 
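/// For example (illustrative, not from the original source):
/// _Complex float -> _Complex double is a complex promotion, because the
/// underlying float -> double conversion is a floating-point promotion.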
bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) { const ComplexType *FromComplex = FromType->getAs<ComplexType>(); if (!FromComplex) return false; const ComplexType *ToComplex = ToType->getAs<ComplexType>(); if (!ToComplex) return false; return IsFloatingPointPromotion(FromComplex->getElementType(), ToComplex->getElementType()) || IsIntegralPromotion(nullptr, FromComplex->getElementType(), ToComplex->getElementType()); } /// BuildSimilarlyQualifiedPointerType - In a pointer conversion from /// the pointer type FromPtr to a pointer to type ToPointee, with the /// same type qualifiers as FromPtr has on its pointee type. ToType, /// if non-empty, will be a pointer to ToType that may or may not have /// the right set of qualifiers on its pointee. /// static QualType BuildSimilarlyQualifiedPointerType(const Type *FromPtr, QualType ToPointee, QualType ToType, ASTContext &Context, bool StripObjCLifetime = false) { assert((FromPtr->getTypeClass() == Type::Pointer || FromPtr->getTypeClass() == Type::ObjCObjectPointer) && "Invalid similarly-qualified pointer type"); /// Conversions to 'id' subsume cv-qualifier conversions. if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType()) return ToType.getUnqualifiedType(); QualType CanonFromPointee = Context.getCanonicalType(FromPtr->getPointeeType()); QualType CanonToPointee = Context.getCanonicalType(ToPointee); Qualifiers Quals = CanonFromPointee.getQualifiers(); if (StripObjCLifetime) Quals.removeObjCLifetime(); // Exact qualifier match -> return the pointer type we're converting to. if (CanonToPointee.getLocalQualifiers() == Quals) { // ToType is exactly what we need. Return it. if (!ToType.isNull()) return ToType.getUnqualifiedType(); // Build a pointer to ToPointee. It has the right qualifiers // already. if (isa<ObjCObjectPointerType>(ToType)) return Context.getObjCObjectPointerType(ToPointee); return Context.getPointerType(ToPointee); } // Just build a canonical type that has the right qualifiers. QualType QualifiedCanonToPointee = Context.getQualifiedType(CanonToPointee.getLocalUnqualifiedType(), Quals); if (isa<ObjCObjectPointerType>(ToType)) return Context.getObjCObjectPointerType(QualifiedCanonToPointee); return Context.getPointerType(QualifiedCanonToPointee); } static bool isNullPointerConstantForConversion(Expr *Expr, bool InOverloadResolution, ASTContext &Context) { // Handle value-dependent integral null pointer constants correctly. // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#903 if (Expr->isValueDependent() && !Expr->isTypeDependent() && Expr->getType()->isIntegerType() && !Expr->getType()->isEnumeralType()) return !InOverloadResolution; return Expr->isNullPointerConstant(Context, InOverloadResolution? Expr::NPC_ValueDependentIsNotNull : Expr::NPC_ValueDependentIsNull); } /// IsPointerConversion - Determines whether the conversion of the /// expression From, which has the (possibly adjusted) type FromType, /// can be converted to the type ToType via a pointer conversion (C++ /// 4.10). If so, returns true and places the converted type (that /// might differ from ToType in its cv-qualifiers at some level) into /// ConvertedType. /// /// This routine also supports conversions to and from block pointers /// and conversions with Objective-C's 'id', 'id<protocols...>', and /// pointers to interfaces. 
FIXME: Once we've determined the /// appropriate overloading rules for Objective-C, we may want to /// split the Objective-C checks into a different routine; however, /// GCC seems to consider all of these conversions to be pointer /// conversions, so for now they live here. IncompatibleObjC will be /// set if the conversion is an allowed Objective-C conversion that /// should result in a warning. bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC) { IncompatibleObjC = false; if (isObjCPointerConversion(FromType, ToType, ConvertedType, IncompatibleObjC)) return true; // Conversion from a null pointer constant to any Objective-C pointer type. if (ToType->isObjCObjectPointerType() && isNullPointerConstantForConversion(From, InOverloadResolution, Context)) { ConvertedType = ToType; return true; } // Blocks: Block pointers can be converted to void*. if (FromType->isBlockPointerType() && ToType->isPointerType() && ToType->getAs<PointerType>()->getPointeeType()->isVoidType()) { ConvertedType = ToType; return true; } // Blocks: A null pointer constant can be converted to a block // pointer type. if (ToType->isBlockPointerType() && isNullPointerConstantForConversion(From, InOverloadResolution, Context)) { ConvertedType = ToType; return true; } // If the left-hand-side is nullptr_t, the right side can be a null // pointer constant. if (ToType->isNullPtrType() && isNullPointerConstantForConversion(From, InOverloadResolution, Context)) { ConvertedType = ToType; return true; } const PointerType* ToTypePtr = ToType->getAs<PointerType>(); if (!ToTypePtr) return false; // A null pointer constant can be converted to a pointer type (C++ 4.10p1). if (isNullPointerConstantForConversion(From, InOverloadResolution, Context)) { ConvertedType = ToType; return true; } // Beyond this point, both types need to be pointers // , including objective-c pointers. QualType ToPointeeType = ToTypePtr->getPointeeType(); if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() && !getLangOpts().ObjCAutoRefCount) { ConvertedType = BuildSimilarlyQualifiedPointerType( FromType->getAs<ObjCObjectPointerType>(), ToPointeeType, ToType, Context); return true; } const PointerType *FromTypePtr = FromType->getAs<PointerType>(); if (!FromTypePtr) return false; QualType FromPointeeType = FromTypePtr->getPointeeType(); // If the unqualified pointee types are the same, this can't be a // pointer conversion, so don't do all of the work below. if (Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType)) return false; // An rvalue of type "pointer to cv T," where T is an object type, // can be converted to an rvalue of type "pointer to cv void" (C++ // 4.10p2). if (FromPointeeType->isIncompleteOrObjectType() && ToPointeeType->isVoidType()) { ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr, ToPointeeType, ToType, Context, /*StripObjCLifetime=*/true); return true; } // MSVC allows implicit function to void* type conversion. if (getLangOpts().MicrosoftExt && FromPointeeType->isFunctionType() && ToPointeeType->isVoidType()) { ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr, ToPointeeType, ToType, Context); return true; } // When we're overloading in C, we allow a special kind of pointer // conversion for compatible-but-not-identical pointee types. 
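  // Illustrative example (not from the original source, C only): the
  // pointee types 'int[4]' and 'int[]' are compatible but not identical,
  // so 'int (*)[4]' converts to 'int (*)[]' here.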
if (!getLangOpts().CPlusPlus && Context.typesAreCompatible(FromPointeeType, ToPointeeType)) { ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr, ToPointeeType, ToType, Context); return true; } // C++ [conv.ptr]p3: // // An rvalue of type "pointer to cv D," where D is a class type, // can be converted to an rvalue of type "pointer to cv B," where // B is a base class (clause 10) of D. If B is an inaccessible // (clause 11) or ambiguous (10.2) base class of D, a program that // necessitates this conversion is ill-formed. The result of the // conversion is a pointer to the base class sub-object of the // derived class object. The null pointer value is converted to // the null pointer value of the destination type. // // Note that we do not check for ambiguity or inaccessibility // here. That is handled by CheckPointerConversion. if (getLangOpts().CPlusPlus && FromPointeeType->isRecordType() && ToPointeeType->isRecordType() && !Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) && !RequireCompleteType(From->getLocStart(), FromPointeeType, 0) && IsDerivedFrom(FromPointeeType, ToPointeeType)) { ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr, ToPointeeType, ToType, Context); return true; } if (FromPointeeType->isVectorType() && ToPointeeType->isVectorType() && Context.areCompatibleVectorTypes(FromPointeeType, ToPointeeType)) { ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr, ToPointeeType, ToType, Context); return true; } return false; } /// \brief Adopt the given qualifiers for the given type. static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){ Qualifiers TQs = T.getQualifiers(); // Check whether qualifiers already match. if (TQs == Qs) return T; if (Qs.compatiblyIncludes(TQs)) return Context.getQualifiedType(T, Qs); return Context.getQualifiedType(T.getUnqualifiedType(), Qs); } /// isObjCPointerConversion - Determines whether this is an /// Objective-C pointer conversion. Subroutine of IsPointerConversion, /// with the same arguments and return values. bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC) { if (!getLangOpts().ObjC1) return false; // The set of qualifiers on the type we're converting from. Qualifiers FromQualifiers = FromType.getQualifiers(); // First, we handle all conversions on ObjC object pointer types. const ObjCObjectPointerType* ToObjCPtr = ToType->getAs<ObjCObjectPointerType>(); const ObjCObjectPointerType *FromObjCPtr = FromType->getAs<ObjCObjectPointerType>(); if (ToObjCPtr && FromObjCPtr) { // If the pointee types are the same (ignoring qualifications), // then this is not a pointer conversion. if (Context.hasSameUnqualifiedType(ToObjCPtr->getPointeeType(), FromObjCPtr->getPointeeType())) return false; // Conversion between Objective-C pointers. 
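    // Illustrative example (not from the original source): converting a
    // derived interface pointer such as 'NSMutableString *' to a base
    // interface pointer 'NSString *', subject to the qualifier check below.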
if (Context.canAssignObjCInterfaces(ToObjCPtr, FromObjCPtr)) { const ObjCInterfaceType* LHS = ToObjCPtr->getInterfaceType(); const ObjCInterfaceType* RHS = FromObjCPtr->getInterfaceType(); if (getLangOpts().CPlusPlus && LHS && RHS && !ToObjCPtr->getPointeeType().isAtLeastAsQualifiedAs( FromObjCPtr->getPointeeType())) return false; ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr, ToObjCPtr->getPointeeType(), ToType, Context); ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers); return true; } if (Context.canAssignObjCInterfaces(FromObjCPtr, ToObjCPtr)) { // Okay: this is some kind of implicit downcast of Objective-C // interfaces, which is permitted. However, we're going to // complain about it. IncompatibleObjC = true; ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr, ToObjCPtr->getPointeeType(), ToType, Context); ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers); return true; } } // Beyond this point, both types need to be C pointers or block pointers. QualType ToPointeeType; if (const PointerType *ToCPtr = ToType->getAs<PointerType>()) ToPointeeType = ToCPtr->getPointeeType(); else if (const BlockPointerType *ToBlockPtr = ToType->getAs<BlockPointerType>()) { // Objective C++: We're able to convert from a pointer to any object // to a block pointer type. if (FromObjCPtr && FromObjCPtr->isObjCBuiltinType()) { ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers); return true; } ToPointeeType = ToBlockPtr->getPointeeType(); } else if (FromType->getAs<BlockPointerType>() && ToObjCPtr && ToObjCPtr->isObjCBuiltinType()) { // Objective C++: We're able to convert from a block pointer type to a // pointer to any object. ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers); return true; } else return false; QualType FromPointeeType; if (const PointerType *FromCPtr = FromType->getAs<PointerType>()) FromPointeeType = FromCPtr->getPointeeType(); else if (const BlockPointerType *FromBlockPtr = FromType->getAs<BlockPointerType>()) FromPointeeType = FromBlockPtr->getPointeeType(); else return false; // If we have pointers to pointers, recursively check whether this // is an Objective-C conversion. if (FromPointeeType->isPointerType() && ToPointeeType->isPointerType() && isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType, IncompatibleObjC)) { // We always complain about this conversion. IncompatibleObjC = true; ConvertedType = Context.getPointerType(ConvertedType); ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers); return true; } // Allow conversion of pointee being objective-c pointer to another one; // as in I* to id. if (FromPointeeType->getAs<ObjCObjectPointerType>() && ToPointeeType->getAs<ObjCObjectPointerType>() && isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType, IncompatibleObjC)) { ConvertedType = Context.getPointerType(ConvertedType); ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers); return true; } // If we have pointers to functions or blocks, check whether the only // differences in the argument and result types are in Objective-C // pointer conversions. If so, we permit the conversion (but // complain about it). 
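  // Illustrative example (not from the original source): the block types
  // 'void (^)(NSObject *)' and 'void (^)(id)' differ only in an
  // Objective-C pointer conversion of the parameter type, so the
  // conversion is permitted but reported as incompatible.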
const FunctionProtoType *FromFunctionType = FromPointeeType->getAs<FunctionProtoType>(); const FunctionProtoType *ToFunctionType = ToPointeeType->getAs<FunctionProtoType>(); if (FromFunctionType && ToFunctionType) { // If the function types are exactly the same, this isn't an // Objective-C pointer conversion. if (Context.getCanonicalType(FromPointeeType) == Context.getCanonicalType(ToPointeeType)) return false; // Perform the quick checks that will tell us whether these // function types are obviously different. if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() || FromFunctionType->isVariadic() != ToFunctionType->isVariadic() || FromFunctionType->getTypeQuals() != ToFunctionType->getTypeQuals()) return false; bool HasObjCConversion = false; if (Context.getCanonicalType(FromFunctionType->getReturnType()) == Context.getCanonicalType(ToFunctionType->getReturnType())) { // Okay, the types match exactly. Nothing to do. } else if (isObjCPointerConversion(FromFunctionType->getReturnType(), ToFunctionType->getReturnType(), ConvertedType, IncompatibleObjC)) { // Okay, we have an Objective-C pointer conversion. HasObjCConversion = true; } else { // Function types are too different. Abort. return false; } // Check argument types. for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams(); ArgIdx != NumArgs; ++ArgIdx) { QualType FromArgType = FromFunctionType->getParamType(ArgIdx); QualType ToArgType = ToFunctionType->getParamType(ArgIdx); if (Context.getCanonicalType(FromArgType) == Context.getCanonicalType(ToArgType)) { // Okay, the types match exactly. Nothing to do. } else if (isObjCPointerConversion(FromArgType, ToArgType, ConvertedType, IncompatibleObjC)) { // Okay, we have an Objective-C pointer conversion. HasObjCConversion = true; } else { // Argument types are too different. Abort. return false; } } if (HasObjCConversion) { // We had an Objective-C conversion. Allow this pointer // conversion, but complain about it. ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers); IncompatibleObjC = true; return true; } } return false; } /// \brief Determine whether this is an Objective-C writeback conversion, /// used for parameter passing when performing automatic reference counting. /// /// \param FromType The type we're converting from. /// /// \param ToType The type we're converting to. /// /// \param ConvertedType The type that will be produced after applying /// this conversion. bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType) { if (!getLangOpts().ObjCAutoRefCount || Context.hasSameUnqualifiedType(FromType, ToType)) return false; // Parameter must be a pointer to __autoreleasing (with no other qualifiers). QualType ToPointee; if (const PointerType *ToPointer = ToType->getAs<PointerType>()) ToPointee = ToPointer->getPointeeType(); else return false; Qualifiers ToQuals = ToPointee.getQualifiers(); if (!ToPointee->isObjCLifetimeType() || ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing || !ToQuals.withoutObjCLifetime().empty()) return false; // Argument must be a pointer to __strong or __weak.
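  // Illustrative example (not from the original source): passing
  // '&strongLocal', of type 'NSString *__strong *', to a parameter of
  // type 'NSString *__autoreleasing *' matches this writeback pattern.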
QualType FromPointee; if (const PointerType *FromPointer = FromType->getAs<PointerType>()) FromPointee = FromPointer->getPointeeType(); else return false; Qualifiers FromQuals = FromPointee.getQualifiers(); if (!FromPointee->isObjCLifetimeType() || (FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong && FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak)) return false; // Make sure that we have compatible qualifiers. FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing); if (!ToQuals.compatiblyIncludes(FromQuals)) return false; // Remove qualifiers from the pointee type we're converting from; they // aren't used in the compatibility check below, and we'll be adding back // qualifiers (with __autoreleasing) if the compatibility check succeeds. FromPointee = FromPointee.getUnqualifiedType(); // The unqualified form of the pointee types must be compatible. ToPointee = ToPointee.getUnqualifiedType(); bool IncompatibleObjC; if (Context.typesAreCompatible(FromPointee, ToPointee)) FromPointee = ToPointee; else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee, IncompatibleObjC)) return false; /// \brief Construct the type we're converting to, which is a pointer to /// __autoreleasing pointee. FromPointee = Context.getQualifiedType(FromPointee, FromQuals); ConvertedType = Context.getPointerType(FromPointee); return true; } bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType) { QualType ToPointeeType; if (const BlockPointerType *ToBlockPtr = ToType->getAs<BlockPointerType>()) ToPointeeType = ToBlockPtr->getPointeeType(); else return false; QualType FromPointeeType; if (const BlockPointerType *FromBlockPtr = FromType->getAs<BlockPointerType>()) FromPointeeType = FromBlockPtr->getPointeeType(); else return false; // We have pointers to blocks; check whether the only // differences in the argument and result types are in Objective-C // pointer conversions. If so, we permit the conversion. const FunctionProtoType *FromFunctionType = FromPointeeType->getAs<FunctionProtoType>(); const FunctionProtoType *ToFunctionType = ToPointeeType->getAs<FunctionProtoType>(); if (!FromFunctionType || !ToFunctionType) return false; if (Context.hasSameType(FromPointeeType, ToPointeeType)) return true; // Perform the quick checks that will tell us whether these // function types are obviously different. if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() || FromFunctionType->isVariadic() != ToFunctionType->isVariadic()) return false; FunctionType::ExtInfo FromEInfo = FromFunctionType->getExtInfo(); FunctionType::ExtInfo ToEInfo = ToFunctionType->getExtInfo(); if (FromEInfo != ToEInfo) return false; bool IncompatibleObjC = false; if (Context.hasSameType(FromFunctionType->getReturnType(), ToFunctionType->getReturnType())) { // Okay, the types match exactly. Nothing to do. } else { QualType RHS = FromFunctionType->getReturnType(); QualType LHS = ToFunctionType->getReturnType(); if ((!getLangOpts().CPlusPlus || !RHS->isRecordType()) && !RHS.hasQualifiers() && LHS.hasQualifiers()) LHS = LHS.getUnqualifiedType(); if (Context.hasSameType(RHS,LHS)) { // OK exact match. } else if (isObjCPointerConversion(RHS, LHS, ConvertedType, IncompatibleObjC)) { if (IncompatibleObjC) return false; // Okay, we have an Objective-C pointer conversion. } else return false; } // Check argument types.
for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams(); ArgIdx != NumArgs; ++ArgIdx) { IncompatibleObjC = false; QualType FromArgType = FromFunctionType->getParamType(ArgIdx); QualType ToArgType = ToFunctionType->getParamType(ArgIdx); if (Context.hasSameType(FromArgType, ToArgType)) { // Okay, the types match exactly. Nothing to do. } else if (isObjCPointerConversion(ToArgType, FromArgType, ConvertedType, IncompatibleObjC)) { if (IncompatibleObjC) return false; // Okay, we have an Objective-C pointer conversion. } else // Argument types are too different. Abort. return false; } if (LangOpts.ObjCAutoRefCount && !Context.FunctionTypesMatchOnNSConsumedAttrs(FromFunctionType, ToFunctionType)) return false; ConvertedType = ToType; return true; } enum { ft_default, ft_different_class, ft_parameter_arity, ft_parameter_mismatch, ft_return_type, ft_qualifer_mismatch }; /// HandleFunctionTypeMismatch - Gives diagnostic information for differing /// function types. Catches different numbers of parameters, mismatches in /// parameter types, and different return types. void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType) { // If either type is not valid, include no extra info. if (FromType.isNull() || ToType.isNull()) { PDiag << ft_default; return; } // Get the function type from the pointers. if (FromType->isMemberPointerType() && ToType->isMemberPointerType()) { const MemberPointerType *FromMember = FromType->getAs<MemberPointerType>(), *ToMember = ToType->getAs<MemberPointerType>(); if (!Context.hasSameType(FromMember->getClass(), ToMember->getClass())) { PDiag << ft_different_class << QualType(ToMember->getClass(), 0) << QualType(FromMember->getClass(), 0); return; } FromType = FromMember->getPointeeType(); ToType = ToMember->getPointeeType(); } if (FromType->isPointerType()) FromType = FromType->getPointeeType(); if (ToType->isPointerType()) ToType = ToType->getPointeeType(); // Remove references. FromType = FromType.getNonReferenceType(); ToType = ToType.getNonReferenceType(); // Don't print extra info for non-specialized template functions. if (FromType->isInstantiationDependentType() && !FromType->getAs<TemplateSpecializationType>()) { PDiag << ft_default; return; } // No extra info for same types. if (Context.hasSameType(FromType, ToType)) { PDiag << ft_default; return; } const FunctionProtoType *FromFunction = FromType->getAs<FunctionProtoType>(), *ToFunction = ToType->getAs<FunctionProtoType>(); // Both types need to be function types. if (!FromFunction || !ToFunction) { PDiag << ft_default; return; } if (FromFunction->getNumParams() != ToFunction->getNumParams()) { PDiag << ft_parameter_arity << ToFunction->getNumParams() << FromFunction->getNumParams(); return; } // Handle different parameter types. unsigned ArgPos; if (!FunctionParamTypesAreEqual(FromFunction, ToFunction, &ArgPos)) { PDiag << ft_parameter_mismatch << ArgPos + 1 << ToFunction->getParamType(ArgPos) << FromFunction->getParamType(ArgPos); return; } // Handle different return type. if (!Context.hasSameType(FromFunction->getReturnType(), ToFunction->getReturnType())) { PDiag << ft_return_type << ToFunction->getReturnType() << FromFunction->getReturnType(); return; } unsigned FromQuals = FromFunction->getTypeQuals(), ToQuals = ToFunction->getTypeQuals(); if (FromQuals != ToQuals) { PDiag << ft_qualifer_mismatch << ToQuals << FromQuals; return; } // Unable to find a difference, so add no extra info.
PDiag << ft_default; } /// FunctionParamTypesAreEqual - This routine checks two function proto types /// for equality of their argument types. Caller has already checked that /// they have same number of arguments. If the parameters are different, /// ArgPos will have the parameter index of the first different parameter. bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos) { int index = 0; for (FunctionProtoType::param_type_iterator O = OldType->param_type_begin(), N = NewType->param_type_begin(), E = OldType->param_type_end(); O && (O != E); ++O, ++N) { if (!Context.hasSameType(O->getUnqualifiedType(), N->getUnqualifiedType()) || OldType->getParamMods()[index] != NewType->getParamMods()[index]) { // HLSL Change - check param mods index++; if (ArgPos) *ArgPos = O - OldType->param_type_begin(); return false; } } return true; } /// CheckPointerConversion - Check the pointer conversion from the /// expression From to the type ToType. This routine checks for /// ambiguous or inaccessible derived-to-base pointer /// conversions for which IsPointerConversion has already returned /// true. It returns true and produces a diagnostic if there was an /// error, or returns false otherwise. bool Sema::CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess) { QualType FromType = From->getType(); bool IsCStyleOrFunctionalCast = IgnoreBaseAccess; Kind = CK_BitCast; if (!IsCStyleOrFunctionalCast && !FromType->isAnyPointerType() && From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) == Expr::NPCK_ZeroExpression) { if (Context.hasSameUnqualifiedType(From->getType(), Context.BoolTy)) DiagRuntimeBehavior(From->getExprLoc(), From, PDiag(diag::warn_impcast_bool_to_null_pointer) << ToType << From->getSourceRange()); else if (!isUnevaluatedContext()) Diag(From->getExprLoc(), diag::warn_non_literal_null_pointer) << ToType << From->getSourceRange(); } if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) { if (const PointerType *FromPtrType = FromType->getAs<PointerType>()) { QualType FromPointeeType = FromPtrType->getPointeeType(), ToPointeeType = ToPtrType->getPointeeType(); if (FromPointeeType->isRecordType() && ToPointeeType->isRecordType() && !Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType)) { // We must have a derived-to-base conversion. Check an // ambiguous or inaccessible conversion. if (CheckDerivedToBaseConversion(FromPointeeType, ToPointeeType, From->getExprLoc(), From->getSourceRange(), &BasePath, IgnoreBaseAccess)) return true; // The conversion was successful. Kind = CK_DerivedToBase; } } } else if (const ObjCObjectPointerType *ToPtrType = ToType->getAs<ObjCObjectPointerType>()) { if (const ObjCObjectPointerType *FromPtrType = FromType->getAs<ObjCObjectPointerType>()) { // Objective-C++ conversions are always okay. // FIXME: We should have a different class of conversions for the // Objective-C++ implicit conversions. if (FromPtrType->isObjCBuiltinType() || ToPtrType->isObjCBuiltinType()) return false; } else if (FromType->isBlockPointerType()) { Kind = CK_BlockPointerToObjCPointerCast; } else { Kind = CK_CPointerToObjCPointerCast; } } else if (ToType->isBlockPointerType()) { if (!FromType->isBlockPointerType()) Kind = CK_AnyPointerToBlockPointerCast; } // We shouldn't fall into this case unless it's valid for other // reasons. 
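  // Illustrative example (not from the original source): 'char *p = 0;'
  // treats the zero literal as a null pointer constant and lowers it with
  // a CK_NullToPointer cast.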
if (From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) Kind = CK_NullToPointer; return false; } /// IsMemberPointerConversion - Determines whether the conversion of the /// expression From, which has the (possibly adjusted) type FromType, can be /// converted to the type ToType via a member pointer conversion (C++ 4.11). /// If so, returns true and places the converted type (that might differ from /// ToType in its cv-qualifiers at some level) into ConvertedType. bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType) { const MemberPointerType *ToTypePtr = ToType->getAs<MemberPointerType>(); if (!ToTypePtr) return false; // A null pointer constant can be converted to a member pointer (C++ 4.11p1) if (From->isNullPointerConstant(Context, InOverloadResolution? Expr::NPC_ValueDependentIsNotNull : Expr::NPC_ValueDependentIsNull)) { ConvertedType = ToType; return true; } // Otherwise, both types have to be member pointers. const MemberPointerType *FromTypePtr = FromType->getAs<MemberPointerType>(); if (!FromTypePtr) return false; // A pointer to member of B can be converted to a pointer to member of D, // where D is derived from B (C++ 4.11p2). QualType FromClass(FromTypePtr->getClass(), 0); QualType ToClass(ToTypePtr->getClass(), 0); if (!Context.hasSameUnqualifiedType(FromClass, ToClass) && !RequireCompleteType(From->getLocStart(), ToClass, 0) && IsDerivedFrom(ToClass, FromClass)) { ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(), ToClass.getTypePtr()); return true; } return false; } /// CheckMemberPointerConversion - Check the member pointer conversion from the /// expression From to the type ToType. This routine checks for ambiguous or /// virtual or inaccessible base-to-derived member pointer conversions /// for which IsMemberPointerConversion has already returned true. It returns /// true and produces a diagnostic if there was an error, or returns false /// otherwise. bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess) { QualType FromType = From->getType(); const MemberPointerType *FromPtrType = FromType->getAs<MemberPointerType>(); if (!FromPtrType) { // This must be a null pointer to member pointer conversion assert(From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull) && "Expr must be null pointer constant!"); Kind = CK_NullToMemberPointer; return false; } const MemberPointerType *ToPtrType = ToType->getAs<MemberPointerType>(); assert(ToPtrType && "No member pointer cast has a target type " "that is not a member pointer."); QualType FromClass = QualType(FromPtrType->getClass(), 0); QualType ToClass = QualType(ToPtrType->getClass(), 0); // FIXME: What about dependent types? assert(FromClass->isRecordType() && "Pointer into non-class."); assert(ToClass->isRecordType() && "Pointer into non-class."); CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true, /*DetectVirtual=*/true); bool DerivationOkay = IsDerivedFrom(ToClass, FromClass, Paths); assert(DerivationOkay && "Should not have been called if derivation isn't OK."); (void)DerivationOkay; if (Paths.isAmbiguous(Context.getCanonicalType(FromClass). 
getUnqualifiedType())) { std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths); Diag(From->getExprLoc(), diag::err_ambiguous_memptr_conv) << 0 << FromClass << ToClass << PathDisplayStr << From->getSourceRange(); return true; } if (const RecordType *VBase = Paths.getDetectedVirtual()) { Diag(From->getExprLoc(), diag::err_memptr_conv_via_virtual) << FromClass << ToClass << QualType(VBase, 0) << From->getSourceRange(); return true; } if (!IgnoreBaseAccess) CheckBaseClassAccess(From->getExprLoc(), FromClass, ToClass, Paths.front(), diag::err_downcast_from_inaccessible_base); // Must be a base to derived member conversion. BuildBasePathArray(Paths, BasePath); Kind = CK_BaseToDerivedMemberPointer; return false; } /// Determine whether the lifetime conversion between the two given /// qualifiers sets is nontrivial. static bool isNonTrivialObjCLifetimeConversion(Qualifiers FromQuals, Qualifiers ToQuals) { // Converting anything to const __unsafe_unretained is trivial. if (ToQuals.hasConst() && ToQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone) return false; return true; } /// IsQualificationConversion - Determines whether the conversion from /// an rvalue of type FromType to ToType is a qualification conversion /// (C++ 4.4). /// /// \param ObjCLifetimeConversion Output parameter that will be set to indicate /// when the qualification conversion involves a change in the Objective-C /// object lifetime. bool Sema::IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion) { FromType = Context.getCanonicalType(FromType); ToType = Context.getCanonicalType(ToType); ObjCLifetimeConversion = false; // If FromType and ToType are the same type, this is not a // qualification conversion. if (FromType.getUnqualifiedType() == ToType.getUnqualifiedType()) return false; // (C++ 4.4p4): // A conversion can add cv-qualifiers at levels other than the first // in multi-level pointers, subject to the following rules: [...] bool PreviousToQualsIncludeConst = true; bool UnwrappedAnyPointer = false; while (Context.UnwrapSimilarPointerTypes(FromType, ToType)) { // Within each iteration of the loop, we check the qualifiers to // determine if this still looks like a qualification // conversion. Then, if all is well, we unwrap one more level of // pointers or pointers-to-members and do it all again // until there are no more pointers or pointers-to-members left to // unwrap. UnwrappedAnyPointer = true; Qualifiers FromQuals = FromType.getQualifiers(); Qualifiers ToQuals = ToType.getQualifiers(); // Objective-C ARC: // Check Objective-C lifetime conversions. if (FromQuals.getObjCLifetime() != ToQuals.getObjCLifetime() && UnwrappedAnyPointer) { if (ToQuals.compatiblyIncludesObjCLifetime(FromQuals)) { if (isNonTrivialObjCLifetimeConversion(FromQuals, ToQuals)) ObjCLifetimeConversion = true; FromQuals.removeObjCLifetime(); ToQuals.removeObjCLifetime(); } else { // Qualification conversions cannot cast between different // Objective-C lifetime qualifiers. return false; } } // Allow addition/removal of GC attributes but not changing GC attributes. if (FromQuals.getObjCGCAttr() != ToQuals.getObjCGCAttr() && (!FromQuals.hasObjCGCAttr() || !ToQuals.hasObjCGCAttr())) { FromQuals.removeObjCGCAttr(); ToQuals.removeObjCGCAttr(); } // -- for every j > 0, if const is in cv 1,j then const is in cv // 2,j, and similarly for volatile. 
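    // Illustrative example (not from the original source):
    //   'char **' -> 'const char *const *' is a valid qualification
    //   conversion, but 'char **' -> 'const char **' is not, because
    //   'const' would have to appear at every level above the one where
    //   it is added.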
if (!CStyle && !ToQuals.compatiblyIncludes(FromQuals)) return false; // -- if the cv 1,j and cv 2,j are different, then const is in // every cv for 0 < k < j. if (!CStyle && FromQuals.getCVRQualifiers() != ToQuals.getCVRQualifiers() && !PreviousToQualsIncludeConst) return false; // Keep track of whether all prior cv-qualifiers in the "to" type // include const. PreviousToQualsIncludeConst = PreviousToQualsIncludeConst && ToQuals.hasConst(); } // We are left with FromType and ToType being the pointee types // after unwrapping the original FromType and ToType the same number // of types. If we unwrapped any pointers, and if FromType and // ToType have the same unqualified type (since we checked // qualifiers above), then this is a qualification conversion. return UnwrappedAnyPointer && Context.hasSameUnqualifiedType(FromType,ToType); } /// \brief - Determine whether this is a conversion from a scalar type to an /// atomic type. /// /// If successful, updates \c SCS's second and third steps in the conversion /// sequence to finish the conversion. static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType, bool InOverloadResolution, StandardConversionSequence &SCS, bool CStyle) { const AtomicType *ToAtomic = ToType->getAs<AtomicType>(); if (!ToAtomic) return false; StandardConversionSequence InnerSCS; if (!IsStandardConversion(S, From, ToAtomic->getValueType(), InOverloadResolution, InnerSCS, CStyle, /*AllowObjCWritebackConversion=*/false)) return false; SCS.Second = InnerSCS.Second; SCS.setToType(1, InnerSCS.getToType(1)); SCS.Third = InnerSCS.Third; SCS.QualificationIncludesObjCLifetime = InnerSCS.QualificationIncludesObjCLifetime; SCS.setToType(2, InnerSCS.getToType(2)); return true; } static bool isFirstArgumentCompatibleWithType(ASTContext &Context, CXXConstructorDecl *Constructor, QualType Type) { const FunctionProtoType *CtorType = Constructor->getType()->getAs<FunctionProtoType>(); if (CtorType->getNumParams() > 0) { QualType FirstArg = CtorType->getParamType(0); if (Context.hasSameUnqualifiedType(Type, FirstArg.getNonReferenceType())) return true; } return false; } static OverloadingResult IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType, CXXRecordDecl *To, UserDefinedConversionSequence &User, OverloadCandidateSet &CandidateSet, bool AllowExplicit) { DeclContext::lookup_result R = S.LookupConstructors(To); for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end(); Con != ConEnd; ++Con) { NamedDecl *D = *Con; DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess()); // Find the constructor (which may be a template). CXXConstructorDecl *Constructor = nullptr; FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D); if (ConstructorTmpl) Constructor = cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl()); else Constructor = cast<CXXConstructorDecl>(D); bool Usable = !Constructor->isInvalidDecl() && S.isInitListConstructor(Constructor) && (AllowExplicit || !Constructor->isExplicit()); if (Usable) { // If the first argument is (a reference to) the target type, // suppress conversions. 
bool SuppressUserConversions = isFirstArgumentCompatibleWithType(S.Context, Constructor, ToType); if (ConstructorTmpl) S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, /*ExplicitArgs*/ nullptr, From, CandidateSet, SuppressUserConversions); else S.AddOverloadCandidate(Constructor, FoundDecl, From, CandidateSet, SuppressUserConversions); } } bool HadMultipleCandidates = (CandidateSet.size() > 1); OverloadCandidateSet::iterator Best; switch (auto Result = CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) { case OR_Deleted: case OR_Success: { // Record the standard conversion we used and the conversion function. CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function); QualType ThisType = Constructor->getThisType(S.Context); // Initializer lists don't have conversions as such. User.Before.setAsIdentityConversion(); User.HadMultipleCandidates = HadMultipleCandidates; User.ConversionFunction = Constructor; User.FoundConversionFunction = Best->FoundDecl; User.After.setAsIdentityConversion(); User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType()); User.After.setAllToTypes(ToType); return Result; } case OR_No_Viable_Function: return OR_No_Viable_Function; case OR_Ambiguous: return OR_Ambiguous; } llvm_unreachable("Invalid OverloadResult!"); } /// Determines whether there is a user-defined conversion sequence /// (C++ [over.ics.user]) that converts expression From to the type /// ToType. If such a conversion exists, User will contain the /// user-defined conversion sequence that performs such a conversion /// and this routine will return true. Otherwise, this routine returns /// false and User is unspecified. /// /// \param AllowExplicit true if the conversion should consider C++0x /// "explicit" conversion functions as well as non-explicit conversion /// functions (C++0x [class.conv.fct]p2). /// /// \param AllowObjCConversionOnExplicit true if the conversion should /// allow an extra Objective-C pointer conversion on uses of explicit /// constructors. Requires \c AllowExplicit to also be set. static OverloadingResult IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType, UserDefinedConversionSequence &User, OverloadCandidateSet &CandidateSet, bool AllowExplicit, bool AllowObjCConversionOnExplicit) { assert(AllowExplicit || !AllowObjCConversionOnExplicit); // Whether we will only visit constructors. bool ConstructorsOnly = false; // If the type we are conversion to is a class type, enumerate its // constructors. if (const RecordType *ToRecordType = ToType->getAs<RecordType>()) { // C++ [over.match.ctor]p1: // When objects of class type are direct-initialized (8.5), or // copy-initialized from an expression of the same or a // derived class type (8.5), overload resolution selects the // constructor. [...] For copy-initialization, the candidate // functions are all the converting constructors (12.3.1) of // that class. The argument list is the expression-list within // the parentheses of the initializer. if (S.Context.hasSameUnqualifiedType(ToType, From->getType()) || (From->getType()->getAs<RecordType>() && S.IsDerivedFrom(From->getType(), ToType))) ConstructorsOnly = true; S.RequireCompleteType(From->getExprLoc(), ToType, 0); // RequireCompleteType may have returned true due to some invalid decl // during template instantiation, but ToType may be complete enough now // to try to recover. if (ToType->isIncompleteType()) { // We're not going to find any constructors. 
    } else if (CXXRecordDecl *ToRecordDecl
                 = dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {

      Expr **Args = &From;
      unsigned NumArgs = 1;
      bool ListInitializing = false;
      if (InitListExpr *InitList = dyn_cast<InitListExpr>(From)) {
        // But first, see if there is an init-list-constructor that will work.
        OverloadingResult Result = IsInitializerListConstructorConversion(
            S, From, ToType, ToRecordDecl, User, CandidateSet, AllowExplicit);
        if (Result != OR_No_Viable_Function)
          return Result;
        // Never mind.
        CandidateSet.clear();

        // If we're list-initializing, we pass the individual elements as
        // arguments, not the entire list.
        Args = InitList->getInits();
        NumArgs = InitList->getNumInits();
        ListInitializing = true;
      }

      DeclContext::lookup_result R = S.LookupConstructors(ToRecordDecl);
      for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end();
           Con != ConEnd; ++Con) {
        NamedDecl *D = *Con;
        DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());

        // Find the constructor (which may be a template).
        CXXConstructorDecl *Constructor = nullptr;
        FunctionTemplateDecl *ConstructorTmpl
          = dyn_cast<FunctionTemplateDecl>(D);
        if (ConstructorTmpl)
          Constructor
            = cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl());
        else
          Constructor = cast<CXXConstructorDecl>(D);

        bool Usable = !Constructor->isInvalidDecl();
        if (ListInitializing)
          Usable = Usable && (AllowExplicit || !Constructor->isExplicit());
        else
          Usable = Usable &&
                   Constructor->isConvertingConstructor(AllowExplicit);
        if (Usable) {
          bool SuppressUserConversions = !ConstructorsOnly;
          if (SuppressUserConversions && ListInitializing) {
            SuppressUserConversions = false;
            if (NumArgs == 1) {
              // If the first argument is (a reference to) the target type,
              // suppress conversions.
              SuppressUserConversions = isFirstArgumentCompatibleWithType(
                  S.Context, Constructor, ToType);
            }
          }
          if (ConstructorTmpl)
            S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
                                           /*ExplicitArgs*/ nullptr,
                                           llvm::makeArrayRef(Args, NumArgs),
                                           CandidateSet,
                                           SuppressUserConversions);
          else
            // Allow one user-defined conversion when user specifies a
            // From->ToType conversion via a static cast (c-style, etc).
            S.AddOverloadCandidate(Constructor, FoundDecl,
                                   llvm::makeArrayRef(Args, NumArgs),
                                   CandidateSet, SuppressUserConversions);
        }
      }
    }
  }

  // Enumerate conversion functions, if we're allowed to.
  if (ConstructorsOnly || isa<InitListExpr>(From)) {
  } else if (S.RequireCompleteType(From->getLocStart(), From->getType(), 0)) {
    // No conversion functions from incomplete types.
  } else if (const RecordType *FromRecordType
                                   = From->getType()->getAs<RecordType>()) {
    if (CXXRecordDecl *FromRecordDecl
         = dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
      // Add all of the conversion functions as candidates.
const auto &Conversions = FromRecordDecl->getVisibleConversionFunctions(); for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { DeclAccessPair FoundDecl = I.getPair(); NamedDecl *D = FoundDecl.getDecl(); CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); CXXConversionDecl *Conv; FunctionTemplateDecl *ConvTemplate; if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D))) Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl()); else Conv = cast<CXXConversionDecl>(D); if (AllowExplicit || !Conv->isExplicit()) { if (ConvTemplate) S.AddTemplateConversionCandidate(ConvTemplate, FoundDecl, ActingContext, From, ToType, CandidateSet, AllowObjCConversionOnExplicit); else S.AddConversionCandidate(Conv, FoundDecl, ActingContext, From, ToType, CandidateSet, AllowObjCConversionOnExplicit); } } } } bool HadMultipleCandidates = (CandidateSet.size() > 1); OverloadCandidateSet::iterator Best; switch (auto Result = CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) { case OR_Success: case OR_Deleted: // Record the standard conversion we used and the conversion function. if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Best->Function)) { // C++ [over.ics.user]p1: // If the user-defined conversion is specified by a // constructor (12.3.1), the initial standard conversion // sequence converts the source type to the type required by // the argument of the constructor. // QualType ThisType = Constructor->getThisType(S.Context); if (isa<InitListExpr>(From)) { // Initializer lists don't have conversions as such. User.Before.setAsIdentityConversion(); } else { if (Best->Conversions[0].isEllipsis()) User.EllipsisConversion = true; else { User.Before = Best->Conversions[0].Standard; User.EllipsisConversion = false; } } User.HadMultipleCandidates = HadMultipleCandidates; User.ConversionFunction = Constructor; User.FoundConversionFunction = Best->FoundDecl; User.After.setAsIdentityConversion(); User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType()); User.After.setAllToTypes(ToType); return Result; } if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(Best->Function)) { // C++ [over.ics.user]p1: // // [...] If the user-defined conversion is specified by a // conversion function (12.3.2), the initial standard // conversion sequence converts the source type to the // implicit object parameter of the conversion function. User.Before = Best->Conversions[0].Standard; User.HadMultipleCandidates = HadMultipleCandidates; User.ConversionFunction = Conversion; User.FoundConversionFunction = Best->FoundDecl; User.EllipsisConversion = false; // C++ [over.ics.user]p2: // The second standard conversion sequence converts the // result of the user-defined conversion to the target type // for the sequence. Since an implicit conversion sequence // is an initialization, the special rules for // initialization by user-defined conversion apply when // selecting the best user-defined conversion for a // user-defined conversion sequence (see 13.3.3 and // 13.3.3.1). 
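      // For example:
      //   struct S { operator int(); };
      //   void f(long);
      //   f(S()); // Before: identity; ConversionFunction: S::operator int;
      //           // After: int -> long (integral conversion).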
User.After = Best->FinalConversion; return Result; } llvm_unreachable("Not a constructor or conversion function?"); case OR_No_Viable_Function: return OR_No_Viable_Function; case OR_Ambiguous: return OR_Ambiguous; } llvm_unreachable("Invalid OverloadResult!"); } bool Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) { ImplicitConversionSequence ICS; OverloadCandidateSet CandidateSet(From->getExprLoc(), OverloadCandidateSet::CSK_Normal); OverloadingResult OvResult = IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined, CandidateSet, false, false); if (OvResult == OR_Ambiguous) Diag(From->getLocStart(), diag::err_typecheck_ambiguous_condition) << From->getType() << ToType << From->getSourceRange(); else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty()) { if (!RequireCompleteType(From->getLocStart(), ToType, diag::err_typecheck_nonviable_condition_incomplete, From->getType(), From->getSourceRange())) Diag(From->getLocStart(), diag::err_typecheck_nonviable_condition) << From->getType() << From->getSourceRange() << ToType; } else return false; CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From); return true; } /// \brief Compare the user-defined conversion functions or constructors /// of two user-defined conversion sequences to determine whether any ordering /// is possible. static ImplicitConversionSequence::CompareKind compareConversionFunctions(Sema &S, FunctionDecl *Function1, FunctionDecl *Function2) { if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus11) return ImplicitConversionSequence::Indistinguishable; // Objective-C++: // If both conversion functions are implicitly-declared conversions from // a lambda closure type to a function pointer and a block pointer, // respectively, always prefer the conversion to a function pointer, // because the function pointer is more lightweight and is more likely // to keep code working. CXXConversionDecl *Conv1 = dyn_cast_or_null<CXXConversionDecl>(Function1); if (!Conv1) return ImplicitConversionSequence::Indistinguishable; CXXConversionDecl *Conv2 = dyn_cast<CXXConversionDecl>(Function2); if (!Conv2) return ImplicitConversionSequence::Indistinguishable; if (Conv1->getParent()->isLambda() && Conv2->getParent()->isLambda()) { bool Block1 = Conv1->getConversionType()->isBlockPointerType(); bool Block2 = Conv2->getConversionType()->isBlockPointerType(); if (Block1 != Block2) return Block1 ? ImplicitConversionSequence::Worse : ImplicitConversionSequence::Better; } return ImplicitConversionSequence::Indistinguishable; } static bool hasDeprecatedStringLiteralToCharPtrConversion( const ImplicitConversionSequence &ICS) { return (ICS.isStandard() && ICS.Standard.DeprecatedStringLiteralToCharPtr) || (ICS.isUserDefined() && ICS.UserDefined.Before.DeprecatedStringLiteralToCharPtr); } /// CompareImplicitConversionSequences - Compare two implicit /// conversion sequences to determine whether one is better than the /// other or if they are indistinguishable (C++ 13.3.3.2). 
static ImplicitConversionSequence::CompareKind CompareImplicitConversionSequences(Sema &S, const ImplicitConversionSequence& ICS1, const ImplicitConversionSequence& ICS2) { // (C++ 13.3.3.2p2): When comparing the basic forms of implicit // conversion sequences (as defined in 13.3.3.1) // -- a standard conversion sequence (13.3.3.1.1) is a better // conversion sequence than a user-defined conversion sequence or // an ellipsis conversion sequence, and // -- a user-defined conversion sequence (13.3.3.1.2) is a better // conversion sequence than an ellipsis conversion sequence // (13.3.3.1.3). // // C++0x [over.best.ics]p10: // For the purpose of ranking implicit conversion sequences as // described in 13.3.3.2, the ambiguous conversion sequence is // treated as a user-defined sequence that is indistinguishable // from any other user-defined conversion sequence. // String literal to 'char *' conversion has been deprecated in C++03. It has // been removed from C++11. We still accept this conversion, if it happens at // the best viable function. Otherwise, this conversion is considered worse // than ellipsis conversion. Consider this as an extension; this is not in the // standard. For example: // // int &f(...); // #1 // void f(char*); // #2 // void g() { int &r = f("foo"); } // // In C++03, we pick #2 as the best viable function. // In C++11, we pick #1 as the best viable function, because ellipsis // conversion is better than string-literal to char* conversion (since there // is no such conversion in C++11). If there was no #1 at all or #1 couldn't // convert arguments, #2 would be the best viable function in C++11. // If the best viable function has this conversion, a warning will be issued // in C++03, or an ExtWarn (+SFINAE failure) will be issued in C++11. if (S.getLangOpts().CPlusPlus11 && !S.getLangOpts().WritableStrings && hasDeprecatedStringLiteralToCharPtrConversion(ICS1) != hasDeprecatedStringLiteralToCharPtrConversion(ICS2)) return hasDeprecatedStringLiteralToCharPtrConversion(ICS1) ? ImplicitConversionSequence::Worse : ImplicitConversionSequence::Better; // HLSL Change Starts // This ranking happens in HLSL as part of diagnostics; otherwise the specific ranking is used. if (S.getLangOpts().HLSL) { if (!ICS1.isInitialized() || !ICS2.isInitialized()) return ImplicitConversionSequence::Indistinguishable; } // HLSL Change Ends if (ICS1.getKindRank() < ICS2.getKindRank()) return ImplicitConversionSequence::Better; if (ICS2.getKindRank() < ICS1.getKindRank()) return ImplicitConversionSequence::Worse; // The following checks require both conversion sequences to be of // the same kind. if (ICS1.getKind() != ICS2.getKind()) return ImplicitConversionSequence::Indistinguishable; ImplicitConversionSequence::CompareKind Result = ImplicitConversionSequence::Indistinguishable; // Two implicit conversion sequences of the same form are // indistinguishable conversion sequences unless one of the // following rules apply: (C++ 13.3.3.2p3): // List-initialization sequence L1 is a better conversion sequence than // list-initialization sequence L2 if: // - L1 converts to std::initializer_list<X> for some X and L2 does not, or, // if not that, // - L1 converts to type "array of N1 T", L2 converts to type "array of N2 T", // and N1 is smaller than N2., // even if one of the other rules in this paragraph would otherwise apply. 
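  // For example:
  //   void f(std::initializer_list<long>); // #1
  //   void f(long);                        // #2
  //   f({42}); // #1: its sequence converts to std::initializer_list<long>,
  //            // which is preferred over #2's element conversion.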
if (!ICS1.isBad()) { if (ICS1.isStdInitializerListElement() && !ICS2.isStdInitializerListElement()) return ImplicitConversionSequence::Better; if (!ICS1.isStdInitializerListElement() && ICS2.isStdInitializerListElement()) return ImplicitConversionSequence::Worse; } if (ICS1.isStandard()) // Standard conversion sequence S1 is a better conversion sequence than // standard conversion sequence S2 if [...] Result = CompareStandardConversionSequences(S, ICS1.Standard, ICS2.Standard); else if (ICS1.isUserDefined()) { // User-defined conversion sequence U1 is a better conversion // sequence than another user-defined conversion sequence U2 if // they contain the same user-defined conversion function or // constructor and if the second standard conversion sequence of // U1 is better than the second standard conversion sequence of // U2 (C++ 13.3.3.2p3). if (ICS1.UserDefined.ConversionFunction == ICS2.UserDefined.ConversionFunction) Result = CompareStandardConversionSequences(S, ICS1.UserDefined.After, ICS2.UserDefined.After); else Result = compareConversionFunctions(S, ICS1.UserDefined.ConversionFunction, ICS2.UserDefined.ConversionFunction); } return Result; } static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) { while (Context.UnwrapSimilarPointerTypes(T1, T2)) { Qualifiers Quals; T1 = Context.getUnqualifiedArrayType(T1, Quals); T2 = Context.getUnqualifiedArrayType(T2, Quals); } return Context.hasSameUnqualifiedType(T1, T2); } // Per 13.3.3.2p3, compare the given standard conversion sequences to // determine if one is a proper subset of the other. static ImplicitConversionSequence::CompareKind compareStandardConversionSubsets(ASTContext &Context, const StandardConversionSequence& SCS1, const StandardConversionSequence& SCS2) { ImplicitConversionSequence::CompareKind Result = ImplicitConversionSequence::Indistinguishable; // the identity conversion sequence is considered to be a subsequence of // any non-identity conversion sequence if (SCS1.isIdentityConversion() && !SCS2.isIdentityConversion()) return ImplicitConversionSequence::Better; else if (!SCS1.isIdentityConversion() && SCS2.isIdentityConversion()) return ImplicitConversionSequence::Worse; if (SCS1.Second != SCS2.Second) { if (SCS1.Second == ICK_Identity) Result = ImplicitConversionSequence::Better; else if (SCS2.Second == ICK_Identity) Result = ImplicitConversionSequence::Worse; else return ImplicitConversionSequence::Indistinguishable; } else if (!hasSimilarType(Context, SCS1.getToType(1), SCS2.getToType(1))) return ImplicitConversionSequence::Indistinguishable; if (SCS1.Third == SCS2.Third) { return Context.hasSameType(SCS1.getToType(2), SCS2.getToType(2))? Result : ImplicitConversionSequence::Indistinguishable; } if (SCS1.Third == ICK_Identity) return Result == ImplicitConversionSequence::Worse ? ImplicitConversionSequence::Indistinguishable : ImplicitConversionSequence::Better; if (SCS2.Third == ICK_Identity) return Result == ImplicitConversionSequence::Better ? ImplicitConversionSequence::Indistinguishable : ImplicitConversionSequence::Worse; return ImplicitConversionSequence::Indistinguishable; } /// \brief Determine whether one of the given reference bindings is better /// than the other based on what kind of bindings they are. 
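/// For example:
///   int g();
///   void f(int &&);      // #1: binds an rvalue reference to an rvalue
///   void f(const int &); // #2: binds an lvalue reference
///   f(g());              // #1 is preferred (C++0x [over.ics.rank]p3b4).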
static bool isBetterReferenceBindingKind(const StandardConversionSequence &SCS1, const StandardConversionSequence &SCS2) { // C++0x [over.ics.rank]p3b4: // -- S1 and S2 are reference bindings (8.5.3) and neither refers to an // implicit object parameter of a non-static member function declared // without a ref-qualifier, and *either* S1 binds an rvalue reference // to an rvalue and S2 binds an lvalue reference *or S1 binds an // lvalue reference to a function lvalue and S2 binds an rvalue // reference*. // // FIXME: Rvalue references. We're going rogue with the above edits, // because the semantics in the current C++0x working paper (N3225 at the // time of this writing) break the standard definition of std::forward // and std::reference_wrapper when dealing with references to functions. // Proposed wording changes submitted to CWG for consideration. if (SCS1.BindsImplicitObjectArgumentWithoutRefQualifier || SCS2.BindsImplicitObjectArgumentWithoutRefQualifier) return false; return (!SCS1.IsLvalueReference && SCS1.BindsToRvalue && SCS2.IsLvalueReference) || (SCS1.IsLvalueReference && SCS1.BindsToFunctionLvalue && !SCS2.IsLvalueReference && SCS2.BindsToFunctionLvalue); } /// CompareStandardConversionSequences - Compare two standard /// conversion sequences to determine whether one is better than the /// other or if they are indistinguishable (C++ 13.3.3.2p3). static ImplicitConversionSequence::CompareKind CompareStandardConversionSequences(Sema &S, const StandardConversionSequence& SCS1, const StandardConversionSequence& SCS2) { // Standard conversion sequence S1 is a better conversion sequence // than standard conversion sequence S2 if (C++ 13.3.3.2p3): // -- S1 is a proper subsequence of S2 (comparing the conversion // sequences in the canonical form defined by 13.3.3.1.1, // excluding any Lvalue Transformation; the identity conversion // sequence is considered to be a subsequence of any // non-identity conversion sequence) or, if not that, if (ImplicitConversionSequence::CompareKind CK = compareStandardConversionSubsets(S.Context, SCS1, SCS2)) return CK; // -- the rank of S1 is better than the rank of S2 (by the rules // defined below), or, if not that, ImplicitConversionRank Rank1 = SCS1.getRank(); ImplicitConversionRank Rank2 = SCS2.getRank(); if (Rank1 < Rank2) return ImplicitConversionSequence::Better; else if (Rank2 < Rank1) return ImplicitConversionSequence::Worse; // (C++ 13.3.3.2p4): Two conversion sequences with the same rank // are indistinguishable unless one of the following rules // applies: // A conversion that is not a conversion of a pointer, or // pointer to member, to bool is better than another conversion // that is such a conversion. if (SCS1.isPointerConversionToBool() != SCS2.isPointerConversionToBool()) return SCS2.isPointerConversionToBool() ? ImplicitConversionSequence::Better : ImplicitConversionSequence::Worse; // C++ [over.ics.rank]p4b2: // // If class B is derived directly or indirectly from class A, // conversion of B* to A* is better than conversion of B* to // void*, and conversion of A* to void* is better than conversion // of B* to void*. bool SCS1ConvertsToVoid = SCS1.isPointerConversionToVoidPointer(S.Context); bool SCS2ConvertsToVoid = SCS2.isPointerConversionToVoidPointer(S.Context); if (SCS1ConvertsToVoid != SCS2ConvertsToVoid) { // Exactly one of the conversion sequences is a conversion to // a void pointer; it's the worse conversion. return SCS2ConvertsToVoid ? 
ImplicitConversionSequence::Better : ImplicitConversionSequence::Worse; } else if (!SCS1ConvertsToVoid && !SCS2ConvertsToVoid) { // Neither conversion sequence converts to a void pointer; compare // their derived-to-base conversions. if (ImplicitConversionSequence::CompareKind DerivedCK = CompareDerivedToBaseConversions(S, SCS1, SCS2)) return DerivedCK; } else if (SCS1ConvertsToVoid && SCS2ConvertsToVoid && !S.Context.hasSameType(SCS1.getFromType(), SCS2.getFromType())) { // Both conversion sequences are conversions to void // pointers. Compare the source types to determine if there's an // inheritance relationship in their sources. QualType FromType1 = SCS1.getFromType(); QualType FromType2 = SCS2.getFromType(); // Adjust the types we're converting from via the array-to-pointer // conversion, if we need to. if (SCS1.First == ICK_Array_To_Pointer) FromType1 = S.Context.getArrayDecayedType(FromType1); if (SCS2.First == ICK_Array_To_Pointer) FromType2 = S.Context.getArrayDecayedType(FromType2); QualType FromPointee1 = FromType1->getPointeeType().getUnqualifiedType(); QualType FromPointee2 = FromType2->getPointeeType().getUnqualifiedType(); if (S.IsDerivedFrom(FromPointee2, FromPointee1)) return ImplicitConversionSequence::Better; else if (S.IsDerivedFrom(FromPointee1, FromPointee2)) return ImplicitConversionSequence::Worse; // Objective-C++: If one interface is more specific than the // other, it is the better one. const ObjCObjectPointerType* FromObjCPtr1 = FromType1->getAs<ObjCObjectPointerType>(); const ObjCObjectPointerType* FromObjCPtr2 = FromType2->getAs<ObjCObjectPointerType>(); if (FromObjCPtr1 && FromObjCPtr2) { bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1, FromObjCPtr2); bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2, FromObjCPtr1); if (AssignLeft != AssignRight) { return AssignLeft? ImplicitConversionSequence::Better : ImplicitConversionSequence::Worse; } } } // Compare based on qualification conversions (C++ 13.3.3.2p3, // bullet 3). if (ImplicitConversionSequence::CompareKind QualCK = CompareQualificationConversions(S, SCS1, SCS2)) return QualCK; if (SCS1.ReferenceBinding && SCS2.ReferenceBinding) { // Check for a better reference binding based on the kind of bindings. if (isBetterReferenceBindingKind(SCS1, SCS2)) return ImplicitConversionSequence::Better; else if (isBetterReferenceBindingKind(SCS2, SCS1)) return ImplicitConversionSequence::Worse; // C++ [over.ics.rank]p3b4: // -- S1 and S2 are reference bindings (8.5.3), and the types to // which the references refer are the same type except for // top-level cv-qualifiers, and the type to which the reference // initialized by S2 refers is more cv-qualified than the type // to which the reference initialized by S1 refers. QualType T1 = SCS1.getToType(2); QualType T2 = SCS2.getToType(2); T1 = S.Context.getCanonicalType(T1); T2 = S.Context.getCanonicalType(T2); Qualifiers T1Quals, T2Quals; QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals); QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals); if (UnqualT1 == UnqualT2) { // Objective-C++ ARC: If the references refer to objects with different // lifetimes, prefer bindings that don't change lifetime. if (SCS1.ObjCLifetimeConversionBinding != SCS2.ObjCLifetimeConversionBinding) { return SCS1.ObjCLifetimeConversionBinding ? ImplicitConversionSequence::Worse : ImplicitConversionSequence::Better; } // If the type is an array type, promote the element qualifiers to the // type for comparison. 
      if (isa<ArrayType>(T1) && T1Quals)
        T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
      if (isa<ArrayType>(T2) && T2Quals)
        T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);

      if (T2.isMoreQualifiedThan(T1))
        return ImplicitConversionSequence::Better;
      else if (T1.isMoreQualifiedThan(T2))
        return ImplicitConversionSequence::Worse;
    }
  }

  // In Microsoft mode, prefer an integral conversion to a
  // floating-to-integral conversion if the integral conversion
  // is between types of the same size.
  // For example:
  // void f(float);
  // void f(int);
  // int main() {
  //    long a;
  //    f(a);
  // }
  // Here, MSVC will call f(int) instead of generating a compile error
  // as clang will do in standard mode.
  if (S.getLangOpts().MSVCCompat && SCS1.Second == ICK_Integral_Conversion &&
      SCS2.Second == ICK_Floating_Integral &&
      S.Context.getTypeSize(SCS1.getFromType()) ==
          S.Context.getTypeSize(SCS1.getToType(2)))
    return ImplicitConversionSequence::Better;

  return ImplicitConversionSequence::Indistinguishable;
}

/// CompareQualificationConversions - Compares two standard conversion
/// sequences to determine whether they can be ranked based on their
/// qualification conversions (C++ 13.3.3.2p3 bullet 3).
static ImplicitConversionSequence::CompareKind
CompareQualificationConversions(Sema &S,
                                const StandardConversionSequence& SCS1,
                                const StandardConversionSequence& SCS2) {
  // C++ 13.3.3.2p3:
  //  -- S1 and S2 differ only in their qualification conversion and
  //     yield similar types T1 and T2 (C++ 4.4), respectively, and the
  //     cv-qualification signature of type T1 is a proper subset of
  //     the cv-qualification signature of type T2, and S1 is not the
  //     deprecated string literal array-to-pointer conversion (4.2).
  if (SCS1.First != SCS2.First || SCS1.Second != SCS2.Second ||
      SCS1.Third != SCS2.Third || SCS1.Third != ICK_Qualification)
    return ImplicitConversionSequence::Indistinguishable;

  // FIXME: the example in the standard doesn't use a qualification
  // conversion (!)
  QualType T1 = SCS1.getToType(2);
  QualType T2 = SCS2.getToType(2);
  T1 = S.Context.getCanonicalType(T1);
  T2 = S.Context.getCanonicalType(T2);
  Qualifiers T1Quals, T2Quals;
  QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
  QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);

  // If the types are the same, we won't learn anything by unwrapping
  // them.
  if (UnqualT1 == UnqualT2)
    return ImplicitConversionSequence::Indistinguishable;

  // If the type is an array type, promote the element qualifiers to the type
  // for comparison.
  if (isa<ArrayType>(T1) && T1Quals)
    T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
  if (isa<ArrayType>(T2) && T2Quals)
    T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);

  ImplicitConversionSequence::CompareKind Result
    = ImplicitConversionSequence::Indistinguishable;

  // Objective-C++ ARC:
  //   Prefer qualification conversions not involving a change in lifetime
  //   to qualification conversions that do change lifetime.
  if (SCS1.QualificationIncludesObjCLifetime !=
                                      SCS2.QualificationIncludesObjCLifetime) {
    Result = SCS1.QualificationIncludesObjCLifetime
               ? ImplicitConversionSequence::Worse
               : ImplicitConversionSequence::Better;
  }

  while (S.Context.UnwrapSimilarPointerTypes(T1, T2)) {
    // Within each iteration of the loop, we check the qualifiers to
    // determine if this still looks like a qualification
    // conversion. Then, if all is well, we unwrap one more level of
    // pointers or pointers-to-members and do it all again
    // until there are no more pointers or pointers-to-members left to
    // unwrap. This essentially mimics what
    // IsQualificationConversion does, but here we're checking for a
    // strict subset of qualifiers.
    if (T1.getCVRQualifiers() == T2.getCVRQualifiers())
      // The qualifiers are the same, so this doesn't tell us anything
      // about how the sequences rank.
      ;
    else if (T2.isMoreQualifiedThan(T1)) {
      // T1 has fewer qualifiers, so it could be the better sequence.
      if (Result == ImplicitConversionSequence::Worse)
        // Neither has qualifiers that are a subset of the other's
        // qualifiers.
        return ImplicitConversionSequence::Indistinguishable;

      Result = ImplicitConversionSequence::Better;
    } else if (T1.isMoreQualifiedThan(T2)) {
      // T2 has fewer qualifiers, so it could be the better sequence.
      if (Result == ImplicitConversionSequence::Better)
        // Neither has qualifiers that are a subset of the other's
        // qualifiers.
        return ImplicitConversionSequence::Indistinguishable;

      Result = ImplicitConversionSequence::Worse;
    } else {
      // Qualifiers are disjoint.
      return ImplicitConversionSequence::Indistinguishable;
    }

    // If the types after this point are equivalent, we're done.
    if (S.Context.hasSameUnqualifiedType(T1, T2))
      break;
  }

  // Check that the winning standard conversion sequence isn't using
  // the deprecated string literal array to pointer conversion.
  switch (Result) {
  case ImplicitConversionSequence::Better:
    if (SCS1.DeprecatedStringLiteralToCharPtr)
      Result = ImplicitConversionSequence::Indistinguishable;
    break;

  case ImplicitConversionSequence::Indistinguishable:
    break;

  case ImplicitConversionSequence::Worse:
    if (SCS2.DeprecatedStringLiteralToCharPtr)
      Result = ImplicitConversionSequence::Indistinguishable;
    break;
  }

  return Result;
}

/// CompareDerivedToBaseConversions - Compares two standard conversion
/// sequences to determine whether they can be ranked based on their
/// various kinds of derived-to-base conversions (C++
/// [over.ics.rank]p4b3). As part of these checks, we also look at
/// conversions between Objective-C interface types.
static ImplicitConversionSequence::CompareKind
CompareDerivedToBaseConversions(Sema &S,
                                const StandardConversionSequence& SCS1,
                                const StandardConversionSequence& SCS2) {
  QualType FromType1 = SCS1.getFromType();
  QualType ToType1 = SCS1.getToType(1);
  QualType FromType2 = SCS2.getFromType();
  QualType ToType2 = SCS2.getToType(1);

  // Adjust the types we're converting from via the array-to-pointer
  // conversion, if we need to.
  if (SCS1.First == ICK_Array_To_Pointer)
    FromType1 = S.Context.getArrayDecayedType(FromType1);
  if (SCS2.First == ICK_Array_To_Pointer)
    FromType2 = S.Context.getArrayDecayedType(FromType2);

  // Canonicalize all of the types.
  FromType1 = S.Context.getCanonicalType(FromType1);
  ToType1 = S.Context.getCanonicalType(ToType1);
  FromType2 = S.Context.getCanonicalType(FromType2);
  ToType2 = S.Context.getCanonicalType(ToType2);

  // C++ [over.ics.rank]p4b3:
  //
  //   If class B is derived directly or indirectly from class A and
  //   class C is derived directly or indirectly from B,

  // Compare based on pointer conversions.
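  // For example, given:
  //   struct A {}; struct B : A {}; struct C : B {};
  //   void f(A*); // #1
  //   void f(B*); // #2
  //   C *p;
  //   f(p); // #2: conversion of C* to B* is better than C* to A*.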
  if (SCS1.Second == ICK_Pointer_Conversion &&
      SCS2.Second == ICK_Pointer_Conversion &&
      /*FIXME: Remove if Objective-C id conversions get their own rank*/
      FromType1->isPointerType() && FromType2->isPointerType() &&
      ToType1->isPointerType() && ToType2->isPointerType()) {
    QualType FromPointee1
      = FromType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
    QualType ToPointee1
      = ToType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
    QualType FromPointee2
      = FromType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
    QualType ToPointee2
      = ToType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();

    //   -- conversion of C* to B* is better than conversion of C* to A*,
    if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
      if (S.IsDerivedFrom(ToPointee1, ToPointee2))
        return ImplicitConversionSequence::Better;
      else if (S.IsDerivedFrom(ToPointee2, ToPointee1))
        return ImplicitConversionSequence::Worse;
    }

    //   -- conversion of B* to A* is better than conversion of C* to A*,
    if (FromPointee1 != FromPointee2 && ToPointee1 == ToPointee2) {
      if (S.IsDerivedFrom(FromPointee2, FromPointee1))
        return ImplicitConversionSequence::Better;
      else if (S.IsDerivedFrom(FromPointee1, FromPointee2))
        return ImplicitConversionSequence::Worse;
    }
  } else if (SCS1.Second == ICK_Pointer_Conversion &&
             SCS2.Second == ICK_Pointer_Conversion) {
    const ObjCObjectPointerType *FromPtr1
      = FromType1->getAs<ObjCObjectPointerType>();
    const ObjCObjectPointerType *FromPtr2
      = FromType2->getAs<ObjCObjectPointerType>();
    const ObjCObjectPointerType *ToPtr1
      = ToType1->getAs<ObjCObjectPointerType>();
    const ObjCObjectPointerType *ToPtr2
      = ToType2->getAs<ObjCObjectPointerType>();

    if (FromPtr1 && FromPtr2 && ToPtr1 && ToPtr2) {
      // Apply the same conversion ranking rules for Objective-C pointer types
      // that we do for C++ pointers to class types. However, we employ the
      // Objective-C pseudo-subtyping relationship used for assignment of
      // Objective-C pointer types.
      bool FromAssignLeft
        = S.Context.canAssignObjCInterfaces(FromPtr1, FromPtr2);
      bool FromAssignRight
        = S.Context.canAssignObjCInterfaces(FromPtr2, FromPtr1);
      bool ToAssignLeft
        = S.Context.canAssignObjCInterfaces(ToPtr1, ToPtr2);
      bool ToAssignRight
        = S.Context.canAssignObjCInterfaces(ToPtr2, ToPtr1);

      // A conversion to a non-id object pointer type or qualified 'id'
      // type is better than a conversion to 'id'.
      if (ToPtr1->isObjCIdType() &&
          (ToPtr2->isObjCQualifiedIdType() || ToPtr2->getInterfaceDecl()))
        return ImplicitConversionSequence::Worse;
      if (ToPtr2->isObjCIdType() &&
          (ToPtr1->isObjCQualifiedIdType() || ToPtr1->getInterfaceDecl()))
        return ImplicitConversionSequence::Better;

      // A conversion to a non-id object pointer type is better than a
      // conversion to a qualified 'id' type.
      if (ToPtr1->isObjCQualifiedIdType() && ToPtr2->getInterfaceDecl())
        return ImplicitConversionSequence::Worse;
      if (ToPtr2->isObjCQualifiedIdType() && ToPtr1->getInterfaceDecl())
        return ImplicitConversionSequence::Better;

      // A conversion to a non-Class object pointer type or qualified 'Class'
      // type is better than a conversion to 'Class'.
      if (ToPtr1->isObjCClassType() &&
          (ToPtr2->isObjCQualifiedClassType() || ToPtr2->getInterfaceDecl()))
        return ImplicitConversionSequence::Worse;
      if (ToPtr2->isObjCClassType() &&
          (ToPtr1->isObjCQualifiedClassType() || ToPtr1->getInterfaceDecl()))
        return ImplicitConversionSequence::Better;

      // A conversion to a non-Class object pointer type is better than a
      // conversion to a qualified 'Class' type.
if (ToPtr1->isObjCQualifiedClassType() && ToPtr2->getInterfaceDecl()) return ImplicitConversionSequence::Worse; if (ToPtr2->isObjCQualifiedClassType() && ToPtr1->getInterfaceDecl()) return ImplicitConversionSequence::Better; // -- "conversion of C* to B* is better than conversion of C* to A*," if (S.Context.hasSameType(FromType1, FromType2) && !FromPtr1->isObjCIdType() && !FromPtr1->isObjCClassType() && (ToAssignLeft != ToAssignRight)) return ToAssignLeft? ImplicitConversionSequence::Worse : ImplicitConversionSequence::Better; // -- "conversion of B* to A* is better than conversion of C* to A*," if (S.Context.hasSameUnqualifiedType(ToType1, ToType2) && (FromAssignLeft != FromAssignRight)) return FromAssignLeft? ImplicitConversionSequence::Better : ImplicitConversionSequence::Worse; } } // Ranking of member-pointer types. if (SCS1.Second == ICK_Pointer_Member && SCS2.Second == ICK_Pointer_Member && FromType1->isMemberPointerType() && FromType2->isMemberPointerType() && ToType1->isMemberPointerType() && ToType2->isMemberPointerType()) { const MemberPointerType * FromMemPointer1 = FromType1->getAs<MemberPointerType>(); const MemberPointerType * ToMemPointer1 = ToType1->getAs<MemberPointerType>(); const MemberPointerType * FromMemPointer2 = FromType2->getAs<MemberPointerType>(); const MemberPointerType * ToMemPointer2 = ToType2->getAs<MemberPointerType>(); const Type *FromPointeeType1 = FromMemPointer1->getClass(); const Type *ToPointeeType1 = ToMemPointer1->getClass(); const Type *FromPointeeType2 = FromMemPointer2->getClass(); const Type *ToPointeeType2 = ToMemPointer2->getClass(); QualType FromPointee1 = QualType(FromPointeeType1, 0).getUnqualifiedType(); QualType ToPointee1 = QualType(ToPointeeType1, 0).getUnqualifiedType(); QualType FromPointee2 = QualType(FromPointeeType2, 0).getUnqualifiedType(); QualType ToPointee2 = QualType(ToPointeeType2, 0).getUnqualifiedType(); // conversion of A::* to B::* is better than conversion of A::* to C::*, if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) { if (S.IsDerivedFrom(ToPointee1, ToPointee2)) return ImplicitConversionSequence::Worse; else if (S.IsDerivedFrom(ToPointee2, ToPointee1)) return ImplicitConversionSequence::Better; } // conversion of B::* to C::* is better than conversion of A::* to C::* if (ToPointee1 == ToPointee2 && FromPointee1 != FromPointee2) { if (S.IsDerivedFrom(FromPointee1, FromPointee2)) return ImplicitConversionSequence::Better; else if (S.IsDerivedFrom(FromPointee2, FromPointee1)) return ImplicitConversionSequence::Worse; } } if (SCS1.Second == ICK_Derived_To_Base) { // -- conversion of C to B is better than conversion of C to A, // -- binding of an expression of type C to a reference of type // B& is better than binding an expression of type C to a // reference of type A&, if (S.Context.hasSameUnqualifiedType(FromType1, FromType2) && !S.Context.hasSameUnqualifiedType(ToType1, ToType2)) { if (S.IsDerivedFrom(ToType1, ToType2)) return ImplicitConversionSequence::Better; else if (S.IsDerivedFrom(ToType2, ToType1)) return ImplicitConversionSequence::Worse; } // -- conversion of B to A is better than conversion of C to A. 
// -- binding of an expression of type B to a reference of type // A& is better than binding an expression of type C to a // reference of type A&, if (!S.Context.hasSameUnqualifiedType(FromType1, FromType2) && S.Context.hasSameUnqualifiedType(ToType1, ToType2)) { if (S.IsDerivedFrom(FromType2, FromType1)) return ImplicitConversionSequence::Better; else if (S.IsDerivedFrom(FromType1, FromType2)) return ImplicitConversionSequence::Worse; } } return ImplicitConversionSequence::Indistinguishable; } /// \brief Determine whether the given type is valid, e.g., it is not an invalid /// C++ class. static bool isTypeValid(QualType T) { if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) return !Record->isInvalidDecl(); return true; } /// CompareReferenceRelationship - Compare the two types T1 and T2 to /// determine whether they are reference-related, /// reference-compatible, reference-compatible with added /// qualification, or incompatible, for use in C++ initialization by /// reference (C++ [dcl.ref.init]p4). Neither type can be a reference /// type, and the first type (T1) is the pointee type of the reference /// type being initialized. Sema::ReferenceCompareResult Sema::CompareReferenceRelationship(SourceLocation Loc, QualType OrigT1, QualType OrigT2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion) { assert(!OrigT1->isReferenceType() && "T1 must be the pointee type of the reference type"); assert(!OrigT2->isReferenceType() && "T2 cannot be a reference type"); QualType T1 = Context.getCanonicalType(OrigT1); QualType T2 = Context.getCanonicalType(OrigT2); Qualifiers T1Quals, T2Quals; QualType UnqualT1 = Context.getUnqualifiedArrayType(T1, T1Quals); QualType UnqualT2 = Context.getUnqualifiedArrayType(T2, T2Quals); // C++ [dcl.init.ref]p4: // Given types "cv1 T1" and "cv2 T2," "cv1 T1" is // reference-related to "cv2 T2" if T1 is the same type as T2, or // T1 is a base class of T2. DerivedToBase = false; ObjCConversion = false; ObjCLifetimeConversion = false; if (UnqualT1 == UnqualT2) { // Nothing to do. } else if (!RequireCompleteType(Loc, OrigT2, 0) && isTypeValid(UnqualT1) && isTypeValid(UnqualT2) && IsDerivedFrom(UnqualT2, UnqualT1)) DerivedToBase = true; else if (UnqualT1->isObjCObjectOrInterfaceType() && UnqualT2->isObjCObjectOrInterfaceType() && Context.canBindObjCObjectType(UnqualT1, UnqualT2)) ObjCConversion = true; else return Ref_Incompatible; // At this point, we know that T1 and T2 are reference-related (at // least). // If the type is an array type, promote the element qualifiers to the type // for comparison. if (isa<ArrayType>(T1) && T1Quals) T1 = Context.getQualifiedType(UnqualT1, T1Quals); if (isa<ArrayType>(T2) && T2Quals) T2 = Context.getQualifiedType(UnqualT2, T2Quals); // C++ [dcl.init.ref]p4: // "cv1 T1" is reference-compatible with "cv2 T2" if T1 is // reference-related to T2 and cv1 is the same cv-qualification // as, or greater cv-qualification than, cv2. For purposes of // overload resolution, cases for which cv1 is greater // cv-qualification than cv2 are identified as // reference-compatible with added qualification (see 13.3.3.2). // // Note that we also require equivalence of Objective-C GC and address-space // qualifiers when performing these computations, so that e.g., an int in // address space 1 is not reference-compatible with an int in address // space 2. 
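  // For example:
  //   int x = 0;
  //   int &r1 = x;       // reference-compatible (same qualification)
  //   const int &r2 = x; // reference-compatible with added qualification
  //   float &r3 = x;     // error: not reference-related at all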
  if (T1Quals.getObjCLifetime() != T2Quals.getObjCLifetime() &&
      T1Quals.compatiblyIncludesObjCLifetime(T2Quals)) {
    if (isNonTrivialObjCLifetimeConversion(T2Quals, T1Quals))
      ObjCLifetimeConversion = true;

    T1Quals.removeObjCLifetime();
    T2Quals.removeObjCLifetime();
  }

  if (T1Quals == T2Quals)
    return Ref_Compatible;
  else if (T1Quals.compatiblyIncludes(T2Quals))
    return Ref_Compatible_With_Added_Qualification;
  else
    return Ref_Related;
}

/// \brief Look for a user-defined conversion to a value reference-compatible
///        with DeclType. Return true if something definite is found.
static bool
FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
                         QualType DeclType, SourceLocation DeclLoc,
                         Expr *Init, QualType T2, bool AllowRvalues,
                         bool AllowExplicit) {
  assert(T2->isRecordType() && "Can only find conversions of record types.");
  CXXRecordDecl *T2RecordDecl
    = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl());

  OverloadCandidateSet CandidateSet(DeclLoc, OverloadCandidateSet::CSK_Normal);
  const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
  for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
    NamedDecl *D = *I;
    CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
    if (isa<UsingShadowDecl>(D))
      D = cast<UsingShadowDecl>(D)->getTargetDecl();

    FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
    CXXConversionDecl *Conv;
    if (ConvTemplate)
      Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
    else
      Conv = cast<CXXConversionDecl>(D);

    // If this is an explicit conversion, and we're not allowed to consider
    // explicit conversions, skip it.
    if (!AllowExplicit && Conv->isExplicit())
      continue;

    if (AllowRvalues) {
      bool DerivedToBase = false;
      bool ObjCConversion = false;
      bool ObjCLifetimeConversion = false;

      // If we are initializing an rvalue reference, don't permit conversion
      // functions that return lvalues.
      if (!ConvTemplate && DeclType->isRValueReferenceType()) {
        const ReferenceType *RefType
          = Conv->getConversionType()->getAs<LValueReferenceType>();
        if (RefType && !RefType->getPointeeType()->isFunctionType())
          continue;
      }

      if (!ConvTemplate &&
          S.CompareReferenceRelationship(
            DeclLoc,
            Conv->getConversionType().getNonReferenceType()
              .getUnqualifiedType(),
            DeclType.getNonReferenceType().getUnqualifiedType(),
            DerivedToBase, ObjCConversion, ObjCLifetimeConversion) ==
          Sema::Ref_Incompatible)
        continue;
    } else {
      // If the conversion function doesn't return a reference type,
      // it can't be considered for this conversion. An rvalue reference
      // is only acceptable if its referencee is a function type.
      const ReferenceType *RefType =
        Conv->getConversionType()->getAs<ReferenceType>();
      if (!RefType ||
          (!RefType->isLValueReferenceType() &&
           !RefType->getPointeeType()->isFunctionType()))
        continue;
    }

    if (ConvTemplate)
      S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
                                       Init, DeclType, CandidateSet,
                                       /*AllowObjCConversionOnExplicit=*/false);
    else
      S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
                               DeclType, CandidateSet,
                               /*AllowObjCConversionOnExplicit=*/false);
  }

  bool HadMultipleCandidates = (CandidateSet.size() > 1);

  OverloadCandidateSet::iterator Best;
  switch (CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
  case OR_Success:
    // C++ [over.ics.ref]p1:
    //
    //   [...] If the parameter binds directly to the result of
    //   applying a conversion function to the argument
    //   expression, the implicit conversion sequence is a
    //   user-defined conversion sequence (13.3.3.1.2), with the
    //   second standard conversion sequence either an identity
    //   conversion or, if the conversion function returns an
    //   entity of a type that is a derived class of the parameter
    //   type, a derived-to-base Conversion.
    if (!Best->FinalConversion.DirectBinding)
      return false;

    ICS.setUserDefined();
    ICS.UserDefined.Before = Best->Conversions[0].Standard;
    ICS.UserDefined.After = Best->FinalConversion;
    ICS.UserDefined.HadMultipleCandidates = HadMultipleCandidates;
    ICS.UserDefined.ConversionFunction = Best->Function;
    ICS.UserDefined.FoundConversionFunction = Best->FoundDecl;
    ICS.UserDefined.EllipsisConversion = false;
    assert(ICS.UserDefined.After.ReferenceBinding &&
           ICS.UserDefined.After.DirectBinding &&
           "Expected a direct reference binding!");
    return true;

  case OR_Ambiguous:
    ICS.setAmbiguous();
    for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
         Cand != CandidateSet.end(); ++Cand)
      if (Cand->Viable)
        ICS.Ambiguous.addConversion(Cand->Function);
    return true;

  case OR_No_Viable_Function:
  case OR_Deleted:
    // There was no suitable conversion, or we found a deleted
    // conversion; continue with other checks.
    return false;
  }

  llvm_unreachable("Invalid OverloadResult!");
}

/// \brief Compute an implicit conversion sequence for reference
/// initialization.
static ImplicitConversionSequence
TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
                 SourceLocation DeclLoc,
                 bool SuppressUserConversions,
                 bool AllowExplicit) {
  assert(DeclType->isReferenceType() && "Reference init needs a reference");

  // Most paths end in a failed conversion.
  ImplicitConversionSequence ICS;
  ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);

  QualType T1 = DeclType->getAs<ReferenceType>()->getPointeeType();
  QualType T2 = Init->getType();

  // If the initializer is the address of an overloaded function, try
  // to resolve the overloaded function. If all goes well, T2 is the
  // type of the resulting function.
  if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
    DeclAccessPair Found;
    if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(Init, DeclType,
                                                                false, Found))
      T2 = Fn->getType();
  }

  // Compute some basic properties of the types and the initializer.
  bool isRValRef = DeclType->isRValueReferenceType();
  bool DerivedToBase = false;
  bool ObjCConversion = false;
  bool ObjCLifetimeConversion = false;
  Expr::Classification InitCategory = Init->Classify(S.Context);
  Sema::ReferenceCompareResult RefRelationship
    = S.CompareReferenceRelationship(DeclLoc, T1, T2, DerivedToBase,
                                     ObjCConversion, ObjCLifetimeConversion);

  // C++0x [dcl.init.ref]p5:
  //   A reference to type "cv1 T1" is initialized by an expression
  //   of type "cv2 T2" as follows:

  //     -- If the reference is an lvalue reference and the initializer
  //        expression
  if (!isRValRef) {
    //     -- is an lvalue (but is not a bit-field), and "cv1 T1" is
    //        reference-compatible with "cv2 T2," or
    //
    // Per C++ [over.ics.ref]p4, we don't check the bit-field property here.
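    // For example:
    //   struct B {}; struct D : B {};
    //   void f(B &);
    //   D d;
    //   f(d); // direct binding; the ICS is a derived-to-base Conversion.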
if (InitCategory.isLValue() && RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) { // C++ [over.ics.ref]p1: // When a parameter of reference type binds directly (8.5.3) // to an argument expression, the implicit conversion sequence // is the identity conversion, unless the argument expression // has a type that is a derived class of the parameter type, // in which case the implicit conversion sequence is a // derived-to-base Conversion (13.3.3.1). ICS.setStandard(); ICS.Standard.First = ICK_Identity; ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base : ObjCConversion? ICK_Compatible_Conversion : ICK_Identity; ICS.Standard.Third = ICK_Identity; ICS.Standard.FromTypePtr = T2.getAsOpaquePtr(); ICS.Standard.setToType(0, T2); ICS.Standard.setToType(1, T1); ICS.Standard.setToType(2, T1); ICS.Standard.ReferenceBinding = true; ICS.Standard.DirectBinding = true; ICS.Standard.IsLvalueReference = !isRValRef; ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType(); ICS.Standard.BindsToRvalue = false; ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false; ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion; ICS.Standard.CopyConstructor = nullptr; ICS.Standard.DeprecatedStringLiteralToCharPtr = false; // Nothing more to do: the inaccessibility/ambiguity check for // derived-to-base conversions is suppressed when we're // computing the implicit conversion sequence (C++ // [over.best.ics]p2). return ICS; } // -- has a class type (i.e., T2 is a class type), where T1 is // not reference-related to T2, and can be implicitly // converted to an lvalue of type "cv3 T3," where "cv1 T1" // is reference-compatible with "cv3 T3" 92) (this // conversion is selected by enumerating the applicable // conversion functions (13.3.1.6) and choosing the best // one through overload resolution (13.3)), if (!SuppressUserConversions && T2->isRecordType() && !S.RequireCompleteType(DeclLoc, T2, 0) && RefRelationship == Sema::Ref_Incompatible) { if (FindConversionForRefInit(S, ICS, DeclType, DeclLoc, Init, T2, /*AllowRvalues=*/false, AllowExplicit)) return ICS; } } // -- Otherwise, the reference shall be an lvalue reference to a // non-volatile const type (i.e., cv1 shall be const), or the reference // shall be an rvalue reference. if (!isRValRef && (!T1.isConstQualified() || T1.isVolatileQualified())) return ICS; // -- If the initializer expression // // -- is an xvalue, class prvalue, array prvalue or function // lvalue and "cv1 T1" is reference-compatible with "cv2 T2", or if (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification && (InitCategory.isXValue() || (InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) || (InitCategory.isLValue() && T2->isFunctionType()))) { ICS.setStandard(); ICS.Standard.First = ICK_Identity; ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base : ObjCConversion? ICK_Compatible_Conversion : ICK_Identity; ICS.Standard.Third = ICK_Identity; ICS.Standard.FromTypePtr = T2.getAsOpaquePtr(); ICS.Standard.setToType(0, T2); ICS.Standard.setToType(1, T1); ICS.Standard.setToType(2, T1); ICS.Standard.ReferenceBinding = true; // In C++0x, this is always a direct binding. In C++98/03, it's a direct // binding unless we're binding to a class prvalue. // Note: Although xvalues wouldn't normally show up in C++98/03 code, we // allow the use of rvalue references in C++98/03 for the benefit of // standard library implementors; therefore, we need the xvalue check here. 
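    // For example, for 'struct S {};', binding 'const S &r = S();' is a
    // direct binding in C++11, whereas C++98/03 does not consider binding
    // to a class prvalue a direct binding.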
ICS.Standard.DirectBinding = S.getLangOpts().CPlusPlus11 || !(InitCategory.isPRValue() || T2->isRecordType()); ICS.Standard.IsLvalueReference = !isRValRef; ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType(); ICS.Standard.BindsToRvalue = InitCategory.isRValue(); ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false; ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion; ICS.Standard.CopyConstructor = nullptr; ICS.Standard.DeprecatedStringLiteralToCharPtr = false; return ICS; } // -- has a class type (i.e., T2 is a class type), where T1 is not // reference-related to T2, and can be implicitly converted to // an xvalue, class prvalue, or function lvalue of type // "cv3 T3", where "cv1 T1" is reference-compatible with // "cv3 T3", // // then the reference is bound to the value of the initializer // expression in the first case and to the result of the conversion // in the second case (or, in either case, to an appropriate base // class subobject). if (!SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() && !S.RequireCompleteType(DeclLoc, T2, 0) && FindConversionForRefInit(S, ICS, DeclType, DeclLoc, Init, T2, /*AllowRvalues=*/true, AllowExplicit)) { // In the second case, if the reference is an rvalue reference // and the second standard conversion sequence of the // user-defined conversion sequence includes an lvalue-to-rvalue // conversion, the program is ill-formed. if (ICS.isUserDefined() && isRValRef && ICS.UserDefined.After.First == ICK_Lvalue_To_Rvalue) ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType); return ICS; } // A temporary of function type cannot be created; don't even try. if (T1->isFunctionType()) return ICS; // -- Otherwise, a temporary of type "cv1 T1" is created and // initialized from the initializer expression using the // rules for a non-reference copy initialization (8.5). The // reference is then bound to the temporary. If T1 is // reference-related to T2, cv1 must be the same // cv-qualification as, or greater cv-qualification than, // cv2; otherwise, the program is ill-formed. if (RefRelationship == Sema::Ref_Related) { // If cv1 == cv2 or cv1 is a greater cv-qualified than cv2, then // we would be reference-compatible or reference-compatible with // added qualification. But that wasn't the case, so the reference // initialization fails. // // Note that we only want to check address spaces and cvr-qualifiers here. // ObjC GC and lifetime qualifiers aren't important. Qualifiers T1Quals = T1.getQualifiers(); Qualifiers T2Quals = T2.getQualifiers(); T1Quals.removeObjCGCAttr(); T1Quals.removeObjCLifetime(); T2Quals.removeObjCGCAttr(); T2Quals.removeObjCLifetime(); if (!T1Quals.compatiblyIncludes(T2Quals)) return ICS; } // If at least one of the types is a class type, the types are not // related, and we aren't allowed any user conversions, the // reference binding fails. This case is important for breaking // recursion, since TryImplicitConversion below will attempt to // create a temporary through the use of a copy constructor. if (SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible && (T1->isRecordType() || T2->isRecordType())) return ICS; // If T1 is reference-related to T2 and the reference is an rvalue // reference, the initializer expression shall not be an lvalue. 
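  // For example:
  //   int x = 0;
  //   int &&r = x; // ill-formed: an rvalue reference cannot bind to the
  //                // reference-related lvalue 'x'.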
if (RefRelationship >= Sema::Ref_Related && isRValRef && Init->Classify(S.Context).isLValue()) return ICS; // C++ [over.ics.ref]p2: // When a parameter of reference type is not bound directly to // an argument expression, the conversion sequence is the one // required to convert the argument expression to the // underlying type of the reference according to // 13.3.3.1. Conceptually, this conversion sequence corresponds // to copy-initializing a temporary of the underlying type with // the argument expression. Any difference in top-level // cv-qualification is subsumed by the initialization itself // and does not constitute a conversion. ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions, /*AllowExplicit=*/false, /*InOverloadResolution=*/false, /*CStyle=*/false, /*AllowObjCWritebackConversion=*/false, /*AllowObjCConversionOnExplicit=*/false); // Of course, that's still a reference binding. if (ICS.isStandard()) { ICS.Standard.ReferenceBinding = true; ICS.Standard.IsLvalueReference = !isRValRef; ICS.Standard.BindsToFunctionLvalue = false; ICS.Standard.BindsToRvalue = true; ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false; ICS.Standard.ObjCLifetimeConversionBinding = false; } else if (ICS.isUserDefined()) { const ReferenceType *LValRefType = ICS.UserDefined.ConversionFunction->getReturnType() ->getAs<LValueReferenceType>(); // C++ [over.ics.ref]p3: // Except for an implicit object parameter, for which see 13.3.1, a // standard conversion sequence cannot be formed if it requires [...] // binding an rvalue reference to an lvalue other than a function // lvalue. // Note that the function case is not possible here. if (DeclType->isRValueReferenceType() && LValRefType) { // FIXME: This is the wrong BadConversionSequence. The problem is binding // an rvalue reference to a (non-function) lvalue, not binding an lvalue // reference to an rvalue! ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, Init, DeclType); return ICS; } ICS.UserDefined.Before.setAsIdentityConversion(); ICS.UserDefined.After.ReferenceBinding = true; ICS.UserDefined.After.IsLvalueReference = !isRValRef; ICS.UserDefined.After.BindsToFunctionLvalue = false; ICS.UserDefined.After.BindsToRvalue = !LValRefType; ICS.UserDefined.After.BindsImplicitObjectArgumentWithoutRefQualifier = false; ICS.UserDefined.After.ObjCLifetimeConversionBinding = false; } return ICS; } static ImplicitConversionSequence TryCopyInitialization(Sema &S, Expr *From, QualType ToType, bool SuppressUserConversions, bool InOverloadResolution, bool AllowObjCWritebackConversion, bool AllowExplicit = false); /// TryListConversion - Try to copy-initialize a value of type ToType from the /// initializer list From. static ImplicitConversionSequence TryListConversion(Sema &S, InitListExpr *From, QualType ToType, bool SuppressUserConversions, bool InOverloadResolution, bool AllowObjCWritebackConversion) { // C++11 [over.ics.list]p1: // When an argument is an initializer list, it is not an expression and // special rules apply for converting it to a parameter type. ImplicitConversionSequence Result; Result.setBad(BadConversionSequence::no_conversion, From, ToType); // We need a complete type for what follows. Incomplete types can never be // initialized from init lists. 
if (S.RequireCompleteType(From->getLocStart(), ToType, 0)) return Result; // Per DR1467: // If the parameter type is a class X and the initializer list has a single // element of type cv U, where U is X or a class derived from X, the // implicit conversion sequence is the one required to convert the element // to the parameter type. // // Otherwise, if the parameter type is a character array [... ] // and the initializer list has a single element that is an // appropriately-typed string literal (8.5.2 [dcl.init.string]), the // implicit conversion sequence is the identity conversion. if (From->getNumInits() == 1) { if (ToType->isRecordType()) { QualType InitType = From->getInit(0)->getType(); if (S.Context.hasSameUnqualifiedType(InitType, ToType) || S.IsDerivedFrom(InitType, ToType)) return TryCopyInitialization(S, From->getInit(0), ToType, SuppressUserConversions, InOverloadResolution, AllowObjCWritebackConversion); } // FIXME: Check the other conditions here: array of character type, // initializer is a string literal. if (ToType->isArrayType()) { InitializedEntity Entity = InitializedEntity::InitializeParameter(S.Context, ToType, /*Consumed=*/false); if (S.CanPerformCopyInitialization(Entity, From)) { Result.setStandard(); Result.Standard.setAsIdentityConversion(); Result.Standard.setFromType(ToType); Result.Standard.setAllToTypes(ToType); return Result; } } } // C++14 [over.ics.list]p2: Otherwise, if the parameter type [...] (below). // C++11 [over.ics.list]p2: // If the parameter type is std::initializer_list<X> or "array of X" and // all the elements can be implicitly converted to X, the implicit // conversion sequence is the worst conversion necessary to convert an // element of the list to X. // // C++14 [over.ics.list]p3: // Otherwise, if the parameter type is "array of N X", if the initializer // list has exactly N elements or if it has fewer than N elements and X is // default-constructible, and if all the elements of the initializer list // can be implicitly converted to X, the implicit conversion sequence is // the worst conversion necessary to convert an element of the list to X. // // FIXME: We're missing a lot of these checks. bool toStdInitializerList = false; QualType X; if (ToType->isArrayType()) X = S.Context.getAsArrayType(ToType)->getElementType(); else toStdInitializerList = S.isStdInitializerList(ToType, &X); if (!X.isNull()) { for (unsigned i = 0, e = From->getNumInits(); i < e; ++i) { Expr *Init = From->getInit(i); ImplicitConversionSequence ICS = TryCopyInitialization(S, Init, X, SuppressUserConversions, InOverloadResolution, AllowObjCWritebackConversion); // If a single element isn't convertible, fail. if (ICS.isBad()) { Result = ICS; break; } // Otherwise, look for the worst conversion. if (Result.isBad() || CompareImplicitConversionSequences(S, ICS, Result) == ImplicitConversionSequence::Worse) Result = ICS; } // For an empty list, we won't have computed any conversion sequence. // Introduce the identity conversion sequence. if (From->getNumInits() == 0) { Result.setStandard(); Result.Standard.setAsIdentityConversion(); Result.Standard.setFromType(ToType); Result.Standard.setAllToTypes(ToType); } Result.setStdInitializerListElement(toStdInitializerList); return Result; } // C++14 [over.ics.list]p4: // C++11 [over.ics.list]p3: // Otherwise, if the parameter is a non-aggregate class X and overload // resolution chooses a single best constructor [...] the implicit // conversion sequence is a user-defined conversion sequence. 
If multiple // constructors are viable but none is better than the others, the // implicit conversion sequence is a user-defined conversion sequence. if (ToType->isRecordType() && !ToType->isAggregateType()) { // This function can deal with initializer lists. return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions, /*AllowExplicit=*/false, InOverloadResolution, /*CStyle=*/false, AllowObjCWritebackConversion, /*AllowObjCConversionOnExplicit=*/false); } // C++14 [over.ics.list]p5: // C++11 [over.ics.list]p4: // Otherwise, if the parameter has an aggregate type which can be // initialized from the initializer list [...] the implicit conversion // sequence is a user-defined conversion sequence. if (ToType->isAggregateType()) { // Type is an aggregate, argument is an init list. At this point it comes // down to checking whether the initialization works. // FIXME: Find out whether this parameter is consumed or not. InitializedEntity Entity = InitializedEntity::InitializeParameter(S.Context, ToType, /*Consumed=*/false); if (S.CanPerformCopyInitialization(Entity, From)) { Result.setUserDefined(); Result.UserDefined.Before.setAsIdentityConversion(); // Initializer lists don't have a type. Result.UserDefined.Before.setFromType(QualType()); Result.UserDefined.Before.setAllToTypes(QualType()); Result.UserDefined.After.setAsIdentityConversion(); Result.UserDefined.After.setFromType(ToType); Result.UserDefined.After.setAllToTypes(ToType); Result.UserDefined.ConversionFunction = nullptr; } return Result; } // C++14 [over.ics.list]p6: // C++11 [over.ics.list]p5: // Otherwise, if the parameter is a reference, see 13.3.3.1.4. if (ToType->isReferenceType()) { // The standard is notoriously unclear here, since 13.3.3.1.4 doesn't // mention initializer lists in any way. So we go by what list- // initialization would do and try to extrapolate from that. QualType T1 = ToType->getAs<ReferenceType>()->getPointeeType(); // If the initializer list has a single element that is reference-related // to the parameter type, we initialize the reference from that. if (From->getNumInits() == 1) { Expr *Init = From->getInit(0); QualType T2 = Init->getType(); // If the initializer is the address of an overloaded function, try // to resolve the overloaded function. If all goes well, T2 is the // type of the resulting function. if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) { DeclAccessPair Found; if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction( Init, ToType, false, Found)) T2 = Fn->getType(); } // Compute some basic properties of the types and the initializer. bool dummy1 = false; bool dummy2 = false; bool dummy3 = false; Sema::ReferenceCompareResult RefRelationship = S.CompareReferenceRelationship(From->getLocStart(), T1, T2, dummy1, dummy2, dummy3); if (RefRelationship >= Sema::Ref_Related) { return TryReferenceInit(S, Init, ToType, /*FIXME*/From->getLocStart(), SuppressUserConversions, /*AllowExplicit=*/false); } } // Otherwise, we bind the reference to a temporary created from the // initializer list. Result = TryListConversion(S, From, T1, SuppressUserConversions, InOverloadResolution, AllowObjCWritebackConversion); if (Result.isFailure()) return Result; assert(!Result.isEllipsis() && "Sub-initialization cannot result in ellipsis conversion."); // Can we even bind to a temporary? if (ToType->isRValueReferenceType() || (T1.isConstQualified() && !T1.isVolatileQualified())) { StandardConversionSequence &SCS = Result.isStandard() ? 
                                      Result.Standard : Result.UserDefined.After;
      SCS.ReferenceBinding = true;
      SCS.IsLvalueReference = ToType->isLValueReferenceType();
      SCS.BindsToRvalue = true;
      SCS.BindsToFunctionLvalue = false;
      SCS.BindsImplicitObjectArgumentWithoutRefQualifier = false;
      SCS.ObjCLifetimeConversionBinding = false;
    } else
      Result.setBad(BadConversionSequence::lvalue_ref_to_rvalue,
                    From, ToType);
    return Result;
  }

  // C++14 [over.ics.list]p7:
  // C++11 [over.ics.list]p6:
  //   Otherwise, if the parameter type is not a class:
  if (!ToType->isRecordType()) {
    //    - if the initializer list has one element that is not itself an
    //      initializer list, the implicit conversion sequence is the one
    //      required to convert the element to the parameter type.
    unsigned NumInits = From->getNumInits();
    if (NumInits == 1 && !isa<InitListExpr>(From->getInit(0)))
      Result = TryCopyInitialization(S, From->getInit(0), ToType,
                                     SuppressUserConversions,
                                     InOverloadResolution,
                                     AllowObjCWritebackConversion);
    //    - if the initializer list has no elements, the implicit conversion
    //      sequence is the identity conversion.
    else if (NumInits == 0) {
      Result.setStandard();
      Result.Standard.setAsIdentityConversion();
      Result.Standard.setFromType(ToType);
      Result.Standard.setAllToTypes(ToType);
    }
    return Result;
  }

  // C++14 [over.ics.list]p8:
  // C++11 [over.ics.list]p7:
  //   In all cases other than those enumerated above, no conversion is
  //   possible.
  return Result;
}

/// TryCopyInitialization - Try to copy-initialize a value of type
/// ToType from the expression From. Return the implicit conversion
/// sequence required to pass this argument, which may be a bad
/// conversion sequence (meaning that the argument cannot be passed to
/// a parameter of this type). If @p SuppressUserConversions, then we
/// do not permit any user-defined conversion sequences.
static ImplicitConversionSequence
TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool InOverloadResolution,
                      bool AllowObjCWritebackConversion,
                      bool AllowExplicit) {
  if (InitListExpr *FromInitList = dyn_cast<InitListExpr>(From))
    return TryListConversion(S, FromInitList, ToType, SuppressUserConversions,
                             InOverloadResolution,
                             AllowObjCWritebackConversion);

  // HLSL Change Starts
  if (S.getLangOpts().HLSL) {
    // Note that this is incorrect. Copy initialization isn't syntactically
    // allowed in HLSL, but C++ uses it to perform matching for arguments to
    // parameters.
    //
    // The correct fix should go to implicit conversions, but because the
    // type system isn't currently up to spec, it's easier to isolate the
    // behavior by putting this only on this path.
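    // A hypothetical HLSL sketch (illustrative only, not from this file) of
    // a call that is matched through this path rather than through the C++
    // rules further below:
    //
    //   void fn(float3 v);
    //   float4 f4;
    //   fn(f4);  // matched via TryStaticCastForHLSL below, e.g. implicit
    //            // vector truncation, instead of C++ copy-initialization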
const bool ListInitFalse = false; const bool SuppressDiagTrue = true; const Sema::CheckedConversionKind kind = Sema::CCK_ImplicitConversion; ImplicitConversionSequence ICS; ICS.setStandard(); unsigned msg; CastKind castKind; CXXCastPath castPath; ExprResult E(From); if (::hlsl::TryStaticCastForHLSL( &S, E, ToType, kind, From->getSourceRange(), msg, castKind, castPath, ListInitFalse, SuppressDiagTrue, &ICS.Standard)) { return ICS; } } // HLSL Change Ends if (ToType->isReferenceType()) return TryReferenceInit(S, From, ToType, /*FIXME:*/From->getLocStart(), SuppressUserConversions, AllowExplicit); return TryImplicitConversion(S, From, ToType, SuppressUserConversions, /*AllowExplicit=*/false, InOverloadResolution, /*CStyle=*/false, AllowObjCWritebackConversion, /*AllowObjCConversionOnExplicit=*/false); } static bool TryCopyInitialization(const CanQualType FromQTy, const CanQualType ToQTy, Sema &S, SourceLocation Loc, ExprValueKind FromVK) { OpaqueValueExpr TmpExpr(Loc, FromQTy, FromVK); ImplicitConversionSequence ICS = TryCopyInitialization(S, &TmpExpr, ToQTy, true, true, false); return !ICS.isBad(); } /// TryObjectArgumentInitialization - Try to initialize the object /// parameter of the given member function (@c Method) from the /// expression @p From. static ImplicitConversionSequence TryObjectArgumentInitialization(Sema &S, QualType FromType, Expr::Classification FromClassification, CXXMethodDecl *Method, CXXRecordDecl *ActingContext) { QualType ClassType = S.Context.getTypeDeclType(ActingContext); // [class.dtor]p2: A destructor can be invoked for a const, volatile or // const volatile object. unsigned Quals = isa<CXXDestructorDecl>(Method) ? Qualifiers::Const | Qualifiers::Volatile : Method->getTypeQualifiers(); QualType ImplicitParamType = S.Context.getCVRQualifiedType(ClassType, Quals); // Set up the conversion sequence as a "bad" conversion, to allow us // to exit early. ImplicitConversionSequence ICS; // We need to have an object of class type. if (const PointerType *PT = FromType->getAs<PointerType>()) { FromType = PT->getPointeeType(); // When we had a pointer, it's implicitly dereferenced, so we // better have an lvalue. assert(FromClassification.isLValue()); } assert(FromType->isRecordType()); // C++0x [over.match.funcs]p4: // For non-static member functions, the type of the implicit object // parameter is // // - "lvalue reference to cv X" for functions declared without a // ref-qualifier or with the & ref-qualifier // - "rvalue reference to cv X" for functions declared with the && // ref-qualifier // // where X is the class of which the function is a member and cv is the // cv-qualification on the member function declaration. // // However, when finding an implicit conversion sequence for the argument, we // are not allowed to create temporaries or perform user-defined conversions // (C++ [over.match.funcs]p5). We perform a simplified version of // reference binding here, that allows class rvalues to bind to // non-constant references. // First check the qualifiers. QualType FromTypeCanon = S.Context.getCanonicalType(FromType); // HLSL Change Starts // HLSL Note: For calls that aren't compiler-generated C++ overloads, we // disregard const qualifiers so that member functions can be called on // `const` objects from constant buffer types. This should change in the // future if we support const instance methods. FromTypeCanon.removeLocalRestrict(); // HLSL Change - disregard restrict. 
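  // Hypothetical HLSL illustration of the note above (illustrative only):
  //
  //   struct S { float get() { return 1.0f; } };  // no const methods in HLSL
  //   cbuffer CB { S s; };                        // cbuffer members are const
  //   float f = s.get();  // still accepted: the const qualifier on the
  //                       // implicit object argument is disregarded below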
if (!S.getLangOpts().HLSL || (Method != nullptr && Method->hasAttr<HLSLCXXOverloadAttr>())) { // HLSL Change Ends if (ImplicitParamType.getCVRQualifiers() != FromTypeCanon.getLocalCVRQualifiers() && !ImplicitParamType.isAtLeastAsQualifiedAs(FromTypeCanon)) { ICS.setBad(BadConversionSequence::bad_qualifiers, FromType, ImplicitParamType); return ICS; } } // HLSL Change - end branch // Check that we have either the same type or a derived type. It // affects the conversion rank. QualType ClassTypeCanon = S.Context.getCanonicalType(ClassType); ImplicitConversionKind SecondKind; if (ClassTypeCanon == FromTypeCanon.getLocalUnqualifiedType()) { SecondKind = ICK_Identity; } else if (S.IsDerivedFrom(FromType, ClassType)) SecondKind = ICK_Derived_To_Base; else { ICS.setBad(BadConversionSequence::unrelated_class, FromType, ImplicitParamType); return ICS; } // Check the ref-qualifier. switch (Method->getRefQualifier()) { case RQ_None: // Do nothing; we don't care about lvalueness or rvalueness. break; case RQ_LValue: if (!FromClassification.isLValue() && Quals != Qualifiers::Const) { // non-const lvalue reference cannot bind to an rvalue ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, FromType, ImplicitParamType); return ICS; } break; case RQ_RValue: if (!FromClassification.isRValue()) { // rvalue reference cannot bind to an lvalue ICS.setBad(BadConversionSequence::rvalue_ref_to_lvalue, FromType, ImplicitParamType); return ICS; } break; } // Success. Mark this as a reference binding. ICS.setStandard(); ICS.Standard.setAsIdentityConversion(); ICS.Standard.Second = SecondKind; ICS.Standard.setFromType(FromType); ICS.Standard.setAllToTypes(ImplicitParamType); ICS.Standard.ReferenceBinding = true; ICS.Standard.DirectBinding = true; ICS.Standard.IsLvalueReference = Method->getRefQualifier() != RQ_RValue; ICS.Standard.BindsToFunctionLvalue = false; ICS.Standard.BindsToRvalue = FromClassification.isRValue(); ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = (Method->getRefQualifier() == RQ_None); ICS.Standard.ComponentConversion = ICK_Identity; return ICS; } // HLSL Change Starts static void InitCallParamConversions(Sema &S, const FunctionProtoType *Proto, ParmVarDecl *Param, unsigned ArgIdx, Expr *Arg, bool SuppressUserConversions, bool InOverloadResolution, bool AllowExplicit, ImplicitConversionSequence &InConversion, ImplicitConversionSequence &OutConversion) { hlsl::ParameterModifier paramMods = Proto->getParamMods()[ArgIdx]; QualType ParamType = Proto->getParamType(ArgIdx); if (paramMods.isAnyIn()) { InConversion = TryCopyInitialization(S, Arg, ParamType, SuppressUserConversions, InOverloadResolution, false, AllowExplicit); } if (paramMods.isAnyOut()) { // TryCopyInitialization takes an expression but there isn't one at this // point - we're just trying to figure out whether the result value can be // converted back into the argument. 
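    // For instance (hypothetical HLSL, illustrative only):
    //
    //   void fn(out float x);
    //   float a;
    //   fn(a);      // OK: 'a' is a writable lvalue for the copy-back
    //   // fn(1.0); // rejected below: the result cannot be copied back
    //   //          // into a constant or rvalue argument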
if (Arg->getType().isConstant(S.getASTContext()) || !Arg->isLValue()) { OutConversion.setBad( BadConversionSequence::FailureKind::rvalue_ref_to_lvalue, ParamType, Arg->getType()); return; } Expr *OutFrom = DeclRefExpr::Create( S.getASTContext(), NestedNameSpecifierLoc(), SourceLocation(), Param, true, Param->getLocation(), ParamType.getNonReferenceType(), VK_RValue, nullptr); OutConversion = TryCopyInitialization( S, OutFrom, Arg->getType(), SuppressUserConversions, InOverloadResolution, false, AllowExplicit); } } // HLSL Change Ends /// PerformObjectArgumentInitialization - Perform initialization of /// the implicit object parameter for the given Method with the given /// expression. ExprResult Sema::PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method) { QualType FromRecordType, DestType; // HLSL Change - this is a reference. QualType ImplicitParamRecordType = Method->getThisObjectType(Context); Expr::Classification FromClassification; if (const PointerType *PT = From->getType()->getAs<PointerType>()) { FromRecordType = PT->getPointeeType(); DestType = Method->getThisType(Context); FromClassification = Expr::Classification::makeSimpleLValue(); } else { FromRecordType = From->getType(); DestType = ImplicitParamRecordType; FromClassification = From->Classify(Context); } // Note that we always use the true parent context when performing // the actual argument initialization. ImplicitConversionSequence ICS = TryObjectArgumentInitialization( *this, From->getType(), FromClassification, Method, Method->getParent()); if (ICS.isBad()) { if (ICS.Bad.Kind == BadConversionSequence::bad_qualifiers) { Qualifiers FromQs = FromRecordType.getQualifiers(); Qualifiers ToQs = DestType.getQualifiers(); unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers(); if (CVR) { Diag(From->getLocStart(), diag::err_member_function_call_bad_cvr) << Method->getDeclName() << FromRecordType << (CVR - 1) << From->getSourceRange(); Diag(Method->getLocation(), diag::note_previous_decl) << Method->getDeclName(); return ExprError(); } } return Diag(From->getLocStart(), diag::err_implicit_object_parameter_init) << ImplicitParamRecordType << FromRecordType << From->getSourceRange(); } if (ICS.Standard.Second == ICK_Derived_To_Base) { ExprResult FromRes = PerformObjectMemberConversion(From, Qualifier, FoundDecl, Method); if (FromRes.isInvalid()) return ExprError(); From = FromRes.get(); } if (!Context.hasSameType(From->getType(), DestType)) From = ImpCastExprToType(From, DestType, CK_NoOp, From->getValueKind()).get(); return From; } /// TryContextuallyConvertToBool - Attempt to contextually convert the /// expression From to bool (C++0x [conv]p3). static ImplicitConversionSequence TryContextuallyConvertToBool(Sema &S, Expr *From) { return TryImplicitConversion(S, From, S.Context.BoolTy, /*SuppressUserConversions=*/false, /*AllowExplicit=*/true, /*InOverloadResolution=*/false, /*CStyle=*/false, /*AllowObjCWritebackConversion=*/false, /*AllowObjCConversionOnExplicit=*/false); } /// PerformContextuallyConvertToBool - Perform a contextual conversion /// of the expression From to bool (C++0x [conv]p3). 
ExprResult Sema::PerformContextuallyConvertToBool(Expr *From) { if (checkPlaceholderForOverload(*this, From)) return ExprError(); ImplicitConversionSequence ICS = TryContextuallyConvertToBool(*this, From); if (!ICS.isBad()) return PerformImplicitConversion(From, Context.BoolTy, ICS, AA_Converting); if (!DiagnoseMultipleUserDefinedConversion(From, Context.BoolTy)) return Diag(From->getLocStart(), diag::err_typecheck_bool_condition) << From->getType() << From->getSourceRange(); return ExprError(); } /// Check that the specified conversion is permitted in a converted constant /// expression, according to C++11 [expr.const]p3. Return true if the conversion /// is acceptable. static bool CheckConvertedConstantConversions(Sema &S, StandardConversionSequence &SCS) { // Since we know that the target type is an integral or unscoped enumeration // type, most conversion kinds are impossible. All possible First and Third // conversions are fine. switch (SCS.Second) { case ICK_Identity: case ICK_NoReturn_Adjustment: case ICK_Integral_Promotion: case ICK_Integral_Conversion: // Narrowing conversions are checked elsewhere. return true; case ICK_Boolean_Conversion: // Conversion from an integral or unscoped enumeration type to bool is // classified as ICK_Boolean_Conversion, but it's also arguably an integral // conversion, so we allow it in a converted constant expression. // // FIXME: Per core issue 1407, we should not allow this, but that breaks // a lot of popular code. We should at least add a warning for this // (non-conforming) extension. return SCS.getFromType()->isIntegralOrUnscopedEnumerationType() && SCS.getToType(2)->isBooleanType(); case ICK_Pointer_Conversion: case ICK_Pointer_Member: // C++1z: null pointer conversions and null member pointer conversions are // only permitted if the source type is std::nullptr_t. return SCS.getFromType()->isNullPtrType(); case ICK_Floating_Promotion: case ICK_Complex_Promotion: case ICK_Floating_Conversion: case ICK_Complex_Conversion: case ICK_Floating_Integral: case ICK_Compatible_Conversion: case ICK_Derived_To_Base: case ICK_Vector_Conversion: case ICK_Vector_Splat: case ICK_Complex_Real: case ICK_Block_Pointer_Conversion: case ICK_TransparentUnionConversion: case ICK_Writeback_Conversion: case ICK_Zero_Event_Conversion: case ICK_Flat_Conversion: // HLSL Change return false; case ICK_Lvalue_To_Rvalue: case ICK_Array_To_Pointer: case ICK_Function_To_Pointer: llvm_unreachable("found a first conversion kind in Second"); case ICK_Qualification: llvm_unreachable("found a third conversion kind in Second"); case ICK_Num_Conversion_Kinds: break; } llvm_unreachable("unknown conversion kind"); } /// CheckConvertedConstantExpression - Check that the expression From is a /// converted constant expression of type T, perform the conversion and produce /// the converted expression, per C++11 [expr.const]p3. static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From, QualType T, APValue &Value, Sema::CCEKind CCE, bool RequireInt) { assert((S.getLangOpts().CPlusPlus11 || S.getLangOpts().HLSLVersion >= hlsl::LangStd::v2017) && "converted constant expression outside C++11"); if (checkPlaceholderForOverload(S, From)) return ExprError(); // C++1z [expr.const]p3: // A converted constant expression of type T is an expression, // implicitly converted to type T, where the converted // expression is a constant expression and the implicit conversion // sequence contains only [... list of conversions ...]. 
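  // For example (hypothetical):
  //
  //   template <int N> struct A {};
  //   A<'x'> a1;     // OK: integral promotion is one of the permitted
  //                  // conversions in a converted constant expression
  //   // A<1.5> a2;  // ill-formed: a floating-integral conversion is not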
ImplicitConversionSequence ICS = TryCopyInitialization(S, From, T, /*SuppressUserConversions=*/false, /*InOverloadResolution=*/false, /*AllowObjcWritebackConversion=*/false, /*AllowExplicit=*/false); StandardConversionSequence *SCS = nullptr; switch (ICS.getKind()) { case ImplicitConversionSequence::StandardConversion: SCS = &ICS.Standard; break; case ImplicitConversionSequence::UserDefinedConversion: // We are converting to a non-class type, so the Before sequence // must be trivial. SCS = &ICS.UserDefined.After; break; case ImplicitConversionSequence::AmbiguousConversion: case ImplicitConversionSequence::BadConversion: if (!S.DiagnoseMultipleUserDefinedConversion(From, T)) return S.Diag(From->getLocStart(), diag::err_typecheck_converted_constant_expression) << From->getType() << From->getSourceRange() << T; return ExprError(); case ImplicitConversionSequence::EllipsisConversion: llvm_unreachable("ellipsis conversion in converted constant expression"); } // Check that we would only use permitted conversions. if (!CheckConvertedConstantConversions(S, *SCS)) { return S.Diag(From->getLocStart(), diag::err_typecheck_converted_constant_expression_disallowed) << From->getType() << From->getSourceRange() << T; } // [...] and where the reference binding (if any) binds directly. if (SCS->ReferenceBinding && !SCS->DirectBinding) { return S.Diag(From->getLocStart(), diag::err_typecheck_converted_constant_expression_indirect) << From->getType() << From->getSourceRange() << T; } ExprResult Result = S.PerformImplicitConversion(From, T, ICS, Sema::AA_Converting); if (Result.isInvalid()) return Result; // Check for a narrowing implicit conversion. APValue PreNarrowingValue; QualType PreNarrowingType; switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue, PreNarrowingType)) { case NK_Variable_Narrowing: // Implicit conversion to a narrower type, and the value is not a constant // expression. We'll diagnose this in a moment. case NK_Not_Narrowing: break; case NK_Constant_Narrowing: S.Diag(From->getLocStart(), diag::ext_cce_narrowing) << CCE << /*Constant*/1 << PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << T; break; case NK_Type_Narrowing: S.Diag(From->getLocStart(), diag::ext_cce_narrowing) << CCE << /*Constant*/0 << From->getType() << T; break; } // Check the expression is a constant expression. SmallVector<PartialDiagnosticAt, 8> Notes; Expr::EvalResult Eval; Eval.Diag = &Notes; if ((T->isReferenceType() ? !Result.get()->EvaluateAsLValue(Eval, S.Context) : !Result.get()->EvaluateAsRValue(Eval, S.Context)) || (RequireInt && !Eval.Val.isInt())) { // The expression can't be folded, so we can't keep it at this position in // the AST. Result = ExprError(); } else { Value = Eval.Val; if (Notes.empty()) { // It's a constant expression. return Result; } } // It's not a constant expression. Produce an appropriate diagnostic. 
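  // e.g. (hypothetical):
  //
  //   template <int N> struct A {};
  //   int n = 3;   // not a constant expression
  //   // A<n> a;   // the conversion is fine, but folding fails above,
  //   //           // so we emit err_expr_not_cce below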
if (Notes.size() == 1 && Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr) S.Diag(Notes[0].first, diag::err_expr_not_cce) << CCE; else { S.Diag(From->getLocStart(), diag::err_expr_not_cce) << CCE << From->getSourceRange(); for (unsigned I = 0; I < Notes.size(); ++I) S.Diag(Notes[I].first, Notes[I].second); } return ExprError(); } ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE) { return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false); } ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE) { assert(T->isIntegralOrEnumerationType() && "unexpected converted const type"); APValue V; auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true); if (!R.isInvalid()) Value = V.getInt(); return R; } /// dropPointerConversions - If the given standard conversion sequence /// involves any pointer conversions, remove them. This may change /// the result type of the conversion sequence. static void dropPointerConversion(StandardConversionSequence &SCS) { if (SCS.Second == ICK_Pointer_Conversion) { SCS.Second = ICK_Identity; SCS.Third = ICK_Identity; SCS.ToTypePtrs[2] = SCS.ToTypePtrs[1] = SCS.ToTypePtrs[0]; } } /// TryContextuallyConvertToObjCPointer - Attempt to contextually /// convert the expression From to an Objective-C pointer type. static ImplicitConversionSequence TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) { // Do an implicit conversion to 'id'. QualType Ty = S.Context.getObjCIdType(); ImplicitConversionSequence ICS = TryImplicitConversion(S, From, Ty, // FIXME: Are these flags correct? /*SuppressUserConversions=*/false, /*AllowExplicit=*/true, /*InOverloadResolution=*/false, /*CStyle=*/false, /*AllowObjCWritebackConversion=*/false, /*AllowObjCConversionOnExplicit=*/true); // Strip off any final conversions to 'id'. switch (ICS.getKind()) { case ImplicitConversionSequence::BadConversion: case ImplicitConversionSequence::AmbiguousConversion: case ImplicitConversionSequence::EllipsisConversion: break; case ImplicitConversionSequence::UserDefinedConversion: dropPointerConversion(ICS.UserDefined.After); break; case ImplicitConversionSequence::StandardConversion: dropPointerConversion(ICS.Standard); break; } return ICS; } /// PerformContextuallyConvertToObjCPointer - Perform a contextual /// conversion of the expression From to an Objective-C pointer type. ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) { if (checkPlaceholderForOverload(*this, From)) return ExprError(); QualType Ty = Context.getObjCIdType(); ImplicitConversionSequence ICS = TryContextuallyConvertToObjCPointer(*this, From); if (!ICS.isBad()) return PerformImplicitConversion(From, Ty, ICS, AA_Converting); return ExprError(); } /// Determine whether the provided type is an integral type, or an enumeration /// type of a permitted flavor. bool Sema::ICEConvertDiagnoser::match(QualType T) { return AllowScopedEnumerations ? 
T->isIntegralOrEnumerationType() : T->isIntegralOrUnscopedEnumerationType(); } static ExprResult diagnoseAmbiguousConversion(Sema &SemaRef, SourceLocation Loc, Expr *From, Sema::ContextualImplicitConverter &Converter, QualType T, UnresolvedSetImpl &ViableConversions) { if (Converter.Suppress) return ExprError(); Converter.diagnoseAmbiguous(SemaRef, Loc, T) << From->getSourceRange(); for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) { CXXConversionDecl *Conv = cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl()); QualType ConvTy = Conv->getConversionType().getNonReferenceType(); Converter.noteAmbiguous(SemaRef, Conv, ConvTy); } return From; } static bool diagnoseNoViableConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From, Sema::ContextualImplicitConverter &Converter, QualType T, bool HadMultipleCandidates, UnresolvedSetImpl &ExplicitConversions) { if (ExplicitConversions.size() == 1 && !Converter.Suppress) { DeclAccessPair Found = ExplicitConversions[0]; CXXConversionDecl *Conversion = cast<CXXConversionDecl>(Found->getUnderlyingDecl()); // The user probably meant to invoke the given explicit // conversion; use it. QualType ConvTy = Conversion->getConversionType().getNonReferenceType(); std::string TypeStr; ConvTy.getAsStringInternal(TypeStr, SemaRef.getPrintingPolicy()); Converter.diagnoseExplicitConv(SemaRef, Loc, T, ConvTy) << FixItHint::CreateInsertion(From->getLocStart(), "static_cast<" + TypeStr + ">(") << FixItHint::CreateInsertion( SemaRef.getLocForEndOfToken(From->getLocEnd()), ")"); Converter.noteExplicitConv(SemaRef, Conversion, ConvTy); // If we aren't in a SFINAE context, build a call to the // explicit conversion function. if (SemaRef.isSFINAEContext()) return true; SemaRef.CheckMemberOperatorAccess(From->getExprLoc(), From, nullptr, Found); ExprResult Result = SemaRef.BuildCXXMemberCallExpr(From, Found, Conversion, HadMultipleCandidates); if (Result.isInvalid()) return true; // Record usage of conversion in an implicit cast. From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(), CK_UserDefinedConversion, Result.get(), nullptr, Result.get()->getValueKind()); } return false; } static bool recordConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From, Sema::ContextualImplicitConverter &Converter, QualType T, bool HadMultipleCandidates, DeclAccessPair &Found) { CXXConversionDecl *Conversion = cast<CXXConversionDecl>(Found->getUnderlyingDecl()); SemaRef.CheckMemberOperatorAccess(From->getExprLoc(), From, nullptr, Found); QualType ToType = Conversion->getConversionType().getNonReferenceType(); if (!Converter.SuppressConversion) { if (SemaRef.isSFINAEContext()) return true; Converter.diagnoseConversion(SemaRef, Loc, T, ToType) << From->getSourceRange(); } ExprResult Result = SemaRef.BuildCXXMemberCallExpr(From, Found, Conversion, HadMultipleCandidates); if (Result.isInvalid()) return true; // Record usage of conversion in an implicit cast. 
From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(), CK_UserDefinedConversion, Result.get(), nullptr, Result.get()->getValueKind()); return false; } static ExprResult finishContextualImplicitConversion( Sema &SemaRef, SourceLocation Loc, Expr *From, Sema::ContextualImplicitConverter &Converter) { if (!Converter.match(From->getType()) && !Converter.Suppress) Converter.diagnoseNoMatch(SemaRef, Loc, From->getType()) << From->getSourceRange(); return SemaRef.DefaultLvalueConversion(From); } static void collectViableConversionCandidates(Sema &SemaRef, Expr *From, QualType ToType, UnresolvedSetImpl &ViableConversions, OverloadCandidateSet &CandidateSet) { for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) { DeclAccessPair FoundDecl = ViableConversions[I]; NamedDecl *D = FoundDecl.getDecl(); CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); CXXConversionDecl *Conv; FunctionTemplateDecl *ConvTemplate; if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D))) Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl()); else Conv = cast<CXXConversionDecl>(D); if (ConvTemplate) SemaRef.AddTemplateConversionCandidate( ConvTemplate, FoundDecl, ActingContext, From, ToType, CandidateSet, /*AllowObjCConversionOnExplicit=*/false); else SemaRef.AddConversionCandidate(Conv, FoundDecl, ActingContext, From, ToType, CandidateSet, /*AllowObjCConversionOnExplicit=*/false); } } /// \brief Attempt to convert the given expression to a type which is accepted /// by the given converter. /// /// This routine will attempt to convert an expression of class type to a /// type accepted by the specified converter. In C++11 and before, the class /// must have a single non-explicit conversion function converting to a matching /// type. In C++1y, there can be multiple such conversion functions, but only /// one target type. /// /// \param Loc The source location of the construct that requires the /// conversion. /// /// \param From The expression we're converting from. /// /// \param Converter Used to control and diagnose the conversion process. /// /// \returns The expression, converted to an integral or enumeration type if /// successful. ExprResult Sema::PerformContextualImplicitConversion( SourceLocation Loc, Expr *From, ContextualImplicitConverter &Converter) { // We can't perform any more checking for type-dependent expressions. if (From->isTypeDependent()) return From; // Process placeholders immediately. if (From->hasPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(From); if (result.isInvalid()) return result; From = result.get(); } // If the expression already has a matching type, we're golden. QualType T = From->getType(); if (Converter.match(T)) return DefaultLvalueConversion(From); // FIXME: Check for missing '()' if T is a function type? // We can only perform contextual implicit conversions on objects of class // type. const RecordType *RecordTy = T->getAs<RecordType>(); if (!RecordTy || !getLangOpts().CPlusPlus) { if (!Converter.Suppress) Converter.diagnoseNoMatch(*this, Loc, T) << From->getSourceRange(); return From; } // We must have a complete class type. 
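  // e.g. (hypothetical):
  //
  //   struct Fwd;
  //   void h(Fwd &f) {
  //     // switch (f) {}  // diagnosed through the incomplete-type
  //   }                   // diagnoser declared just below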
struct TypeDiagnoserPartialDiag : TypeDiagnoser { ContextualImplicitConverter &Converter; Expr *From; TypeDiagnoserPartialDiag(ContextualImplicitConverter &Converter, Expr *From) : TypeDiagnoser(Converter.Suppress), Converter(Converter), From(From) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { Converter.diagnoseIncomplete(S, Loc, T) << From->getSourceRange(); } } IncompleteDiagnoser(Converter, From); if (RequireCompleteType(Loc, T, IncompleteDiagnoser)) return From; // Look for a conversion to an integral or enumeration type. UnresolvedSet<4> ViableConversions; // These are *potentially* viable in C++1y. UnresolvedSet<4> ExplicitConversions; const auto &Conversions = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions(); bool HadMultipleCandidates = (std::distance(Conversions.begin(), Conversions.end()) > 1); // To check that there is only one target type, in C++1y: QualType ToType; bool HasUniqueTargetType = true; // Collect explicit or viable (potentially in C++1y) conversions. for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { NamedDecl *D = (*I)->getUnderlyingDecl(); CXXConversionDecl *Conversion; FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D); if (ConvTemplate) { if (getLangOpts().CPlusPlus14) Conversion = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl()); else continue; // C++11 does not consider conversion operator templates(?). } else Conversion = cast<CXXConversionDecl>(D); assert((!ConvTemplate || getLangOpts().CPlusPlus14) && "Conversion operator templates are considered potentially " "viable in C++1y"); QualType CurToType = Conversion->getConversionType().getNonReferenceType(); if (Converter.match(CurToType) || ConvTemplate) { if (Conversion->isExplicit()) { // FIXME: For C++1y, do we need this restriction? // cf. diagnoseNoViableConversion() if (!ConvTemplate) ExplicitConversions.addDecl(I.getDecl(), I.getAccess()); } else { if (!ConvTemplate && getLangOpts().CPlusPlus14) { if (ToType.isNull()) ToType = CurToType.getUnqualifiedType(); else if (HasUniqueTargetType && (CurToType.getUnqualifiedType() != ToType)) HasUniqueTargetType = false; } ViableConversions.addDecl(I.getDecl(), I.getAccess()); } } } if (getLangOpts().CPlusPlus14) { // C++1y [conv]p6: // ... An expression e of class type E appearing in such a context // is said to be contextually implicitly converted to a specified // type T and is well-formed if and only if e can be implicitly // converted to a type T that is determined as follows: E is searched // for conversion functions whose return type is cv T or reference to // cv T such that T is allowed by the context. There shall be // exactly one such T. // If no unique T is found: if (ToType.isNull()) { if (diagnoseNoViableConversion(*this, Loc, From, Converter, T, HadMultipleCandidates, ExplicitConversions)) return ExprError(); return finishContextualImplicitConversion(*this, Loc, From, Converter); } // If more than one unique Ts are found: if (!HasUniqueTargetType) return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T, ViableConversions); // If one unique T is found: // First, build a candidate set from the previously recorded // potentially viable conversions. OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal); collectViableConversionCandidates(*this, From, ToType, ViableConversions, CandidateSet); // Then, perform overload resolution over the candidate set. 
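    // Illustrative C++1y example of the unique-target-type rule handled
    // here (hypothetical):
    //
    //   struct S {
    //     operator int() const &;  // both return int, so the unique
    //     operator int() &&;       // target type T == int is found and
    //   };                         // overload resolution below picks the
    //   // switch (S()) { ... }    // best candidate (&& for an rvalue)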
OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, Loc, Best)) { case OR_Success: { // Apply this conversion. DeclAccessPair Found = DeclAccessPair::make(Best->Function, Best->FoundDecl.getAccess()); if (recordConversion(*this, Loc, From, Converter, T, HadMultipleCandidates, Found)) return ExprError(); break; } case OR_Ambiguous: return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T, ViableConversions); case OR_No_Viable_Function: if (diagnoseNoViableConversion(*this, Loc, From, Converter, T, HadMultipleCandidates, ExplicitConversions)) return ExprError(); // fall through 'OR_Deleted' case. LLVM_FALLTHROUGH; // HLSL Change case OR_Deleted: // We'll complain below about a non-integral condition type. break; } } else { switch (ViableConversions.size()) { case 0: { if (diagnoseNoViableConversion(*this, Loc, From, Converter, T, HadMultipleCandidates, ExplicitConversions)) return ExprError(); // We'll complain below about a non-integral condition type. break; } case 1: { // Apply this conversion. DeclAccessPair Found = ViableConversions[0]; if (recordConversion(*this, Loc, From, Converter, T, HadMultipleCandidates, Found)) return ExprError(); break; } default: return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T, ViableConversions); } } return finishContextualImplicitConversion(*this, Loc, From, Converter); } /// IsAcceptableNonMemberOperatorCandidate - Determine whether Fn is /// an acceptable non-member overloaded operator for a call whose /// arguments have types T1 (and, if non-empty, T2). This routine /// implements the check in C++ [over.match.oper]p3b2 concerning /// enumeration types. static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context, FunctionDecl *Fn, ArrayRef<Expr *> Args) { QualType T1 = Args[0]->getType(); QualType T2 = Args.size() > 1 ? Args[1]->getType() : QualType(); if (T1->isDependentType() || (!T2.isNull() && T2->isDependentType())) return true; if (T1->isRecordType() || (!T2.isNull() && T2->isRecordType())) return true; const FunctionProtoType *Proto = Fn->getType()->getAs<FunctionProtoType>(); if (Proto->getNumParams() < 1) return false; if (T1->isEnumeralType()) { QualType ArgType = Proto->getParamType(0).getNonReferenceType(); if (Context.hasSameUnqualifiedType(T1, ArgType)) return true; } if (Proto->getNumParams() < 2) return false; if (!T2.isNull() && T2->isEnumeralType()) { QualType ArgType = Proto->getParamType(1).getNonReferenceType(); if (Context.hasSameUnqualifiedType(T2, ArgType)) return true; } return false; } /// AddOverloadCandidate - Adds the given function to the set of /// candidate functions, using the given function call arguments. If /// @p SuppressUserConversions, then don't allow user-defined /// conversions via constructors or conversion operators. /// /// \param PartialOverloading true if we are performing "partial" overloading /// based on an incomplete set of function arguments. This feature is used by /// code completion. 
void Sema::AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions, bool PartialOverloading, bool AllowExplicit) { const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>()); assert(Proto && "Functions without a prototype cannot be overloaded"); assert(!Function->getDescribedFunctionTemplate() && "Use AddTemplateOverloadCandidate for function templates"); if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) { if (!isa<CXXConstructorDecl>(Method)) { // If we get here, it's because we're calling a member function // that is named without a member access expression (e.g., // "this->f") that was either written explicitly or created // implicitly. This can happen with a qualified call to a member // function, e.g., X::f(). We use an empty type for the implied // object argument (C++ [over.call.func]p3), and the acting context // is irrelevant. AddMethodCandidate(Method, FoundDecl, Method->getParent(), QualType(), Expr::Classification::makeSimpleLValue(), Args, CandidateSet, SuppressUserConversions, PartialOverloading); return; } // We treat a constructor like a non-member function, since its object // argument doesn't participate in overload resolution. } if (!CandidateSet.isNewCandidate(Function) || hlsl::IsIntrinsicOp(Function)) return; // C++ [over.match.oper]p3: // if no operand has a class type, only those non-member functions in the // lookup set that have a first parameter of type T1 or "reference to // (possibly cv-qualified) T1", when T1 is an enumeration type, or (if there // is a right operand) a second parameter of type T2 or "reference to // (possibly cv-qualified) T2", when T2 is an enumeration type, are // candidate functions. if (CandidateSet.getKind() == OverloadCandidateSet::CSK_Operator && !IsAcceptableNonMemberOperatorCandidate(Context, Function, Args)) return; // C++11 [class.copy]p11: [DR1402] // A defaulted move constructor that is defined as deleted is ignored by // overload resolution. CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Function); if (Constructor && Constructor->isDefaulted() && Constructor->isDeleted() && Constructor->isMoveConstructor()) return; // Overload resolution is always an unevaluated context. EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated); // Add this candidate OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size()); Candidate.FoundDecl = FoundDecl; Candidate.Function = Function; Candidate.Viable = true; Candidate.IsSurrogate = false; Candidate.IgnoreObjectArgument = false; Candidate.ExplicitCallArguments = Args.size(); if (Constructor) { // C++ [class.copy]p3: // A member function template is never instantiated to perform the copy // of a class object to an object of its class type. QualType ClassType = Context.getTypeDeclType(Constructor->getParent()); if (Args.size() == 1 && Constructor->isSpecializationCopyingObject() && (Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) || IsDerivedFrom(Args[0]->getType(), ClassType))) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_illegal_constructor; return; } } unsigned NumParams = Proto->getNumParams(); // (C++ 13.3.2p2): A candidate function having fewer than m // parameters is viable only if it has an ellipsis in its parameter // list (8.3.5). 
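  // For instance (hypothetical):
  //
  //   void f(int, ...);
  //   void g(int);
  //   f(1, 2, 3);  // viable: the trailing arguments match the ellipsis
  //   // g(1, 2);  // not viable: too many arguments, no ellipsis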
  if (TooManyArguments(NumParams, Args.size(), PartialOverloading) &&
      !Proto->isVariadic()) {
    Candidate.Viable = false;
    Candidate.FailureKind = ovl_fail_too_many_arguments;
    return;
  }

  // (C++ 13.3.2p2): A candidate function having more than m parameters
  // is viable only if the (m+1)st parameter has a default argument
  // (8.3.6). For the purposes of overload resolution, the
  // parameter list is truncated on the right, so that there are
  // exactly m parameters.
  unsigned MinRequiredArgs = Function->getMinRequiredArguments();
  if (Args.size() < MinRequiredArgs && !PartialOverloading) {
    // Not enough arguments.
    Candidate.Viable = false;
    Candidate.FailureKind = ovl_fail_too_few_arguments;
    return;
  }

  // (CUDA B.1): Check for invalid calls between targets.
  if (getLangOpts().CUDA)
    if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
      // Skip the check for callers that are implicit members, because in
      // this case we may not yet know what the member's target is; the
      // target is inferred for the member automatically, based on the bases
      // and fields of the class.
      if (!Caller->isImplicit() && CheckCUDATarget(Caller, Function)) {
        Candidate.Viable = false;
        Candidate.FailureKind = ovl_fail_bad_target;
        return;
      }

  // Determine the implicit conversion sequences for each of the
  // arguments.
  for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
    if (ArgIdx < NumParams) {
      // (C++ 13.3.2p3): for F to be a viable function, there shall
      // exist for each argument an implicit conversion sequence
      // (13.3.3.1) that converts that argument to the corresponding
      // parameter of F.
      // HLSL Change Starts
      if (getLangOpts().HLSL) {
        InitCallParamConversions(
            *this, Proto, Function->getParamDecl(ArgIdx), ArgIdx, Args[ArgIdx],
            SuppressUserConversions, true, AllowExplicit,
            Candidate.Conversions[ArgIdx], Candidate.OutConversions[ArgIdx]);
      } else {
        QualType ParamType = Proto->getParamType(ArgIdx);
        Candidate.Conversions[ArgIdx]
          = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
                                  SuppressUserConversions,
                                  /*InOverloadResolution=*/true,
                                  /*AllowObjCWritebackConversion=*/
                                  getLangOpts().ObjCAutoRefCount,
                                  AllowExplicit);
      }
      // HLSL Change Ends
      if ((Candidate.Conversions[ArgIdx].isInitialized() &&
           Candidate.Conversions[ArgIdx].isBad()) ||
          (Candidate.OutConversions[ArgIdx].isInitialized() &&
           Candidate.OutConversions[ArgIdx].isBad())) {
        // HLSL Change - add out conversion, check initialized
        Candidate.Viable = false;
        Candidate.FailureKind = ovl_fail_bad_conversion;
        return;
      }
    } else {
      // (C++ 13.3.2p2): For the purposes of overload resolution, any
      // argument for which there is no corresponding parameter is
      // considered to "match the ellipsis" (C++ 13.3.3.1.3).
      Candidate.Conversions[ArgIdx].setEllipsis();
    }
  }

  if (EnableIfAttr *FailedAttr = CheckEnableIf(Function, Args)) {
    Candidate.Viable = false;
    Candidate.FailureKind = ovl_fail_enable_if;
    Candidate.DeductionFailure.Data = FailedAttr;
    return;
  }
}

ObjCMethodDecl *Sema::SelectBestMethod(Selector Sel, MultiExprArg Args,
                                       bool IsInstance) {
  SmallVector<ObjCMethodDecl*, 4> Methods;
  if (!CollectMultipleMethodsInGlobalPool(Sel, Methods, IsInstance))
    return nullptr;
  for (unsigned b = 0, e = Methods.size(); b < e; b++) {
    bool Match = true;
    ObjCMethodDecl *Method = Methods[b];
    unsigned NumNamedArgs = Sel.getNumArgs();
    // Method might have more arguments than selector indicates. This is due
    // to addition of c-style arguments in method.
if (Method->param_size() > NumNamedArgs) NumNamedArgs = Method->param_size(); if (Args.size() < NumNamedArgs) continue; for (unsigned i = 0; i < NumNamedArgs; i++) { // We can't do any type-checking on a type-dependent argument. if (Args[i]->isTypeDependent()) { Match = false; break; } ParmVarDecl *param = Method->parameters()[i]; Expr *argExpr = Args[i]; assert(argExpr && "SelectBestMethod(): missing expression"); // Strip the unbridged-cast placeholder expression off unless it's // a consumed argument. if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) && !param->hasAttr<CFConsumedAttr>()) argExpr = stripARCUnbridgedCast(argExpr); // If the parameter is __unknown_anytype, move on to the next method. if (param->getType() == Context.UnknownAnyTy) { Match = false; break; } ImplicitConversionSequence ConversionState = TryCopyInitialization(*this, argExpr, param->getType(), /*SuppressUserConversions*/false, /*InOverloadResolution=*/true, /*AllowObjCWritebackConversion=*/ getLangOpts().ObjCAutoRefCount, /*AllowExplicit*/false); if (ConversionState.isBad()) { Match = false; break; } } // Promote additional arguments to variadic methods. if (Match && Method->isVariadic()) { for (unsigned i = NumNamedArgs, e = Args.size(); i < e; ++i) { if (Args[i]->isTypeDependent()) { Match = false; break; } ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, nullptr); if (Arg.isInvalid()) { Match = false; break; } } } else { // Check for extra arguments to non-variadic methods. if (Args.size() != NumNamedArgs) Match = false; else if (Match && NumNamedArgs == 0 && Methods.size() > 1) { // Special case when selectors have no argument. In this case, select // one with the most general result type of 'id'. for (unsigned b = 0, e = Methods.size(); b < e; b++) { QualType ReturnT = Methods[b]->getReturnType(); if (ReturnT->isObjCIdType()) return Methods[b]; } } } if (Match) return Method; } return nullptr; } static bool IsNotEnableIfAttr(Attr *A) { return !isa<EnableIfAttr>(A); } EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis) { // FIXME: specific_attr_iterator<EnableIfAttr> iterates in reverse order, but // we need to find the first failing one. if (!Function->hasAttrs()) return nullptr; AttrVec Attrs = Function->getAttrs(); AttrVec::iterator E = std::remove_if(Attrs.begin(), Attrs.end(), IsNotEnableIfAttr); if (Attrs.begin() == E) return nullptr; std::reverse(Attrs.begin(), E); SFINAETrap Trap(*this); // Convert the arguments. 
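  // The arguments are converted just as they would be for an actual call,
  // so a hypothetical attribute such as
  //
  //   void f(int n) __attribute__((enable_if(n > 0, "n must be positive")));
  //
  // is evaluated below against the converted argument values.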
SmallVector<Expr *, 16> ConvertedArgs; bool InitializationFailed = false; bool ContainsValueDependentExpr = false; for (unsigned i = 0, e = Args.size(); i != e; ++i) { if (i == 0 && !MissingImplicitThis && isa<CXXMethodDecl>(Function) && !cast<CXXMethodDecl>(Function)->isStatic() && !isa<CXXConstructorDecl>(Function)) { CXXMethodDecl *Method = cast<CXXMethodDecl>(Function); ExprResult R = PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr, Method, Method); if (R.isInvalid()) { InitializationFailed = true; break; } ContainsValueDependentExpr |= R.get()->isValueDependent(); ConvertedArgs.push_back(R.get()); } else { ExprResult R = PerformCopyInitialization(InitializedEntity::InitializeParameter( Context, Function->getParamDecl(i)), SourceLocation(), Args[i]); if (R.isInvalid()) { InitializationFailed = true; break; } ContainsValueDependentExpr |= R.get()->isValueDependent(); ConvertedArgs.push_back(R.get()); } } if (InitializationFailed || Trap.hasErrorOccurred()) return cast<EnableIfAttr>(Attrs[0]); for (AttrVec::iterator I = Attrs.begin(); I != E; ++I) { APValue Result; EnableIfAttr *EIA = cast<EnableIfAttr>(*I); if (EIA->getCond()->isValueDependent()) { // Don't even try now, we'll examine it after instantiation. continue; } if (!EIA->getCond()->EvaluateWithSubstitution( Result, Context, Function, llvm::makeArrayRef(ConvertedArgs))) { if (!ContainsValueDependentExpr) return EIA; } else if (!Result.isInt() || !Result.getInt().getBoolValue()) { return EIA; } } return nullptr; } /// \brief Add all of the function declarations in the given function set to /// the overload candidate set. void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs, bool SuppressUserConversions, bool PartialOverloading) { for (UnresolvedSetIterator F = Fns.begin(), E = Fns.end(); F != E; ++F) { NamedDecl *D = F.getDecl()->getUnderlyingDecl(); if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic()) AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(), cast<CXXMethodDecl>(FD)->getParent(), Args[0]->getType(), Args[0]->Classify(Context), Args.slice(1), CandidateSet, SuppressUserConversions, PartialOverloading); else AddOverloadCandidate(FD, F.getPair(), Args, CandidateSet, SuppressUserConversions, PartialOverloading); } else { FunctionTemplateDecl *FunTmpl = cast<FunctionTemplateDecl>(D); if (isa<CXXMethodDecl>(FunTmpl->getTemplatedDecl()) && !cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl())->isStatic()) AddMethodTemplateCandidate(FunTmpl, F.getPair(), cast<CXXRecordDecl>(FunTmpl->getDeclContext()), ExplicitTemplateArgs, Args[0]->getType(), Args[0]->Classify(Context), Args.slice(1), CandidateSet, SuppressUserConversions, PartialOverloading); else AddTemplateOverloadCandidate(FunTmpl, F.getPair(), ExplicitTemplateArgs, Args, CandidateSet, SuppressUserConversions, PartialOverloading); } } } /// AddMethodCandidate - Adds a named decl (which is some kind of /// method) as a method candidate to the given overload set. 
void Sema::AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions) { NamedDecl *Decl = FoundDecl.getDecl(); CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(Decl->getDeclContext()); if (isa<UsingShadowDecl>(Decl)) Decl = cast<UsingShadowDecl>(Decl)->getTargetDecl(); if (FunctionTemplateDecl *TD = dyn_cast<FunctionTemplateDecl>(Decl)) { assert(isa<CXXMethodDecl>(TD->getTemplatedDecl()) && "Expected a member function template"); AddMethodTemplateCandidate(TD, FoundDecl, ActingContext, /*ExplicitArgs*/ nullptr, ObjectType, ObjectClassification, Args, CandidateSet, SuppressUserConversions); } else { AddMethodCandidate(cast<CXXMethodDecl>(Decl), FoundDecl, ActingContext, ObjectType, ObjectClassification, Args, CandidateSet, SuppressUserConversions); } } /// AddMethodCandidate - Adds the given C++ member function to the set /// of candidate functions, using the given function call arguments /// and the object argument (@c Object). For example, in a call /// @c o.f(a1,a2), @c Object will contain @c o and @c Args will contain /// both @c a1 and @c a2. If @p SuppressUserConversions, then don't /// allow user-defined conversions via constructors or conversion /// operators. void Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions, bool PartialOverloading) { const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(Method->getType()->getAs<FunctionType>()); assert(Proto && "Methods without a prototype cannot be overloaded"); assert(!isa<CXXConstructorDecl>(Method) && "Use AddOverloadCandidate for constructors"); if (!CandidateSet.isNewCandidate(Method)) return; // C++11 [class.copy]p23: [DR1402] // A defaulted move assignment operator that is defined as deleted is // ignored by overload resolution. if (Method->isDefaulted() && Method->isDeleted() && Method->isMoveAssignmentOperator()) return; // Overload resolution is always an unevaluated context. EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated); // Add this candidate OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1); Candidate.FoundDecl = FoundDecl; Candidate.Function = Method; Candidate.IsSurrogate = false; Candidate.IgnoreObjectArgument = false; Candidate.ExplicitCallArguments = Args.size(); unsigned NumParams = Proto->getNumParams(); // (C++ 13.3.2p2): A candidate function having fewer than m // parameters is viable only if it has an ellipsis in its parameter // list (8.3.5). if (TooManyArguments(NumParams, Args.size(), PartialOverloading) && !Proto->isVariadic()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_too_many_arguments; return; } // (C++ 13.3.2p2): A candidate function having more than m parameters // is viable only if the (m+1)st parameter has a default argument // (8.3.6). For the purposes of overload resolution, the // parameter list is truncated on the right, so that there are // exactly m parameters. unsigned MinRequiredArgs = Method->getMinRequiredArguments(); if (Args.size() < MinRequiredArgs && !PartialOverloading) { // Not enough arguments. 
    Candidate.Viable = false;
    Candidate.FailureKind = ovl_fail_too_few_arguments;
    return;
  }

  Candidate.Viable = true;

  if (Method->isStatic() || ObjectType.isNull())
    // The implicit object argument is ignored.
    Candidate.IgnoreObjectArgument = true;
  else {
    // Determine the implicit conversion sequence for the object
    // parameter.
    Candidate.Conversions[0]
      = TryObjectArgumentInitialization(*this, ObjectType,
                                        ObjectClassification,
                                        Method, ActingContext);
    if (Candidate.Conversions[0].isBad()) {
      Candidate.Viable = false;
      Candidate.FailureKind = ovl_fail_bad_conversion;
      return;
    }
  }

  // (CUDA B.1): Check for invalid calls between targets.
  if (getLangOpts().CUDA)
    if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
      if (CheckCUDATarget(Caller, Method)) {
        Candidate.Viable = false;
        Candidate.FailureKind = ovl_fail_bad_target;
        return;
      }

  // Determine the implicit conversion sequences for each of the
  // arguments.
  for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
    if (ArgIdx < NumParams) {
      // (C++ 13.3.2p3): for F to be a viable function, there shall
      // exist for each argument an implicit conversion sequence
      // (13.3.3.1) that converts that argument to the corresponding
      // parameter of F.
      QualType ParamType = Proto->getParamType(ArgIdx);
      // HLSL Change Starts
      if (getLangOpts().HLSL &&
          Method->getDeclName() ==
              Context.DeclarationNames.getCXXOperatorName(OO_Subscript)) {
        Candidate.Conversions[ArgIdx + 1] =
            hlsl::TrySubscriptIndexInitialization(this, Args[ArgIdx],
                                                  ParamType);
        goto EvaluateCandidate;
      }
      // HLSL Change Ends
      Candidate.Conversions[ArgIdx + 1]
        = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
                                SuppressUserConversions,
                                /*InOverloadResolution=*/true,
                                /*AllowObjCWritebackConversion=*/
                                getLangOpts().ObjCAutoRefCount);
    EvaluateCandidate: // HLSL Change - present alternative to TryCopyInitialization
      if (Candidate.Conversions[ArgIdx + 1].isBad()) {
        Candidate.Viable = false;
        Candidate.FailureKind = ovl_fail_bad_conversion;
        return;
      }
    } else {
      // (C++ 13.3.2p2): For the purposes of overload resolution, any
      // argument for which there is no corresponding parameter is
      // considered to "match the ellipsis" (C++ 13.3.3.1.3).
      Candidate.Conversions[ArgIdx + 1].setEllipsis();
    }
  }

  if (EnableIfAttr *FailedAttr = CheckEnableIf(Method, Args, true)) {
    Candidate.Viable = false;
    Candidate.FailureKind = ovl_fail_enable_if;
    Candidate.DeductionFailure.Data = FailedAttr;
    return;
  }
}

/// \brief Add a C++ member function template as a candidate to the candidate
/// set, using template argument deduction to produce an appropriate member
/// function template specialization.
void
Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                 DeclAccessPair FoundDecl,
                                 CXXRecordDecl *ActingContext,
                                 TemplateArgumentListInfo *ExplicitTemplateArgs,
                                 QualType ObjectType,
                                 Expr::Classification ObjectClassification,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet& CandidateSet,
                                 bool SuppressUserConversions,
                                 bool PartialOverloading) {
  if (!CandidateSet.isNewCandidate(MethodTmpl))
    return;

  // C++ [over.match.funcs]p7:
  //   In each case where a candidate is a function template, candidate
  //   function template specializations are generated using template argument
  //   deduction (14.8.3, 14.8.2). Those candidates are then handled as
  //   candidate functions in the usual way.113) A given name can refer to one
  //   or more function templates and also to a set of overloaded non-template
  //   functions.
In such a case, the candidate functions generated from each // function template are combined with the set of non-template candidate // functions. TemplateDeductionInfo Info(CandidateSet.getLocation()); FunctionDecl *Specialization = nullptr; if (TemplateDeductionResult Result = DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs, Args, Specialization, Info, PartialOverloading)) { OverloadCandidate &Candidate = CandidateSet.addCandidate(); Candidate.FoundDecl = FoundDecl; Candidate.Function = MethodTmpl->getTemplatedDecl(); Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_deduction; Candidate.IsSurrogate = false; Candidate.IgnoreObjectArgument = false; Candidate.ExplicitCallArguments = Args.size(); Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result, Info); return; } // Add the function template specialization produced by template argument // deduction as a candidate. assert(Specialization && "Missing member function template specialization?"); assert(isa<CXXMethodDecl>(Specialization) && "Specialization is not a member function?"); AddMethodCandidate(cast<CXXMethodDecl>(Specialization), FoundDecl, ActingContext, ObjectType, ObjectClassification, Args, CandidateSet, SuppressUserConversions, PartialOverloading); } /// \brief Add a C++ function template specialization as a candidate /// in the candidate set, using template argument deduction to produce /// an appropriate function template specialization. void Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions, bool PartialOverloading) { if (!CandidateSet.isNewCandidate(FunctionTemplate)) return; // C++ [over.match.funcs]p7: // In each case where a candidate is a function template, candidate // function template specializations are generated using template argument // deduction (14.8.3, 14.8.2). Those candidates are then handled as // candidate functions in the usual way.113) A given name can refer to one // or more function templates and also to a set of overloaded non-template // functions. In such a case, the candidate functions generated from each // function template are combined with the set of non-template candidate // functions. TemplateDeductionInfo Info(CandidateSet.getLocation()); FunctionDecl *Specialization = nullptr; if (TemplateDeductionResult Result = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, Args, Specialization, Info, PartialOverloading)) { OverloadCandidate &Candidate = CandidateSet.addCandidate(); Candidate.FoundDecl = FoundDecl; Candidate.Function = FunctionTemplate->getTemplatedDecl(); Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_deduction; Candidate.IsSurrogate = false; Candidate.IgnoreObjectArgument = false; Candidate.ExplicitCallArguments = Args.size(); Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result, Info); return; } // Add the function template specialization produced by template argument // deduction as a candidate. assert(Specialization && "Missing function template specialization?"); AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet, SuppressUserConversions, PartialOverloading); } /// Determine whether this is an allowable conversion from the result /// of an explicit conversion operator to the expected type, per C++ /// [over.match.conv]p1 and [over.match.ref]p1. 
/// /// \param ConvType The return type of the conversion function. /// /// \param ToType The type we are converting to. /// /// \param AllowObjCPointerConversion Allow a conversion from one /// Objective-C pointer to another. /// /// \returns true if the conversion is allowable, false otherwise. static bool isAllowableExplicitConversion(Sema &S, QualType ConvType, QualType ToType, bool AllowObjCPointerConversion) { QualType ToNonRefType = ToType.getNonReferenceType(); // Easy case: the types are the same. if (S.Context.hasSameUnqualifiedType(ConvType, ToNonRefType)) return true; // Allow qualification conversions. bool ObjCLifetimeConversion; if (S.IsQualificationConversion(ConvType, ToNonRefType, /*CStyle*/false, ObjCLifetimeConversion)) return true; // If we're not allowed to consider Objective-C pointer conversions, // we're done. if (!AllowObjCPointerConversion) return false; // Is this an Objective-C pointer conversion? bool IncompatibleObjC = false; QualType ConvertedType; return S.isObjCPointerConversion(ConvType, ToNonRefType, ConvertedType, IncompatibleObjC); } /// AddConversionCandidate - Add a C++ conversion function as a /// candidate in the candidate set (C++ [over.match.conv], /// C++ [over.match.copy]). From is the expression we're converting from, /// and ToType is the type that we're eventually trying to convert to /// (which may or may not be the same type as the type that the /// conversion function produces). void Sema::AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit) { assert(!Conversion->getDescribedFunctionTemplate() && "Conversion function templates use AddTemplateConversionCandidate"); QualType ConvType = Conversion->getConversionType().getNonReferenceType(); if (!CandidateSet.isNewCandidate(Conversion)) return; // If the conversion function has an undeduced return type, trigger its // deduction now. if (getLangOpts().CPlusPlus14 && ConvType->isUndeducedType()) { if (DeduceReturnType(Conversion, From->getExprLoc())) return; ConvType = Conversion->getConversionType().getNonReferenceType(); } // Per C++ [over.match.conv]p1, [over.match.ref]p1, an explicit conversion // operator is only a candidate if its return type is the target type or // can be converted to the target type with a qualification conversion. if (Conversion->isExplicit() && !isAllowableExplicitConversion(*this, ConvType, ToType, AllowObjCConversionOnExplicit)) return; // Overload resolution is always an unevaluated context. EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated); // Add this candidate OverloadCandidate &Candidate = CandidateSet.addCandidate(1); Candidate.FoundDecl = FoundDecl; Candidate.Function = Conversion; Candidate.IsSurrogate = false; Candidate.IgnoreObjectArgument = false; Candidate.FinalConversion.setAsIdentityConversion(); Candidate.FinalConversion.setFromType(ConvType); Candidate.FinalConversion.setAllToTypes(ToType); Candidate.Viable = true; Candidate.ExplicitCallArguments = 1; // C++ [over.match.funcs]p4: // For conversion functions, the function is considered to be a member of // the class of the implicit implied object argument for the purpose of // defining the type of the implicit object parameter. // // Determine the implicit conversion sequence for the implicit // object parameter. 
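// Illustrative example (not in the original comments): for
//   struct S { operator int(); };  S s;  int i = s;
// 'From' is the expression 's', the implicit object parameter has type 'S',
// and ConvType is 'int'.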
QualType ImplicitParamType = From->getType(); if (const PointerType *FromPtrType = ImplicitParamType->getAs<PointerType>()) ImplicitParamType = FromPtrType->getPointeeType(); CXXRecordDecl *ConversionContext = cast<CXXRecordDecl>(ImplicitParamType->getAs<RecordType>()->getDecl()); Candidate.Conversions[0] = TryObjectArgumentInitialization(*this, From->getType(), From->Classify(Context), Conversion, ConversionContext); if (Candidate.Conversions[0].isBad()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_conversion; return; } // We won't go through a user-defined type conversion function to convert a // derived to base as such conversions are given Conversion Rank. They only // go through a copy constructor. 13.3.3.1.2-p4 [over.ics.user] QualType FromCanon = Context.getCanonicalType(From->getType().getUnqualifiedType()); QualType ToCanon = Context.getCanonicalType(ToType).getUnqualifiedType(); if (FromCanon == ToCanon || IsDerivedFrom(FromCanon, ToCanon)) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_trivial_conversion; return; } // To determine what the conversion from the result of calling the // conversion function to the type we're eventually trying to // convert to (ToType), we need to synthesize a call to the // conversion function and attempt copy initialization from it. This // makes sure that we get the right semantics with respect to // lvalues/rvalues and the type. Fortunately, we can allocate this // call on the stack and we don't need its arguments to be // well-formed. DeclRefExpr ConversionRef(Conversion, false, Conversion->getType(), VK_LValue, From->getLocStart()); ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack, Context.getPointerType(Conversion->getType()), CK_FunctionToPointerDecay, &ConversionRef, VK_RValue); QualType ConversionType = Conversion->getConversionType(); if (RequireCompleteType(From->getLocStart(), ConversionType, 0)) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_final_conversion; return; } ExprValueKind VK = Expr::getValueKindForType(ConversionType); // Note that it is safe to allocate CallExpr on the stack here because // there are 0 arguments (i.e., nothing is allocated using ASTContext's // allocator). QualType CallResultType = ConversionType.getNonLValueExprType(Context); CallExpr Call(Context, &ConversionFn, None, CallResultType, VK, From->getLocStart()); ImplicitConversionSequence ICS = TryCopyInitialization(*this, &Call, ToType, /*SuppressUserConversions=*/true, /*InOverloadResolution=*/false, /*AllowObjCWritebackConversion=*/false); switch (ICS.getKind()) { case ImplicitConversionSequence::StandardConversion: Candidate.FinalConversion = ICS.Standard; // C++ [over.ics.user]p3: // If the user-defined conversion is specified by a specialization of a // conversion function template, the second standard conversion sequence // shall have exact match rank. if (Conversion->getPrimaryTemplate() && GetConversionRank(ICS.Standard.Second) != ICR_Exact_Match) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_final_conversion_not_exact; return; } // C++0x [dcl.init.ref]p5: // In the second case, if the reference is an rvalue reference and // the second standard conversion sequence of the user-defined // conversion sequence includes an lvalue-to-rvalue conversion, the // program is ill-formed. 
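// Illustrative example (an assumption, not from the original comments): for
//   struct C { operator int&(); };  int &&r = C();
// the conversion function yields an int lvalue, so the second standard
// conversion sequence would need an lvalue-to-rvalue conversion, which the
// rule quoted above rejects for rvalue references.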
if (ToType->isRValueReferenceType() && ICS.Standard.First == ICK_Lvalue_To_Rvalue) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_final_conversion; return; } break; case ImplicitConversionSequence::BadConversion: Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_final_conversion; return; default: llvm_unreachable( "Can only end up with a standard conversion sequence or failure"); } if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_enable_if; Candidate.DeductionFailure.Data = FailedAttr; return; } } /// \brief Adds a conversion function template specialization /// candidate to the overload set, using template argument deduction /// to deduce the template arguments of the conversion function /// template from the type that we are converting to (C++ /// [temp.deduct.conv]). void Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingDC, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit) { assert(isa<CXXConversionDecl>(FunctionTemplate->getTemplatedDecl()) && "Only conversion function templates permitted here"); if (!CandidateSet.isNewCandidate(FunctionTemplate)) return; TemplateDeductionInfo Info(CandidateSet.getLocation()); CXXConversionDecl *Specialization = nullptr; if (TemplateDeductionResult Result = DeduceTemplateArguments(FunctionTemplate, ToType, Specialization, Info)) { OverloadCandidate &Candidate = CandidateSet.addCandidate(); Candidate.FoundDecl = FoundDecl; Candidate.Function = FunctionTemplate->getTemplatedDecl(); Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_deduction; Candidate.IsSurrogate = false; Candidate.IgnoreObjectArgument = false; Candidate.ExplicitCallArguments = 1; Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result, Info); return; } // Add the conversion function template specialization produced by // template argument deduction as a candidate. assert(Specialization && "Missing function template specialization?"); AddConversionCandidate(Specialization, FoundDecl, ActingDC, From, ToType, CandidateSet, AllowObjCConversionOnExplicit); } /// AddSurrogateCandidate - Adds a "surrogate" candidate function that /// converts the given @c Object to a function pointer via the /// conversion function @c Conversion, and then attempts to call it /// with the given arguments (C++ [over.call.object]p2-4). Proto is /// the type of function that we'll eventually be calling. void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet) { if (!CandidateSet.isNewCandidate(Conversion)) return; // Overload resolution is always an unevaluated context. EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated); OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1); Candidate.FoundDecl = FoundDecl; Candidate.Function = nullptr; Candidate.Surrogate = Conversion; Candidate.Viable = true; Candidate.IsSurrogate = true; Candidate.IgnoreObjectArgument = false; Candidate.ExplicitCallArguments = Args.size(); // Determine the implicit conversion sequence for the implicit // object parameter. 
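// Illustrative example (an assumption, not from the original comments): a
// "surrogate" call looks like
//   typedef void (*FnPtr)(int);
//   struct F { operator FnPtr(); };  F f;  f(42);
// where 'f' is first converted to a function pointer and the call then
// proceeds through that pointer (C++ [over.call.object]p2).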
ImplicitConversionSequence ObjectInit = TryObjectArgumentInitialization(*this, Object->getType(), Object->Classify(Context), Conversion, ActingContext); if (ObjectInit.isBad()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_conversion; Candidate.Conversions[0] = ObjectInit; return; } // The first conversion is actually a user-defined conversion whose // first conversion is ObjectInit's standard conversion (which is // effectively a reference binding). Record it as such. Candidate.Conversions[0].setUserDefined(); Candidate.Conversions[0].UserDefined.Before = ObjectInit.Standard; Candidate.Conversions[0].UserDefined.EllipsisConversion = false; Candidate.Conversions[0].UserDefined.HadMultipleCandidates = false; Candidate.Conversions[0].UserDefined.ConversionFunction = Conversion; Candidate.Conversions[0].UserDefined.FoundConversionFunction = FoundDecl; Candidate.Conversions[0].UserDefined.After = Candidate.Conversions[0].UserDefined.Before; Candidate.Conversions[0].UserDefined.After.setAsIdentityConversion(); // Find the number of parameters in the function prototype. unsigned NumParams = Proto->getNumParams(); // (C++ 13.3.2p2): A candidate function having fewer than m // parameters is viable only if it has an ellipsis in its parameter // list (8.3.5). if (Args.size() > NumParams && !Proto->isVariadic()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_too_many_arguments; return; } // Function types don't have any default arguments, so just check if // we have enough arguments. if (Args.size() < NumParams) { // Not enough arguments. Candidate.Viable = false; Candidate.FailureKind = ovl_fail_too_few_arguments; return; } // Determine the implicit conversion sequences for each of the // arguments. for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) { if (ArgIdx < NumParams) { // (C++ 13.3.2p3): for F to be a viable function, there shall // exist for each argument an implicit conversion sequence // (13.3.3.1) that converts that argument to the corresponding // parameter of F. QualType ParamType = Proto->getParamType(ArgIdx); Candidate.Conversions[ArgIdx + 1] = TryCopyInitialization(*this, Args[ArgIdx], ParamType, /*SuppressUserConversions=*/false, /*InOverloadResolution=*/false, /*AllowObjCWritebackConversion=*/ getLangOpts().ObjCAutoRefCount); if (Candidate.Conversions[ArgIdx + 1].isBad()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_conversion; return; } } else { // (C++ 13.3.2p2): For the purposes of overload resolution, any // argument for which there is no corresponding parameter is // considered to "match the ellipsis" (C++ 13.3.3.1.3). Candidate.Conversions[ArgIdx + 1].setEllipsis(); } } if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_enable_if; Candidate.DeductionFailure.Data = FailedAttr; return; } } /// \brief Add overload candidates for overloaded operators that are /// member functions. /// /// Add the overloaded operator candidates that are member functions /// for the operator Op that was used in an operator expression such /// as "x Op y". Args provides the operator arguments, and /// CandidateSet will store the added overload candidates. (C++ /// [over.match.oper]).
void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange) { DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op); // C++ [over.match.oper]p3: // For a unary operator @ with an operand of a type whose // cv-unqualified version is T1, and for a binary operator @ with // a left operand of a type whose cv-unqualified version is T1 and // a right operand of a type whose cv-unqualified version is T2, // three sets of candidate functions, designated member // candidates, non-member candidates and built-in candidates, are // constructed as follows: QualType T1 = Args[0]->getType(); // -- If T1 is a complete class type or a class currently being // defined, the set of member candidates is the result of the // qualified lookup of T1::operator@ (13.3.1.1.1); otherwise, // the set of member candidates is empty. if (const RecordType *T1Rec = T1->getAs<RecordType>()) { // Complete the type if it can be completed. RequireCompleteType(OpLoc, T1, 0); // If the type is neither complete nor being defined, bail out now. if (!T1Rec->getDecl()->getDefinition()) return; LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName); LookupQualifiedName(Operators, T1Rec->getDecl()); Operators.suppressDiagnostics(); for (LookupResult::iterator Oper = Operators.begin(), OperEnd = Operators.end(); Oper != OperEnd; ++Oper) AddMethodCandidate(Oper.getPair(), Args[0]->getType(), Args[0]->Classify(Context), Args.slice(1), CandidateSet, /* SuppressUserConversions = */ false); } } /// AddBuiltinCandidate - Add a candidate for a built-in /// operator. ResultTy and ParamTys are the result and parameter types /// of the built-in candidate, respectively. Args and NumArgs are the /// arguments being passed to the candidate. IsAssignmentOperator /// should be true when this built-in candidate is an assignment /// operator. NumContextualBoolArguments is the number of arguments /// (at the beginning of the argument list) that will be contextually /// converted to bool. void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator, unsigned NumContextualBoolArguments) { // Overload resolution is always an unevaluated context. EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated); // Add this candidate OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size()); Candidate.FoundDecl = DeclAccessPair::make(nullptr, AS_none); Candidate.Function = nullptr; Candidate.IsSurrogate = false; Candidate.IgnoreObjectArgument = false; Candidate.BuiltinTypes.ResultTy = ResultTy; for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) Candidate.BuiltinTypes.ParamTypes[ArgIdx] = ParamTys[ArgIdx]; // Determine the implicit conversion sequences for each of the // arguments. Candidate.Viable = true; Candidate.ExplicitCallArguments = Args.size(); for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) { // C++ [over.match.oper]p4: // For the built-in assignment operators, conversions of the // left operand are restricted as follows: // -- no temporaries are introduced to hold the left operand, and // -- no user-defined conversions are applied to the left // operand to achieve a type match with the left-most // parameter of a built-in candidate. 
// // We block these conversions by turning off user-defined // conversions, since that is the only way that initialization of // a reference to a non-class type can occur from something that // is not of the same type. if (ArgIdx < NumContextualBoolArguments) { assert(ParamTys[ArgIdx] == Context.BoolTy && "Contextual conversion to bool requires bool type"); Candidate.Conversions[ArgIdx] = TryContextuallyConvertToBool(*this, Args[ArgIdx]); } else { Candidate.Conversions[ArgIdx] = TryCopyInitialization(*this, Args[ArgIdx], ParamTys[ArgIdx], ArgIdx == 0 && IsAssignmentOperator, /*InOverloadResolution=*/false, /*AllowObjCWritebackConversion=*/ getLangOpts().ObjCAutoRefCount); } if (Candidate.Conversions[ArgIdx].isBad()) { Candidate.Viable = false; Candidate.FailureKind = ovl_fail_bad_conversion; break; } } } namespace { /// BuiltinCandidateTypeSet - A set of types that will be used for the /// candidate operator functions for built-in operators (C++ /// [over.built]). The types are separated into pointer types and /// enumeration types. class BuiltinCandidateTypeSet { /// TypeSet - A set of types. typedef llvm::SmallPtrSet<QualType, 8> TypeSet; /// PointerTypes - The set of pointer types that will be used in the /// built-in candidates. TypeSet PointerTypes; /// MemberPointerTypes - The set of member pointer types that will be /// used in the built-in candidates. TypeSet MemberPointerTypes; /// EnumerationTypes - The set of enumeration types that will be /// used in the built-in candidates. TypeSet EnumerationTypes; /// \brief The set of vector types that will be used in the built-in /// candidates. TypeSet VectorTypes; /// \brief A flag indicating non-record types are viable candidates bool HasNonRecordTypes; /// \brief A flag indicating whether either arithmetic or enumeration types /// were present in the candidate set. bool HasArithmeticOrEnumeralTypes; /// \brief A flag indicating whether the nullptr type was present in the /// candidate set. bool HasNullPtrType; /// Sema - The semantic analysis instance where we are building the /// candidate type set. Sema &SemaRef; /// Context - The AST context in which we will build the type sets. ASTContext &Context; bool AddPointerWithMoreQualifiedTypeVariants(QualType Ty, const Qualifiers &VisibleQuals); bool AddMemberPointerWithMoreQualifiedTypeVariants(QualType Ty); public: /// iterator - Iterates through the types that are part of the set. 
typedef TypeSet::iterator iterator; BuiltinCandidateTypeSet(Sema &SemaRef) : HasNonRecordTypes(false), HasArithmeticOrEnumeralTypes(false), HasNullPtrType(false), SemaRef(SemaRef), Context(SemaRef.Context) { } void AddTypesConvertedFrom(QualType Ty, SourceLocation Loc, bool AllowUserConversions, bool AllowExplicitConversions, const Qualifiers &VisibleTypeConversionsQuals); /// pointer_begin - First pointer type found; iterator pointer_begin() { return PointerTypes.begin(); } /// pointer_end - Past the last pointer type found; iterator pointer_end() { return PointerTypes.end(); } /// member_pointer_begin - First member pointer type found; iterator member_pointer_begin() { return MemberPointerTypes.begin(); } /// member_pointer_end - Past the last member pointer type found; iterator member_pointer_end() { return MemberPointerTypes.end(); } /// enumeration_begin - First enumeration type found; iterator enumeration_begin() { return EnumerationTypes.begin(); } /// enumeration_end - Past the last enumeration type found; iterator enumeration_end() { return EnumerationTypes.end(); } iterator vector_begin() { return VectorTypes.begin(); } iterator vector_end() { return VectorTypes.end(); } bool hasNonRecordTypes() { return HasNonRecordTypes; } bool hasArithmeticOrEnumeralTypes() { return HasArithmeticOrEnumeralTypes; } bool hasNullPtrType() const { return HasNullPtrType; } }; } // end anonymous namespace /// AddPointerWithMoreQualifiedTypeVariants - Add the pointer type @p Ty to /// the set of pointer types along with any more-qualified variants of /// that type. For example, if @p Ty is "int const *", this routine /// will add "int const *", "int const volatile *", "int const /// restrict *", and "int const volatile restrict *" to the set of /// pointer types. Returns true if the add of @p Ty itself succeeded, /// false otherwise. /// /// FIXME: what to do about extended qualifiers? bool BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty, const Qualifiers &VisibleQuals) { // Insert this type. if (!PointerTypes.insert(Ty).second) return false; QualType PointeeTy; const PointerType *PointerTy = Ty->getAs<PointerType>(); bool buildObjCPtr = false; if (!PointerTy) { const ObjCObjectPointerType *PTy = Ty->castAs<ObjCObjectPointerType>(); PointeeTy = PTy->getPointeeType(); buildObjCPtr = true; } else { PointeeTy = PointerTy->getPointeeType(); } // Don't add qualified variants of arrays. For one, they're not allowed // (the qualifier would sink to the element type), and for another, the // only overload situation where it matters is subscript or pointer +- int, // and those shouldn't have qualifier variants anyway. if (PointeeTy->isArrayType()) return true; unsigned BaseCVR = PointeeTy.getCVRQualifiers(); bool hasVolatile = VisibleQuals.hasVolatile(); bool hasRestrict = VisibleQuals.hasRestrict(); // Iterate through all strict supersets of BaseCVR. for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) { if ((CVR | BaseCVR) != CVR) continue; // Skip over volatile if no volatile found anywhere in the types. if ((CVR & Qualifiers::Volatile) && !hasVolatile) continue; // Skip over restrict if no restrict found anywhere in the types, or if // the type cannot be restrict-qualified. if ((CVR & Qualifiers::Restrict) && (!hasRestrict || (!(PointeeTy->isAnyPointerType() || PointeeTy->isReferenceType())))) continue; // Build qualified pointee type. QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR); // Build qualified pointer type. 
QualType QPointerTy; if (!buildObjCPtr) QPointerTy = Context.getPointerType(QPointeeTy); else QPointerTy = Context.getObjCObjectPointerType(QPointeeTy); // Insert qualified pointer type. PointerTypes.insert(QPointerTy); } return true; } /// AddMemberPointerWithMoreQualifiedTypeVariants - Add the pointer type @p Ty /// to the set of pointer types along with any more-qualified variants of /// that type. For example, if @p Ty is "int const *", this routine /// will add "int const *", "int const volatile *", "int const /// restrict *", and "int const volatile restrict *" to the set of /// pointer types. Returns true if the add of @p Ty itself succeeded, /// false otherwise. /// /// FIXME: what to do about extended qualifiers? bool BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants( QualType Ty) { // Insert this type. if (!MemberPointerTypes.insert(Ty).second) return false; const MemberPointerType *PointerTy = Ty->getAs<MemberPointerType>(); assert(PointerTy && "type was not a member pointer type!"); QualType PointeeTy = PointerTy->getPointeeType(); // Don't add qualified variants of arrays. For one, they're not allowed // (the qualifier would sink to the element type), and for another, the // only overload situation where it matters is subscript or pointer +- int, // and those shouldn't have qualifier variants anyway. if (PointeeTy->isArrayType()) return true; const Type *ClassTy = PointerTy->getClass(); // Iterate through all strict supersets of the pointee type's CVR // qualifiers. unsigned BaseCVR = PointeeTy.getCVRQualifiers(); for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) { if ((CVR | BaseCVR) != CVR) continue; QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR); MemberPointerTypes.insert( Context.getMemberPointerType(QPointeeTy, ClassTy)); } return true; } /// AddTypesConvertedFrom - Add each of the types to which the type @p /// Ty can be implicit converted to the given set of @p Types. We're /// primarily interested in pointer types and enumeration types. We also /// take member pointer types, for the conditional operator. /// AllowUserConversions is true if we should look at the conversion /// functions of a class type, and AllowExplicitConversions if we /// should also include the explicit conversion functions of a class /// type. void BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty, SourceLocation Loc, bool AllowUserConversions, bool AllowExplicitConversions, const Qualifiers &VisibleQuals) { // Only deal with canonical types. Ty = Context.getCanonicalType(Ty); // Look through reference types; they aren't part of the type of an // expression for the purposes of conversions. if (const ReferenceType *RefTy = Ty->getAs<ReferenceType>()) Ty = RefTy->getPointeeType(); // If we're dealing with an array type, decay to the pointer. if (Ty->isArrayType()) Ty = SemaRef.Context.getArrayDecayedType(Ty); // Otherwise, we don't care about qualifiers on the type. Ty = Ty.getLocalUnqualifiedType(); // Flag if we ever add a non-record type. const RecordType *TyRec = Ty->getAs<RecordType>(); HasNonRecordTypes = HasNonRecordTypes || !TyRec; // Flag if we encounter an arithmetic type. HasArithmeticOrEnumeralTypes = HasArithmeticOrEnumeralTypes || Ty->isArithmeticType(); if (Ty->isObjCIdType() || Ty->isObjCClassType()) PointerTypes.insert(Ty); else if (Ty->getAs<PointerType>() || Ty->getAs<ObjCObjectPointerType>()) { // Insert our type, and its more-qualified variants, into the set // of types. 
if (!AddPointerWithMoreQualifiedTypeVariants(Ty, VisibleQuals)) return; } else if (Ty->isMemberPointerType()) { // Member pointers are far easier, since the pointee can't be converted. if (!AddMemberPointerWithMoreQualifiedTypeVariants(Ty)) return; } else if (Ty->isEnumeralType()) { HasArithmeticOrEnumeralTypes = true; EnumerationTypes.insert(Ty); } else if (Ty->isVectorType()) { // We treat vector types as arithmetic types in many contexts as an // extension. HasArithmeticOrEnumeralTypes = true; VectorTypes.insert(Ty); } else if (Ty->isNullPtrType()) { HasNullPtrType = true; } else if (AllowUserConversions && TyRec) { // No conversion functions in incomplete types. if (SemaRef.RequireCompleteType(Loc, Ty, 0)) return; CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl()); for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) { if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); // Skip conversion function templates; they don't tell us anything // about which builtin types we can convert to. if (isa<FunctionTemplateDecl>(D)) continue; CXXConversionDecl *Conv = cast<CXXConversionDecl>(D); if (AllowExplicitConversions || !Conv->isExplicit()) { AddTypesConvertedFrom(Conv->getConversionType(), Loc, false, false, VisibleQuals); } } } } /// \brief Helper function for AddBuiltinOperatorCandidates() that adds /// the volatile- and non-volatile-qualified assignment operators for the /// given type to the candidate set. static void AddBuiltinAssignmentOperatorCandidates(Sema &S, QualType T, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet) { QualType ParamTypes[2]; // T& operator=(T&, T) ParamTypes[0] = S.Context.getLValueReferenceType(T); ParamTypes[1] = T; S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/true); if (!S.Context.getCanonicalType(T).isVolatileQualified()) { // volatile T& operator=(volatile T&, T) ParamTypes[0] = S.Context.getLValueReferenceType(S.Context.getVolatileType(T)); ParamTypes[1] = T; S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/true); } } /// CollectVRQualifiers - This routine returns Volatile/Restrict qualifiers, /// if any, found in visible type conversion functions found in ArgExpr's type. static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) { Qualifiers VRQuals; const RecordType *TyRec; if (const MemberPointerType *RHSMPType = ArgExpr->getType()->getAs<MemberPointerType>()) TyRec = RHSMPType->getClass()->getAs<RecordType>(); else TyRec = ArgExpr->getType()->getAs<RecordType>(); if (!TyRec) { // Just to be safe, assume the worst case. VRQuals.addVolatile(); VRQuals.addRestrict(); return VRQuals; } CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl()); if (!ClassDecl->hasDefinition()) return VRQuals; for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) { if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); if (CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(D)) { QualType CanTy = Context.getCanonicalType(Conv->getConversionType()); if (const ReferenceType *ResTypeRef = CanTy->getAs<ReferenceType>()) CanTy = ResTypeRef->getPointeeType(); // Need to go down the pointer/mempointer chain and add qualifiers // as we see them.
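// Illustrative example (an assumption, not from the original comments): a
// conversion function whose canonical return type is
// "int *volatile *__restrict" contributes both restrict (from the outer
// pointer) and volatile (from the pointee) to VRQuals.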
bool done = false; while (!done) { if (CanTy.isRestrictQualified()) VRQuals.addRestrict(); if (const PointerType *ResTypePtr = CanTy->getAs<PointerType>()) CanTy = ResTypePtr->getPointeeType(); else if (const MemberPointerType *ResTypeMPtr = CanTy->getAs<MemberPointerType>()) CanTy = ResTypeMPtr->getPointeeType(); else done = true; if (CanTy.isVolatileQualified()) VRQuals.addVolatile(); if (VRQuals.hasRestrict() && VRQuals.hasVolatile()) return VRQuals; } } } return VRQuals; } namespace { /// \brief Helper class to manage the addition of builtin operator overload /// candidates. It provides shared state and utility methods used throughout /// the process, as well as a helper method to add each group of builtin /// operator overloads from the standard to a candidate set. class BuiltinOperatorOverloadBuilder { // Common instance state available to all overload candidate addition methods. Sema &S; ArrayRef<Expr *> Args; Qualifiers VisibleTypeConversionsQuals; bool HasArithmeticOrEnumeralCandidateType; SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes; OverloadCandidateSet &CandidateSet; // Define some constants used to index and iterate over the arithmetic types // provided via the getArithmeticType() method below. // The "promoted arithmetic types" are the arithmetic // types that are preserved by promotion (C++ [over.built]p2). static const unsigned FirstIntegralType = 3; static const unsigned LastIntegralType = 20; static const unsigned FirstPromotedIntegralType = 3, LastPromotedIntegralType = 11; static const unsigned FirstPromotedArithmeticType = 0, LastPromotedArithmeticType = 11; static const unsigned NumArithmeticTypes = 20; /// \brief Get the canonical type for a given arithmetic type index. CanQualType getArithmeticType(unsigned index) { assert(index < NumArithmeticTypes); static CanQualType ASTContext::* const ArithmeticTypes[NumArithmeticTypes] = { // Start of promoted types. &ASTContext::FloatTy, &ASTContext::DoubleTy, &ASTContext::LongDoubleTy, // Start of integral types. &ASTContext::IntTy, &ASTContext::LongTy, &ASTContext::LongLongTy, &ASTContext::Int128Ty, &ASTContext::UnsignedIntTy, &ASTContext::UnsignedLongTy, &ASTContext::UnsignedLongLongTy, &ASTContext::UnsignedInt128Ty, // End of promoted types. &ASTContext::BoolTy, &ASTContext::CharTy, &ASTContext::WCharTy, &ASTContext::Char16Ty, &ASTContext::Char32Ty, &ASTContext::SignedCharTy, &ASTContext::ShortTy, &ASTContext::UnsignedCharTy, &ASTContext::UnsignedShortTy, // End of integral types. // FIXME: What about complex? What about half? }; return S.Context.*ArithmeticTypes[index]; } /// \brief Gets the canonical type resulting from the usual arithmetic /// conversions for the given arithmetic types. CanQualType getUsualArithmeticConversions(unsigned L, unsigned R) { // Accelerator table for performing the usual arithmetic conversions. // The rules are basically: // - if either is floating-point, use the wider floating-point // - if same signedness, use the higher rank // - if same size, use unsigned of the higher rank // - use the larger type // These rules, together with the axiom that higher ranks are // never smaller, are sufficient to precompute all of these results // *except* when dealing with signed types of higher rank. // (we could precompute SLL x UI for all known platforms, but it's // better not to make any assumptions). // We assume that int128 has a higher rank than long long on all platforms.
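// Worked example (an assumption, not from the original comments): for
// L = 'long' (SL) and R = 'unsigned int' (UI) the table below yields Dep;
// the slow path then compares widths: on an LP64 target 'long' is wider, so
// the result is 'long'; on an ILP32 target both are 32 bits wide and the
// result is 'unsigned long'.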
enum PromotedType { Dep=-1, Flt, Dbl, LDbl, SI, SL, SLL, S128, UI, UL, ULL, U128 }; static const PromotedType ConversionsTable[LastPromotedArithmeticType] [LastPromotedArithmeticType] = { /* Flt*/ { Flt, Dbl, LDbl, Flt, Flt, Flt, Flt, Flt, Flt, Flt, Flt }, /* Dbl*/ { Dbl, Dbl, LDbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl }, /*LDbl*/ { LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl }, /* SI*/ { Flt, Dbl, LDbl, SI, SL, SLL, S128, UI, UL, ULL, U128 }, /* SL*/ { Flt, Dbl, LDbl, SL, SL, SLL, S128, Dep, UL, ULL, U128 }, /* SLL*/ { Flt, Dbl, LDbl, SLL, SLL, SLL, S128, Dep, Dep, ULL, U128 }, /*S128*/ { Flt, Dbl, LDbl, S128, S128, S128, S128, S128, S128, S128, U128 }, /* UI*/ { Flt, Dbl, LDbl, UI, Dep, Dep, S128, UI, UL, ULL, U128 }, /* UL*/ { Flt, Dbl, LDbl, UL, UL, Dep, S128, UL, UL, ULL, U128 }, /* ULL*/ { Flt, Dbl, LDbl, ULL, ULL, ULL, S128, ULL, ULL, ULL, U128 }, /*U128*/ { Flt, Dbl, LDbl, U128, U128, U128, U128, U128, U128, U128, U128 }, }; assert(L < LastPromotedArithmeticType); assert(R < LastPromotedArithmeticType); int Idx = ConversionsTable[L][R]; // Fast path: the table gives us a concrete answer. if (Idx != Dep) return getArithmeticType(Idx); // Slow path: we need to compare widths. // An invariant is that the signed type has higher rank. CanQualType LT = getArithmeticType(L), RT = getArithmeticType(R); unsigned LW = S.Context.getIntWidth(LT), RW = S.Context.getIntWidth(RT); // If they're different widths, use the signed type. if (LW > RW) return LT; else if (LW < RW) return RT; // Otherwise, use the unsigned type of the signed type's rank. if (L == SL || R == SL) return S.Context.UnsignedLongTy; assert(L == SLL || R == SLL); return S.Context.UnsignedLongLongTy; } /// \brief Helper method to factor out the common pattern of adding overloads /// for '++' and '--' builtin operators. void addPlusPlusMinusMinusStyleOverloads(QualType CandidateTy, bool HasVolatile, bool HasRestrict) { QualType ParamTypes[2] = { S.Context.getLValueReferenceType(CandidateTy), S.Context.IntTy }; // Non-volatile version. if (Args.size() == 1) S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet); else S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet); // Use a heuristic to reduce number of builtin candidates in the set: // add volatile version only if there are conversions to a volatile type. if (HasVolatile) { ParamTypes[0] = S.Context.getLValueReferenceType( S.Context.getVolatileType(CandidateTy)); if (Args.size() == 1) S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet); else S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet); } // Add restrict version only if there are conversions to a restrict type // and our candidate type is a non-restrict-qualified pointer. 
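// (Illustrative rationale, an assumption about intent: a "T *__restrict"
// lvalue can only bind directly to a "T *__restrict &" parameter, so the
// restrict-qualified reference variants below are needed for such operands.)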
if (HasRestrict && CandidateTy->isAnyPointerType() && !CandidateTy.isRestrictQualified()) { ParamTypes[0] = S.Context.getLValueReferenceType( S.Context.getCVRQualifiedType(CandidateTy, Qualifiers::Restrict)); if (Args.size() == 1) S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet); else S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet); if (HasVolatile) { ParamTypes[0] = S.Context.getLValueReferenceType( S.Context.getCVRQualifiedType(CandidateTy, (Qualifiers::Volatile | Qualifiers::Restrict))); if (Args.size() == 1) S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet); else S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, CandidateSet); } } } public: BuiltinOperatorOverloadBuilder( Sema &S, ArrayRef<Expr *> Args, Qualifiers VisibleTypeConversionsQuals, bool HasArithmeticOrEnumeralCandidateType, SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes, OverloadCandidateSet &CandidateSet) : S(S), Args(Args), VisibleTypeConversionsQuals(VisibleTypeConversionsQuals), HasArithmeticOrEnumeralCandidateType( HasArithmeticOrEnumeralCandidateType), CandidateTypes(CandidateTypes), CandidateSet(CandidateSet) { // Validate some of our static helper constants in debug builds. assert(getArithmeticType(FirstPromotedIntegralType) == S.Context.IntTy && "Invalid first promoted integral type"); assert(getArithmeticType(LastPromotedIntegralType - 1) == S.Context.UnsignedInt128Ty && "Invalid last promoted integral type"); assert(getArithmeticType(FirstPromotedArithmeticType) == S.Context.FloatTy && "Invalid first promoted arithmetic type"); assert(getArithmeticType(LastPromotedArithmeticType - 1) == S.Context.UnsignedInt128Ty && "Invalid last promoted arithmetic type"); } // C++ [over.built]p3: // // For every pair (T, VQ), where T is an arithmetic type, and VQ // is either volatile or empty, there exist candidate operator // functions of the form // // VQ T& operator++(VQ T&); // T operator++(VQ T&, int); // // C++ [over.built]p4: // // For every pair (T, VQ), where T is an arithmetic type other // than bool, and VQ is either volatile or empty, there exist // candidate operator functions of the form // // VQ T& operator--(VQ T&); // T operator--(VQ T&, int); void addPlusPlusMinusMinusArithmeticOverloads(OverloadedOperatorKind Op) { if (!HasArithmeticOrEnumeralCandidateType) return; for (unsigned Arith = (Op == OO_PlusPlus? 0 : 1); Arith < NumArithmeticTypes; ++Arith) { addPlusPlusMinusMinusStyleOverloads( getArithmeticType(Arith), VisibleTypeConversionsQuals.hasVolatile(), VisibleTypeConversionsQuals.hasRestrict()); } } // C++ [over.built]p5: // // For every pair (T, VQ), where T is a cv-qualified or // cv-unqualified object type, and VQ is either volatile or // empty, there exist candidate operator functions of the form // // T*VQ& operator++(T*VQ&); // T*VQ& operator--(T*VQ&); // T* operator++(T*VQ&, int); // T* operator--(T*VQ&, int); void addPlusPlusMinusMinusPointerOverloads() { for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes[0].pointer_begin(), PtrEnd = CandidateTypes[0].pointer_end(); Ptr != PtrEnd; ++Ptr) { // Skip pointer types that aren't pointers to object types. 
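// e.g. ++ and -- are only provided for pointers to object types
// (C++ [over.built]p5), so "void *" and function pointers are skipped here.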
if (!(*Ptr)->getPointeeType()->isObjectType()) continue; addPlusPlusMinusMinusStyleOverloads(*Ptr, (!(*Ptr).isVolatileQualified() && VisibleTypeConversionsQuals.hasVolatile()), (!(*Ptr).isRestrictQualified() && VisibleTypeConversionsQuals.hasRestrict())); } } // C++ [over.built]p6: // For every cv-qualified or cv-unqualified object type T, there // exist candidate operator functions of the form // // T& operator*(T*); // // C++ [over.built]p7: // For every function type T that does not have cv-qualifiers or a // ref-qualifier, there exist candidate operator functions of the form // T& operator*(T*); void addUnaryStarPointerOverloads() { for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes[0].pointer_begin(), PtrEnd = CandidateTypes[0].pointer_end(); Ptr != PtrEnd; ++Ptr) { QualType ParamTy = *Ptr; QualType PointeeTy = ParamTy->getPointeeType(); if (!PointeeTy->isObjectType() && !PointeeTy->isFunctionType()) continue; if (const FunctionProtoType *Proto =PointeeTy->getAs<FunctionProtoType>()) if (Proto->getTypeQuals() || Proto->getRefQualifier()) continue; S.AddBuiltinCandidate(S.Context.getLValueReferenceType(PointeeTy), &ParamTy, Args, CandidateSet); } } // C++ [over.built]p9: // For every promoted arithmetic type T, there exist candidate // operator functions of the form // // T operator+(T); // T operator-(T); void addUnaryPlusOrMinusArithmeticOverloads() { if (!HasArithmeticOrEnumeralCandidateType) return; for (unsigned Arith = FirstPromotedArithmeticType; Arith < LastPromotedArithmeticType; ++Arith) { QualType ArithTy = getArithmeticType(Arith); S.AddBuiltinCandidate(ArithTy, &ArithTy, Args, CandidateSet); } // Extension: We also add these operators for vector types. for (BuiltinCandidateTypeSet::iterator Vec = CandidateTypes[0].vector_begin(), VecEnd = CandidateTypes[0].vector_end(); Vec != VecEnd; ++Vec) { QualType VecTy = *Vec; S.AddBuiltinCandidate(VecTy, &VecTy, Args, CandidateSet); } } // C++ [over.built]p8: // For every type T, there exist candidate operator functions of // the form // // T* operator+(T*); void addUnaryPlusPointerOverloads() { for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes[0].pointer_begin(), PtrEnd = CandidateTypes[0].pointer_end(); Ptr != PtrEnd; ++Ptr) { QualType ParamTy = *Ptr; S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, CandidateSet); } } // C++ [over.built]p10: // For every promoted integral type T, there exist candidate // operator functions of the form // // T operator~(T); void addUnaryTildePromotedIntegralOverloads() { if (!HasArithmeticOrEnumeralCandidateType) return; for (unsigned Int = FirstPromotedIntegralType; Int < LastPromotedIntegralType; ++Int) { QualType IntTy = getArithmeticType(Int); S.AddBuiltinCandidate(IntTy, &IntTy, Args, CandidateSet); } // Extension: We also add this operator for vector types. for (BuiltinCandidateTypeSet::iterator Vec = CandidateTypes[0].vector_begin(), VecEnd = CandidateTypes[0].vector_end(); Vec != VecEnd; ++Vec) { QualType VecTy = *Vec; S.AddBuiltinCandidate(VecTy, &VecTy, Args, CandidateSet); } } // C++ [over.match.oper]p16: // For every pointer to member type T, there exist candidate operator // functions of the form // // bool operator==(T,T); // bool operator!=(T,T); void addEqualEqualOrNotEqualMemberPointerOverloads() { /// Set of (canonical) types that we've already handled. 
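// Illustrative example (an assumption, not from the original comments):
// given "int (S::*p)(), (S::*q)();", the comparison "p == q" gets the
// built-in candidate
//   bool operator==(int (S::*)(), int (S::*)());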
llvm::SmallPtrSet<QualType, 8> AddedTypes; for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) { for (BuiltinCandidateTypeSet::iterator MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(), MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end(); MemPtr != MemPtrEnd; ++MemPtr) { // Don't add the same builtin candidate twice. if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second) continue; QualType ParamTypes[2] = { *MemPtr, *MemPtr }; S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet); } } } // C++ [over.built]p15: // // For every T, where T is an enumeration type, a pointer type, or // std::nullptr_t, there exist candidate operator functions of the form // // bool operator<(T, T); // bool operator>(T, T); // bool operator<=(T, T); // bool operator>=(T, T); // bool operator==(T, T); // bool operator!=(T, T); void addRelationalPointerOrEnumeralOverloads() { // C++ [over.match.oper]p3: // [...]the built-in candidates include all of the candidate operator // functions defined in 13.6 that, compared to the given operator, [...] // do not have the same parameter-type-list as any non-template non-member // candidate. // // Note that in practice, this only affects enumeration types because there // aren't any built-in candidates of record type, and a user-defined operator // must have an operand of record or enumeration type. Also, the only other // overloaded operator with enumeration arguments, operator=, // cannot be overloaded for enumeration types, so this is the only place // where we must suppress candidates like this. llvm::DenseSet<std::pair<CanQualType, CanQualType> > UserDefinedBinaryOperators; for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) { if (CandidateTypes[ArgIdx].enumeration_begin() != CandidateTypes[ArgIdx].enumeration_end()) { for (OverloadCandidateSet::iterator C = CandidateSet.begin(), CEnd = CandidateSet.end(); C != CEnd; ++C) { if (!C->Viable || !C->Function || C->Function->getNumParams() != 2) continue; if (C->Function->isFunctionTemplateSpecialization()) continue; QualType FirstParamType = C->Function->getParamDecl(0)->getType().getUnqualifiedType(); QualType SecondParamType = C->Function->getParamDecl(1)->getType().getUnqualifiedType(); // Skip if either parameter isn't of enumeral type. if (!FirstParamType->isEnumeralType() || !SecondParamType->isEnumeralType()) continue; // Add this operator to the set of known user-defined operators. UserDefinedBinaryOperators.insert( std::make_pair(S.Context.getCanonicalType(FirstParamType), S.Context.getCanonicalType(SecondParamType))); } } } /// Set of (canonical) types that we've already handled. llvm::SmallPtrSet<QualType, 8> AddedTypes; for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) { for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes[ArgIdx].pointer_begin(), PtrEnd = CandidateTypes[ArgIdx].pointer_end(); Ptr != PtrEnd; ++Ptr) { // Don't add the same builtin candidate twice. if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second) continue; QualType ParamTypes[2] = { *Ptr, *Ptr }; S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet); } for (BuiltinCandidateTypeSet::iterator Enum = CandidateTypes[ArgIdx].enumeration_begin(), EnumEnd = CandidateTypes[ArgIdx].enumeration_end(); Enum != EnumEnd; ++Enum) { CanQualType CanonType = S.Context.getCanonicalType(*Enum); // Don't add the same builtin candidate twice, or if a user defined // candidate exists. 
if (!AddedTypes.insert(CanonType).second || UserDefinedBinaryOperators.count(std::make_pair(CanonType, CanonType))) continue; QualType ParamTypes[2] = { *Enum, *Enum }; S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet); } if (CandidateTypes[ArgIdx].hasNullPtrType()) { CanQualType NullPtrTy = S.Context.getCanonicalType(S.Context.NullPtrTy); if (AddedTypes.insert(NullPtrTy).second && !UserDefinedBinaryOperators.count(std::make_pair(NullPtrTy, NullPtrTy))) { QualType ParamTypes[2] = { NullPtrTy, NullPtrTy }; S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet); } } } } // C++ [over.built]p13: // // For every cv-qualified or cv-unqualified object type T // there exist candidate operator functions of the form // // T* operator+(T*, ptrdiff_t); // T& operator[](T*, ptrdiff_t); [BELOW] // T* operator-(T*, ptrdiff_t); // T* operator+(ptrdiff_t, T*); // T& operator[](ptrdiff_t, T*); [BELOW] // // C++ [over.built]p14: // // For every T, where T is a pointer to object type, there // exist candidate operator functions of the form // // ptrdiff_t operator-(T, T); void addBinaryPlusOrMinusPointerOverloads(OverloadedOperatorKind Op) { /// Set of (canonical) types that we've already handled. llvm::SmallPtrSet<QualType, 8> AddedTypes; for (int Arg = 0; Arg < 2; ++Arg) { QualType AsymmetricParamTypes[2] = { S.Context.getPointerDiffType(), S.Context.getPointerDiffType(), }; for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes[Arg].pointer_begin(), PtrEnd = CandidateTypes[Arg].pointer_end(); Ptr != PtrEnd; ++Ptr) { QualType PointeeTy = (*Ptr)->getPointeeType(); if (!PointeeTy->isObjectType()) continue; AsymmetricParamTypes[Arg] = *Ptr; if (Arg == 0 || Op == OO_Plus) { // operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t) // T* operator+(ptrdiff_t, T*); S.AddBuiltinCandidate(*Ptr, AsymmetricParamTypes, Args, CandidateSet); } if (Op == OO_Minus) { // ptrdiff_t operator-(T, T); if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second) continue; QualType ParamTypes[2] = { *Ptr, *Ptr }; S.AddBuiltinCandidate(S.Context.getPointerDiffType(), ParamTypes, Args, CandidateSet); } } } } // C++ [over.built]p12: // // For every pair of promoted arithmetic types L and R, there // exist candidate operator functions of the form // // LR operator*(L, R); // LR operator/(L, R); // LR operator+(L, R); // LR operator-(L, R); // bool operator<(L, R); // bool operator>(L, R); // bool operator<=(L, R); // bool operator>=(L, R); // bool operator==(L, R); // bool operator!=(L, R); // // where LR is the result of the usual arithmetic conversions // between types L and R. // // C++ [over.built]p24: // // For every pair of promoted arithmetic types L and R, there exist // candidate operator functions of the form // // LR operator?(bool, L, R); // // where LR is the result of the usual arithmetic conversions // between types L and R. // Our candidates ignore the first parameter. void addGenericBinaryArithmeticOverloads(bool isComparison) { if (!HasArithmeticOrEnumeralCandidateType) return; for (unsigned Left = FirstPromotedArithmeticType; Left < LastPromotedArithmeticType; ++Left) { for (unsigned Right = FirstPromotedArithmeticType; Right < LastPromotedArithmeticType; ++Right) { QualType LandR[2] = { getArithmeticType(Left), getArithmeticType(Right) }; QualType Result = isComparison ?
S.Context.BoolTy : getUsualArithmeticConversions(Left, Right); S.AddBuiltinCandidate(Result, LandR, Args, CandidateSet); } } // Extension: Add the binary operators ==, !=, <, <=, >=, >, *, /, and the // conditional operator for vector types. for (BuiltinCandidateTypeSet::iterator Vec1 = CandidateTypes[0].vector_begin(), Vec1End = CandidateTypes[0].vector_end(); Vec1 != Vec1End; ++Vec1) { for (BuiltinCandidateTypeSet::iterator Vec2 = CandidateTypes[1].vector_begin(), Vec2End = CandidateTypes[1].vector_end(); Vec2 != Vec2End; ++Vec2) { QualType LandR[2] = { *Vec1, *Vec2 }; QualType Result = S.Context.BoolTy; if (!isComparison) { if ((*Vec1)->isExtVectorType() || !(*Vec2)->isExtVectorType()) Result = *Vec1; else Result = *Vec2; } S.AddBuiltinCandidate(Result, LandR, Args, CandidateSet); } } } // C++ [over.built]p17: // // For every pair of promoted integral types L and R, there // exist candidate operator functions of the form // // LR operator%(L, R); // LR operator&(L, R); // LR operator^(L, R); // LR operator|(L, R); // L operator<<(L, R); // L operator>>(L, R); // // where LR is the result of the usual arithmetic conversions // between types L and R. void addBinaryBitwiseArithmeticOverloads(OverloadedOperatorKind Op) { if (!HasArithmeticOrEnumeralCandidateType) return; for (unsigned Left = FirstPromotedIntegralType; Left < LastPromotedIntegralType; ++Left) { for (unsigned Right = FirstPromotedIntegralType; Right < LastPromotedIntegralType; ++Right) { QualType LandR[2] = { getArithmeticType(Left), getArithmeticType(Right) }; QualType Result = (Op == OO_LessLess || Op == OO_GreaterGreater) ? LandR[0] : getUsualArithmeticConversions(Left, Right); S.AddBuiltinCandidate(Result, LandR, Args, CandidateSet); } } } // C++ [over.built]p20: // // For every pair (T, VQ), where T is an enumeration or // pointer to member type and VQ is either volatile or // empty, there exist candidate operator functions of the form // // VQ T& operator=(VQ T&, T); void addAssignmentMemberPointerOrEnumeralOverloads() { /// Set of (canonical) types that we've already handled. llvm::SmallPtrSet<QualType, 8> AddedTypes; for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) { for (BuiltinCandidateTypeSet::iterator Enum = CandidateTypes[ArgIdx].enumeration_begin(), EnumEnd = CandidateTypes[ArgIdx].enumeration_end(); Enum != EnumEnd; ++Enum) { if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second) continue; AddBuiltinAssignmentOperatorCandidates(S, *Enum, Args, CandidateSet); } for (BuiltinCandidateTypeSet::iterator MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(), MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end(); MemPtr != MemPtrEnd; ++MemPtr) { if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second) continue; AddBuiltinAssignmentOperatorCandidates(S, *MemPtr, Args, CandidateSet); } } } // C++ [over.built]p19: // // For every pair (T, VQ), where T is any type and VQ is either // volatile or empty, there exist candidate operator functions // of the form // // T*VQ& operator=(T*VQ&, T*); // // C++ [over.built]p21: // // For every pair (T, VQ), where T is a cv-qualified or // cv-unqualified object type and VQ is either volatile or // empty, there exist candidate operator functions of the form // // T*VQ& operator+=(T*VQ&, ptrdiff_t); // T*VQ& operator-=(T*VQ&, ptrdiff_t); void addAssignmentPointerOverloads(bool isEqualOp) { /// Set of (canonical) types that we've already handled. 
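// Illustrative example (an assumption, not from the original comments): for
// "int *p; p += 1;" this produces the candidate
//   int *& operator+=(int *&, ptrdiff_t);
// plus volatile/restrict-qualified variants when conversions to such
// qualified types are visible.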
llvm::SmallPtrSet<QualType, 8> AddedTypes; for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes[0].pointer_begin(), PtrEnd = CandidateTypes[0].pointer_end(); Ptr != PtrEnd; ++Ptr) { // If this is operator=, keep track of the builtin candidates we added. if (isEqualOp) AddedTypes.insert(S.Context.getCanonicalType(*Ptr)); else if (!(*Ptr)->getPointeeType()->isObjectType()) continue; // non-volatile version QualType ParamTypes[2] = { S.Context.getLValueReferenceType(*Ptr), isEqualOp ? *Ptr : S.Context.getPointerDiffType(), }; S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/ isEqualOp); bool NeedVolatile = !(*Ptr).isVolatileQualified() && VisibleTypeConversionsQuals.hasVolatile(); if (NeedVolatile) { // volatile version ParamTypes[0] = S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr)); S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/isEqualOp); } if (!(*Ptr).isRestrictQualified() && VisibleTypeConversionsQuals.hasRestrict()) { // restrict version ParamTypes[0] = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr)); S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/isEqualOp); if (NeedVolatile) { // volatile restrict version ParamTypes[0] = S.Context.getLValueReferenceType( S.Context.getCVRQualifiedType(*Ptr, (Qualifiers::Volatile | Qualifiers::Restrict))); S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/isEqualOp); } } } if (isEqualOp) { for (BuiltinCandidateTypeSet::iterator Ptr = CandidateTypes[1].pointer_begin(), PtrEnd = CandidateTypes[1].pointer_end(); Ptr != PtrEnd; ++Ptr) { // Make sure we don't add the same candidate twice. if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second) continue; QualType ParamTypes[2] = { S.Context.getLValueReferenceType(*Ptr), *Ptr, }; // non-volatile version S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/true); bool NeedVolatile = !(*Ptr).isVolatileQualified() && VisibleTypeConversionsQuals.hasVolatile(); if (NeedVolatile) { // volatile version ParamTypes[0] = S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr)); S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/true); } if (!(*Ptr).isRestrictQualified() && VisibleTypeConversionsQuals.hasRestrict()) { // restrict version ParamTypes[0] = S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr)); S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/true); if (NeedVolatile) { // volatile restrict version ParamTypes[0] = S.Context.getLValueReferenceType( S.Context.getCVRQualifiedType(*Ptr, (Qualifiers::Volatile | Qualifiers::Restrict))); S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet, /*IsAssignmentOperator=*/true); } } } } } // C++ [over.built]p18: // // For every triple (L, VQ, R), where L is an arithmetic type, // VQ is either volatile or empty, and R is a promoted // arithmetic type, there exist candidate operator functions of // the form // // VQ L& operator=(VQ L&, R); // VQ L& operator*=(VQ L&, R); // VQ L& operator/=(VQ L&, R); // VQ L& operator+=(VQ L&, R); // VQ L& operator-=(VQ L&, R); void addAssignmentArithmeticOverloads(bool isEqualOp) { if (!HasArithmeticOrEnumeralCandidateType) return; for (unsigned Left = 0; Left < NumArithmeticTypes; ++Left) { for (unsigned Right = FirstPromotedArithmeticType; Right <
        QualType ParamTypes[2];
        ParamTypes[1] = getArithmeticType(Right);

        // Add this built-in operator as a candidate (VQ is empty).
        ParamTypes[0] =
          S.Context.getLValueReferenceType(getArithmeticType(Left));
        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
                              /*IsAssigmentOperator=*/isEqualOp);

        // Add this built-in operator as a candidate (VQ is 'volatile').
        if (VisibleTypeConversionsQuals.hasVolatile()) {
          ParamTypes[0] = S.Context.getVolatileType(getArithmeticType(Left));
          ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
                                /*IsAssigmentOperator=*/isEqualOp);
        }
      }
    }

    // Extension: Add the binary operators =, +=, -=, *=, /= for vector types.
    for (BuiltinCandidateTypeSet::iterator
              Vec1 = CandidateTypes[0].vector_begin(),
           Vec1End = CandidateTypes[0].vector_end();
         Vec1 != Vec1End; ++Vec1) {
      for (BuiltinCandidateTypeSet::iterator
                Vec2 = CandidateTypes[1].vector_begin(),
             Vec2End = CandidateTypes[1].vector_end();
           Vec2 != Vec2End; ++Vec2) {
        QualType ParamTypes[2];
        ParamTypes[1] = *Vec2;
        // Add this built-in operator as a candidate (VQ is empty).
        ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
                              /*IsAssigmentOperator=*/isEqualOp);

        // Add this built-in operator as a candidate (VQ is 'volatile').
        if (VisibleTypeConversionsQuals.hasVolatile()) {
          ParamTypes[0] = S.Context.getVolatileType(*Vec1);
          ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet,
                                /*IsAssigmentOperator=*/isEqualOp);
        }
      }
    }
  }
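  // Illustrative sketch (editor's note, not upstream code): for
  //
  //   float f; f *= 2;      // L = float, R = int
  //
  // the loops above contribute, among others, the candidate
  //
  //   float &operator*=(float &, int);
  //
  // plus a 'volatile float &' variant when volatile conversions are visible.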
  // C++ [over.built]p22:
  //
  //   For every triple (L, VQ, R), where L is an integral type, VQ
  //   is either volatile or empty, and R is a promoted integral
  //   type, there exist candidate operator functions of the form
  //
  //     VQ L&       operator%=(VQ L&, R);
  //     VQ L&       operator<<=(VQ L&, R);
  //     VQ L&       operator>>=(VQ L&, R);
  //     VQ L&       operator&=(VQ L&, R);
  //     VQ L&       operator^=(VQ L&, R);
  //     VQ L&       operator|=(VQ L&, R);
  void addAssignmentIntegralOverloads() {
    if (!HasArithmeticOrEnumeralCandidateType)
      return;

    for (unsigned Left = FirstIntegralType; Left < LastIntegralType; ++Left) {
      for (unsigned Right = FirstPromotedIntegralType;
           Right < LastPromotedIntegralType; ++Right) {
        QualType ParamTypes[2];
        ParamTypes[1] = getArithmeticType(Right);

        // Add this built-in operator as a candidate (VQ is empty).
        ParamTypes[0] =
          S.Context.getLValueReferenceType(getArithmeticType(Left));
        S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);

        if (VisibleTypeConversionsQuals.hasVolatile()) {
          // Add this built-in operator as a candidate (VQ is 'volatile').
          ParamTypes[0] = getArithmeticType(Left);
          ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
          ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
          S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, CandidateSet);
        }
      }
    }
  }

  // C++ [over.built]p23:
  //
  //   There also exist candidate operator functions of the form
  //
  //     bool        operator!(bool);
  //     bool        operator&&(bool, bool);
  //     bool        operator||(bool, bool);
  void addExclaimOverload() {
    QualType ParamTy = S.Context.BoolTy;
    S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, CandidateSet,
                          /*IsAssignmentOperator=*/false,
                          /*NumContextualBoolArguments=*/1);
  }
  void addAmpAmpOrPipePipeOverload() {
    QualType ParamTypes[2] = { S.Context.BoolTy, S.Context.BoolTy };
    S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet,
                          /*IsAssignmentOperator=*/false,
                          /*NumContextualBoolArguments=*/2);
  }

  // C++ [over.built]p13:
  //
  //   For every cv-qualified or cv-unqualified object type T there
  //   exist candidate operator functions of the form
  //
  //     T*         operator+(T*, ptrdiff_t);     [ABOVE]
  //     T&         operator[](T*, ptrdiff_t);
  //     T*         operator-(T*, ptrdiff_t);     [ABOVE]
  //     T*         operator+(ptrdiff_t, T*);     [ABOVE]
  //     T&         operator[](ptrdiff_t, T*);
  void addSubscriptOverloads() {
    for (BuiltinCandidateTypeSet::iterator
              Ptr = CandidateTypes[0].pointer_begin(),
           PtrEnd = CandidateTypes[0].pointer_end();
         Ptr != PtrEnd; ++Ptr) {
      QualType ParamTypes[2] = { *Ptr, S.Context.getPointerDiffType() };
      QualType PointeeType = (*Ptr)->getPointeeType();
      if (!PointeeType->isObjectType())
        continue;

      QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);

      // T& operator[](T*, ptrdiff_t)
      S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, CandidateSet);
    }

    for (BuiltinCandidateTypeSet::iterator
              Ptr = CandidateTypes[1].pointer_begin(),
           PtrEnd = CandidateTypes[1].pointer_end();
         Ptr != PtrEnd; ++Ptr) {
      QualType ParamTypes[2] = { S.Context.getPointerDiffType(), *Ptr };
      QualType PointeeType = (*Ptr)->getPointeeType();
      if (!PointeeType->isObjectType())
        continue;

      QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);

      // T& operator[](ptrdiff_t, T*)
      S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, CandidateSet);
    }
  }
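  // Illustrative sketch (editor's note, not upstream code): for
  //
  //   int *p; p[2]; 2[p];
  //
  // addSubscriptOverloads produces both builtin candidates
  //
  //   int &operator[](int *, ptrdiff_t);
  //   int &operator[](ptrdiff_t, int *);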
  // C++ [over.built]p11:
  //    For every quintuple (C1, C2, T, CV1, CV2), where C2 is a class type,
  //    C1 is the same type as C2 or is a derived class of C2, T is an object
  //    type or a function type, and CV1 and CV2 are cv-qualifier-seqs,
  //    there exist candidate operator functions of the form
  //
  //      CV12 T& operator->*(CV1 C1*, CV2 T C2::*);
  //
  //    where CV12 is the union of CV1 and CV2.
  void addArrowStarOverloads() {
    for (BuiltinCandidateTypeSet::iterator
              Ptr = CandidateTypes[0].pointer_begin(),
           PtrEnd = CandidateTypes[0].pointer_end();
         Ptr != PtrEnd; ++Ptr) {
      QualType C1Ty = (*Ptr);
      QualType C1;
      QualifierCollector Q1;
      C1 = QualType(Q1.strip(C1Ty->getPointeeType()), 0);
      if (!isa<RecordType>(C1))
        continue;
      // heuristic to reduce number of builtin candidates in the set.
      // Add volatile/restrict version only if there are conversions to a
      // volatile/restrict type.
      if (!VisibleTypeConversionsQuals.hasVolatile() && Q1.hasVolatile())
        continue;
      if (!VisibleTypeConversionsQuals.hasRestrict() && Q1.hasRestrict())
        continue;
      for (BuiltinCandidateTypeSet::iterator
                MemPtr = CandidateTypes[1].member_pointer_begin(),
             MemPtrEnd = CandidateTypes[1].member_pointer_end();
           MemPtr != MemPtrEnd; ++MemPtr) {
        const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
        QualType C2 = QualType(mptr->getClass(), 0);
        C2 = C2.getUnqualifiedType();
        if (C1 != C2 && !S.IsDerivedFrom(C1, C2))
          break;
        QualType ParamTypes[2] = { *Ptr, *MemPtr };
        // build CV12 T&
        QualType T = mptr->getPointeeType();
        if (!VisibleTypeConversionsQuals.hasVolatile() &&
            T.isVolatileQualified())
          continue;
        if (!VisibleTypeConversionsQuals.hasRestrict() &&
            T.isRestrictQualified())
          continue;
        T = Q1.apply(S.Context, T);
        QualType ResultTy = S.Context.getLValueReferenceType(T);
        S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, CandidateSet);
      }
    }
  }

  // Note that we don't consider the first argument, since it has been
  // contextually converted to bool long ago. The candidates below are
  // therefore added as binary.
  //
  // C++ [over.built]p25:
  //   For every type T, where T is a pointer, pointer-to-member, or scoped
  //   enumeration type, there exist candidate operator functions of the form
  //
  //        T        operator?(bool, T, T);
  //
  void addConditionalOperatorOverloads() {
    /// Set of (canonical) types that we've already handled.
    llvm::SmallPtrSet<QualType, 8> AddedTypes;

    for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
      for (BuiltinCandidateTypeSet::iterator
                Ptr = CandidateTypes[ArgIdx].pointer_begin(),
             PtrEnd = CandidateTypes[ArgIdx].pointer_end();
           Ptr != PtrEnd; ++Ptr) {
        if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
          continue;

        QualType ParamTypes[2] = { *Ptr, *Ptr };
        S.AddBuiltinCandidate(*Ptr, ParamTypes, Args, CandidateSet);
      }

      for (BuiltinCandidateTypeSet::iterator
                MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
             MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
           MemPtr != MemPtrEnd; ++MemPtr) {
        if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
          continue;

        QualType ParamTypes[2] = { *MemPtr, *MemPtr };
        S.AddBuiltinCandidate(*MemPtr, ParamTypes, Args, CandidateSet);
      }

      if (S.getLangOpts().CPlusPlus11) {
        for (BuiltinCandidateTypeSet::iterator
                  Enum = CandidateTypes[ArgIdx].enumeration_begin(),
               EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
             Enum != EnumEnd; ++Enum) {
          if (!(*Enum)->getAs<EnumType>()->getDecl()->isScoped())
            continue;

          if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
            continue;

          QualType ParamTypes[2] = { *Enum, *Enum };
          S.AddBuiltinCandidate(*Enum, ParamTypes, Args, CandidateSet);
        }
      }
    }
  }
};

} // end anonymous namespace
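// Illustrative sketch (editor's note, not upstream code): given a scoped enum
//
//   enum class E { A };
//   cond ? E::A : E::A;
//
// addConditionalOperatorOverloads above adds the builtin candidate
//
//   E operator?(bool, E, E);
//
// treated as binary because the bool operand was contextually converted
// before candidate construction.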
/// AddBuiltinOperatorCandidates - Add the appropriate built-in
/// operator overloads to the candidate set (C++ [over.built]), based
/// on the operator @p Op and the arguments given. For example, if the
/// operator is a binary '+', this routine might add "int
/// operator+(int, int)" to cover integer addition.
void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                        SourceLocation OpLoc,
                                        ArrayRef<Expr *> Args,
                                        OverloadCandidateSet &CandidateSet) {
  // Find all of the types that the arguments can convert to, but only
  // if the operator we're looking at has built-in operator candidates
  // that make use of these types. Also record whether we encounter non-record
  // candidate types or either arithmetic or enumeral candidate types.
  Qualifiers VisibleTypeConversionsQuals;
  VisibleTypeConversionsQuals.addConst();
  for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx)
    VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);

  bool HasNonRecordCandidateType = false;
  bool HasArithmeticOrEnumeralCandidateType = false;
  SmallVector<BuiltinCandidateTypeSet, 2> CandidateTypes;
  for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
    CandidateTypes.emplace_back(*this);
    CandidateTypes[ArgIdx].AddTypesConvertedFrom(Args[ArgIdx]->getType(),
                                                 OpLoc,
                                                 true,
                                                 (Op == OO_Exclaim ||
                                                  Op == OO_AmpAmp ||
                                                  Op == OO_PipePipe),
                                                 VisibleTypeConversionsQuals);
    HasNonRecordCandidateType = HasNonRecordCandidateType ||
        CandidateTypes[ArgIdx].hasNonRecordTypes();
    HasArithmeticOrEnumeralCandidateType =
        HasArithmeticOrEnumeralCandidateType ||
        CandidateTypes[ArgIdx].hasArithmeticOrEnumeralTypes();
  }

  // Exit early when no non-record types have been added to the candidate set
  // for any of the arguments to the operator.
  //
  // We can't exit early for !, ||, or &&, since there we always have
  // 'bool' overloads.
  if (!HasNonRecordCandidateType &&
      !(Op == OO_Exclaim || Op == OO_AmpAmp || Op == OO_PipePipe))
    return;

  // Setup an object to manage the common state for building overloads.
  BuiltinOperatorOverloadBuilder OpBuilder(*this, Args,
                                           VisibleTypeConversionsQuals,
                                           HasArithmeticOrEnumeralCandidateType,
                                           CandidateTypes, CandidateSet);

  // Dispatch over the operation to add in only those overloads which apply.
  switch (Op) {
  case OO_None:
  case NUM_OVERLOADED_OPERATORS:
    llvm_unreachable("Expected an overloaded operator");

  case OO_New:
  case OO_Delete:
  case OO_Array_New:
  case OO_Array_Delete:
  case OO_Call:
    llvm_unreachable(
        "Special operators don't use AddBuiltinOperatorCandidates");

  case OO_Comma:
  case OO_Arrow:
    // C++ [over.match.oper]p3:
    //   -- For the operator ',', the unary operator '&', or the
    //      operator '->', the built-in candidates set is empty.
    break;

  case OO_Plus: // '+' is either unary or binary
    if (Args.size() == 1)
      OpBuilder.addUnaryPlusPointerOverloads();
    LLVM_FALLTHROUGH; // HLSL Change

  case OO_Minus: // '-' is either unary or binary
    if (Args.size() == 1) {
      OpBuilder.addUnaryPlusOrMinusArithmeticOverloads();
    } else {
      OpBuilder.addBinaryPlusOrMinusPointerOverloads(Op);
      OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
    }
    break;

  case OO_Star: // '*' is either unary or binary
    if (Args.size() == 1)
      OpBuilder.addUnaryStarPointerOverloads();
    else
      OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
    break;

  case OO_Slash:
    OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
    break;

  case OO_PlusPlus:
  case OO_MinusMinus:
    OpBuilder.addPlusPlusMinusMinusArithmeticOverloads(Op);
    OpBuilder.addPlusPlusMinusMinusPointerOverloads();
    break;

  case OO_EqualEqual:
  case OO_ExclaimEqual:
    OpBuilder.addEqualEqualOrNotEqualMemberPointerOverloads();
    LLVM_FALLTHROUGH; // HLSL Change

  case OO_Less:
  case OO_Greater:
  case OO_LessEqual:
  case OO_GreaterEqual:
    OpBuilder.addRelationalPointerOrEnumeralOverloads();
    OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/true);
    break;

  case OO_Percent:
  case OO_Caret:
  case OO_Pipe:
  case OO_LessLess:
  case OO_GreaterGreater:
    OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
    break;

  case OO_Amp: // '&' is either unary or binary
    if (Args.size() == 1)
      // C++ [over.match.oper]p3:
      //   -- For the operator ',', the unary operator '&', or the
      //      operator '->', the built-in candidates set is empty.
      break;

    OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
    break;

  case OO_Tilde:
    OpBuilder.addUnaryTildePromotedIntegralOverloads();
    break;

  case OO_Equal:
    OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
    LLVM_FALLTHROUGH; // HLSL Change

  case OO_PlusEqual:
  case OO_MinusEqual:
    OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
    LLVM_FALLTHROUGH; // HLSL Change

  case OO_StarEqual:
  case OO_SlashEqual:
    OpBuilder.addAssignmentArithmeticOverloads(Op == OO_Equal);
    break;

  case OO_PercentEqual:
  case OO_LessLessEqual:
  case OO_GreaterGreaterEqual:
  case OO_AmpEqual:
  case OO_CaretEqual:
  case OO_PipeEqual:
    OpBuilder.addAssignmentIntegralOverloads();
    break;

  case OO_Exclaim:
    OpBuilder.addExclaimOverload();
    break;

  case OO_AmpAmp:
  case OO_PipePipe:
    OpBuilder.addAmpAmpOrPipePipeOverload();
    break;

  case OO_Subscript:
    OpBuilder.addSubscriptOverloads();
    break;

  case OO_ArrowStar:
    OpBuilder.addArrowStarOverloads();
    break;

  case OO_Conditional:
    OpBuilder.addConditionalOperatorOverloads();
    OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
    break;
  }
}

/// \brief Add function candidates found via argument-dependent lookup
/// to the set of overloading candidates.
///
/// This routine performs argument-dependent name lookup based on the
/// given function name (which may also be an operator name) and adds
/// all of the overload candidates found by ADL to the overload
/// candidate set (C++ [basic.lookup.argdep]).
void
Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
                                           SourceLocation Loc,
                                           ArrayRef<Expr *> Args,
                                 TemplateArgumentListInfo *ExplicitTemplateArgs,
                                           OverloadCandidateSet& CandidateSet,
                                           bool PartialOverloading) {
  ADLResult Fns;

  // FIXME: This approach for uniquing ADL results (and removing
  // redundant candidates from the set) relies on pointer-equality,
  // which means we need to key off the canonical decl. However,
  // always going back to the canonical decl might not get us the
  // right set of default arguments. What default arguments are
  // we supposed to consider on ADL candidates, anyway?

  // FIXME: Pass in the explicit template arguments?
  ArgumentDependentLookup(Name, Loc, Args, Fns);

  // Erase all of the candidates we already knew about.
  for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
                                      CandEnd = CandidateSet.end();
       Cand != CandEnd; ++Cand)
    if (Cand->Function) {
      Fns.erase(Cand->Function);
      if (FunctionTemplateDecl *FunTmpl = Cand->Function->getPrimaryTemplate())
        Fns.erase(FunTmpl);
    }

  // For each of the ADL candidates we found, add it to the overload
  // set.
  for (ADLResult::iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
    DeclAccessPair FoundDecl = DeclAccessPair::make(*I, AS_none);
    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
      if (ExplicitTemplateArgs)
        continue;

      AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet, false,
                           PartialOverloading);
    } else
      AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I),
                                   FoundDecl, ExplicitTemplateArgs,
                                   Args, CandidateSet, PartialOverloading);
  }
}
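// Illustrative sketch (editor's note, not upstream code): for
//
//   namespace N { struct S {}; void f(S); }
//   N::S s;
//   f(s);   // N::f is only visible via ADL
//
// AddArgumentDependentLookupCandidates finds N::f through the argument's
// namespace and adds it, after first erasing any candidate already present
// from ordinary lookup so nothing is added twice.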
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
                                      const OverloadCandidate &Cand2,
                                      SourceLocation Loc,
                                      bool UserDefinedConversion) {
  // Define viable functions to be better candidates than non-viable
  // functions.
  if (!Cand2.Viable)
    return Cand1.Viable;
  else if (!Cand1.Viable)
    return false;

  // C++ [over.match.best]p1:
  //
  //   -- if F is a static member function, ICS1(F) is defined such
  //      that ICS1(F) is neither better nor worse than ICS1(G) for
  //      any function G, and, symmetrically, ICS1(G) is neither
  //      better nor worse than ICS1(F).
  unsigned StartArg = 0;
  if (Cand1.IgnoreObjectArgument || Cand2.IgnoreObjectArgument)
    StartArg = 1;

  // C++ [over.match.best]p1:
  //   A viable function F1 is defined to be a better function than another
  //   viable function F2 if for all arguments i, ICSi(F1) is not a worse
  //   conversion sequence than ICSi(F2), and then...
  unsigned NumArgs = Cand1.NumConversions;
  assert(Cand2.NumConversions == NumArgs && "Overload candidate mismatch");
  bool HasBetterConversion = false;
  for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
    switch (CompareImplicitConversionSequences(S,
                                               Cand1.Conversions[ArgIdx],
                                               Cand2.Conversions[ArgIdx])) {
    case ImplicitConversionSequence::Better:
      // Cand1 has a better conversion sequence.
      HasBetterConversion = true;
      break;

    case ImplicitConversionSequence::Worse:
      // Cand1 can't be better than Cand2.
      return false;

    case ImplicitConversionSequence::Indistinguishable:
      // Do nothing.
      break;
    }
  }

  //    -- for some argument j, ICSj(F1) is a better conversion sequence than
  //       ICSj(F2), or, if not that,
  if (HasBetterConversion)
    return true;

  //   -- the context is an initialization by user-defined conversion
  //      (see 8.5, 13.3.1.5) and the standard conversion sequence
  //      from the return type of F1 to the destination type (i.e.,
  //      the type of the entity being initialized) is a better
  //      conversion sequence than the standard conversion sequence
  //      from the return type of F2 to the destination type.
  if (UserDefinedConversion && Cand1.Function && Cand2.Function &&
      isa<CXXConversionDecl>(Cand1.Function) &&
      isa<CXXConversionDecl>(Cand2.Function)) {
    // First check whether we prefer one of the conversion functions over the
    // other. This only distinguishes the results in non-standard, extension
    // cases such as the conversion from a lambda closure type to a function
    // pointer or block.
    ImplicitConversionSequence::CompareKind Result =
        compareConversionFunctions(S, Cand1.Function, Cand2.Function);
    if (Result == ImplicitConversionSequence::Indistinguishable)
      Result = CompareStandardConversionSequences(S,
                                                  Cand1.FinalConversion,
                                                  Cand2.FinalConversion);

    if (Result != ImplicitConversionSequence::Indistinguishable)
      return Result == ImplicitConversionSequence::Better;

    // FIXME: Compare kind of reference binding if conversion functions
    // convert to a reference type used in direct reference binding, per
    // C++14 [over.match.best]p1 section 2 bullet 3.
  }

  //    -- F1 is a non-template function and F2 is a function template
  //       specialization, or, if not that,
  bool Cand1IsSpecialization = Cand1.Function &&
                               Cand1.Function->getPrimaryTemplate();
  bool Cand2IsSpecialization = Cand2.Function &&
                               Cand2.Function->getPrimaryTemplate();
  if (Cand1IsSpecialization != Cand2IsSpecialization)
    return Cand2IsSpecialization;

  //   -- F1 and F2 are function template specializations, and the function
  //      template for F1 is more specialized than the template for F2
  //      according to the partial ordering rules described in 14.5.5.2, or,
  //      if not that,
  if (Cand1IsSpecialization && Cand2IsSpecialization) {
    if (FunctionTemplateDecl *BetterTemplate
          = S.getMoreSpecializedTemplate(Cand1.Function->getPrimaryTemplate(),
                                         Cand2.Function->getPrimaryTemplate(),
                                         Loc,
                                         isa<CXXConversionDecl>(Cand1.Function)?
                                           TPOC_Conversion : TPOC_Call,
                                         Cand1.ExplicitCallArguments,
                                         Cand2.ExplicitCallArguments))
      return BetterTemplate == Cand1.Function->getPrimaryTemplate();
  }

  // Check for enable_if value-based overload resolution.
  if (Cand1.Function && Cand2.Function &&
      (Cand1.Function->hasAttr<EnableIfAttr>() ||
       Cand2.Function->hasAttr<EnableIfAttr>())) {
    // FIXME: The next several lines are just
    // specific_attr_iterator<EnableIfAttr> but going in declaration order,
    // instead of reverse order which is how they're stored in the AST.
    AttrVec Cand1Attrs;
    if (Cand1.Function->hasAttrs()) {
      Cand1Attrs = Cand1.Function->getAttrs();
      Cand1Attrs.erase(std::remove_if(Cand1Attrs.begin(), Cand1Attrs.end(),
                                      IsNotEnableIfAttr),
                       Cand1Attrs.end());
      std::reverse(Cand1Attrs.begin(), Cand1Attrs.end());
    }

    AttrVec Cand2Attrs;
    if (Cand2.Function->hasAttrs()) {
      Cand2Attrs = Cand2.Function->getAttrs();
      Cand2Attrs.erase(std::remove_if(Cand2Attrs.begin(), Cand2Attrs.end(),
                                      IsNotEnableIfAttr),
                       Cand2Attrs.end());
      std::reverse(Cand2Attrs.begin(), Cand2Attrs.end());
    }

    // Candidate 1 is better if it has strictly more attributes and
    // the common sequence is identical.
    if (Cand1Attrs.size() <= Cand2Attrs.size())
      return false;

    auto Cand1I = Cand1Attrs.begin();
    for (auto &Cand2A : Cand2Attrs) {
      auto &Cand1A = *Cand1I++;
      llvm::FoldingSetNodeID Cand1ID, Cand2ID;
      cast<EnableIfAttr>(Cand1A)->getCond()->Profile(Cand1ID,
                                                     S.getASTContext(), true);
      cast<EnableIfAttr>(Cand2A)->getCond()->Profile(Cand2ID,
                                                     S.getASTContext(), true);
      if (Cand1ID != Cand2ID)
        return false;
    }

    return true;
  }

  return false;
}

/// \brief Computes the best viable function (C++ 13.3.3)
/// within an overload candidate set.
///
/// \param Loc The location of the function name (or operator symbol) for
/// which overload resolution occurs.
///
/// \param Best If overload resolution was successful or found a deleted
/// function, \p Best points to the candidate function found.
///
/// \returns The result of overload resolution.
OverloadingResult
OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
                                         iterator &Best,
                                         bool UserDefinedConversion) {
  // HLSL Change Starts
  // Function calls should use HLSL-style overloading, except for compiler
  // generated functions which are annotated as requiring C++ overload
  // resolution, like operator[] overloads where `const` methods aren't
  // supported by HLSL's defined rules.
  if (S.getLangOpts().HLSL && !empty() && begin()->Function != nullptr &&
      !begin()->Function->hasAttr<HLSLCXXOverloadAttr>()) {
    return ::hlsl::GetBestViableFunction(S, Loc, *this, Best);
  }
  // HLSL Change Ends

  // Find the best viable function.
  Best = end();
  for (iterator Cand = begin(); Cand != end(); ++Cand) {
    if (Cand->Viable)
      if (Best == end() || isBetterOverloadCandidate(S, *Cand, *Best, Loc,
                                                     UserDefinedConversion))
        Best = Cand;
  }

  // If we didn't find any viable functions, abort.
  if (Best == end())
    return OR_No_Viable_Function;

  // Make sure that this function is better than every other viable
  // function. If not, we have an ambiguity.
  for (iterator Cand = begin(); Cand != end(); ++Cand) {
    if (Cand->Viable &&
        Cand != Best &&
        !isBetterOverloadCandidate(S, *Best, *Cand, Loc,
                                   UserDefinedConversion)) {
      Best = end();
      return OR_Ambiguous;
    }
  }

  // Best is the best viable function.
  if (Best->Function &&
      (Best->Function->isDeleted() ||
       S.isFunctionConsideredUnavailable(Best->Function)))
    return OR_Deleted;

  return OR_Success;
}
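// Illustrative sketch (editor's note, not upstream code): given
//
//   void f(int);
//   void f(long);
//   f(0u);   // unsigned -> int and unsigned -> long rank the same
//
// neither candidate is better than the other, so the second loop in
// BestViableFunction resets Best to end() and returns OR_Ambiguous.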
namespace {

enum OverloadCandidateKind {
  oc_function,
  oc_method,
  oc_constructor,
  oc_function_template,
  oc_method_template,
  oc_constructor_template,
  oc_implicit_default_constructor,
  oc_implicit_copy_constructor,
  oc_implicit_move_constructor,
  oc_implicit_copy_assignment,
  oc_implicit_move_assignment,
  oc_implicit_inherited_constructor
};

OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
                                                FunctionDecl *Fn,
                                                std::string &Description) {
  bool isTemplate = false;

  if (FunctionTemplateDecl *FunTmpl = Fn->getPrimaryTemplate()) {
    isTemplate = true;
    Description = S.getTemplateArgumentBindingsText(
      FunTmpl->getTemplateParameters(), *Fn->getTemplateSpecializationArgs());
  }

  if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
    if (!Ctor->isImplicit())
      return isTemplate ? oc_constructor_template : oc_constructor;

    if (Ctor->getInheritedConstructor())
      return oc_implicit_inherited_constructor;

    if (Ctor->isDefaultConstructor())
      return oc_implicit_default_constructor;

    if (Ctor->isMoveConstructor())
      return oc_implicit_move_constructor;

    assert(Ctor->isCopyConstructor() &&
           "unexpected sort of implicit constructor");
    return oc_implicit_copy_constructor;
  }

  if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
    if (S.getLangOpts().HLSL)
      return isTemplate ? oc_method_template : oc_method;
    // HLSL Change - all intrinsics are implicit, doesn't imply conversion

    // This actually gets spelled 'candidate function' for now, but
    // it doesn't hurt to split it out.
    if (!Meth->isImplicit())
      return isTemplate ? oc_method_template : oc_method;

    if (Meth->isMoveAssignmentOperator())
      return oc_implicit_move_assignment;

    if (Meth->isCopyAssignmentOperator())
      return oc_implicit_copy_assignment;

    assert(isa<CXXConversionDecl>(Meth) && "expected conversion");
    return oc_method;
  }

  return isTemplate ? oc_function_template : oc_function;
}

void MaybeEmitInheritedConstructorNote(Sema &S, Decl *Fn) {
  const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn);
  if (!Ctor)
    return;

  Ctor = Ctor->getInheritedConstructor();
  if (!Ctor)
    return;

  S.Diag(Ctor->getLocation(), diag::note_ovl_candidate_inherited_constructor);
}

} // end anonymous namespace

// Notes the location of an overload candidate.
void Sema::NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType) {
  std::string FnDesc;
  OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Fn, FnDesc);
  PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
                             << (unsigned) K << FnDesc;
  HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
  Diag(Fn->getLocation(), PD);
  MaybeEmitInheritedConstructorNote(*this, Fn);
}

// Notes the location of all overload candidates designated through
// OverloadedExpr
void Sema::NoteAllOverloadCandidates(Expr *OverloadedExpr, QualType DestType) {
  assert(OverloadedExpr->getType() == Context.OverloadTy);

  OverloadExpr::FindResult Ovl = OverloadExpr::find(OverloadedExpr);
  OverloadExpr *OvlExpr = Ovl.Expression;

  for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
                          IEnd = OvlExpr->decls_end();
       I != IEnd; ++I) {
    if (FunctionTemplateDecl *FunTmpl =
                dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
      NoteOverloadCandidate(FunTmpl->getTemplatedDecl(), DestType);
    } else if (FunctionDecl *Fun
                      = dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
      NoteOverloadCandidate(Fun, DestType);
    }
  }
}
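// Illustrative sketch (editor's note, not upstream code): for a class with no
// user-written constructors, a failed call to its implicit copy constructor
// is classified above as oc_implicit_copy_constructor, letting the note say
// "candidate is the implicit copy constructor" instead of pointing at a
// declaration the user never wrote.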
/// Diagnoses an ambiguous conversion. The partial diagnostic is the
/// "lead" diagnostic; it will be given two arguments, the source and
/// target types of the conversion.
void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
                                 Sema &S,
                                 SourceLocation CaretLoc,
                                 const PartialDiagnostic &PDiag) const {
  S.Diag(CaretLoc, PDiag)
    << Ambiguous.getFromType() << Ambiguous.getToType();
  // FIXME: The note limiting machinery is borrowed from
  // OverloadCandidateSet::NoteCandidates; there's an opportunity for
  // refactoring here.
  const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
  unsigned CandsShown = 0;
  AmbiguousConversionSequence::const_iterator I, E;
  for (I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) {
    if (CandsShown >= 4 && ShowOverloads == Ovl_Best)
      break;
    ++CandsShown;
    S.NoteOverloadCandidate(*I);
  }
  if (I != E)
    S.Diag(SourceLocation(), diag::note_ovl_too_many_candidates) << int(E - I);
}

static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
                                  unsigned I,
                                  const ImplicitConversionSequence &Conv,
                                  SourceLocation OpLoc) {
  // HLSL Change: add OpLoc and Conv
  // const ImplicitConversionSequence &Conv = Cand->Conversions[I];
  assert(Conv.isBad());
  assert(Cand->Function && "for now, candidate must be a function");
  FunctionDecl *Fn = Cand->Function;

  // There's a conversion slot for the object argument if this is a
  // non-constructor method. Note that 'I' corresponds the
  // conversion-slot index.
  bool isObjectArgument = false;
  if (isa<CXXMethodDecl>(Fn) && !isa<CXXConstructorDecl>(Fn)) {
    if (I == 0)
      isObjectArgument = true;
    else
      I--;
  }

  std::string FnDesc;
  OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);

  Expr *FromExpr = Conv.Bad.FromExpr;
  QualType FromTy = Conv.Bad.getFromType();
  QualType ToTy = Conv.Bad.getToType();

  // HLSL Change: replace Fn->getLocation() in diagnostics with FnDiagLocation
  // and avoid notes that try to point to built-in targets
  SourceLocation FnDiagLocation =
      Fn->getLocation().isValid() ? Fn->getLocation() : OpLoc;

  if (FromTy == S.Context.OverloadTy) {
    assert(FromExpr && "overload set argument came from implicit argument?");
    Expr *E = FromExpr->IgnoreParens();
    if (isa<UnaryOperator>(E))
      E = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
    DeclarationName Name = cast<OverloadExpr>(E)->getName();

    S.Diag(FnDiagLocation,
           diag::note_ovl_candidate_bad_overload) // HLSL Change - FnDiagLocation
      << (unsigned) FnKind << FnDesc
      << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
      << ToTy << Name << I+1;
    MaybeEmitInheritedConstructorNote(S, Fn);
    return;
  }

  // Do some hand-waving analysis to see if the non-viability is due
  // to a qualifier mismatch.
  CanQualType CFromTy = S.Context.getCanonicalType(FromTy);
  CanQualType CToTy = S.Context.getCanonicalType(ToTy);
  if (CanQual<ReferenceType> RT = CToTy->getAs<ReferenceType>())
    CToTy = RT->getPointeeType();
  else {
    // TODO: detect and diagnose the full richness of const mismatches.
    if (CanQual<PointerType> FromPT = CFromTy->getAs<PointerType>())
      if (CanQual<PointerType> ToPT = CToTy->getAs<PointerType>())
        CFromTy = FromPT->getPointeeType(), CToTy = ToPT->getPointeeType();
  }

  if (CToTy.getUnqualifiedType() == CFromTy.getUnqualifiedType() &&
      !CToTy.isAtLeastAsQualifiedAs(CFromTy)) {
    Qualifiers FromQs = CFromTy.getQualifiers();
    Qualifiers ToQs = CToTy.getQualifiers();

    if (FromQs.getAddressSpace() != ToQs.getAddressSpace()) {
      S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_addrspace)
        << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << FromTy
        << FromQs.getAddressSpace() << ToQs.getAddressSpace()
        << (unsigned) isObjectArgument << I+1;
      MaybeEmitInheritedConstructorNote(S, Fn);
      return;
    }
    if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
      S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_ownership)
        << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << FromTy
        << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
        << (unsigned) isObjectArgument << I+1;
      MaybeEmitInheritedConstructorNote(S, Fn);
      return;
    }

    if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
      S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_gc)
        << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << FromTy
        << FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
        << (unsigned) isObjectArgument << I+1;
      MaybeEmitInheritedConstructorNote(S, Fn);
      return;
    }

    unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
    assert(CVR && "unexpected qualifiers mismatch");

    if (isObjectArgument) {
      S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_cvr_this)
        << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << FromTy << (CVR - 1);
    } else {
      S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_cvr)
        << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << FromTy << (CVR - 1) << I+1;
    }
    MaybeEmitInheritedConstructorNote(S, Fn);
    return;
  }

  // Special diagnostic for failure to convert an initializer list, since
  // telling the user that it has type void is not useful.
  if (FromExpr && isa<InitListExpr>(FromExpr)) {
    S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_list_argument)
      << (unsigned) FnKind << FnDesc
      << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
      << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
    MaybeEmitInheritedConstructorNote(S, Fn);
    return;
  }

  // Diagnose references or pointers to incomplete types differently,
  // since it's far from impossible that the incompleteness triggered
  // the failure.
  QualType TempFromTy = FromTy.getNonReferenceType();
  if (const PointerType *PTy = TempFromTy->getAs<PointerType>())
    TempFromTy = PTy->getPointeeType();
  if (TempFromTy->isIncompleteType()) {
    S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_conv_incomplete)
      << (unsigned) FnKind << FnDesc
      << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
      << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
    MaybeEmitInheritedConstructorNote(S, Fn);
    return;
  }

  // Diagnose base -> derived pointer conversions.
  unsigned BaseToDerivedConversion = 0;
  if (const PointerType *FromPtrTy = FromTy->getAs<PointerType>()) {
    if (const PointerType *ToPtrTy = ToTy->getAs<PointerType>()) {
      if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
                                               FromPtrTy->getPointeeType()) &&
          !FromPtrTy->getPointeeType()->isIncompleteType() &&
          !ToPtrTy->getPointeeType()->isIncompleteType() &&
          S.IsDerivedFrom(ToPtrTy->getPointeeType(),
                          FromPtrTy->getPointeeType()))
        BaseToDerivedConversion = 1;
    }
  } else if (const ObjCObjectPointerType *FromPtrTy
                                    = FromTy->getAs<ObjCObjectPointerType>()) {
    if (const ObjCObjectPointerType *ToPtrTy
                                      = ToTy->getAs<ObjCObjectPointerType>())
      if (const ObjCInterfaceDecl *FromIface = FromPtrTy->getInterfaceDecl())
        if (const ObjCInterfaceDecl *ToIface = ToPtrTy->getInterfaceDecl())
          if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
                                                FromPtrTy->getPointeeType()) &&
              FromIface->isSuperClassOf(ToIface))
            BaseToDerivedConversion = 2;
  } else if (const ReferenceType *ToRefTy = ToTy->getAs<ReferenceType>()) {
    if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
        !FromTy->isIncompleteType() &&
        !ToRefTy->getPointeeType()->isIncompleteType() &&
        S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy)) {
      BaseToDerivedConversion = 3;
    } else if (ToTy->isLValueReferenceType() && !FromExpr->isLValue() &&
               ToTy.getNonReferenceType().getCanonicalType() ==
               FromTy.getNonReferenceType().getCanonicalType()) {
      S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_lvalue)
        << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << (unsigned) isObjectArgument << I + 1;
      MaybeEmitInheritedConstructorNote(S, Fn);
      return;
    }
  }

  if (BaseToDerivedConversion) {
    S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_base_to_derived_conv)
      << (unsigned) FnKind << FnDesc
      << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
      << (BaseToDerivedConversion - 1)
      << FromTy << ToTy << I+1;
    MaybeEmitInheritedConstructorNote(S, Fn);
    return;
  }

  if (isa<ObjCObjectPointerType>(CFromTy) &&
      isa<PointerType>(CToTy)) {
    Qualifiers FromQs = CFromTy.getQualifiers();
    Qualifiers ToQs = CToTy.getQualifiers();
    if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
      S.Diag(FnDiagLocation, diag::note_ovl_candidate_bad_arc_conv)
        << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
      MaybeEmitInheritedConstructorNote(S, Fn);
      return;
    }
  }

  // Emit the generic diagnostic and, optionally, add the hints to it.
  PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
  FDiag << (unsigned) FnKind << FnDesc
        << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
        << FromTy << ToTy << (unsigned) isObjectArgument << I + 1
        << (unsigned) (Cand->Fix.Kind);

  // If we can fix the conversion, suggest the FixIts.
  for (std::vector<FixItHint>::iterator HI = Cand->Fix.Hints.begin(),
       HE = Cand->Fix.Hints.end(); HI != HE; ++HI)
    FDiag << *HI;
  S.Diag(FnDiagLocation, FDiag);

  MaybeEmitInheritedConstructorNote(S, Fn);
}
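// Illustrative sketch (editor's note, not upstream code): for
//
//   void f(int *);
//   const int *p;
//   f(p);   // dropping 'const' from the pointee
//
// the qualifier analysis above detects that only cv-qualifiers differ and
// emits note_ovl_candidate_bad_cvr rather than the generic bad-conversion
// note.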
/// Additional arity mismatch diagnosis specific to a function overload
/// candidate. This is not covered by the more general DiagnoseArityMismatch()
/// over a candidate in any candidate set.
static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
                               unsigned NumArgs) {
  FunctionDecl *Fn = Cand->Function;
  unsigned MinParams = Fn->getMinRequiredArguments();

  // With invalid overloaded operators, it's possible that we think we
  // have an arity mismatch when in fact it looks like we have the
  // right number of arguments, because only overloaded operators have
  // the weird behavior of overloading member and non-member functions.
  // Just don't report anything.
  if (Fn->isInvalidDecl() &&
      Fn->getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
    return true;

  if (NumArgs < MinParams) {
    assert((Cand->FailureKind == ovl_fail_too_few_arguments) ||
           (Cand->FailureKind == ovl_fail_bad_deduction &&
            Cand->DeductionFailure.Result == Sema::TDK_TooFewArguments));
  } else {
    assert((Cand->FailureKind == ovl_fail_too_many_arguments) ||
           (Cand->FailureKind == ovl_fail_bad_deduction &&
            Cand->DeductionFailure.Result == Sema::TDK_TooManyArguments));
  }

  return false;
}

/// General arity mismatch diagnosis over a candidate in a candidate set.
static void DiagnoseArityMismatch(Sema &S, Decl *D, unsigned NumFormalArgs,
                                  SourceLocation OpLoc) { // HLSL Change - add OpLoc
  assert(isa<FunctionDecl>(D) &&
      "The templated declaration should at least be a function"
      " when diagnosing bad template argument deduction due to too many"
      " or too few arguments");

  FunctionDecl *Fn = cast<FunctionDecl>(D);

  // TODO: treat calls to a missing default constructor as a special case
  const FunctionProtoType *FnTy = Fn->getType()->getAs<FunctionProtoType>();
  unsigned MinParams = Fn->getMinRequiredArguments();

  // at least / at most / exactly
  unsigned mode, modeCount;
  if (NumFormalArgs < MinParams) {
    if (MinParams != FnTy->getNumParams() || FnTy->isVariadic() ||
        FnTy->isTemplateVariadic())
      mode = 0; // "at least"
    else
      mode = 2; // "exactly"
    modeCount = MinParams;
  } else {
    if (MinParams != FnTy->getNumParams())
      mode = 1; // "at most"
    else
      mode = 2; // "exactly"
    modeCount = FnTy->getNumParams();
  }

  std::string Description;
  OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, Description);

  // HLSL Change Starts - fallback for built-ins
  SourceLocation DiagLoc = Fn->getLocation();
  if (DiagLoc.isInvalid())
    DiagLoc = OpLoc;
  if (modeCount == 1 && Fn->getParamDecl(0)->getDeclName())
    S.Diag(DiagLoc, diag::note_ovl_candidate_arity_one)
      << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != nullptr)
      << mode << Fn->getParamDecl(0) << NumFormalArgs;
  else
    S.Diag(DiagLoc, diag::note_ovl_candidate_arity)
      << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != nullptr)
      << mode << modeCount << NumFormalArgs;
  // HLSL Change Ends
  MaybeEmitInheritedConstructorNote(S, Fn);
}

/// Arity mismatch diagnosis specific to a function overload candidate.
static void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
                                  unsigned NumFormalArgs,
                                  SourceLocation OpLoc) { // HLSL Change - add OpLoc
  if (!CheckArityMismatch(S, Cand, NumFormalArgs))
    DiagnoseArityMismatch(S, Cand->Function, NumFormalArgs, OpLoc);
}

static TemplateDecl *getDescribedTemplate(Decl *Templated) {
  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Templated))
    return FD->getDescribedFunctionTemplate();
  else if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Templated))
    return RD->getDescribedClassTemplate();

  llvm_unreachable("Unsupported: Getting the described template declaration"
                   " for bad deduction diagnosis");
}
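// Illustrative sketch (editor's note, not upstream code): for
//
//   void g(int, int);
//   g(1);
//
// DiagnoseArityMismatch selects mode 2 ("exactly") with modeCount == 2, so
// the note reports a candidate taking exactly 2 arguments against the 1
// provided; for built-ins with no source location, OpLoc anchors the note
// instead (the HLSL fallback above).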
/// Diagnose a failed template-argument deduction.
static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
                                 DeductionFailureInfo &DeductionFailure,
                                 unsigned NumArgs,
                                 SourceLocation OpLoc) { // HLSL Change - add OpLoc
  TemplateParameter Param = DeductionFailure.getTemplateParameter();
  NamedDecl *ParamD;
  (ParamD = Param.dyn_cast<TemplateTypeParmDecl*>()) ||
  (ParamD = Param.dyn_cast<NonTypeTemplateParmDecl*>()) ||
  (ParamD = Param.dyn_cast<TemplateTemplateParmDecl*>());
  switch (DeductionFailure.Result) {
  case Sema::TDK_Success:
    llvm_unreachable("TDK_success while diagnosing bad deduction");

  case Sema::TDK_Incomplete: {
    assert(ParamD && "no parameter found for incomplete deduction result");
    S.Diag(Templated->getLocation(),
           diag::note_ovl_candidate_incomplete_deduction)
        << ParamD->getDeclName();
    MaybeEmitInheritedConstructorNote(S, Templated);
    return;
  }

  case Sema::TDK_Underqualified: {
    assert(ParamD && "no parameter found for bad qualifiers deduction result");
    TemplateTypeParmDecl *TParam = cast<TemplateTypeParmDecl>(ParamD);

    QualType Param = DeductionFailure.getFirstArg()->getAsType();

    // Param will have been canonicalized, but it should just be a
    // qualified version of ParamD, so move the qualifiers to that.
    QualifierCollector Qs;
    Qs.strip(Param);
    QualType NonCanonParam = Qs.apply(S.Context, TParam->getTypeForDecl());
    assert(S.Context.hasSameType(Param, NonCanonParam));

    // Arg has also been canonicalized, but there's nothing we can do
    // about that. It also doesn't matter as much, because it won't
    // have any template parameters in it (because deduction isn't
    // done on dependent types).
    QualType Arg = DeductionFailure.getSecondArg()->getAsType();

    S.Diag(Templated->getLocation(), diag::note_ovl_candidate_underqualified)
        << ParamD->getDeclName() << Arg << NonCanonParam;
    MaybeEmitInheritedConstructorNote(S, Templated);
    return;
  }

  case Sema::TDK_Inconsistent: {
    assert(ParamD && "no parameter found for inconsistent deduction result");
    int which = 0;
    if (isa<TemplateTypeParmDecl>(ParamD))
      which = 0;
    else if (isa<NonTypeTemplateParmDecl>(ParamD))
      which = 1;
    else {
      which = 2;
    }

    S.Diag(Templated->getLocation(),
           diag::note_ovl_candidate_inconsistent_deduction)
        << which << ParamD->getDeclName() << *DeductionFailure.getFirstArg()
        << *DeductionFailure.getSecondArg();
    MaybeEmitInheritedConstructorNote(S, Templated);
    return;
  }

  case Sema::TDK_InvalidExplicitArguments:
    assert(ParamD && "no parameter found for invalid explicit arguments");
    if (ParamD->getDeclName())
      S.Diag(Templated->getLocation(),
             diag::note_ovl_candidate_explicit_arg_mismatch_named)
          << ParamD->getDeclName();
    else {
      int index = 0;
      if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ParamD))
        index = TTP->getIndex();
      else if (NonTypeTemplateParmDecl *NTTP
                                  = dyn_cast<NonTypeTemplateParmDecl>(ParamD))
        index = NTTP->getIndex();
      else
        index = cast<TemplateTemplateParmDecl>(ParamD)->getIndex();
      S.Diag(Templated->getLocation(),
             diag::note_ovl_candidate_explicit_arg_mismatch_unnamed)
          << (index + 1);
    }
    MaybeEmitInheritedConstructorNote(S, Templated);
    return;

  case Sema::TDK_TooManyArguments:
  case Sema::TDK_TooFewArguments:
    DiagnoseArityMismatch(S, Templated, NumArgs, OpLoc);
    return;

  case Sema::TDK_InstantiationDepth:
    S.Diag(Templated->getLocation(),
           diag::note_ovl_candidate_instantiation_depth);
    MaybeEmitInheritedConstructorNote(S, Templated);
    return;

  case Sema::TDK_SubstitutionFailure: {
    // Format the template argument list into the argument string.
    SmallString<128> TemplateArgString;
    if (TemplateArgumentList *Args =
            DeductionFailure.getTemplateArgumentList()) {
      TemplateArgString = " ";
      TemplateArgString += S.getTemplateArgumentBindingsText(
          getDescribedTemplate(Templated)->getTemplateParameters(), *Args);
    }

    // If this candidate was disabled by enable_if, say so.
    PartialDiagnosticAt *PDiag = DeductionFailure.getSFINAEDiagnostic();
    if (PDiag && PDiag->second.getDiagID() ==
            diag::err_typename_nested_not_found_enable_if) {
      // FIXME: Use the source range of the condition, and the fully-qualified
      //        name of the enable_if template. These are both present in PDiag.
      S.Diag(PDiag->first, diag::note_ovl_candidate_disabled_by_enable_if)
          << "'enable_if'" << TemplateArgString;
      return;
    }

    // Format the SFINAE diagnostic into the argument string.
    // FIXME: Add a general mechanism to include a PartialDiagnostic *'s
    //        formatted message in another diagnostic.
    SmallString<128> SFINAEArgString;
    SourceRange R;
    if (PDiag) {
      SFINAEArgString = ": ";
      R = SourceRange(PDiag->first, PDiag->first);
      PDiag->second.EmitToString(S.getDiagnostics(), SFINAEArgString);
    }

    S.Diag(Templated->getLocation(),
           diag::note_ovl_candidate_substitution_failure)
        << TemplateArgString << SFINAEArgString << R;
    MaybeEmitInheritedConstructorNote(S, Templated);
    return;
  }

  case Sema::TDK_FailedOverloadResolution: {
    OverloadExpr::FindResult R = OverloadExpr::find(DeductionFailure.getExpr());
    S.Diag(Templated->getLocation(),
           diag::note_ovl_candidate_failed_overload_resolution)
        << R.Expression->getName();
    return;
  }

  case Sema::TDK_NonDeducedMismatch: {
    // FIXME: Provide a source location to indicate what we couldn't match.
    TemplateArgument FirstTA = *DeductionFailure.getFirstArg();
    TemplateArgument SecondTA = *DeductionFailure.getSecondArg();
    if (FirstTA.getKind() == TemplateArgument::Template &&
        SecondTA.getKind() == TemplateArgument::Template) {
      TemplateName FirstTN = FirstTA.getAsTemplate();
      TemplateName SecondTN = SecondTA.getAsTemplate();
      if (FirstTN.getKind() == TemplateName::Template &&
          SecondTN.getKind() == TemplateName::Template) {
        if (FirstTN.getAsTemplateDecl()->getName() ==
            SecondTN.getAsTemplateDecl()->getName()) {
          // FIXME: This fixes a bad diagnostic where both templates are named
          // the same. This particular case is a bit difficult since:
          // 1) It is passed as a string to the diagnostic printer.
          // 2) The diagnostic printer only attempts to find a better
          //    name for types, not decls.
          // Ideally, this should be folded into the diagnostic printer.
          S.Diag(Templated->getLocation(),
                 diag::note_ovl_candidate_non_deduced_mismatch_qualified)
              << FirstTN.getAsTemplateDecl() << SecondTN.getAsTemplateDecl();
          return;
        }
      }
    }

    // HLSL Change Starts
    // The implementation for template argument deduction does not yet provide
    // FirstArg and SecondArg information for failure cases; elide the note in
    // this case.
    if (FirstTA.isNull() || SecondTA.isNull())
      return;
    // HLSL Change Ends

    // FIXME: For generic lambda parameters, check if the function is a lambda
    // call operator, and if so, emit a prettier and more informative
    // diagnostic that mentions 'auto' and lambda in addition to
    // (or instead of?) the canonical template type parameters.
    S.Diag(Templated->getLocation(),
           diag::note_ovl_candidate_non_deduced_mismatch)
        << FirstTA << SecondTA;
    return;
  }
  // TODO: diagnose these individually, then kill off
  // note_ovl_candidate_bad_deduction, which is uselessly vague.
  case Sema::TDK_MiscellaneousDeductionFailure:
    S.Diag(Templated->getLocation(), diag::note_ovl_candidate_bad_deduction);
    MaybeEmitInheritedConstructorNote(S, Templated);
    return;
  }
}

/// Diagnose a failed template-argument deduction, for function calls.
static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
                                 unsigned NumArgs,
                                 SourceLocation OpLoc) { // HLSL Change - add OpLoc
  unsigned TDK = Cand->DeductionFailure.Result;
  if (TDK == Sema::TDK_TooFewArguments || TDK == Sema::TDK_TooManyArguments) {
    if (CheckArityMismatch(S, Cand, NumArgs))
      return;
  }
  DiagnoseBadDeduction(S, Cand->Function, // pattern
                       Cand->DeductionFailure, NumArgs,
                       OpLoc); // HLSL Change - add OpLoc
}

/// CUDA: diagnose an invalid call across targets.
static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
  FunctionDecl *Caller = cast<FunctionDecl>(S.CurContext);
  FunctionDecl *Callee = Cand->Function;

  Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller),
                           CalleeTarget = S.IdentifyCUDATarget(Callee);

  std::string FnDesc;
  OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Callee, FnDesc);

  S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target)
      << (unsigned)FnKind << CalleeTarget << CallerTarget;

  // This could be an implicit constructor for which we could not infer the
  // target due to a collision. Diagnose that case.
  CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Callee);
  if (Meth != nullptr && Meth->isImplicit()) {
    CXXRecordDecl *ParentClass = Meth->getParent();
    Sema::CXXSpecialMember CSM;

    switch (FnKind) {
    default:
      return;
    case oc_implicit_default_constructor:
      CSM = Sema::CXXDefaultConstructor;
      break;
    case oc_implicit_copy_constructor:
      CSM = Sema::CXXCopyConstructor;
      break;
    case oc_implicit_move_constructor:
      CSM = Sema::CXXMoveConstructor;
      break;
    case oc_implicit_copy_assignment:
      CSM = Sema::CXXCopyAssignment;
      break;
    case oc_implicit_move_assignment:
      CSM = Sema::CXXMoveAssignment;
      break;
    };

    bool ConstRHS = false;
    if (Meth->getNumParams()) {
      if (const ReferenceType *RT =
              Meth->getParamDecl(0)->getType()->getAs<ReferenceType>()) {
        ConstRHS = RT->getPointeeType().isConstQualified();
      }
    }

    S.inferCUDATargetForImplicitSpecialMember(ParentClass, CSM, Meth,
                                              /* ConstRHS */ ConstRHS,
                                              /* Diagnose */ true);
  }
}

static void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
  FunctionDecl *Callee = Cand->Function;
  EnableIfAttr *Attr = static_cast<EnableIfAttr*>(Cand->DeductionFailure.Data);

  S.Diag(Callee->getLocation(),
         diag::note_ovl_candidate_disabled_by_enable_if_attr)
      << Attr->getCond()->getSourceRange() << Attr->getMessage();
}
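// Illustrative sketch (editor's note, not upstream code): for a candidate
//
//   void h(int n) __attribute__((enable_if(n > 0, "n must be positive")));
//   h(-1);
//
// the candidate fails with ovl_fail_enable_if, and DiagnoseFailedEnableIfAttr
// points at h with the attribute's condition range and message.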
/// Generates a 'note' diagnostic for an overload candidate. We've
/// already generated a primary error at the call site.
///
/// It really does need to be a single diagnostic with its caret
/// pointed at the candidate declaration. Yes, this creates some
/// major challenges of technical writing. Yes, this makes pointing
/// out problems with specific arguments quite awkward. It's still
/// better than generating twenty screens of text for every failed
/// overload.
///
/// It would be great to be able to express per-candidate problems
/// more richly for those diagnostic clients that cared, but we'd
/// still have to be just as careful with the default diagnostics.
static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
                                  unsigned NumArgs,
                                  SourceLocation OpLoc) { // HLSL Change: add OpLoc
  FunctionDecl *Fn = Cand->Function;

  // Note deleted candidates, but only if they're viable.
  if (Cand->Viable && (Fn->isDeleted() ||
      S.isFunctionConsideredUnavailable(Fn))) {
    std::string FnDesc;
    OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);

    S.Diag(Fn->getLocation(), diag::note_ovl_candidate_deleted)
      << FnKind << FnDesc
      << (Fn->isDeleted() ? (Fn->isDeletedAsWritten() ? 1 : 2) : 0);
    MaybeEmitInheritedConstructorNote(S, Fn);
    return;
  }

  // We don't really have anything else to say about viable candidates.
  if (Cand->Viable) {
    S.NoteOverloadCandidate(Fn);
    return;
  }

  switch (Cand->FailureKind) {
  case ovl_fail_too_many_arguments:
  case ovl_fail_too_few_arguments:
    return DiagnoseArityMismatch(S, Cand, NumArgs, OpLoc); // HLSL Change - add OpLoc

  case ovl_fail_bad_deduction:
    return DiagnoseBadDeduction(S, Cand, NumArgs, OpLoc); // HLSL Change - add OpLoc

  case ovl_fail_illegal_constructor: {
    S.Diag(Fn->getLocation(), diag::note_ovl_candidate_illegal_constructor)
      << (Fn->getPrimaryTemplate() ? 1 : 0);
    MaybeEmitInheritedConstructorNote(S, Fn);
    return;
  }

  case ovl_fail_trivial_conversion:
  case ovl_fail_bad_final_conversion:
  case ovl_fail_final_conversion_not_exact:
    return S.NoteOverloadCandidate(Fn);

  case ovl_fail_bad_conversion: {
    for (unsigned I = (Cand->IgnoreObjectArgument ? 1 : 0),
                  N = Cand->NumConversions; I != N; ++I) {
      // HLSL Change: check in and out, check out conversions
      if (Cand->Conversions[I].isInitialized() && Cand->Conversions[I].isBad())
        return DiagnoseBadConversion(S, Cand, I, Cand->Conversions[I],
                                     OpLoc); // HLSL Change: add OpLoc
      // HLSL Change: check in and out, check out conversions
      if (Cand->OutConversions[I].isInitialized() &&
          Cand->OutConversions[I].isBad())
        return DiagnoseBadConversion(S, Cand, I, Cand->OutConversions[I],
                                     OpLoc); // HLSL Change: add OpLoc
    }

    // FIXME: this currently happens when we're called from SemaInit
    // when user-conversion overload fails. Figure out how to handle
    // those conditions and diagnose them well.
    return S.NoteOverloadCandidate(Fn);
  }

  case ovl_fail_bad_target:
    return DiagnoseBadTarget(S, Cand);

  case ovl_fail_enable_if:
    return DiagnoseFailedEnableIfAttr(S, Cand);
  }
}
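// Illustrative sketch (editor's note, not upstream code): under HLSL, a
// candidate with an 'out' or 'inout' parameter carries a second, reverse
// conversion for copying the argument back out. The OutConversions scan in
// NoteFunctionCandidate above reports a failing copy-back with the same
// bad-conversion note used for ordinary argument conversions.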
static void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
  // Desugar the type of the surrogate down to a function type,
  // retaining as many typedefs as possible while still showing
  // the function type (and, therefore, its parameter types).
  QualType FnType = Cand->Surrogate->getConversionType();
  bool isLValueReference = false;
  bool isRValueReference = false;
  bool isPointer = false;
  if (const LValueReferenceType *FnTypeRef =
        FnType->getAs<LValueReferenceType>()) {
    FnType = FnTypeRef->getPointeeType();
    isLValueReference = true;
  } else if (const RValueReferenceType *FnTypeRef =
               FnType->getAs<RValueReferenceType>()) {
    FnType = FnTypeRef->getPointeeType();
    isRValueReference = true;
  }
  if (const PointerType *FnTypePtr = FnType->getAs<PointerType>()) {
    FnType = FnTypePtr->getPointeeType();
    isPointer = true;
  }

  // Desugar down to a function type.
  FnType = QualType(FnType->getAs<FunctionType>(), 0);

  // Reconstruct the pointer/reference as appropriate.
  if (isPointer) FnType = S.Context.getPointerType(FnType);
  if (isRValueReference) FnType = S.Context.getRValueReferenceType(FnType);
  if (isLValueReference) FnType = S.Context.getLValueReferenceType(FnType);

  S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
    << FnType;
  MaybeEmitInheritedConstructorNote(S, Cand->Surrogate);
}

static void NoteBuiltinOperatorCandidate(Sema &S, StringRef Opc,
                                         SourceLocation OpLoc,
                                         OverloadCandidate *Cand) {
  assert(Cand->NumConversions <= 2 && "builtin operator is not binary");
  std::string TypeStr("operator");
  TypeStr += Opc;
  TypeStr += "(";
  TypeStr += Cand->BuiltinTypes.ParamTypes[0].getAsString();
  if (Cand->NumConversions == 1) {
    TypeStr += ")";
    S.Diag(OpLoc, diag::note_ovl_builtin_unary_candidate) << TypeStr;
  } else {
    TypeStr += ", ";
    TypeStr += Cand->BuiltinTypes.ParamTypes[1].getAsString();
    TypeStr += ")";
    S.Diag(OpLoc, diag::note_ovl_builtin_binary_candidate) << TypeStr;
  }
}

static void NoteAmbiguousUserConversions(Sema &S, SourceLocation OpLoc,
                                         OverloadCandidate *Cand) {
  unsigned NoOperands = Cand->NumConversions;
  for (unsigned ArgIdx = 0; ArgIdx < NoOperands; ++ArgIdx) {
    const ImplicitConversionSequence &ICS = Cand->Conversions[ArgIdx];
    if (ICS.isBad()) break; // all meaningless after first invalid
    if (!ICS.isAmbiguous()) continue;

    ICS.DiagnoseAmbiguousConversion(S, OpLoc,
                        S.PDiag(diag::note_ambiguous_type_conversion));
  }
}

static SourceLocation GetLocationForCandidate(const OverloadCandidate *Cand) {
  if (Cand->Function)
    return Cand->Function->getLocation();
  if (Cand->IsSurrogate)
    return Cand->Surrogate->getLocation();
  return SourceLocation();
}

static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
  switch ((Sema::TemplateDeductionResult)DFI.Result) {
  case Sema::TDK_Success:
    llvm_unreachable("TDK_success while diagnosing bad deduction");

  case Sema::TDK_Invalid:
  case Sema::TDK_Incomplete:
    return 1;

  case Sema::TDK_Underqualified:
  case Sema::TDK_Inconsistent:
    return 2;

  case Sema::TDK_SubstitutionFailure:
  case Sema::TDK_NonDeducedMismatch:
  case Sema::TDK_MiscellaneousDeductionFailure:
    return 3;

  case Sema::TDK_InstantiationDepth:
  case Sema::TDK_FailedOverloadResolution:
    return 4;

  case Sema::TDK_InvalidExplicitArguments:
    return 5;

  case Sema::TDK_TooManyArguments:
  case Sema::TDK_TooFewArguments:
    return 6;
  }
  llvm_unreachable("Unhandled deduction result");
}
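// Illustrative sketch (editor's note, not upstream code): RankDeductionFailure
// gives lower ranks to more "interesting" failures, so when two candidates
// both fail deduction, one failing with TDK_Incomplete (rank 1) sorts before
// one failing with TDK_TooFewArguments (rank 6) in the diagnostic output.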
namespace {
struct CompareOverloadCandidatesForDisplay {
  Sema &S;
  size_t NumArgs;

  CompareOverloadCandidatesForDisplay(Sema &S, size_t nArgs)
      : S(S), NumArgs(nArgs) {}

  bool operator()(const OverloadCandidate *L,
                  const OverloadCandidate *R) {
    // Fast-path this check.
    if (L == R) return false;

    // Order first by viability.
    if (L->Viable) {
      if (!R->Viable) return true;

      // TODO: introduce a tri-valued comparison for overload
      // candidates. Would be more worthwhile if we had a sort
      // that could exploit it.
      if (isBetterOverloadCandidate(S, *L, *R, SourceLocation())) return true;
      if (isBetterOverloadCandidate(S, *R, *L, SourceLocation())) return false;
    } else if (R->Viable)
      return false;

    assert(L->Viable == R->Viable);

    // Criteria by which we can sort non-viable candidates:
    if (!L->Viable) {
      // 1. Arity mismatches come after other candidates.
      if (L->FailureKind == ovl_fail_too_many_arguments ||
          L->FailureKind == ovl_fail_too_few_arguments) {
        if (R->FailureKind == ovl_fail_too_many_arguments ||
            R->FailureKind == ovl_fail_too_few_arguments) {
          int LDist = std::abs((int)L->getNumParams() - (int)NumArgs);
          int RDist = std::abs((int)R->getNumParams() - (int)NumArgs);
          if (LDist == RDist) {
            if (L->FailureKind == R->FailureKind)
              // Sort non-surrogates before surrogates.
              return !L->IsSurrogate && R->IsSurrogate;
            // Sort candidates requiring fewer parameters than there were
            // arguments given after candidates requiring more parameters
            // than there were arguments given.
            return L->FailureKind == ovl_fail_too_many_arguments;
          }
          return LDist < RDist;
        }
        return false;
      }
      if (R->FailureKind == ovl_fail_too_many_arguments ||
          R->FailureKind == ovl_fail_too_few_arguments)
        return true;

      // 2. Bad conversions come first and are ordered by the number
      // of bad conversions and quality of good conversions.
      if (L->FailureKind == ovl_fail_bad_conversion) {
        if (R->FailureKind != ovl_fail_bad_conversion)
          return true;

        // The conversion that can be fixed with a smaller number of changes,
        // comes first.
        unsigned numLFixes = L->Fix.NumConversionsFixed;
        unsigned numRFixes = R->Fix.NumConversionsFixed;
        numLFixes = (numLFixes == 0) ? UINT_MAX : numLFixes;
        numRFixes = (numRFixes == 0) ? UINT_MAX : numRFixes;
        if (numLFixes != numRFixes) {
          return numLFixes < numRFixes;
        }

        // If there's any ordering between the defined conversions...
        // FIXME: this might not be transitive.
        assert(L->NumConversions == R->NumConversions);

        int leftBetter = 0;
        unsigned I = (L->IgnoreObjectArgument || R->IgnoreObjectArgument);
        for (unsigned E = L->NumConversions; I != E; ++I) {
          switch (CompareImplicitConversionSequences(S,
                                                     L->Conversions[I],
                                                     R->Conversions[I])) {
          case ImplicitConversionSequence::Better:
            leftBetter++;
            break;

          case ImplicitConversionSequence::Worse:
            leftBetter--;
            break;

          case ImplicitConversionSequence::Indistinguishable:
            break;
          }
        }
        if (leftBetter > 0) return true;
        if (leftBetter < 0) return false;

      } else if (R->FailureKind == ovl_fail_bad_conversion)
        return false;

      if (L->FailureKind == ovl_fail_bad_deduction) {
        if (R->FailureKind != ovl_fail_bad_deduction)
          return true;

        if (L->DeductionFailure.Result != R->DeductionFailure.Result)
          return RankDeductionFailure(L->DeductionFailure)
               < RankDeductionFailure(R->DeductionFailure);
      } else if (R->FailureKind == ovl_fail_bad_deduction)
        return false;

      // TODO: others?
    }

    // Sort everything else by location.
    SourceLocation LLoc = GetLocationForCandidate(L);
    SourceLocation RLoc = GetLocationForCandidate(R);

    // Put candidates without locations (e.g. builtins) at the end.
    if (LLoc.isInvalid()) return false;
    if (RLoc.isInvalid()) return true;

    return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
  }
};
}

/// CompleteNonViableCandidate - Normally, overload resolution only
/// computes up to the first bad conversion. Produces the FixIt set if
/// possible.
static void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
                                       ArrayRef<Expr *> Args) {
  assert(!Cand->Viable);

  // Don't do anything on failures other than bad conversion.
  if (Cand->FailureKind != ovl_fail_bad_conversion) return;

  // We only want the FixIts if all the arguments can be corrected.
  bool Unfixable = false;
  // Use an implicit copy initialization to check conversion fixes.
  Cand->Fix.setConversionChecker(TryCopyInitialization);

  // Skip forward to the first bad conversion.
  unsigned ConvIdx = (Cand->IgnoreObjectArgument ? 1 : 0);
                        1 : 0);
  unsigned ConvCount = Cand->NumConversions;
  while (true) {
    assert(ConvIdx != ConvCount && "no bad conversion in candidate");
    ConvIdx++;
    if (Cand->Conversions[ConvIdx - 1].isInitialized() &&
        Cand->Conversions[ConvIdx - 1].isBad()) { // HLSL Change - check defined
      Unfixable = !Cand->TryToFixBadConversion(ConvIdx - 1, S);
      break;
    }
    // HLSL Change Starts - check out conversions
    if (Cand->OutConversions[ConvIdx - 1].isInitialized() &&
        Cand->OutConversions[ConvIdx - 1].isBad()) { // HLSL Change - check defined
      // Unfixable = !Cand->TryToFixBadConversion(ConvIdx - 1, S); - consider suggesting a fix
      Unfixable = true;
      break;
    }
    // HLSL Change End
  }

  if (ConvIdx == ConvCount)
    return;

  assert(!Cand->Conversions[ConvIdx].isInitialized() &&
         "remaining conversion is initialized?");

  // FIXME: this should probably be preserved from the overload
  // operation somehow.
  bool SuppressUserConversions = false;

  const FunctionProtoType* Proto;
  unsigned ArgIdx = ConvIdx;

  if (Cand->IsSurrogate) {
    QualType ConvType
      = Cand->Surrogate->getConversionType().getNonReferenceType();
    if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
      ConvType = ConvPtrType->getPointeeType();
    Proto = ConvType->getAs<FunctionProtoType>();
    ArgIdx--;
  } else if (Cand->Function) {
    Proto = Cand->Function->getType()->getAs<FunctionProtoType>();
    if (isa<CXXMethodDecl>(Cand->Function) &&
        !isa<CXXConstructorDecl>(Cand->Function))
      ArgIdx--;
  } else {
    // Builtin binary operator with a bad first conversion.
    assert(ConvCount <= 3);
    for (; ConvIdx != ConvCount && ConvIdx < 3;
         ++ConvIdx) // HLSL Change: explicit about ConvIdx < 3
      Cand->Conversions[ConvIdx]
        = TryCopyInitialization(S, Args[ConvIdx],
                                Cand->BuiltinTypes.ParamTypes[ConvIdx],
                                SuppressUserConversions,
                                /*InOverloadResolution*/ true,
                                /*AllowObjCWritebackConversion=*/
                                  S.getLangOpts().ObjCAutoRefCount);
    return;
  }

  // Fill in the rest of the conversions.
  unsigned NumParams = Proto->getNumParams();
  for (; ConvIdx != ConvCount; ++ConvIdx, ++ArgIdx) {
    if (ArgIdx < NumParams) {
      Cand->Conversions[ConvIdx] = TryCopyInitialization(
          S, Args[ArgIdx], Proto->getParamType(ArgIdx),
          SuppressUserConversions,
          /*InOverloadResolution=*/true,
          /*AllowObjCWritebackConversion=*/
          S.getLangOpts().ObjCAutoRefCount);
      // Store the FixIt in the candidate if it exists.
      if (!Unfixable && Cand->Conversions[ConvIdx].isBad())
        Unfixable = !Cand->TryToFixBadConversion(ConvIdx, S);
    } else
      Cand->Conversions[ConvIdx].setEllipsis();
  }
}

/// NoteCandidates - When overload resolution fails, prints
/// diagnostic messages containing the candidates in the candidate
/// set.
void OverloadCandidateSet::NoteCandidates(Sema &S,
                                          OverloadCandidateDisplayKind OCD,
                                          ArrayRef<Expr *> Args,
                                          StringRef Opc,
                                          SourceLocation OpLoc) {
  // Sort the candidates by viability and position.  Sorting directly would
  // be prohibitive, so we make a set of pointers and sort those.
  SmallVector<OverloadCandidate*, 32> Cands;
  if (OCD == OCD_AllCandidates) Cands.reserve(size());
  for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
    if (Cand->Viable)
      Cands.push_back(Cand);
    else if (OCD == OCD_AllCandidates) {
      CompleteNonViableCandidate(S, Cand, Args);
      if (Cand->Function || Cand->IsSurrogate)
        Cands.push_back(Cand);
      // Otherwise, this is a non-viable builtin candidate. We do not, in
      // general, want to list every possible builtin candidate.
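      // (Illustrative: for an invalid 'p < q' this set could contain one
      // builtin 'operator<' candidate per pair of promoted arithmetic or
      // pointer types, which would drown out the useful notes.)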
    }
  }

  std::sort(Cands.begin(), Cands.end(),
            CompareOverloadCandidatesForDisplay(S, Args.size()));

  bool ReportedAmbiguousConversions = false;

  SmallVectorImpl<OverloadCandidate*>::iterator I, E;
  const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
  unsigned CandsShown = 0;
  for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
    OverloadCandidate *Cand = *I;

    // Set an arbitrary limit on the number of candidate functions we'll spam
    // the user with.  FIXME: This limit should depend on details of the
    // candidate list.
    if (CandsShown >= 4 && ShowOverloads == Ovl_Best) {
      break;
    }
    ++CandsShown;

    if (Cand->Function)
      NoteFunctionCandidate(S, Cand, Args.size(), OpLoc); // HLSL Change: add OpLoc
    else if (Cand->IsSurrogate)
      NoteSurrogateCandidate(S, Cand);
    else {
      assert(Cand->Viable &&
             "Non-viable built-in candidates are not added to Cands.");
      // Generally we only see ambiguities including viable builtin
      // operators if overload resolution got screwed up by an
      // ambiguous user-defined conversion.
      //
      // FIXME: It's quite possible for different conversions to see
      // different ambiguities, though.
      if (!ReportedAmbiguousConversions) {
        NoteAmbiguousUserConversions(S, OpLoc, Cand);
        ReportedAmbiguousConversions = true;
      }

      // If this is a viable builtin, print it.
      NoteBuiltinOperatorCandidate(S, Opc, OpLoc, Cand);
    }
  }

  if (I != E)
    S.Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
}

static SourceLocation
GetLocationForCandidate(const TemplateSpecCandidate *Cand) {
  return Cand->Specialization ? Cand->Specialization->getLocation()
                              : SourceLocation();
}

namespace {
struct CompareTemplateSpecCandidatesForDisplay {
  Sema &S;
  CompareTemplateSpecCandidatesForDisplay(Sema &S) : S(S) {}

  bool operator()(const TemplateSpecCandidate *L,
                  const TemplateSpecCandidate *R) {
    // Fast-path this check.
    if (L == R)
      return false;

    // Assuming that both candidates are not matches...

    // Sort by the ranking of deduction failures.
    if (L->DeductionFailure.Result != R->DeductionFailure.Result)
      return RankDeductionFailure(L->DeductionFailure) <
             RankDeductionFailure(R->DeductionFailure);

    // Sort everything else by location.
    SourceLocation LLoc = GetLocationForCandidate(L);
    SourceLocation RLoc = GetLocationForCandidate(R);

    // Put candidates without locations (e.g. builtins) at the end.
    if (LLoc.isInvalid())
      return false;
    if (RLoc.isInvalid())
      return true;

    return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
  }
};
}

/// Diagnose a template argument deduction failure.
/// We are treating these failures as overload failures due to bad
/// deductions.
void TemplateSpecCandidate::NoteDeductionFailure(Sema &S) {
  DiagnoseBadDeduction(S, Specialization, // pattern
                       DeductionFailure, /*NumArgs=*/0,
                       SourceLocation()); // HLSL Change - add OpLoc
}

void TemplateSpecCandidateSet::destroyCandidates() {
  for (iterator i = begin(), e = end(); i != e; ++i) {
    i->DeductionFailure.Destroy();
  }
}

void TemplateSpecCandidateSet::clear() {
  destroyCandidates();
  Candidates.clear();
}

/// NoteCandidates - When no template specialization match is found, prints
/// diagnostic messages containing the non-matching specializations that form
/// the candidate set.
/// This is analogous to OverloadCandidateSet::NoteCandidates() with
/// OCD == OCD_AllCandidates and Cand->Viable == false.
void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
  // Sort the candidates by position (assuming no candidate is a match).
  // Sorting directly would be prohibitive, so we make a set of pointers
  // and sort those.
SmallVector<TemplateSpecCandidate *, 32> Cands; Cands.reserve(size()); for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) { if (Cand->Specialization) Cands.push_back(Cand); // Otherwise, this is a non-matching builtin candidate. We do not, // in general, want to list every possible builtin candidate. } std::sort(Cands.begin(), Cands.end(), CompareTemplateSpecCandidatesForDisplay(S)); // FIXME: Perhaps rename OverloadsShown and getShowOverloads() // for generalization purposes (?). const OverloadsShown ShowOverloads = S.Diags.getShowOverloads(); SmallVectorImpl<TemplateSpecCandidate *>::iterator I, E; unsigned CandsShown = 0; for (I = Cands.begin(), E = Cands.end(); I != E; ++I) { TemplateSpecCandidate *Cand = *I; // Set an arbitrary limit on the number of candidates we'll spam // the user with. FIXME: This limit should depend on details of the // candidate list. if (CandsShown >= 4 && ShowOverloads == Ovl_Best) break; ++CandsShown; assert(Cand->Specialization && "Non-matching built-in candidates are not added to Cands."); Cand->NoteDeductionFailure(S); } if (I != E) S.Diag(Loc, diag::note_ovl_too_many_candidates) << int(E - I); } // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) { QualType Ret = PossiblyAFunctionType; if (const PointerType *ToTypePtr = PossiblyAFunctionType->getAs<PointerType>()) Ret = ToTypePtr->getPointeeType(); else if (const ReferenceType *ToTypeRef = PossiblyAFunctionType->getAs<ReferenceType>()) Ret = ToTypeRef->getPointeeType(); else if (const MemberPointerType *MemTypePtr = PossiblyAFunctionType->getAs<MemberPointerType>()) Ret = MemTypePtr->getPointeeType(); Ret = Context.getCanonicalType(Ret).getUnqualifiedType(); return Ret; } namespace { // A helper class to help with address of function resolution // - allows us to avoid passing around all those ugly parameters class AddressOfFunctionResolver { Sema& S; Expr* SourceExpr; const QualType& TargetType; QualType TargetFunctionType; // Extracted function type from target type bool Complain; //DeclAccessPair& ResultFunctionAccessPair; ASTContext& Context; bool TargetTypeIsNonStaticMemberFunction; bool FoundNonTemplateFunction; bool StaticMemberFunctionFromBoundPointer; OverloadExpr::FindResult OvlExprInfo; OverloadExpr *OvlExpr; TemplateArgumentListInfo OvlExplicitTemplateArgs; SmallVector<std::pair<DeclAccessPair, FunctionDecl*>, 4> Matches; TemplateSpecCandidateSet FailedCandidates; public: AddressOfFunctionResolver(Sema &S, Expr *SourceExpr, const QualType &TargetType, bool Complain) : S(S), SourceExpr(SourceExpr), TargetType(TargetType), Complain(Complain), Context(S.getASTContext()), TargetTypeIsNonStaticMemberFunction( !!TargetType->getAs<MemberPointerType>()), FoundNonTemplateFunction(false), StaticMemberFunctionFromBoundPointer(false), OvlExprInfo(OverloadExpr::find(SourceExpr)), OvlExpr(OvlExprInfo.Expression), FailedCandidates(OvlExpr->getNameLoc()) { ExtractUnqualifiedFunctionTypeFromTargetType(); if (TargetFunctionType->isFunctionType()) { if (UnresolvedMemberExpr *UME = dyn_cast<UnresolvedMemberExpr>(OvlExpr)) if (!UME->isImplicitAccess() && !S.ResolveSingleFunctionTemplateSpecialization(UME)) StaticMemberFunctionFromBoundPointer = true; } else if (OvlExpr->hasExplicitTemplateArgs()) { DeclAccessPair dap; if (FunctionDecl *Fn = S.ResolveSingleFunctionTemplateSpecialization( OvlExpr, false, 
            &dap)) {
      if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
        if (!Method->isStatic()) {
          // If the target type is a non-function type and the function found
          // is a non-static member function, pretend as if that was the
          // target, it's the only possible type to end up with.
          TargetTypeIsNonStaticMemberFunction = true;

          // And skip adding the function if it's not in the proper form.
          // We'll diagnose this due to an empty set of functions.
          if (!OvlExprInfo.HasFormOfMemberPointer)
            return;
        }

      Matches.push_back(std::make_pair(dap, Fn));
    }
    return;
  }

  if (OvlExpr->hasExplicitTemplateArgs())
    OvlExpr->getExplicitTemplateArgs().copyInto(OvlExplicitTemplateArgs);

  if (FindAllFunctionsThatMatchTargetTypeExactly()) {
    // C++ [over.over]p4:
    //   If more than one function is selected, [...]
    if (Matches.size() > 1) {
      if (FoundNonTemplateFunction)
        EliminateAllTemplateMatches();
      else
        EliminateAllExceptMostSpecializedTemplate();
    }
  }
}

private:
  bool isTargetTypeAFunction() const {
    return TargetFunctionType->isFunctionType();
  }

  // [ToType]     [Return]
  // R (*)(A) --> R (A), IsNonStaticMemberFunction = false
  // R (&)(A) --> R (A), IsNonStaticMemberFunction = false
  // R (S::*)(A) --> R (A), IsNonStaticMemberFunction = true
  void inline ExtractUnqualifiedFunctionTypeFromTargetType() {
    TargetFunctionType = S.ExtractUnqualifiedFunctionType(TargetType);
  }

  // return true if any matching specializations were found
  bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
                                   const DeclAccessPair& CurAccessFunPair) {
    if (CXXMethodDecl *Method
          = dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
      // Skip non-static function templates when converting to pointer, and
      // static when converting to member pointer.
      if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
        return false;
    }
    else if (TargetTypeIsNonStaticMemberFunction)
      return false;

    // C++ [over.over]p2:
    //   If the name is a function template, template argument deduction is
    //   done (14.8.2.2), and if the argument deduction succeeds, the
    //   resulting template argument list is used to generate a single
    //   function template specialization, which is added to the set of
    //   overloaded functions considered.
    FunctionDecl *Specialization = nullptr;
    TemplateDeductionInfo Info(FailedCandidates.getLocation());
    if (Sema::TemplateDeductionResult Result
          = S.DeduceTemplateArguments(FunctionTemplate,
                                      &OvlExplicitTemplateArgs,
                                      TargetFunctionType, Specialization,
                                      Info, /*InOverloadResolution=*/true)) {
      // Make a note of the failed deduction for diagnostics.
      FailedCandidates.addCandidate()
          .set(FunctionTemplate->getTemplatedDecl(),
               MakeDeductionFailureInfo(Context, Result, Info));
      return false;
    }

    // Template argument deduction ensures that we have an exact match or
    // compatible pointer-to-function arguments that would be adjusted by ICS.
    // This function template specialization works.
    Specialization = cast<FunctionDecl>(Specialization->getCanonicalDecl());
    assert(S.isSameOrCompatibleFunctionType(
              Context.getCanonicalType(Specialization->getType()),
              Context.getCanonicalType(TargetFunctionType)));
    Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
    return true;
  }

  bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
                                      const DeclAccessPair& CurAccessFunPair) {
    if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
      // Skip non-static functions when converting to pointer, and static
      // when converting to member pointer.
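      // Illustrative example (assumed, not from the original source):
      //   struct S { void f(); static void g(); };
      //   void (S::*pm)() = &S::f; // member-pointer target keeps only 'f'
      //   void (*pf)()    = &S::g; // pointer target keeps only 'g'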
if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction) return false; } else if (TargetTypeIsNonStaticMemberFunction) return false; if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) { if (S.getLangOpts().CUDA) if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext)) if (!Caller->isImplicit() && S.CheckCUDATarget(Caller, FunDecl)) return false; // If any candidate has a placeholder return type, trigger its deduction // now. if (S.getLangOpts().CPlusPlus14 && FunDecl->getReturnType()->isUndeducedType() && S.DeduceReturnType(FunDecl, SourceExpr->getLocStart(), Complain)) return false; QualType ResultTy; if (Context.hasSameUnqualifiedType(TargetFunctionType, FunDecl->getType()) || S.IsNoReturnConversion(FunDecl->getType(), TargetFunctionType, ResultTy)) { Matches.push_back(std::make_pair(CurAccessFunPair, cast<FunctionDecl>(FunDecl->getCanonicalDecl()))); FoundNonTemplateFunction = true; return true; } } return false; } bool FindAllFunctionsThatMatchTargetTypeExactly() { bool Ret = false; // If the overload expression doesn't have the form of a pointer to // member, don't try to convert it to a pointer-to-member type. if (IsInvalidFormOfPointerToMemberFunction()) return false; for (UnresolvedSetIterator I = OvlExpr->decls_begin(), E = OvlExpr->decls_end(); I != E; ++I) { // Look through any using declarations to find the underlying function. NamedDecl *Fn = (*I)->getUnderlyingDecl(); // C++ [over.over]p3: // Non-member functions and static member functions match // targets of type "pointer-to-function" or "reference-to-function." // Nonstatic member functions match targets of // type "pointer-to-member-function." // Note that according to DR 247, the containing class does not matter. if (FunctionTemplateDecl *FunctionTemplate = dyn_cast<FunctionTemplateDecl>(Fn)) { if (AddMatchingTemplateFunction(FunctionTemplate, I.getPair())) Ret = true; } // If we have explicit template arguments supplied, skip non-templates. else if (!OvlExpr->hasExplicitTemplateArgs() && AddMatchingNonTemplateFunction(Fn, I.getPair())) Ret = true; } assert(Ret || Matches.empty()); return Ret; } void EliminateAllExceptMostSpecializedTemplate() { // [...] and any given function template specialization F1 is // eliminated if the set contains a second function template // specialization whose function template is more specialized // than the function template of F1 according to the partial // ordering rules of 14.5.5.2. // The algorithm specified above is quadratic. We instead use a // two-pass algorithm (similar to the one used to identify the // best viable function in an overload set) that identifies the // best function template (if it exists). UnresolvedSet<4> MatchesCopy; // TODO: avoid! for (unsigned I = 0, E = Matches.size(); I != E; ++I) MatchesCopy.addDecl(Matches[I].second, Matches[I].first.getAccess()); // TODO: It looks like FailedCandidates does not serve much purpose // here, since the no_viable diagnostic has index 0. UnresolvedSetIterator Result = S.getMostSpecialized( MatchesCopy.begin(), MatchesCopy.end(), FailedCandidates, SourceExpr->getLocStart(), S.PDiag(), S.PDiag(diag::err_addr_ovl_ambiguous) << Matches[0] .second->getDeclName(), S.PDiag(diag::note_ovl_candidate) << (unsigned)oc_function_template, Complain, TargetFunctionType); if (Result != MatchesCopy.end()) { // Make it the first and only element Matches[0].first = Matches[Result - MatchesCopy.begin()].first; Matches[0].second = cast<FunctionDecl>(*Result); Matches.resize(1); } } void EliminateAllTemplateMatches() { // [...] 
any function template specializations in the set are // eliminated if the set also contains a non-template function, [...] for (unsigned I = 0, N = Matches.size(); I != N; ) { if (Matches[I].second->getPrimaryTemplate() == nullptr) ++I; else { Matches[I] = Matches[--N]; Matches.set_size(N); } } } public: void ComplainNoMatchesFound() const { assert(Matches.empty()); S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_no_viable) << OvlExpr->getName() << TargetFunctionType << OvlExpr->getSourceRange(); if (FailedCandidates.empty()) S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType); else { // We have some deduction failure messages. Use them to diagnose // the function templates, and diagnose the non-template candidates // normally. for (UnresolvedSetIterator I = OvlExpr->decls_begin(), IEnd = OvlExpr->decls_end(); I != IEnd; ++I) if (FunctionDecl *Fun = dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl())) S.NoteOverloadCandidate(Fun, TargetFunctionType); FailedCandidates.NoteCandidates(S, OvlExpr->getLocStart()); } } bool IsInvalidFormOfPointerToMemberFunction() const { return TargetTypeIsNonStaticMemberFunction && !OvlExprInfo.HasFormOfMemberPointer; } void ComplainIsInvalidFormOfPointerToMemberFunction() const { // TODO: Should we condition this on whether any functions might // have matched, or is it more appropriate to do that in callers? // TODO: a fixit wouldn't hurt. S.Diag(OvlExpr->getNameLoc(), diag::err_addr_ovl_no_qualifier) << TargetType << OvlExpr->getSourceRange(); } bool IsStaticMemberFunctionFromBoundPointer() const { return StaticMemberFunctionFromBoundPointer; } void ComplainIsStaticMemberFunctionFromBoundPointer() const { S.Diag(OvlExpr->getLocStart(), diag::err_invalid_form_pointer_member_function) << OvlExpr->getSourceRange(); } void ComplainOfInvalidConversion() const { S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_not_func_ptrref) << OvlExpr->getName() << TargetType; } void ComplainMultipleMatchesFound() const { assert(Matches.size() > 1); S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous) << OvlExpr->getName() << OvlExpr->getSourceRange(); S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType); } bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); } int getNumMatches() const { return Matches.size(); } FunctionDecl* getMatchingFunctionDecl() const { if (Matches.size() != 1) return nullptr; return Matches[0].second; } const DeclAccessPair* getMatchingFunctionAccessPair() const { if (Matches.size() != 1) return nullptr; return &Matches[0].first; } }; } /// ResolveAddressOfOverloadedFunction - Try to resolve the address of /// an overloaded function (C++ [over.over]), where @p From is an /// expression with overloaded function type and @p ToType is the type /// we're trying to resolve to. For example: /// /// @code /// int f(double); /// int f(int); /// /// int (*pfd)(double) = f; // selects f(double) /// @endcode /// /// This routine returns the resulting FunctionDecl if it could be /// resolved, and NULL otherwise. When @p Complain is true, this /// routine will emit diagnostics if there is an error. 
FunctionDecl * Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &FoundResult, bool *pHadMultipleCandidates) { assert(AddressOfExpr->getType() == Context.OverloadTy); AddressOfFunctionResolver Resolver(*this, AddressOfExpr, TargetType, Complain); int NumMatches = Resolver.getNumMatches(); FunctionDecl *Fn = nullptr; if (NumMatches == 0 && Complain) { if (Resolver.IsInvalidFormOfPointerToMemberFunction()) Resolver.ComplainIsInvalidFormOfPointerToMemberFunction(); else Resolver.ComplainNoMatchesFound(); } else if (NumMatches > 1 && Complain) Resolver.ComplainMultipleMatchesFound(); else if (NumMatches == 1) { Fn = Resolver.getMatchingFunctionDecl(); assert(Fn); FoundResult = *Resolver.getMatchingFunctionAccessPair(); if (Complain) { if (Resolver.IsStaticMemberFunctionFromBoundPointer()) Resolver.ComplainIsStaticMemberFunctionFromBoundPointer(); else CheckAddressOfMemberAccess(AddressOfExpr, FoundResult); } } if (pHadMultipleCandidates) *pHadMultipleCandidates = Resolver.hadMultipleCandidates(); return Fn; } /// \brief Given an expression that refers to an overloaded function, try to /// resolve that overloaded function expression down to a single function. /// /// This routine can only resolve template-ids that refer to a single function /// template, where that template-id refers to a single template whose template /// arguments are either provided by the template-id or have defaults, /// as described in C++0x [temp.arg.explicit]p3. /// /// If no template-ids are found, no diagnostics are emitted and NULL is /// returned. FunctionDecl * Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain, DeclAccessPair *FoundResult) { // C++ [over.over]p1: // [...] [Note: any redundant set of parentheses surrounding the // overloaded function name is ignored (5.1). ] // C++ [over.over]p1: // [...] The overloaded function name can be preceded by the & // operator. // If we didn't actually find any template-ids, we're done. if (!ovl->hasExplicitTemplateArgs()) return nullptr; TemplateArgumentListInfo ExplicitTemplateArgs; ovl->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs); TemplateSpecCandidateSet FailedCandidates(ovl->getNameLoc()); // Look through all of the overloaded functions, searching for one // whose type matches exactly. FunctionDecl *Matched = nullptr; for (UnresolvedSetIterator I = ovl->decls_begin(), E = ovl->decls_end(); I != E; ++I) { // C++0x [temp.arg.explicit]p3: // [...] In contexts where deduction is done and fails, or in contexts // where deduction is not done, if a template argument list is // specified and it, along with any default template arguments, // identifies a single function template specialization, then the // template-id is an lvalue for the function template specialization. FunctionTemplateDecl *FunctionTemplate = cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()); // C++ [over.over]p2: // If the name is a function template, template argument deduction is // done (14.8.2.2), and if the argument deduction succeeds, the // resulting template argument list is used to generate a single // function template specialization, which is added to the set of // overloaded functions considered. 
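    // Illustrative example (assumed): given 'template <class T> T f(T);',
    // the template-id 'f<int>' identifies the single specialization
    // 'int f(int)' via the deduction performed below.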
FunctionDecl *Specialization = nullptr; TemplateDeductionInfo Info(FailedCandidates.getLocation()); if (TemplateDeductionResult Result = DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs, Specialization, Info, /*InOverloadResolution=*/true)) { // Make a note of the failed deduction for diagnostics. // TODO: Actually use the failed-deduction info? FailedCandidates.addCandidate() .set(FunctionTemplate->getTemplatedDecl(), MakeDeductionFailureInfo(Context, Result, Info)); continue; } assert(Specialization && "no specialization and no error?"); // Multiple matches; we can't resolve to a single declaration. if (Matched) { if (Complain) { Diag(ovl->getExprLoc(), diag::err_addr_ovl_ambiguous) << ovl->getName(); NoteAllOverloadCandidates(ovl); } return nullptr; } Matched = Specialization; if (FoundResult) *FoundResult = I.getPair(); } if (Matched && getLangOpts().CPlusPlus14 && Matched->getReturnType()->isUndeducedType() && DeduceReturnType(Matched, ovl->getExprLoc(), Complain)) return nullptr; return Matched; } // Resolve and fix an overloaded expression that can be resolved // because it identifies a single function template specialization. // // Last three arguments should only be supplied if Complain = true // // Return true if it was logically possible to so resolve the // expression, regardless of whether or not it succeeded. Always // returns true if 'complain' is set. bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool doFunctionPointerConverion, bool complain, const SourceRange& OpRangeForComplaining, QualType DestTypeForComplaining, unsigned DiagIDForComplaining) { assert(SrcExpr.get()->getType() == Context.OverloadTy); OverloadExpr::FindResult ovl = OverloadExpr::find(SrcExpr.get()); DeclAccessPair found; ExprResult SingleFunctionExpression; if (FunctionDecl *fn = ResolveSingleFunctionTemplateSpecialization( ovl.Expression, /*complain*/ false, &found)) { if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getLocStart())) { SrcExpr = ExprError(); return true; } // It is only correct to resolve to an instance method if we're // resolving a form that's permitted to be a pointer to member. // Otherwise we'll end up making a bound member expression, which // is illegal in all the contexts we resolve like this. if (!ovl.HasFormOfMemberPointer && isa<CXXMethodDecl>(fn) && cast<CXXMethodDecl>(fn)->isInstance()) { if (!complain) return false; Diag(ovl.Expression->getExprLoc(), diag::err_bound_member_function) << 0 << ovl.Expression->getSourceRange(); // TODO: I believe we only end up here if there's a mix of // static and non-static candidates (otherwise the expression // would have 'bound member' type, not 'overload' type). // Ideally we would note which candidate was chosen and why // the static candidates were rejected. SrcExpr = ExprError(); return true; } // Fix the expression to refer to 'fn'. SingleFunctionExpression = FixOverloadedFunctionReference(SrcExpr.get(), found, fn); // If desired, do function-to-pointer decay. 
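  // (Illustrative: after the fix-up the expression still has function type;
  // a caller that needs 'int (*)(int)' rather than 'int (int)' requests the
  // decay performed below.)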
    if (doFunctionPointerConverion) {
      SingleFunctionExpression =
        DefaultFunctionArrayLvalueConversion(SingleFunctionExpression.get());
      if (SingleFunctionExpression.isInvalid()) {
        SrcExpr = ExprError();
        return true;
      }
    }
  }

  if (!SingleFunctionExpression.isUsable()) {
    if (complain) {
      Diag(OpRangeForComplaining.getBegin(), DiagIDForComplaining)
        << ovl.Expression->getName()
        << DestTypeForComplaining
        << OpRangeForComplaining
        << ovl.Expression->getQualifierLoc().getSourceRange();
      NoteAllOverloadCandidates(SrcExpr.get());

      SrcExpr = ExprError();
      return true;
    }

    return false;
  }

  SrcExpr = SingleFunctionExpression;
  return true;
}

/// \brief Add a single candidate to the overload set.
static void AddOverloadedCallCandidate(Sema &S,
                                       DeclAccessPair FoundDecl,
                                 TemplateArgumentListInfo *ExplicitTemplateArgs,
                                       ArrayRef<Expr *> Args,
                                       OverloadCandidateSet &CandidateSet,
                                       bool PartialOverloading,
                                       bool KnownValid) {
  NamedDecl *Callee = FoundDecl.getDecl();
  if (isa<UsingShadowDecl>(Callee))
    Callee = cast<UsingShadowDecl>(Callee)->getTargetDecl();

  if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Callee)) {
    if (ExplicitTemplateArgs) {
      assert(!KnownValid && "Explicit template arguments?");
      return;
    }
    S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet,
                           /*SuppressUserConversions=*/false,
                           PartialOverloading);
    return;
  }

  if (FunctionTemplateDecl *FuncTemplate
        = dyn_cast<FunctionTemplateDecl>(Callee)) {
    S.AddTemplateOverloadCandidate(FuncTemplate, FoundDecl,
                                   ExplicitTemplateArgs, Args, CandidateSet,
                                   /*SuppressUserConversions=*/false,
                                   PartialOverloading);
    return;
  }

  assert(!KnownValid && "unhandled case in overloaded call candidate");
}

/// \brief Add the overload candidates named by callee and/or found by argument
/// dependent lookup to the given overload set.
void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                       ArrayRef<Expr *> Args,
                                       OverloadCandidateSet &CandidateSet,
                                       bool PartialOverloading) {

#ifndef NDEBUG
  // Verify that ArgumentDependentLookup is consistent with the rules
  // in C++0x [basic.lookup.argdep]p3:
  //
  //   Let X be the lookup set produced by unqualified lookup (3.4.1)
  //   and let Y be the lookup set produced by argument dependent
  //   lookup (defined as follows). If X contains
  //
  //     -- a declaration of a class member, or
  //
  //     -- a block-scope function declaration that is not a
  //        using-declaration, or
  //
  //     -- a declaration that is neither a function nor a function
  //        template
  //
  //   then Y is empty.

  if (ULE->requiresADL()) {
    for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
           E = ULE->decls_end(); I != E; ++I) {
      assert(!(*I)->getDeclContext()->isRecord());
      assert(isa<UsingShadowDecl>(*I) ||
             !(*I)->getDeclContext()->isFunctionOrMethod());
      assert((*I)->getUnderlyingDecl()->isFunctionOrFunctionTemplate());
    }
  }
#endif

  // HLSL Change - allow ExternalSource the ability to add the overloads for a call.
  if (ExternalSource &&
      ExternalSource->AddOverloadedCallCandidates(ULE, Args, CandidateSet,
                                                  PartialOverloading)) {
    return;
  }

  // It would be nice to avoid this copy.
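  // (The copy in question: any explicit template arguments are duplicated
  // into TABuffer below so that a stable pointer can be handed to each
  // candidate.)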
  TemplateArgumentListInfo TABuffer;
  TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr;
  if (ULE->hasExplicitTemplateArgs()) {
    ULE->copyTemplateArgumentsInto(TABuffer);
    ExplicitTemplateArgs = &TABuffer;
  }

  for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
         E = ULE->decls_end(); I != E; ++I)
    AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs, Args,
                               CandidateSet, PartialOverloading,
                               /*KnownValid*/ true);

  if (ULE->requiresADL())
    AddArgumentDependentLookupCandidates(ULE->getName(), ULE->getExprLoc(),
                                         Args, ExplicitTemplateArgs,
                                         CandidateSet, PartialOverloading);
}

/// Determine whether a declaration with the specified name could be moved into
/// a different namespace.
static bool canBeDeclaredInNamespace(const DeclarationName &Name) {
  switch (Name.getCXXOverloadedOperator()) {
  case OO_New: case OO_Array_New:
  case OO_Delete: case OO_Array_Delete:
    return false;

  default:
    return true;
  }
}

/// Attempt to recover from an ill-formed use of a non-dependent name in a
/// template, where the non-dependent name was declared after the template
/// was defined. This is common in code written for compilers that do not
/// correctly implement two-stage name lookup.
///
/// Returns true if a viable candidate was found and a diagnostic was issued.
static bool
DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
                       const CXXScopeSpec &SS, LookupResult &R,
                       OverloadCandidateSet::CandidateSetKind CSK,
                       TemplateArgumentListInfo *ExplicitTemplateArgs,
                       ArrayRef<Expr *> Args,
                       bool *DoDiagnoseEmptyLookup = nullptr) {
  if (SemaRef.ActiveTemplateInstantiations.empty() || !SS.isEmpty())
    return false;

  for (DeclContext *DC = SemaRef.CurContext; DC; DC = DC->getParent()) {
    if (DC->isTransparentContext())
      continue;

    SemaRef.LookupQualifiedName(R, DC);

    if (!R.empty()) {
      R.suppressDiagnostics();

      if (isa<CXXRecordDecl>(DC)) {
        // Don't diagnose names we find in classes; we get much better
        // diagnostics for these from DiagnoseEmptyLookup.
        R.clear();
        if (DoDiagnoseEmptyLookup)
          *DoDiagnoseEmptyLookup = true;
        return false;
      }

      OverloadCandidateSet Candidates(FnLoc, CSK);
      for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
        AddOverloadedCallCandidate(SemaRef, I.getPair(),
                                   ExplicitTemplateArgs, Args,
                                   Candidates, false, /*KnownValid*/ false);

      OverloadCandidateSet::iterator Best;
      if (Candidates.BestViableFunction(SemaRef, FnLoc, Best) != OR_Success) {
        // No viable functions. Don't bother the user with notes for functions
        // which don't work and shouldn't be found anyway.
        R.clear();
        return false;
      }

      // Find the namespaces where ADL would have looked, and suggest
      // declaring the function there instead.
      Sema::AssociatedNamespaceSet AssociatedNamespaces;
      Sema::AssociatedClassSet AssociatedClasses;
      SemaRef.FindAssociatedClassesAndNamespaces(FnLoc, Args,
                                                 AssociatedNamespaces,
                                                 AssociatedClasses);
      Sema::AssociatedNamespaceSet SuggestedNamespaces;
      if (canBeDeclaredInNamespace(R.getLookupName())) {
        DeclContext *Std = SemaRef.getStdNamespace();
        for (Sema::AssociatedNamespaceSet::iterator
               it = AssociatedNamespaces.begin(),
               end = AssociatedNamespaces.end(); it != end; ++it) {
          // Never suggest declaring a function within namespace 'std'.
          if (Std && Std->Encloses(*it))
            continue;

          // Never suggest declaring a function within a namespace with a
          // reserved name, like __gnu_cxx.
NamespaceDecl *NS = dyn_cast<NamespaceDecl>(*it); if (NS && NS->getQualifiedNameAsString().find("__") != std::string::npos) continue; SuggestedNamespaces.insert(*it); } } SemaRef.Diag(R.getNameLoc(), diag::err_not_found_by_two_phase_lookup) << R.getLookupName(); if (SuggestedNamespaces.empty()) { SemaRef.Diag(Best->Function->getLocation(), diag::note_not_found_by_two_phase_lookup) << R.getLookupName() << 0; } else if (SuggestedNamespaces.size() == 1) { SemaRef.Diag(Best->Function->getLocation(), diag::note_not_found_by_two_phase_lookup) << R.getLookupName() << 1 << *SuggestedNamespaces.begin(); } else { // FIXME: It would be useful to list the associated namespaces here, // but the diagnostics infrastructure doesn't provide a way to produce // a localized representation of a list of items. SemaRef.Diag(Best->Function->getLocation(), diag::note_not_found_by_two_phase_lookup) << R.getLookupName() << 2; } // Try to recover by calling this function. return true; } R.clear(); } return false; } /// Attempt to recover from ill-formed use of a non-dependent operator in a /// template, where the non-dependent operator was declared after the template /// was defined. /// /// Returns true if a viable candidate was found and a diagnostic was issued. static bool DiagnoseTwoPhaseOperatorLookup(Sema &SemaRef, OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args) { DeclarationName OpName = SemaRef.Context.DeclarationNames.getCXXOperatorName(Op); LookupResult R(SemaRef, OpName, OpLoc, Sema::LookupOperatorName); return DiagnoseTwoPhaseLookup(SemaRef, OpLoc, CXXScopeSpec(), R, OverloadCandidateSet::CSK_Operator, /*ExplicitTemplateArgs=*/nullptr, Args); } namespace { class BuildRecoveryCallExprRAII { Sema &SemaRef; public: BuildRecoveryCallExprRAII(Sema &S) : SemaRef(S) { assert(SemaRef.IsBuildingRecoveryCallExpr == false); SemaRef.IsBuildingRecoveryCallExpr = true; } ~BuildRecoveryCallExprRAII() { SemaRef.IsBuildingRecoveryCallExpr = false; } }; } static std::unique_ptr<CorrectionCandidateCallback> MakeValidator(Sema &SemaRef, MemberExpr *ME, size_t NumArgs, bool HasTemplateArgs, bool AllowTypoCorrection) { if (!AllowTypoCorrection) return llvm::make_unique<NoTypoCorrectionCCC>(); return llvm::make_unique<FunctionCallFilterCCC>(SemaRef, NumArgs, HasTemplateArgs, ME); } /// Attempts to recover from a call where no functions were found. /// /// Returns true if new candidates were found. static ExprResult BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MutableArrayRef<Expr *> Args, SourceLocation RParenLoc, bool EmptyLookup, bool AllowTypoCorrection) { // Do not try to recover if it is already building a recovery call. 
  // This stops infinite loops for template instantiations like
  //
  //   template <typename T> auto foo(T t) -> decltype(foo(t)) {}
  //   template <typename T> auto foo(T t) -> decltype(foo(&t)) {}
  //
  if (SemaRef.IsBuildingRecoveryCallExpr)
    return ExprError();
  BuildRecoveryCallExprRAII RCE(SemaRef);

  CXXScopeSpec SS;
  SS.Adopt(ULE->getQualifierLoc());
  SourceLocation TemplateKWLoc = ULE->getTemplateKeywordLoc();

  TemplateArgumentListInfo TABuffer;
  TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr;
  if (ULE->hasExplicitTemplateArgs()) {
    ULE->copyTemplateArgumentsInto(TABuffer);
    ExplicitTemplateArgs = &TABuffer;
  }

  LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
                 Sema::LookupOrdinaryName);
  bool DoDiagnoseEmptyLookup = EmptyLookup;
  if (!DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
                              OverloadCandidateSet::CSK_Normal,
                              ExplicitTemplateArgs, Args,
                              &DoDiagnoseEmptyLookup) &&
      (!DoDiagnoseEmptyLookup || SemaRef.DiagnoseEmptyLookup(
          S, SS, R,
          MakeValidator(SemaRef, dyn_cast<MemberExpr>(Fn), Args.size(),
                        ExplicitTemplateArgs != nullptr, AllowTypoCorrection),
          ExplicitTemplateArgs, Args)))
    return ExprError();

  assert(!R.empty() && "lookup results empty despite recovery");

  // Build an implicit member call if appropriate.  Just drop the
  // casts and such from the call, we don't really care.
  ExprResult NewFn = ExprError();
  if ((*R.begin())->isCXXClassMember())
    NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R,
                                                    ExplicitTemplateArgs);
  else if (ExplicitTemplateArgs || TemplateKWLoc.isValid())
    NewFn = SemaRef.BuildTemplateIdExpr(SS, TemplateKWLoc, R, false,
                                        ExplicitTemplateArgs);
  else
    NewFn = SemaRef.BuildDeclarationNameExpr(SS, R, false);

  if (NewFn.isInvalid())
    return ExprError();

  // This shouldn't cause an infinite loop because we're giving it
  // an expression with viable lookup results, which should never
  // end up here.
  return SemaRef.ActOnCallExpr(/*Scope*/ nullptr, NewFn.get(), LParenLoc,
                               MultiExprArg(Args.data(), Args.size()),
                               RParenLoc);
}

/// \brief Constructs and populates an OverloadCandidateSet from
/// the given function.
/// \returns true when the ExprResult output parameter has been set.
bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
                                  UnresolvedLookupExpr *ULE,
                                  MultiExprArg Args,
                                  SourceLocation RParenLoc,
                                  OverloadCandidateSet *CandidateSet,
                                  ExprResult *Result) {
#ifndef NDEBUG
  if (ULE->requiresADL()) {
    // To do ADL, we must have found an unqualified name.
    // HLSL Change Begins
    //
    // We do want to allow argument-dependent lookup for intrinsic
    // function names inside the "vk" namespace (which are by definition
    // qualified names).
    bool isVkNamespace =
        ULE->getQualifier() &&
        ULE->getQualifier()->getKind() == NestedNameSpecifier::Namespace &&
        ULE->getQualifier()->getAsNamespace()->getName() == "vk";
    assert((!ULE->getQualifier() || isVkNamespace) &&
           "non-vk qualified name with ADL");
    // HLSL Change Ends

    // We don't perform ADL for implicit declarations of builtins.
    // Verify that this was correctly set up.
    FunctionDecl *F;
    if (
        // HLSL change begin
        (ULE->getNumDecls() > 0) &&
        // HLSL change end
        ULE->decls_begin() + 1 == ULE->decls_end() &&
        (F = dyn_cast<FunctionDecl>(*ULE->decls_begin())) &&
        F->getBuiltinID() && F->isImplicit())
      llvm_unreachable("performing ADL for builtin");

    // We don't perform ADL in C.
    assert(getLangOpts().CPlusPlus && "ADL enabled in C");
  }
#endif

  UnbridgedCastsSet UnbridgedCasts;
  if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) {
    *Result = ExprError();
    return true;
  }

  // Add the functions denoted by the callee to the set of candidate
  // functions, including those from argument-dependent lookup.
  AddOverloadedCallCandidates(ULE, Args, *CandidateSet);

  if (getLangOpts().MSVCCompat &&
      CurContext->isDependentContext() && !isSFINAEContext() &&
      (isa<FunctionDecl>(CurContext) || isa<CXXRecordDecl>(CurContext))) {

    OverloadCandidateSet::iterator Best;
    if (CandidateSet->empty() ||
        CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best) ==
            OR_No_Viable_Function) {
      // In Microsoft mode, if we are inside a template class member function,
      // then create a type dependent CallExpr. The goal is to postpone name
      // lookup to instantiation time to be able to search into type dependent
      // base classes.
      CallExpr *CE = new (Context) CallExpr(
          Context, Fn, Args, Context.DependentTy, VK_RValue, RParenLoc);
      CE->setTypeDependent(true);
      *Result = CE;
      return true;
    }
  }

  if (CandidateSet->empty())
    return false;

  UnbridgedCasts.restore();
  return false;
}

/// FinishOverloadedCallExpr - given an OverloadCandidateSet, builds and
/// returns the completed call expression. If overload resolution fails, emits
/// diagnostics and returns ExprError().
static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
                                           UnresolvedLookupExpr *ULE,
                                           SourceLocation LParenLoc,
                                           MultiExprArg Args,
                                           SourceLocation RParenLoc,
                                           Expr *ExecConfig,
                                           OverloadCandidateSet *CandidateSet,
                                           OverloadCandidateSet::iterator *Best,
                                           OverloadingResult OverloadResult,
                                           bool AllowTypoCorrection) {
  if (CandidateSet->empty())
    return BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc, Args,
                                 RParenLoc, /*EmptyLookup=*/true,
                                 AllowTypoCorrection);

  switch (OverloadResult) {
  case OR_Success: {
    FunctionDecl *FDecl = (*Best)->Function;
    SemaRef.CheckUnresolvedLookupAccess(ULE, (*Best)->FoundDecl);
    if (SemaRef.DiagnoseUseOfDecl(FDecl, ULE->getNameLoc()))
      return ExprError();
    Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
    return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
                                         ExecConfig);
  }

  case OR_No_Viable_Function: {
    // Try to recover by looking for viable functions which the user might
    // have meant to call.
    ExprResult Recovery = BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc,
                                                Args, RParenLoc,
                                                /*EmptyLookup=*/false,
                                                AllowTypoCorrection);
    if (!Recovery.isInvalid())
      return Recovery;

    SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_no_viable_function_in_call)
        << ULE->getName() << Fn->getSourceRange();
    CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args, StringRef(),
                                 ULE->getLocStart()); // HLSL Change - add loc
    break;
  }

  case OR_Ambiguous:
    SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
        << ULE->getName() << Fn->getSourceRange();
    CandidateSet->NoteCandidates(SemaRef, OCD_ViableCandidates, Args);
    break;

  case OR_Deleted: {
    SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
        << (*Best)->Function->isDeleted() << ULE->getName()
        << SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function)
        << Fn->getSourceRange();
    CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);

    // We emitted an error for the unavailable/deleted function call but keep
    // the call in the AST.
FunctionDecl *FDecl = (*Best)->Function; Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl); return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc, ExecConfig); } } // Overload resolution failed. return ExprError(); } /// BuildOverloadedCallExpr - Given the call expression that calls Fn /// (which eventually refers to the declaration Func) and the call /// arguments Args/NumArgs, attempt to resolve the function call down /// to a specific function. If overload resolution succeeds, returns /// the call expression produced by overload resolution. /// Otherwise, emits diagnostics and returns ExprError. ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection) { OverloadCandidateSet CandidateSet(Fn->getExprLoc(), OverloadCandidateSet::CSK_Normal); ExprResult result; if (buildOverloadedCallSet(S, Fn, ULE, Args, LParenLoc, &CandidateSet, &result)) return result; OverloadCandidateSet::iterator Best; OverloadingResult OverloadResult = CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best); return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args, RParenLoc, ExecConfig, &CandidateSet, &Best, OverloadResult, AllowTypoCorrection); } static bool IsOverloaded(const UnresolvedSetImpl &Functions) { return Functions.size() > 1 || (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin())); } /// \brief Create a unary operation that may resolve to an overloaded /// operator. /// /// \param OpLoc The location of the operator itself (e.g., '*'). /// /// \param OpcIn The UnaryOperator::Opcode that describes this /// operator. /// /// \param Fns The set of non-member functions that will be /// considered by overload resolution. The caller needs to build this /// set based on the context using, e.g., /// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This /// set should not contain any member functions; those will be added /// by CreateOverloadedUnaryOp(). /// /// \param Input The input argument. ExprResult Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn, const UnresolvedSetImpl &Fns, Expr *Input) { UnaryOperator::Opcode Opc = static_cast<UnaryOperator::Opcode>(OpcIn); OverloadedOperatorKind Op = UnaryOperator::getOverloadedOperator(Opc); assert(Op != OO_None && "Invalid opcode for overloaded unary operator"); DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op); // TODO: provide better source location info. DeclarationNameInfo OpNameInfo(OpName, OpLoc); if (checkPlaceholderForOverload(*this, Input)) return ExprError(); Expr *Args[2] = { Input, nullptr }; unsigned NumArgs = 1; // For post-increment and post-decrement, add the implicit '0' as // the second argument, so that we know this is a post-increment or // post-decrement. 
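  // Illustrative example (assumed, not from the original source): for 'x++'
  // on a class type, the dummy '0' built below selects the postfix form
  // 'T T::operator++(int)', whereas prefix '++x' resolves with a single
  // argument to 'T& T::operator++()'.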
if (Opc == UO_PostInc || Opc == UO_PostDec) { llvm::APSInt Zero(Context.getTypeSize(Context.IntTy), false); Args[1] = IntegerLiteral::Create(Context, Zero, Context.IntTy, SourceLocation()); NumArgs = 2; } ArrayRef<Expr *> ArgsArray(Args, NumArgs); if (Input->isTypeDependent()) { if (Fns.empty()) return new (Context) UnaryOperator(Input, Opc, Context.DependentTy, VK_RValue, OK_Ordinary, OpLoc); CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(Context, NamingClass, NestedNameSpecifierLoc(), OpNameInfo, /*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end()); return new (Context) CXXOperatorCallExpr(Context, Op, Fn, ArgsArray, Context.DependentTy, VK_RValue, OpLoc, false); } // Build an empty overload set. OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator); // Add the candidates from the given function set. AddFunctionCandidates(Fns, ArgsArray, CandidateSet); // Add operator candidates that are member functions. AddMemberOperatorCandidates(Op, OpLoc, ArgsArray, CandidateSet); // Add candidates from ADL. AddArgumentDependentLookupCandidates(OpName, OpLoc, ArgsArray, /*ExplicitTemplateArgs*/nullptr, CandidateSet); // Add builtin operator candidates. AddBuiltinOperatorCandidates(Op, OpLoc, ArgsArray, CandidateSet); bool HadMultipleCandidates = (CandidateSet.size() > 1); // Perform overload resolution. OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) { case OR_Success: { // We found a built-in operator or an overloaded operator. FunctionDecl *FnDecl = Best->Function; if (FnDecl) { // We matched an overloaded operator. Build a call to that // operator. // Convert the arguments. if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) { CheckMemberOperatorAccess(OpLoc, Args[0], nullptr, Best->FoundDecl); ExprResult InputRes = PerformObjectArgumentInitialization(Input, /*Qualifier=*/nullptr, Best->FoundDecl, Method); if (InputRes.isInvalid()) return ExprError(); Input = InputRes.get(); } else { // Convert the arguments. ExprResult InputInit = PerformCopyInitialization(InitializedEntity::InitializeParameter( Context, FnDecl->getParamDecl(0)), SourceLocation(), Input); if (InputInit.isInvalid()) return ExprError(); Input = InputInit.get(); } // Build the actual expression node. ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl, Best->FoundDecl, HadMultipleCandidates, OpLoc); if (FnExpr.isInvalid()) return ExprError(); // Determine the result type. QualType ResultTy = FnDecl->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultTy); ResultTy = ResultTy.getNonLValueExprType(Context); Args[0] = Input; CallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(), ArgsArray, ResultTy, VK, OpLoc, false); if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl)) return ExprError(); return MaybeBindToTemporary(TheCall); } else { // We matched a built-in operator. Convert the arguments, then // break out so that we will build the appropriate built-in // operator node. ExprResult InputRes = PerformImplicitConversion(Input, Best->BuiltinTypes.ParamTypes[0], Best->Conversions[0], AA_Passing); if (InputRes.isInvalid()) return ExprError(); Input = InputRes.get(); break; } } case OR_No_Viable_Function: // This is an erroneous use of an operator which can be overloaded by // a non-member function. Check for non-member operators which were // defined too late to be candidates. 
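    // (Illustrative: a '!x' inside a template that was written before a
    // matching 'operator!' was declared will only find that operator at
    // instantiation time; the check below diagnoses that case specially.)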
if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, ArgsArray)) // FIXME: Recover by calling the found function. return ExprError(); // No viable function; fall through to handling this as a // built-in operator, which will produce an error message for us. break; case OR_Ambiguous: Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary) << UnaryOperator::getOpcodeStr(Opc) << Input->getType() << Input->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, ArgsArray, UnaryOperator::getOpcodeStr(Opc), OpLoc); return ExprError(); case OR_Deleted: Diag(OpLoc, diag::err_ovl_deleted_oper) << Best->Function->isDeleted() << UnaryOperator::getOpcodeStr(Opc) << getDeletedOrUnavailableSuffix(Best->Function) << Input->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, ArgsArray, UnaryOperator::getOpcodeStr(Opc), OpLoc); return ExprError(); } // Either we found no viable overloaded operator or we matched a // built-in operator. In either case, fall through to trying to // build a built-in operation. return CreateBuiltinUnaryOp(OpLoc, Opc, Input); } /// \brief Create a binary operation that may resolve to an overloaded /// operator. /// /// \param OpLoc The location of the operator itself (e.g., '+'). /// /// \param OpcIn The BinaryOperator::Opcode that describes this /// operator. /// /// \param Fns The set of non-member functions that will be /// considered by overload resolution. The caller needs to build this /// set based on the context using, e.g., /// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This /// set should not contain any member functions; those will be added /// by CreateOverloadedBinOp(). /// /// \param LHS Left-hand argument. /// \param RHS Right-hand argument. ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc, unsigned OpcIn, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS) { Expr *Args[2] = { LHS, RHS }; LHS=RHS=nullptr; // Please use only Args instead of LHS/RHS couple BinaryOperator::Opcode Opc = static_cast<BinaryOperator::Opcode>(OpcIn); OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc); DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op); // If either side is type-dependent, create an appropriate dependent // expression. if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) { if (Fns.empty()) { // If there are no functions to store, just build a dependent // BinaryOperator or CompoundAssignment. if (Opc <= BO_Assign || Opc > BO_OrAssign) return new (Context) BinaryOperator( Args[0], Args[1], Opc, Context.DependentTy, VK_RValue, OK_Ordinary, OpLoc, FPFeatures.fp_contract); return new (Context) CompoundAssignOperator( Args[0], Args[1], Opc, Context.DependentTy, VK_LValue, OK_Ordinary, Context.DependentTy, Context.DependentTy, OpLoc, FPFeatures.fp_contract); } // FIXME: save results of ADL from here? CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators // TODO: provide better source location info in DNLoc component. DeclarationNameInfo OpNameInfo(OpName, OpLoc); UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(Context, NamingClass, NestedNameSpecifierLoc(), OpNameInfo, /*ADL*/ true, IsOverloaded(Fns), Fns.begin(), Fns.end()); return new (Context) CXXOperatorCallExpr(Context, Op, Fn, Args, Context.DependentTy, VK_RValue, OpLoc, FPFeatures.fp_contract); } // Always do placeholder-like conversions on the RHS. 
if (checkPlaceholderForOverload(*this, Args[1])) return ExprError(); // Do placeholder-like conversion on the LHS; note that we should // not get here with a PseudoObject LHS. assert(Args[0]->getObjectKind() != OK_ObjCProperty); if (checkPlaceholderForOverload(*this, Args[0])) return ExprError(); // If this is the assignment operator, we only perform overload resolution // if the left-hand side is a class or enumeration type. This is actually // a hack. The standard requires that we do overload resolution between the // various built-in candidates, but as DR507 points out, this can lead to // problems. So we do it this way, which pretty much follows what GCC does. // Note that we go the traditional code path for compound assignment forms. if (Opc == BO_Assign && !Args[0]->getType()->isOverloadableType()) return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]); // If this is the .* operator, which is not overloadable, just // create a built-in binary operator. if (Opc == BO_PtrMemD) return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]); // Build an empty overload set. OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator); // Add the candidates from the given function set. AddFunctionCandidates(Fns, Args, CandidateSet); // Add operator candidates that are member functions. AddMemberOperatorCandidates(Op, OpLoc, Args, CandidateSet); // Add candidates from ADL. Per [over.match.oper]p2, this lookup is not // performed for an assignment operator (nor for operator[] nor operator->, // which don't get here). if (Opc != BO_Assign) AddArgumentDependentLookupCandidates(OpName, OpLoc, Args, /*ExplicitTemplateArgs*/ nullptr, CandidateSet); // Add builtin operator candidates. AddBuiltinOperatorCandidates(Op, OpLoc, Args, CandidateSet); bool HadMultipleCandidates = (CandidateSet.size() > 1); // Perform overload resolution. OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) { case OR_Success: { // We found a built-in operator or an overloaded operator. FunctionDecl *FnDecl = Best->Function; if (FnDecl) { // We matched an overloaded operator. Build a call to that // operator. // Convert the arguments. if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) { // Best->Access is only meaningful for class members. CheckMemberOperatorAccess(OpLoc, Args[0], Args[1], Best->FoundDecl); ExprResult Arg1 = PerformCopyInitialization( InitializedEntity::InitializeParameter(Context, FnDecl->getParamDecl(0)), SourceLocation(), Args[1]); if (Arg1.isInvalid()) return ExprError(); ExprResult Arg0 = PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method); if (Arg0.isInvalid()) return ExprError(); Args[0] = Arg0.getAs<Expr>(); Args[1] = RHS = Arg1.getAs<Expr>(); } else { // Convert the arguments. ExprResult Arg0 = PerformCopyInitialization( InitializedEntity::InitializeParameter(Context, FnDecl->getParamDecl(0)), SourceLocation(), Args[0]); if (Arg0.isInvalid()) return ExprError(); ExprResult Arg1 = PerformCopyInitialization( InitializedEntity::InitializeParameter(Context, FnDecl->getParamDecl(1)), SourceLocation(), Args[1]); if (Arg1.isInvalid()) return ExprError(); Args[0] = LHS = Arg0.getAs<Expr>(); Args[1] = RHS = Arg1.getAs<Expr>(); } // Build the actual expression node. ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl, Best->FoundDecl, HadMultipleCandidates, OpLoc); if (FnExpr.isInvalid()) return ExprError(); // Determine the result type. 
      QualType ResultTy = FnDecl->getReturnType();
      ExprValueKind VK = Expr::getValueKindForType(ResultTy);
      ResultTy = ResultTy.getNonLValueExprType(Context);

      CXXOperatorCallExpr *TheCall =
        new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(),
                                          Args, ResultTy, VK, OpLoc,
                                          FPFeatures.fp_contract);

      if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
                              FnDecl))
        return ExprError();

      ArrayRef<const Expr *> ArgsArray(Args, 2);
      // Cut off the implicit 'this'.
      if (isa<CXXMethodDecl>(FnDecl))
        ArgsArray = ArgsArray.slice(1);

      // Check for a self move.
      if (Op == OO_Equal)
        DiagnoseSelfMove(Args[0], Args[1], OpLoc);

      checkCall(FnDecl, nullptr, ArgsArray, isa<CXXMethodDecl>(FnDecl), OpLoc,
                TheCall->getSourceRange(), VariadicDoesNotApply);

      return MaybeBindToTemporary(TheCall);
    } else {
      // We matched a built-in operator. Convert the arguments, then
      // break out so that we will build the appropriate built-in
      // operator node.
      ExprResult ArgsRes0 =
        PerformImplicitConversion(Args[0], Best->BuiltinTypes.ParamTypes[0],
                                  Best->Conversions[0], AA_Passing);
      if (ArgsRes0.isInvalid())
        return ExprError();
      Args[0] = ArgsRes0.get();

      ExprResult ArgsRes1 =
        PerformImplicitConversion(Args[1], Best->BuiltinTypes.ParamTypes[1],
                                  Best->Conversions[1], AA_Passing);
      if (ArgsRes1.isInvalid())
        return ExprError();
      Args[1] = ArgsRes1.get();
      break;
    }
  }

  case OR_No_Viable_Function: {
    // C++ [over.match.oper]p9:
    //   If the operator is the operator , [...] and there are no
    //   viable functions, then the operator is assumed to be the
    //   built-in operator and interpreted according to clause 5.
    if (Opc == BO_Comma)
      break;

    // HLSL Change Starts
    if (getLangOpts().HLSL)
      return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
    // HLSL Change Ends

    // For a class type as the left operand of an assignment or compound
    // assignment operator, do not fall through to the built-in handling;
    // instead report that no overloaded assignment operator was found.
    ExprResult Result = ExprError();
    if (Args[0]->getType()->isRecordType() &&
        Opc >= BO_Assign && Opc <= BO_OrAssign) {
      Diag(OpLoc, diag::err_ovl_no_viable_oper)
           << BinaryOperator::getOpcodeStr(Opc)
           << Args[0]->getSourceRange() << Args[1]->getSourceRange();
      if (Args[0]->getType()->isIncompleteType()) {
        Diag(OpLoc, diag::note_assign_lhs_incomplete)
          << Args[0]->getType()
          << Args[0]->getSourceRange() << Args[1]->getSourceRange();
      }
    } else {
      // This is an erroneous use of an operator which can be overloaded by
      // a non-member function. Check for non-member operators which were
      // defined too late to be candidates.
      if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, Args))
        // FIXME: Recover by calling the found function.
        return ExprError();

      // No viable function; try to create a built-in operation, which will
      // produce an error. Then, show the non-viable candidates.
Result = CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]); } assert(Result.isInvalid() && "C++ binary operator overloading is missing candidates!"); if (Result.isInvalid()) CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, BinaryOperator::getOpcodeStr(Opc), OpLoc); return Result; } case OR_Ambiguous: // HLSL Change Starts if (getLangOpts().HLSL) return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]); // HLSL Change Ends Diag(OpLoc, diag::err_ovl_ambiguous_oper_binary) << BinaryOperator::getOpcodeStr(Opc) << Args[0]->getType() << Args[1]->getType() << Args[0]->getSourceRange() << Args[1]->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, BinaryOperator::getOpcodeStr(Opc), OpLoc); return ExprError(); case OR_Deleted: if (isImplicitlyDeleted(Best->Function)) { CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function); Diag(OpLoc, diag::err_ovl_deleted_special_oper) << Context.getRecordType(Method->getParent()) << getSpecialMember(Method); // The user probably meant to call this special member. Just // explain why it's deleted. NoteDeletedFunction(Method); return ExprError(); } else { Diag(OpLoc, diag::err_ovl_deleted_oper) << Best->Function->isDeleted() << BinaryOperator::getOpcodeStr(Opc) << getDeletedOrUnavailableSuffix(Best->Function) << Args[0]->getSourceRange() << Args[1]->getSourceRange(); } CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, BinaryOperator::getOpcodeStr(Opc), OpLoc); return ExprError(); } // We matched a built-in operator; build it. return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]); } ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx) { Expr *Args[2] = { Base, Idx }; DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Subscript); // If either side is type-dependent, create an appropriate dependent // expression. if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) { CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators // CHECKME: no 'operator' keyword? DeclarationNameInfo OpNameInfo(OpName, LLoc); OpNameInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc)); UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(Context, NamingClass, NestedNameSpecifierLoc(), OpNameInfo, /*ADL*/ true, /*Overloaded*/ false, UnresolvedSetIterator(), UnresolvedSetIterator()); // Can't add any actual overloads yet return new (Context) CXXOperatorCallExpr(Context, OO_Subscript, Fn, Args, Context.DependentTy, VK_RValue, RLoc, false); } // Handle placeholders on both operands. if (checkPlaceholderForOverload(*this, Args[0])) return ExprError(); if (checkPlaceholderForOverload(*this, Args[1])) return ExprError(); // Build an empty overload set. OverloadCandidateSet CandidateSet(LLoc, OverloadCandidateSet::CSK_Operator); // Subscript can only be overloaded as a member function. // Add operator candidates that are member functions. AddMemberOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet); // Add builtin operator candidates. AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet); bool HadMultipleCandidates = (CandidateSet.size() > 1); // Perform overload resolution. OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, LLoc, Best)) { case OR_Success: { // We found a built-in operator or an overloaded operator. FunctionDecl *FnDecl = Best->Function; if (FnDecl) { // We matched an overloaded operator. Build a call to that // operator. 
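// Note: operator[] can only be declared as a non-static member function,
// so Args[0] is necessarily the object argument and Args[1] the single
// subscript argument.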
CheckMemberOperatorAccess(LLoc, Args[0], Args[1], Best->FoundDecl); // Convert the arguments. CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl); ExprResult Arg0 = PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method); if (Arg0.isInvalid()) return ExprError(); Args[0] = Arg0.get(); // Convert the arguments. ExprResult InputInit = PerformCopyInitialization(InitializedEntity::InitializeParameter( Context, FnDecl->getParamDecl(0)), SourceLocation(), Args[1]); if (InputInit.isInvalid()) return ExprError(); Args[1] = InputInit.getAs<Expr>(); // Build the actual expression node. DeclarationNameInfo OpLocInfo(OpName, LLoc); OpLocInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc)); ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl, Best->FoundDecl, HadMultipleCandidates, OpLocInfo.getLoc(), OpLocInfo.getInfo()); if (FnExpr.isInvalid()) return ExprError(); // Determine the result type QualType ResultTy = FnDecl->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultTy); ResultTy = ResultTy.getNonLValueExprType(Context); CXXOperatorCallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, OO_Subscript, FnExpr.get(), Args, ResultTy, VK, RLoc, false); if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl)) return ExprError(); return MaybeBindToTemporary(TheCall); } else { // We matched a built-in operator. Convert the arguments, then // break out so that we will build the appropriate built-in // operator node. ExprResult ArgsRes0 = PerformImplicitConversion(Args[0], Best->BuiltinTypes.ParamTypes[0], Best->Conversions[0], AA_Passing); if (ArgsRes0.isInvalid()) return ExprError(); Args[0] = ArgsRes0.get(); ExprResult ArgsRes1 = PerformImplicitConversion(Args[1], Best->BuiltinTypes.ParamTypes[1], Best->Conversions[1], AA_Passing); if (ArgsRes1.isInvalid()) return ExprError(); Args[1] = ArgsRes1.get(); break; } } case OR_No_Viable_Function: { if (CandidateSet.empty()) Diag(LLoc, diag::err_ovl_no_oper) << Args[0]->getType() << /*subscript*/ 0 << Args[0]->getSourceRange() << Args[1]->getSourceRange(); else Diag(LLoc, diag::err_ovl_no_viable_subscript) << Args[0]->getType() << Args[0]->getSourceRange() << Args[1]->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, "[]", LLoc); return ExprError(); } case OR_Ambiguous: Diag(LLoc, diag::err_ovl_ambiguous_oper_binary) << "[]" << Args[0]->getType() << Args[1]->getType() << Args[0]->getSourceRange() << Args[1]->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args, "[]", LLoc); return ExprError(); case OR_Deleted: Diag(LLoc, diag::err_ovl_deleted_oper) << Best->Function->isDeleted() << "[]" << getDeletedOrUnavailableSuffix(Best->Function) << Args[0]->getSourceRange() << Args[1]->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, "[]", LLoc); return ExprError(); } // We matched a built-in operator; build it. return CreateBuiltinArraySubscriptExpr(Args[0], LLoc, Args[1], RLoc); } /// BuildCallToMemberFunction - Build a call to a member /// function. MemExpr is the expression that refers to the member /// function (and includes the object parameter), Args/NumArgs are the /// arguments to the function call (not including the object /// parameter). The caller needs to validate that the member /// expression refers to a non-static member function or an overloaded /// member function. 
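///
/// Illustrative call shape (a sketch; these names are hypothetical and not
/// part of this file):
/// \code
///   // MemE refers to 'obj.f' and CallArgs holds the parsed arguments.
///   ExprResult Call =
///       S.BuildCallToMemberFunction(Scope, MemE, LParen, CallArgs, RParen);
/// \endcode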
ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc) { assert(MemExprE->getType() == Context.BoundMemberTy || MemExprE->getType() == Context.OverloadTy); // Dig out the member expression. This holds both the object // argument and the member function we're referring to. Expr *NakedMemExpr = MemExprE->IgnoreParens(); // Determine whether this is a call to a pointer-to-member function. if (BinaryOperator *op = dyn_cast<BinaryOperator>(NakedMemExpr)) { assert(op->getType() == Context.BoundMemberTy); assert(op->getOpcode() == BO_PtrMemD || op->getOpcode() == BO_PtrMemI); QualType fnType = op->getRHS()->getType()->castAs<MemberPointerType>()->getPointeeType(); const FunctionProtoType *proto = fnType->castAs<FunctionProtoType>(); QualType resultType = proto->getCallResultType(Context); ExprValueKind valueKind = Expr::getValueKindForType(proto->getReturnType()); // Check that the object type isn't more qualified than the // member function we're calling. Qualifiers funcQuals = Qualifiers::fromCVRMask(proto->getTypeQuals()); QualType objectType = op->getLHS()->getType(); if (op->getOpcode() == BO_PtrMemI) objectType = objectType->castAs<PointerType>()->getPointeeType(); Qualifiers objectQuals = objectType.getQualifiers(); Qualifiers difference = objectQuals - funcQuals; difference.removeObjCGCAttr(); difference.removeAddressSpace(); if (difference) { std::string qualsString = difference.getAsString(); Diag(LParenLoc, diag::err_pointer_to_member_call_drops_quals) << fnType.getUnqualifiedType() << qualsString << (qualsString.find(' ') == std::string::npos ? 1 : 2); } if (resultType->isMemberPointerType()) if (Context.getTargetInfo().getCXXABI().isMicrosoft()) RequireCompleteType(LParenLoc, resultType, 0); CXXMemberCallExpr *call = new (Context) CXXMemberCallExpr(Context, MemExprE, Args, resultType, valueKind, RParenLoc); if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getLocStart(), call, nullptr)) return ExprError(); if (ConvertArgumentsForCall(call, op, nullptr, proto, Args, RParenLoc)) return ExprError(); if (CheckOtherCall(call, proto)) return ExprError(); return MaybeBindToTemporary(call); } if (isa<CXXPseudoDestructorExpr>(NakedMemExpr)) return new (Context) CallExpr(Context, MemExprE, Args, Context.VoidTy, VK_RValue, RParenLoc); UnbridgedCastsSet UnbridgedCasts; if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) return ExprError(); MemberExpr *MemExpr; CXXMethodDecl *Method = nullptr; DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_public); NestedNameSpecifier *Qualifier = nullptr; if (isa<MemberExpr>(NakedMemExpr)) { MemExpr = cast<MemberExpr>(NakedMemExpr); Method = cast<CXXMethodDecl>(MemExpr->getMemberDecl()); FoundDecl = MemExpr->getFoundDecl(); Qualifier = MemExpr->getQualifier(); UnbridgedCasts.restore(); } else { UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr); Qualifier = UnresExpr->getQualifier(); QualType ObjectType = UnresExpr->getBaseType(); // HLSL Change Begin - This is a reference Expr::Classification ObjectClassification = (getLangOpts().HLSL || UnresExpr->isArrow()) ? Expr::Classification::makeSimpleLValue() : UnresExpr->getBase()->Classify(Context); // HLSL Change End - This is a reference // Add overload candidates OverloadCandidateSet CandidateSet(UnresExpr->getMemberLoc(), OverloadCandidateSet::CSK_Normal); // FIXME: avoid copy. 
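// An explicitly-written spelling such as 'obj.f<int>(x)' carries its
// template arguments on the UnresolvedMemberExpr; they are copied into a
// local buffer below so that method template candidates can deduce against
// them (plain, non-template methods are skipped when explicit arguments
// are present).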
TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr; if (UnresExpr->hasExplicitTemplateArgs()) { UnresExpr->copyTemplateArgumentsInto(TemplateArgsBuffer); TemplateArgs = &TemplateArgsBuffer; } for (UnresolvedMemberExpr::decls_iterator I = UnresExpr->decls_begin(), E = UnresExpr->decls_end(); I != E; ++I) { NamedDecl *Func = *I; CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(Func->getDeclContext()); if (isa<UsingShadowDecl>(Func)) Func = cast<UsingShadowDecl>(Func)->getTargetDecl(); // Microsoft supports direct constructor calls. if (getLangOpts().MicrosoftExt && isa<CXXConstructorDecl>(Func)) { AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(), Args, CandidateSet); } else if ((Method = dyn_cast<CXXMethodDecl>(Func))) { // If explicit template arguments were provided, we can't call a // non-template member function. if (TemplateArgs) continue; AddMethodCandidate(Method, I.getPair(), ActingDC, ObjectType, ObjectClassification, Args, CandidateSet, /*SuppressUserConversions=*/false); } else { AddMethodTemplateCandidate(cast<FunctionTemplateDecl>(Func), I.getPair(), ActingDC, TemplateArgs, ObjectType, ObjectClassification, Args, CandidateSet, /*SuppressUsedConversions=*/false); } } DeclarationName DeclName = UnresExpr->getMemberName(); UnbridgedCasts.restore(); OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, UnresExpr->getLocStart(), Best)) { case OR_Success: Method = cast<CXXMethodDecl>(Best->Function); FoundDecl = Best->FoundDecl; CheckUnresolvedMemberAccess(UnresExpr, Best->FoundDecl); if (DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc())) return ExprError(); // If FoundDecl is different from Method (such as if one is a template // and the other a specialization), make sure DiagnoseUseOfDecl is // called on both. // FIXME: This would be more comprehensively addressed by modifying // DiagnoseUseOfDecl to accept both the FoundDecl and the decl // being used. if (Method != FoundDecl.getDecl() && DiagnoseUseOfDecl(Method, UnresExpr->getNameLoc())) return ExprError(); break; case OR_No_Viable_Function: Diag(UnresExpr->getMemberLoc(), diag::err_ovl_no_viable_member_function_in_call) << DeclName << MemExprE->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args, StringRef(), UnresExpr->getMemberLoc()); // HLSL Change - add member loc // FIXME: Leaking incoming expressions! return ExprError(); case OR_Ambiguous: Diag(UnresExpr->getMemberLoc(), diag::err_ovl_ambiguous_member_call) << DeclName << MemExprE->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args); // FIXME: Leaking incoming expressions! return ExprError(); case OR_Deleted: Diag(UnresExpr->getMemberLoc(), diag::err_ovl_deleted_member_call) << Best->Function->isDeleted() << DeclName << getDeletedOrUnavailableSuffix(Best->Function) << MemExprE->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args); // FIXME: Leaking incoming expressions! return ExprError(); } MemExprE = FixOverloadedFunctionReference(MemExprE, FoundDecl, Method); // If overload resolution picked a static member, build a // non-member call based on that function. 
if (Method->isStatic()) { return BuildResolvedCallExpr(MemExprE, Method, LParenLoc, Args, RParenLoc); } MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens()); } QualType ResultType = Method->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultType); ResultType = ResultType.getNonLValueExprType(Context); assert(Method && "Member call to something that isn't a method?"); CXXMemberCallExpr *TheCall = new (Context) CXXMemberCallExpr(Context, MemExprE, Args, ResultType, VK, RParenLoc); // (CUDA B.1): Check for invalid calls between targets. if (getLangOpts().CUDA) { if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext)) { if (CheckCUDATarget(Caller, Method)) { Diag(MemExpr->getMemberLoc(), diag::err_ref_bad_target) << IdentifyCUDATarget(Method) << Method->getIdentifier() << IdentifyCUDATarget(Caller); return ExprError(); } } } // Check for a valid return type. if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(), TheCall, Method)) return ExprError(); // Convert the object argument (for a non-static member function call). // We only need to do this if there was actually an overload; otherwise // it was done at lookup. if (!Method->isStatic()) { ExprResult ObjectArg = PerformObjectArgumentInitialization(MemExpr->getBase(), Qualifier, FoundDecl, Method); if (ObjectArg.isInvalid()) return ExprError(); MemExpr->setBase(ObjectArg.get()); } // Convert the rest of the arguments const FunctionProtoType *Proto = Method->getType()->getAs<FunctionProtoType>(); if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args, RParenLoc)) return ExprError(); DiagnoseSentinelCalls(Method, LParenLoc, Args); if (CheckFunctionCall(Method, TheCall, Proto)) return ExprError(); if ((isa<CXXConstructorDecl>(CurContext) || isa<CXXDestructorDecl>(CurContext)) && TheCall->getMethodDecl()->isPure()) { const CXXMethodDecl *MD = TheCall->getMethodDecl(); if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts())) { Diag(MemExpr->getLocStart(), diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor) << MD->getDeclName() << isa<CXXDestructorDecl>(CurContext) << MD->getParent()->getDeclName(); Diag(MD->getLocStart(), diag::note_previous_decl) << MD->getDeclName(); } } return MaybeBindToTemporary(TheCall); } /// BuildCallToObjectOfClassType - Build a call to an object of class /// type (C++ [over.call.object]), which can end up invoking an /// overloaded function call operator (@c operator()) or performing a /// user-defined conversion on the object argument. ExprResult Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc) { if (checkPlaceholderForOverload(*this, Obj)) return ExprError(); ExprResult Object = Obj; UnbridgedCastsSet UnbridgedCasts; if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) return ExprError(); assert(Object.get()->getType()->isRecordType() && "Requires object type argument"); const RecordType *Record = Object.get()->getType()->getAs<RecordType>(); // C++ [over.call.object]p1: // If the primary-expression E in the function call syntax // evaluates to a class object of type "cv T", then the set of // candidate functions includes at least the function call // operators of T. The function call operators of T are obtained by // ordinary lookup of the name operator() in the context of // (E).operator(). 
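// Illustrative: for 'struct Fn { int operator()(int); } f; f(1);' the
// qualified lookup below finds Fn::operator() and adds it as a candidate;
// conversion functions to function pointer types contribute the surrogate
// candidates handled further down.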
OverloadCandidateSet CandidateSet(LParenLoc, OverloadCandidateSet::CSK_Operator); DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Call); if (RequireCompleteType(LParenLoc, Object.get()->getType(), diag::err_incomplete_object_call, Object.get())) return true; LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName); LookupQualifiedName(R, Record->getDecl()); R.suppressDiagnostics(); for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end(); Oper != OperEnd; ++Oper) { AddMethodCandidate(Oper.getPair(), Object.get()->getType(), Object.get()->Classify(Context), Args, CandidateSet, /*SuppressUserConversions=*/ false); } // C++ [over.call.object]p2: // In addition, for each (non-explicit in C++0x) conversion function // declared in T of the form // // operator conversion-type-id () cv-qualifier; // // where cv-qualifier is the same cv-qualification as, or a // greater cv-qualification than, cv, and where conversion-type-id // denotes the type "pointer to function of (P1,...,Pn) returning // R", or the type "reference to pointer to function of // (P1,...,Pn) returning R", or the type "reference to function // of (P1,...,Pn) returning R", a surrogate call function [...] // is also considered as a candidate function. Similarly, // surrogate call functions are added to the set of candidate // functions for each conversion function declared in an // accessible base class provided the function is not hidden // within T by another intervening declaration. const auto &Conversions = cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions(); for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) { NamedDecl *D = *I; CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext()); if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); // Skip over templated conversion functions; they aren't // surrogates. if (isa<FunctionTemplateDecl>(D)) continue; CXXConversionDecl *Conv = cast<CXXConversionDecl>(D); if (!Conv->isExplicit()) { // Strip the reference type (if any) and then the pointer type (if // any) to get down to what might be a function type. QualType ConvType = Conv->getConversionType().getNonReferenceType(); if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>()) ConvType = ConvPtrType->getPointeeType(); if (const FunctionProtoType *Proto = ConvType->getAs<FunctionProtoType>()) { AddSurrogateCandidate(Conv, I.getPair(), ActingContext, Proto, Object.get(), Args, CandidateSet); } } } bool HadMultipleCandidates = (CandidateSet.size() > 1); // Perform overload resolution. OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, Object.get()->getLocStart(), Best)) { case OR_Success: // Overload resolution succeeded; we'll build the appropriate call // below. 
break; case OR_No_Viable_Function: if (CandidateSet.empty()) Diag(Object.get()->getLocStart(), diag::err_ovl_no_oper) << Object.get()->getType() << /*call*/ 1 << Object.get()->getSourceRange(); else Diag(Object.get()->getLocStart(), diag::err_ovl_no_viable_object_call) << Object.get()->getType() << Object.get()->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args); break; case OR_Ambiguous: Diag(Object.get()->getLocStart(), diag::err_ovl_ambiguous_object_call) << Object.get()->getType() << Object.get()->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args); break; case OR_Deleted: Diag(Object.get()->getLocStart(), diag::err_ovl_deleted_object_call) << Best->Function->isDeleted() << Object.get()->getType() << getDeletedOrUnavailableSuffix(Best->Function) << Object.get()->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args); break; } if (Best == CandidateSet.end()) return true; UnbridgedCasts.restore(); if (Best->Function == nullptr) { // Since there is no function declaration, this is one of the // surrogate candidates. Dig out the conversion function. CXXConversionDecl *Conv = cast<CXXConversionDecl>( Best->Conversions[0].UserDefined.ConversionFunction); CheckMemberOperatorAccess(LParenLoc, Object.get(), nullptr, Best->FoundDecl); if (DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc)) return ExprError(); assert(Conv == Best->FoundDecl.getDecl() && "Found Decl & conversion-to-functionptr should be same, right?!"); // We selected one of the surrogate functions that converts the // object parameter to a function pointer. Perform the conversion // on the object argument, then let ActOnCallExpr finish the job. // Create an implicit member expr to refer to the conversion operator. // and then call it. ExprResult Call = BuildCXXMemberCallExpr(Object.get(), Best->FoundDecl, Conv, HadMultipleCandidates); if (Call.isInvalid()) return ExprError(); // Record usage of conversion in an implicit cast. Call = ImplicitCastExpr::Create(Context, Call.get()->getType(), CK_UserDefinedConversion, Call.get(), nullptr, VK_RValue); return ActOnCallExpr(S, Call.get(), LParenLoc, Args, RParenLoc); } CheckMemberOperatorAccess(LParenLoc, Object.get(), nullptr, Best->FoundDecl); // We found an overloaded operator(). Build a CXXOperatorCallExpr // that calls this method, using Object for the implicit object // parameter and passing along the remaining arguments. CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function); // An error diagnostic has already been printed when parsing the declaration. if (Method->isInvalidDecl()) return ExprError(); const FunctionProtoType *Proto = Method->getType()->getAs<FunctionProtoType>(); unsigned NumParams = Proto->getNumParams(); DeclarationNameInfo OpLocInfo( Context.DeclarationNames.getCXXOperatorName(OO_Call), LParenLoc); OpLocInfo.setCXXOperatorNameRange(SourceRange(LParenLoc, RParenLoc)); ExprResult NewFn = CreateFunctionRefExpr(*this, Method, Best->FoundDecl, HadMultipleCandidates, OpLocInfo.getLoc(), OpLocInfo.getInfo()); if (NewFn.isInvalid()) return true; // Build the full argument list for the method call (the implicit object // parameter is placed at the beginning of the list). std::unique_ptr<Expr * []> MethodArgs(new Expr *[Args.size() + 1]); MethodArgs[0] = Object.get(); std::copy(Args.begin(), Args.end(), &MethodArgs[1]); // Once we've built TheCall, all of the expressions are properly // owned. 
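// MethodArgs is now laid out as [object, arg0, arg1, ...]: slot 0 holds the
// implicit object argument and the parsed arguments follow it, matching the
// setArg() indexing used below.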
QualType ResultTy = Method->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultTy); ResultTy = ResultTy.getNonLValueExprType(Context); CXXOperatorCallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn.get(), llvm::makeArrayRef(MethodArgs.get(), Args.size() + 1), ResultTy, VK, RParenLoc, false); MethodArgs.reset(); if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method)) return true; // We may have default arguments. If so, we need to allocate more // slots in the call for them. if (Args.size() < NumParams) TheCall->setNumArgs(Context, NumParams + 1); bool IsError = false; // Initialize the implicit object parameter. ExprResult ObjRes = PerformObjectArgumentInitialization(Object.get(), /*Qualifier=*/nullptr, Best->FoundDecl, Method); if (ObjRes.isInvalid()) IsError = true; else Object = ObjRes; TheCall->setArg(0, Object.get()); // Check the argument types. for (unsigned i = 0; i != NumParams; i++) { Expr *Arg; if (i < Args.size()) { Arg = Args[i]; // Pass the argument. ExprResult InputInit = PerformCopyInitialization(InitializedEntity::InitializeParameter( Context, Method->getParamDecl(i)), SourceLocation(), Arg); IsError |= InputInit.isInvalid(); Arg = InputInit.getAs<Expr>(); } else { ExprResult DefArg = BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i)); if (DefArg.isInvalid()) { IsError = true; break; } Arg = DefArg.getAs<Expr>(); } TheCall->setArg(i + 1, Arg); } // If this is a variadic call, handle args passed through "...". if (Proto->isVariadic()) { // Promote the arguments (C99 6.5.2.2p7). for (unsigned i = NumParams, e = Args.size(); i < e; i++) { ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, nullptr); IsError |= Arg.isInvalid(); TheCall->setArg(i + 1, Arg.get()); } } if (IsError) return true; DiagnoseSentinelCalls(Method, LParenLoc, Args); if (CheckFunctionCall(Method, TheCall, Proto)) return true; return MaybeBindToTemporary(TheCall); } /// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator-> /// (if one exists), where @c Base is an expression of class type and /// @c Member is the name of the member we're trying to find. ExprResult Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound) { assert(Base->getType()->isRecordType() && "left-hand side must have class type"); if (checkPlaceholderForOverload(*this, Base)) return ExprError(); SourceLocation Loc = Base->getExprLoc(); // C++ [over.ref]p1: // // [...] An expression x->m is interpreted as (x.operator->())->m // for a class object x of type T if T::operator->() exists and if // the operator is selected as the best match function by the // overload resolution mechanism (13.3). DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Arrow); OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Operator); const RecordType *BaseRecord = Base->getType()->getAs<RecordType>(); if (RequireCompleteType(Loc, Base->getType(), diag::err_typecheck_incomplete_tag, Base)) return ExprError(); LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName); LookupQualifiedName(R, BaseRecord->getDecl()); R.suppressDiagnostics(); for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end(); Oper != OperEnd; ++Oper) { AddMethodCandidate(Oper.getPair(), Base->getType(), Base->Classify(Context), None, CandidateSet, /*SuppressUserConversions=*/false); } bool HadMultipleCandidates = (CandidateSet.size() > 1); // Perform overload resolution. 
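// Illustrative: given 'struct SmartPtr { T *operator->(); } p;', resolving
// 'p->m' selects SmartPtr::operator-> here; chained applications of '->' on
// the returned value are driven by the caller, not by this function.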
OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) { case OR_Success: // Overload resolution succeeded; we'll build the call below. break; case OR_No_Viable_Function: if (CandidateSet.empty()) { QualType BaseType = Base->getType(); if (NoArrowOperatorFound) { // Report this specific error to the caller instead of emitting a // diagnostic, as requested. *NoArrowOperatorFound = true; return ExprError(); } Diag(OpLoc, diag::err_typecheck_member_reference_arrow) << BaseType << Base->getSourceRange(); if (BaseType->isRecordType() && !BaseType->isPointerType()) { Diag(OpLoc, diag::note_typecheck_member_reference_suggestion) << FixItHint::CreateReplacement(OpLoc, "."); } } else Diag(OpLoc, diag::err_ovl_no_viable_oper) << "operator->" << Base->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base); return ExprError(); case OR_Ambiguous: Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary) << "->" << Base->getType() << Base->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Base); return ExprError(); case OR_Deleted: Diag(OpLoc, diag::err_ovl_deleted_oper) << Best->Function->isDeleted() << "->" << getDeletedOrUnavailableSuffix(Best->Function) << Base->getSourceRange(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base); return ExprError(); } CheckMemberOperatorAccess(OpLoc, Base, nullptr, Best->FoundDecl); // Convert the object parameter. CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function); ExprResult BaseResult = PerformObjectArgumentInitialization(Base, /*Qualifier=*/nullptr, Best->FoundDecl, Method); if (BaseResult.isInvalid()) return ExprError(); Base = BaseResult.get(); // Build the operator call. ExprResult FnExpr = CreateFunctionRefExpr(*this, Method, Best->FoundDecl, HadMultipleCandidates, OpLoc); if (FnExpr.isInvalid()) return ExprError(); QualType ResultTy = Method->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultTy); ResultTy = ResultTy.getNonLValueExprType(Context); CXXOperatorCallExpr *TheCall = new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.get(), Base, ResultTy, VK, OpLoc, false); if (CheckCallReturnType(Method->getReturnType(), OpLoc, TheCall, Method)) return ExprError(); return MaybeBindToTemporary(TheCall); } /// BuildLiteralOperatorCall - Build a UserDefinedLiteral by creating a call to /// a literal operator described by the provided lookup results. ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr*> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *TemplateArgs) { SourceLocation UDSuffixLoc = SuffixInfo.getCXXLiteralOperatorNameLoc(); OverloadCandidateSet CandidateSet(UDSuffixLoc, OverloadCandidateSet::CSK_Normal); AddFunctionCandidates(R.asUnresolvedSet(), Args, CandidateSet, TemplateArgs, /*SuppressUserConversions=*/true); bool HadMultipleCandidates = (CandidateSet.size() > 1); // Perform overload resolution. This will usually be trivial, but might need // to perform substitutions for a literal operator template. 
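// Illustrative: for a literal such as '12_km' declared as
// 'Distance operator""_km(unsigned long long);' the set holds one ordinary
// candidate; a literal operator template instead has its template arguments
// deduced from the literal's characters during this resolution.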
OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(*this, UDSuffixLoc, Best)) { case OR_Success: case OR_Deleted: break; case OR_No_Viable_Function: Diag(UDSuffixLoc, diag::err_ovl_no_viable_function_in_call) << R.getLookupName(); CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args); return ExprError(); case OR_Ambiguous: Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName(); CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args); return ExprError(); } FunctionDecl *FD = Best->Function; ExprResult Fn = CreateFunctionRefExpr(*this, FD, Best->FoundDecl, HadMultipleCandidates, SuffixInfo.getLoc(), SuffixInfo.getInfo()); if (Fn.isInvalid()) return true; // Check the argument types. This should almost always be a no-op, except // that array-to-pointer decay is applied to string literals. Expr *ConvArgs[2]; for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) { ExprResult InputInit = PerformCopyInitialization( InitializedEntity::InitializeParameter(Context, FD->getParamDecl(ArgIdx)), SourceLocation(), Args[ArgIdx]); if (InputInit.isInvalid()) return true; ConvArgs[ArgIdx] = InputInit.get(); } QualType ResultTy = FD->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultTy); ResultTy = ResultTy.getNonLValueExprType(Context); UserDefinedLiteral *UDL = new (Context) UserDefinedLiteral(Context, Fn.get(), llvm::makeArrayRef(ConvArgs, Args.size()), ResultTy, VK, LitEndLoc, UDSuffixLoc); if (CheckCallReturnType(FD->getReturnType(), UDSuffixLoc, UDL, FD)) return ExprError(); if (CheckFunctionCall(FD, UDL, nullptr)) return ExprError(); return MaybeBindToTemporary(UDL); } /// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the /// given LookupResult is non-empty, it is assumed to describe a member which /// will be invoked. Otherwise, the function will be found via argument /// dependent lookup. /// CallExpr is set to a valid expression and FRS_Success returned on success, /// otherwise CallExpr is set to ExprError() and some non-success value /// is returned. 
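///
/// Sketch of the C++11 mapping (names illustrative): for 'for (auto x : r)',
/// a class-type range whose lookup finds members named begin/end (MemberLookup
/// non-empty) uses 'r.begin()'/'r.end()'; otherwise 'begin(r)'/'end(r)' are
/// found via argument-dependent lookup.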
Sema::ForRangeStatus Sema::BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr) { CandidateSet->clear(); if (!MemberLookup.empty()) { ExprResult MemberRef = BuildMemberReferenceExpr(Range, Range->getType(), Loc, /*IsPtr=*/false, CXXScopeSpec(), /*TemplateKWLoc=*/SourceLocation(), /*FirstQualifierInScope=*/nullptr, MemberLookup, /*TemplateArgs=*/nullptr); if (MemberRef.isInvalid()) { *CallExpr = ExprError(); Diag(Range->getLocStart(), diag::note_in_for_range) << RangeLoc << BEF << Range->getType(); return FRS_DiagnosticIssued; } *CallExpr = ActOnCallExpr(S, MemberRef.get(), Loc, None, Loc, nullptr); if (CallExpr->isInvalid()) { *CallExpr = ExprError(); Diag(Range->getLocStart(), diag::note_in_for_range) << RangeLoc << BEF << Range->getType(); return FRS_DiagnosticIssued; } } else { UnresolvedSet<0> FoundNames; UnresolvedLookupExpr *Fn = UnresolvedLookupExpr::Create(Context, /*NamingClass=*/nullptr, NestedNameSpecifierLoc(), NameInfo, /*NeedsADL=*/true, /*Overloaded=*/false, FoundNames.begin(), FoundNames.end()); bool CandidateSetError = buildOverloadedCallSet(S, Fn, Fn, Range, Loc, CandidateSet, CallExpr); if (CandidateSet->empty() || CandidateSetError) { *CallExpr = ExprError(); return FRS_NoViableFunction; } OverloadCandidateSet::iterator Best; OverloadingResult OverloadResult = CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best); if (OverloadResult == OR_No_Viable_Function) { *CallExpr = ExprError(); return FRS_NoViableFunction; } *CallExpr = FinishOverloadedCallExpr(*this, S, Fn, Fn, Loc, Range, Loc, nullptr, CandidateSet, &Best, OverloadResult, /*AllowTypoCorrection=*/false); if (CallExpr->isInvalid() || OverloadResult != OR_Success) { *CallExpr = ExprError(); Diag(Range->getLocStart(), diag::note_in_for_range) << RangeLoc << BEF << Range->getType(); return FRS_DiagnosticIssued; } } return FRS_Success; } /// FixOverloadedFunctionReference - E is an expression that refers to /// a C++ overloaded function (possibly with some parentheses and /// perhaps a '&' around it). We have resolved the overloaded function /// to the function declaration Fn, so patch up the expression E to /// refer (possibly indirectly) to Fn. Returns the new expr. 
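///
/// Illustrative: given 'int f(int); int f(double);', resolving '&f' against a
/// target of type 'int (*)(int)' rewrites the inner DeclRefExpr so that the
/// address-of expression refers to 'f(int)'.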
Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found, FunctionDecl *Fn) { if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) { Expr *SubExpr = FixOverloadedFunctionReference(PE->getSubExpr(), Found, Fn); if (SubExpr == PE->getSubExpr()) return PE; return new (Context) ParenExpr(PE->getLParen(), PE->getRParen(), SubExpr); } if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { Expr *SubExpr = FixOverloadedFunctionReference(ICE->getSubExpr(), Found, Fn); assert(Context.hasSameType(ICE->getSubExpr()->getType(), SubExpr->getType()) && "Implicit cast type cannot be determined from overload"); assert(ICE->path_empty() && "fixing up hierarchy conversion?"); if (SubExpr == ICE->getSubExpr()) return ICE; return ImplicitCastExpr::Create(Context, ICE->getType(), ICE->getCastKind(), SubExpr, nullptr, ICE->getValueKind()); } if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) { assert(UnOp->getOpcode() == UO_AddrOf && "Can only take the address of an overloaded function"); if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) { if (Method->isStatic()) { // Do nothing: static member functions aren't any different // from non-member functions. } else { // Fix the subexpression, which really has to be an // UnresolvedLookupExpr holding an overloaded member function // or template. Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(), Found, Fn); if (SubExpr == UnOp->getSubExpr()) return UnOp; assert(isa<DeclRefExpr>(SubExpr) && "fixed to something other than a decl ref"); assert(cast<DeclRefExpr>(SubExpr)->getQualifier() && "fixed to a member ref with no nested name qualifier"); // We have taken the address of a pointer to member // function. Perform the computation here so that we get the // appropriate pointer to member type. QualType ClassType = Context.getTypeDeclType(cast<RecordDecl>(Method->getDeclContext())); QualType MemPtrType = Context.getMemberPointerType(Fn->getType(), ClassType.getTypePtr()); return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType, VK_RValue, OK_Ordinary, UnOp->getOperatorLoc()); } } Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(), Found, Fn); if (SubExpr == UnOp->getSubExpr()) return UnOp; return new (Context) UnaryOperator(SubExpr, UO_AddrOf, Context.getPointerType(SubExpr->getType()), VK_RValue, OK_Ordinary, UnOp->getOperatorLoc()); } if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { // FIXME: avoid copy. TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr; if (ULE->hasExplicitTemplateArgs()) { ULE->copyTemplateArgumentsInto(TemplateArgsBuffer); TemplateArgs = &TemplateArgsBuffer; } DeclRefExpr *DRE = DeclRefExpr::Create(Context, ULE->getQualifierLoc(), ULE->getTemplateKeywordLoc(), Fn, /*enclosing*/ false, // FIXME? ULE->getNameLoc(), Fn->getType(), VK_LValue, Found.getDecl(), TemplateArgs); MarkDeclRefReferenced(DRE); DRE->setHadMultipleCandidates(ULE->getNumDecls() > 1); return DRE; } if (UnresolvedMemberExpr *MemExpr = dyn_cast<UnresolvedMemberExpr>(E)) { // FIXME: avoid copy. TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr; if (MemExpr->hasExplicitTemplateArgs()) { MemExpr->copyTemplateArgumentsInto(TemplateArgsBuffer); TemplateArgs = &TemplateArgsBuffer; } Expr *Base; // If we're filling in a static method where we used to have an // implicit member access, rewrite to a simple decl ref. 
if (MemExpr->isImplicitAccess()) { if (cast<CXXMethodDecl>(Fn)->isStatic()) { DeclRefExpr *DRE = DeclRefExpr::Create(Context, MemExpr->getQualifierLoc(), MemExpr->getTemplateKeywordLoc(), Fn, /*enclosing*/ false, MemExpr->getMemberLoc(), Fn->getType(), VK_LValue, Found.getDecl(), TemplateArgs); MarkDeclRefReferenced(DRE); DRE->setHadMultipleCandidates(MemExpr->getNumDecls() > 1); return DRE; } else { SourceLocation Loc = MemExpr->getMemberLoc(); if (MemExpr->getQualifier()) Loc = MemExpr->getQualifierLoc().getBeginLoc(); CheckCXXThisCapture(Loc); // HLSL Change Begin - This is a reference if (getLangOpts().HLSL) Base = genereateHLSLThis(Loc, MemExpr->getBaseType(), /*isImplicit=*/true); else Base = new (Context) CXXThisExpr(Loc, MemExpr->getBaseType(), /*isImplicit=*/true); // HLSL Change End - This is a reference } } else Base = MemExpr->getBase(); ExprValueKind valueKind; QualType type; if (cast<CXXMethodDecl>(Fn)->isStatic()) { valueKind = VK_LValue; type = Fn->getType(); } else { valueKind = VK_RValue; type = Context.BoundMemberTy; } MemberExpr *ME = MemberExpr::Create( Context, Base, MemExpr->isArrow(), MemExpr->getOperatorLoc(), MemExpr->getQualifierLoc(), MemExpr->getTemplateKeywordLoc(), Fn, Found, MemExpr->getMemberNameInfo(), TemplateArgs, type, valueKind, OK_Ordinary); ME->setHadMultipleCandidates(true); MarkMemberReferenced(ME); return ME; } llvm_unreachable("Invalid reference to overloaded function"); } ExprResult Sema::FixOverloadedFunctionReference(ExprResult E, DeclAccessPair Found, FunctionDecl *Fn) { return FixOverloadedFunctionReference(E.get(), Found, Fn); }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaDeclObjC.cpp
//===--- SemaDeclObjC.cpp - Semantic Analysis for ObjC Declarations -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for Objective C declarations. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/DataRecursiveASTVisitor.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseSet.h" #include "TypeLocBuilder.h" using namespace clang; /// Check whether the given method, which must be in the 'init' /// family, is a valid member of that family. /// /// \param receiverTypeIfCall - if null, check this as if declaring it; /// if non-null, check this as if making a call to it with the given /// receiver type /// /// \return true to indicate that there was an error and appropriate /// actions were taken bool Sema::checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall) { if (method->isInvalidDecl()) return true; // This castAs is safe: methods that don't return an object // pointer won't be inferred as inits and will reject an explicit // objc_method_family(init). // We ignore protocols here. Should we? What about Class? const ObjCObjectType *result = method->getReturnType()->castAs<ObjCObjectPointerType>()->getObjectType(); if (result->isObjCId()) { return false; } else if (result->isObjCClass()) { // fall through: always an error } else { ObjCInterfaceDecl *resultClass = result->getInterface(); assert(resultClass && "unexpected object type!"); // It's okay for the result type to still be a forward declaration // if we're checking an interface declaration. if (!resultClass->hasDefinition()) { if (receiverTypeIfCall.isNull() && !isa<ObjCImplementationDecl>(method->getDeclContext())) return false; // Otherwise, we try to compare class types. } else { // If this method was declared in a protocol, we can't check // anything unless we have a receiver type that's an interface. const ObjCInterfaceDecl *receiverClass = nullptr; if (isa<ObjCProtocolDecl>(method->getDeclContext())) { if (receiverTypeIfCall.isNull()) return false; receiverClass = receiverTypeIfCall->castAs<ObjCObjectPointerType>() ->getInterfaceDecl(); // This can be null for calls to e.g. id<Foo>. if (!receiverClass) return false; } else { receiverClass = method->getClassInterface(); assert(receiverClass && "method not associated with a class!"); } // If either class is a subclass of the other, it's fine. if (receiverClass->isSuperClassOf(resultClass) || resultClass->isSuperClassOf(receiverClass)) return false; } } SourceLocation loc = method->getLocation(); // If we're in a system header, and this is not a call, just make // the method unusable. 
if (receiverTypeIfCall.isNull() && getSourceManager().isInSystemHeader(loc)) { method->addAttr(UnavailableAttr::CreateImplicit(Context, "init method returns a type unrelated to its receiver type", loc)); return true; } // Otherwise, it's an error. Diag(loc, diag::err_arc_init_method_unrelated_result_type); method->setInvalidDecl(); return true; } void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden) { if (Overridden->hasRelatedResultType() && !NewMethod->hasRelatedResultType()) { // This can only happen when the method follows a naming convention that // implies a related result type, and the original (overridden) method has // a suitable return type, but the new (overriding) method does not have // a suitable return type. QualType ResultType = NewMethod->getReturnType(); SourceRange ResultTypeRange = NewMethod->getReturnTypeSourceRange(); // Figure out which class this method is part of, if any. ObjCInterfaceDecl *CurrentClass = dyn_cast<ObjCInterfaceDecl>(NewMethod->getDeclContext()); if (!CurrentClass) { DeclContext *DC = NewMethod->getDeclContext(); if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(DC)) CurrentClass = Cat->getClassInterface(); else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(DC)) CurrentClass = Impl->getClassInterface(); else if (ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(DC)) CurrentClass = CatImpl->getClassInterface(); } if (CurrentClass) { Diag(NewMethod->getLocation(), diag::warn_related_result_type_compatibility_class) << Context.getObjCInterfaceType(CurrentClass) << ResultType << ResultTypeRange; } else { Diag(NewMethod->getLocation(), diag::warn_related_result_type_compatibility_protocol) << ResultType << ResultTypeRange; } if (ObjCMethodFamily Family = Overridden->getMethodFamily()) Diag(Overridden->getLocation(), diag::note_related_result_type_family) << /*overridden method*/ 0 << Family; else Diag(Overridden->getLocation(), diag::note_related_result_type_overridden); } if (getLangOpts().ObjCAutoRefCount) { if ((NewMethod->hasAttr<NSReturnsRetainedAttr>() != Overridden->hasAttr<NSReturnsRetainedAttr>())) { Diag(NewMethod->getLocation(), diag::err_nsreturns_retained_attribute_mismatch) << 1; Diag(Overridden->getLocation(), diag::note_previous_decl) << "method"; } if ((NewMethod->hasAttr<NSReturnsNotRetainedAttr>() != Overridden->hasAttr<NSReturnsNotRetainedAttr>())) { Diag(NewMethod->getLocation(), diag::err_nsreturns_retained_attribute_mismatch) << 0; Diag(Overridden->getLocation(), diag::note_previous_decl) << "method"; } ObjCMethodDecl::param_const_iterator oi = Overridden->param_begin(), oe = Overridden->param_end(); for (ObjCMethodDecl::param_iterator ni = NewMethod->param_begin(), ne = NewMethod->param_end(); ni != ne && oi != oe; ++ni, ++oi) { const ParmVarDecl *oldDecl = (*oi); ParmVarDecl *newDecl = (*ni); if (newDecl->hasAttr<NSConsumedAttr>() != oldDecl->hasAttr<NSConsumedAttr>()) { Diag(newDecl->getLocation(), diag::err_nsconsumed_attribute_mismatch); Diag(oldDecl->getLocation(), diag::note_previous_decl) << "parameter"; } } } } /// \brief Check a method declaration for compatibility with the Objective-C /// ARC conventions. 
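///
/// In brief, mirroring the switch below: 'dealloc' must return void; an
/// init-family method must obey the init rules and becomes implicitly
/// ns_consumes_self; and init/alloc/copy/mutableCopy/new family methods
/// become implicitly ns_returns_retained unless an explicit ns_returns_*
/// attribute is already present.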
bool Sema::CheckARCMethodDecl(ObjCMethodDecl *method) { ObjCMethodFamily family = method->getMethodFamily(); switch (family) { case OMF_None: case OMF_finalize: case OMF_retain: case OMF_release: case OMF_autorelease: case OMF_retainCount: case OMF_self: case OMF_initialize: case OMF_performSelector: return false; case OMF_dealloc: if (!Context.hasSameType(method->getReturnType(), Context.VoidTy)) { SourceRange ResultTypeRange = method->getReturnTypeSourceRange(); if (ResultTypeRange.isInvalid()) Diag(method->getLocation(), diag::error_dealloc_bad_result_type) << method->getReturnType() << FixItHint::CreateInsertion(method->getSelectorLoc(0), "(void)"); else Diag(method->getLocation(), diag::error_dealloc_bad_result_type) << method->getReturnType() << FixItHint::CreateReplacement(ResultTypeRange, "void"); return true; } return false; case OMF_init: // If the method doesn't obey the init rules, don't bother annotating it. if (checkInitMethod(method, QualType())) return true; method->addAttr(NSConsumesSelfAttr::CreateImplicit(Context)); // Don't add a second copy of this attribute, but otherwise don't // let it be suppressed. if (method->hasAttr<NSReturnsRetainedAttr>()) return false; break; case OMF_alloc: case OMF_copy: case OMF_mutableCopy: case OMF_new: if (method->hasAttr<NSReturnsRetainedAttr>() || method->hasAttr<NSReturnsNotRetainedAttr>() || method->hasAttr<NSReturnsAutoreleasedAttr>()) return false; break; } method->addAttr(NSReturnsRetainedAttr::CreateImplicit(Context)); return false; } static void DiagnoseObjCImplementedDeprecations(Sema &S, NamedDecl *ND, SourceLocation ImplLoc, int select) { if (ND && ND->isDeprecated()) { S.Diag(ImplLoc, diag::warn_deprecated_def) << select; if (select == 0) S.Diag(ND->getLocation(), diag::note_method_declared_at) << ND->getDeclName(); else S.Diag(ND->getLocation(), diag::note_previous_decl) << "class"; } } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void Sema::AddAnyMethodToGlobalPool(Decl *D) { ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D); // If we don't have a valid method decl, simply return. if (!MDecl) return; if (MDecl->isInstanceMethod()) AddInstanceMethodToGlobalPool(MDecl, true); else AddFactoryMethodToGlobalPool(MDecl, true); } /// HasExplicitOwnershipAttr - returns true when pointer to ObjC pointer /// has explicit ownership attribute; false otherwise. static bool HasExplicitOwnershipAttr(Sema &S, ParmVarDecl *Param) { QualType T = Param->getType(); if (const PointerType *PT = T->getAs<PointerType>()) { T = PT->getPointeeType(); } else if (const ReferenceType *RT = T->getAs<ReferenceType>()) { T = RT->getPointeeType(); } else { return true; } // If we have a lifetime qualifier, but it's local, we must have // inferred it. So, it is implicit. return !T.getLocalQualifiers().hasObjCLifetime(); } /// ActOnStartOfObjCMethodDef - This routine sets up parameters, both invisible /// and user declared, in the method definition's AST. void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) { assert((getCurMethodDecl() == nullptr) && "Method parsing confused"); ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D); // If we don't have a valid method decl, simply return. if (!MDecl) return; // Allow all of Sema to see that we are entering a method definition. PushDeclContext(FnBodyScope, MDecl); PushFunctionScope(); // Create Decl objects for each parameter, entering them in the scope for // binding to their use. // Insert the invisible arguments, self and _cmd!
MDecl->createImplicitParams(Context, MDecl->getClassInterface()); PushOnScopeChains(MDecl->getSelfDecl(), FnBodyScope); PushOnScopeChains(MDecl->getCmdDecl(), FnBodyScope); // The ObjC parser requires parameter names so there's no need to check. CheckParmsForFunctionDef(MDecl->param_begin(), MDecl->param_end(), /*CheckParameterNames=*/false); // Introduce all of the other parameters into this scope. for (auto *Param : MDecl->params()) { if (!Param->isInvalidDecl() && getLangOpts().ObjCAutoRefCount && !HasExplicitOwnershipAttr(*this, Param)) Diag(Param->getLocation(), diag::warn_arc_strong_pointer_objc_pointer) << Param->getType(); if (Param->getIdentifier()) PushOnScopeChains(Param, FnBodyScope); } // In ARC, disallow definition of retain/release/autorelease/retainCount if (getLangOpts().ObjCAutoRefCount) { switch (MDecl->getMethodFamily()) { case OMF_retain: case OMF_retainCount: case OMF_release: case OMF_autorelease: Diag(MDecl->getLocation(), diag::err_arc_illegal_method_def) << 0 << MDecl->getSelector(); break; case OMF_None: case OMF_dealloc: case OMF_finalize: case OMF_alloc: case OMF_init: case OMF_mutableCopy: case OMF_copy: case OMF_new: case OMF_self: case OMF_initialize: case OMF_performSelector: break; } } // Warn on deprecated methods under -Wdeprecated-implementations, // and prepare for warning on missing super calls. if (ObjCInterfaceDecl *IC = MDecl->getClassInterface()) { ObjCMethodDecl *IMD = IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod()); if (IMD) { ObjCImplDecl *ImplDeclOfMethodDef = dyn_cast<ObjCImplDecl>(MDecl->getDeclContext()); ObjCContainerDecl *ContDeclOfMethodDecl = dyn_cast<ObjCContainerDecl>(IMD->getDeclContext()); ObjCImplDecl *ImplDeclOfMethodDecl = nullptr; if (ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(ContDeclOfMethodDecl)) ImplDeclOfMethodDecl = OID->getImplementation(); else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContDeclOfMethodDecl)) { if (CD->IsClassExtension()) { if (ObjCInterfaceDecl *OID = CD->getClassInterface()) ImplDeclOfMethodDecl = OID->getImplementation(); } else ImplDeclOfMethodDecl = CD->getImplementation(); } // No need to issue a deprecation warning if the deprecated method in a // class/category is being implemented in its own implementation (no // overriding is involved). if (!ImplDeclOfMethodDecl || ImplDeclOfMethodDecl != ImplDeclOfMethodDef) DiagnoseObjCImplementedDeprecations(*this, dyn_cast<NamedDecl>(IMD), MDecl->getLocation(), 0); } if (MDecl->getMethodFamily() == OMF_init) { if (MDecl->isDesignatedInitializerForTheInterface()) { getCurFunction()->ObjCIsDesignatedInit = true; getCurFunction()->ObjCWarnForNoDesignatedInitChain = IC->getSuperClass() != nullptr; } else if (IC->hasDesignatedInitializers()) { getCurFunction()->ObjCIsSecondaryInit = true; getCurFunction()->ObjCWarnForNoInitDelegation = true; } } // If this is "dealloc" or "finalize", set some bit here. // Then in ActOnSuperMessage() (SemaExprObjC), set it back to false. // Finally, in ActOnFinishFunctionBody() (SemaDecl), warn if flag is set. // Only do this if the current class actually has a superclass.
if (const ObjCInterfaceDecl *SuperClass = IC->getSuperClass()) { ObjCMethodFamily Family = MDecl->getMethodFamily(); if (Family == OMF_dealloc) { if (!(getLangOpts().ObjCAutoRefCount || getLangOpts().getGC() == LangOptions::GCOnly)) getCurFunction()->ObjCShouldCallSuper = true; } else if (Family == OMF_finalize) { if (Context.getLangOpts().getGC() != LangOptions::NonGC) getCurFunction()->ObjCShouldCallSuper = true; } else { const ObjCMethodDecl *SuperMethod = SuperClass->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod()); getCurFunction()->ObjCShouldCallSuper = (SuperMethod && SuperMethod->hasAttr<ObjCRequiresSuperAttr>()); } } } } namespace { // Callback to only accept typo corrections that are Objective-C classes. // If an ObjCInterfaceDecl* is given to the constructor, then the validation // function will reject corrections to that class. class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback { public: ObjCInterfaceValidatorCCC() : CurrentIDecl(nullptr) {} explicit ObjCInterfaceValidatorCCC(ObjCInterfaceDecl *IDecl) : CurrentIDecl(IDecl) {} bool ValidateCandidate(const TypoCorrection &candidate) override { ObjCInterfaceDecl *ID = candidate.getCorrectionDeclAs<ObjCInterfaceDecl>(); return ID && !declaresSameEntity(ID, CurrentIDecl); } private: ObjCInterfaceDecl *CurrentIDecl; }; } static void diagnoseUseOfProtocols(Sema &TheSema, ObjCContainerDecl *CD, ObjCProtocolDecl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs) { assert(ProtoRefs); // Diagnose availability in the context of the ObjC container. Sema::ContextRAII SavedContext(TheSema, CD); for (unsigned i = 0; i < NumProtoRefs; ++i) { (void)TheSema.DiagnoseUseOfDecl(ProtoRefs[i], ProtoLocs[i]); } } void Sema:: ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange) { // Check if a different kind of symbol declared in this scope. NamedDecl *PrevDecl = LookupSingleName(TUScope, SuperName, SuperLoc, LookupOrdinaryName); if (!PrevDecl) { // Try to correct for a typo in the superclass name without correcting // to the class we're defining. if (TypoCorrection Corrected = CorrectTypo( DeclarationNameInfo(SuperName, SuperLoc), LookupOrdinaryName, TUScope, NULL, llvm::make_unique<ObjCInterfaceValidatorCCC>(IDecl), CTK_ErrorRecovery)) { diagnoseTypo(Corrected, PDiag(diag::err_undef_superclass_suggest) << SuperName << ClassName); PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>(); } } if (declaresSameEntity(PrevDecl, IDecl)) { Diag(SuperLoc, diag::err_recursive_superclass) << SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc); IDecl->setEndOfDefinitionLoc(ClassLoc); } else { ObjCInterfaceDecl *SuperClassDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl); QualType SuperClassType; // Diagnose classes that inherit from deprecated classes. if (SuperClassDecl) { (void)DiagnoseUseOfDecl(SuperClassDecl, SuperLoc); SuperClassType = Context.getObjCInterfaceType(SuperClassDecl); } if (PrevDecl && SuperClassDecl == 0) { // The previous declaration was not a class decl. Check if we have a // typedef. If we do, get the underlying class type. 
if (const TypedefNameDecl *TDecl = dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) { QualType T = TDecl->getUnderlyingType(); if (T->isObjCObjectType()) { if (NamedDecl *IDecl = T->getAs<ObjCObjectType>()->getInterface()) { SuperClassDecl = dyn_cast<ObjCInterfaceDecl>(IDecl); SuperClassType = Context.getTypeDeclType(TDecl); // This handles the following case: // @interface NewI @end // typedef NewI DeprI __attribute__((deprecated("blah"))) // @interface SI : DeprI /* warn here */ @end (void)DiagnoseUseOfDecl(const_cast<TypedefNameDecl*>(TDecl), SuperLoc); } } } // This handles the following case: // // typedef int SuperClass; // @interface MyClass : SuperClass {} @end // if (!SuperClassDecl) { Diag(SuperLoc, diag::err_redefinition_different_kind) << SuperName; Diag(PrevDecl->getLocation(), diag::note_previous_definition); } } if (!dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) { if (!SuperClassDecl) Diag(SuperLoc, diag::err_undef_superclass) << SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc); else if (RequireCompleteType(SuperLoc, SuperClassType, diag::err_forward_superclass, SuperClassDecl->getDeclName(), ClassName, SourceRange(AtInterfaceLoc, ClassLoc))) { SuperClassDecl = 0; SuperClassType = QualType(); } } if (SuperClassType.isNull()) { assert(!SuperClassDecl && "Failed to set SuperClassType?"); return; } // Handle type arguments on the superclass. TypeSourceInfo *SuperClassTInfo = nullptr; if (!SuperTypeArgs.empty()) { TypeResult fullSuperClassType = actOnObjCTypeArgsAndProtocolQualifiers( S, SuperLoc, CreateParsedType(SuperClassType, nullptr), SuperTypeArgsRange.getBegin(), SuperTypeArgs, SuperTypeArgsRange.getEnd(), SourceLocation(), { }, { }, SourceLocation()); if (!fullSuperClassType.isUsable()) return; SuperClassType = GetTypeFromParser(fullSuperClassType.get(), &SuperClassTInfo); } if (!SuperClassTInfo) { SuperClassTInfo = Context.getTrivialTypeSourceInfo(SuperClassType, SuperLoc); } IDecl->setSuperClass(SuperClassTInfo); IDecl->setEndOfDefinitionLoc(SuperClassTInfo->getTypeLoc().getLocEnd()); } } DeclResult Sema::actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType parsedTypeBound) { // If there was an explicitly-provided type bound, check it. TypeSourceInfo *typeBoundInfo = nullptr; if (parsedTypeBound) { // The type bound can be any Objective-C pointer type. QualType typeBound = GetTypeFromParser(parsedTypeBound, &typeBoundInfo); if (typeBound->isObjCObjectPointerType()) { // okay } else if (typeBound->isObjCObjectType()) { // The user forgot the * on an Objective-C pointer type, e.g., // "T : NSView". SourceLocation starLoc = PP.getLocForEndOfToken( typeBoundInfo->getTypeLoc().getEndLoc()); Diag(typeBoundInfo->getTypeLoc().getBeginLoc(), diag::err_objc_type_param_bound_missing_pointer) << typeBound << paramName << FixItHint::CreateInsertion(starLoc, " *"); // Create a new type location builder so we can update the type // location information we have. TypeLocBuilder builder; builder.pushFullCopy(typeBoundInfo->getTypeLoc()); // Create the Objective-C pointer type. typeBound = Context.getObjCObjectPointerType(typeBound); ObjCObjectPointerTypeLoc newT = builder.push<ObjCObjectPointerTypeLoc>(typeBound); newT.setStarLoc(starLoc); // Form the new type source information. typeBoundInfo = builder.getTypeSourceInfo(Context, typeBound); } else { // Not a valid type bound. 
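// (e.g. a bound written as 'T : int', where 'int' is neither an Objective-C
// object type nor an object pointer type)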
      Diag(typeBoundInfo->getTypeLoc().getBeginLoc(),
           diag::err_objc_type_param_bound_nonobject)
          << typeBound << paramName;

      // Forget the bound; we'll default to id later.
      typeBoundInfo = nullptr;
    }

    // Type bounds cannot have explicit nullability.
    if (typeBoundInfo) {
      // Type arguments cannot explicitly specify nullability.
      if (auto nullability = AttributedType::stripOuterNullability(typeBound)) {
        // Look at the type location information to find the nullability
        // specifier so we can zap it.
        SourceLocation nullabilityLoc =
            typeBoundInfo->getTypeLoc().findNullabilityLoc();
        SourceLocation diagLoc =
            nullabilityLoc.isValid() ? nullabilityLoc
                                     : typeBoundInfo->getTypeLoc().getLocStart();
        Diag(diagLoc, diag::err_type_param_bound_explicit_nullability)
            << paramName << typeBoundInfo->getType()
            << FixItHint::CreateRemoval(nullabilityLoc);
      }
    }
  }

  // If there was no explicit type bound (or we removed it due to an error),
  // use 'id' instead.
  if (!typeBoundInfo) {
    colonLoc = SourceLocation();
    typeBoundInfo = Context.getTrivialTypeSourceInfo(Context.getObjCIdType());
  }

  // Create the type parameter.
  return ObjCTypeParamDecl::Create(Context, CurContext, variance, varianceLoc,
                                   index, paramLoc, paramName, colonLoc,
                                   typeBoundInfo);
}

ObjCTypeParamList *Sema::actOnObjCTypeParamList(Scope *S,
                                                SourceLocation lAngleLoc,
                                                ArrayRef<Decl *> typeParamsIn,
                                                SourceLocation rAngleLoc) {
  // We know that the array only contains Objective-C type parameters.
  ArrayRef<ObjCTypeParamDecl *>
      typeParams(reinterpret_cast<ObjCTypeParamDecl *const *>(
                     typeParamsIn.data()),
                 typeParamsIn.size());

  // Diagnose redeclarations of type parameters.
  // We do this now because Objective-C type parameters aren't pushed into
  // scope until later (after the instance variable block), but we want the
  // diagnostics to occur right after we parse the type parameter list.
  llvm::SmallDenseMap<IdentifierInfo *, ObjCTypeParamDecl *> knownParams;
  for (auto typeParam : typeParams) {
    auto known = knownParams.find(typeParam->getIdentifier());
    if (known != knownParams.end()) {
      Diag(typeParam->getLocation(), diag::err_objc_type_param_redecl)
          << typeParam->getIdentifier()
          << SourceRange(known->second->getLocation());

      typeParam->setInvalidDecl();
    } else {
      knownParams.insert(std::make_pair(typeParam->getIdentifier(),
                                        typeParam));

      // Push the type parameter into scope.
      PushOnScopeChains(typeParam, S, /*AddToContext=*/false);
    }
  }

  // Create the parameter list.
  return ObjCTypeParamList::create(Context, lAngleLoc, typeParams, rAngleLoc);
}

void Sema::popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList) {
  for (auto typeParam : *typeParamList) {
    if (!typeParam->isInvalidDecl()) {
      S->RemoveDecl(typeParam);
      IdResolver.RemoveDecl(typeParam);
    }
  }
}

namespace {
/// The context in which an Objective-C type parameter list occurs, for use
/// in diagnostics.
enum class TypeParamListContext {
  ForwardDeclaration,
  Definition,
  Category,
  Extension
};
} // end anonymous namespace

/// Check consistency between two Objective-C type parameter lists, e.g.,
/// between a category/extension and an \@interface or between an \@class
/// and an \@interface.
static bool checkTypeParamListConsistency(Sema &S,
                                          ObjCTypeParamList *prevTypeParams,
                                          ObjCTypeParamList *newTypeParams,
                                          TypeParamListContext newContext) {
  // If the sizes don't match, complain about that.
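  // For example (hypothetical names):
  //   @class MyList<T>;
  //   @interface MyList<T, U> : NSObject   // error: arity mismatch with the
  //   @end                                 // forward declaration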
  if (prevTypeParams->size() != newTypeParams->size()) {
    SourceLocation diagLoc;
    if (newTypeParams->size() > prevTypeParams->size()) {
      diagLoc = newTypeParams->begin()[prevTypeParams->size()]->getLocation();
    } else {
      diagLoc = S.PP.getLocForEndOfToken(newTypeParams->back()->getLocEnd());
    }

    S.Diag(diagLoc, diag::err_objc_type_param_arity_mismatch)
        << static_cast<unsigned>(newContext)
        << (newTypeParams->size() > prevTypeParams->size())
        << prevTypeParams->size()
        << newTypeParams->size();

    return true;
  }

  // Match up the type parameters.
  for (unsigned i = 0, n = prevTypeParams->size(); i != n; ++i) {
    ObjCTypeParamDecl *prevTypeParam = prevTypeParams->begin()[i];
    ObjCTypeParamDecl *newTypeParam = newTypeParams->begin()[i];

    // Check for consistency of the variance.
    if (newTypeParam->getVariance() != prevTypeParam->getVariance()) {
      if (newTypeParam->getVariance() == ObjCTypeParamVariance::Invariant &&
          newContext != TypeParamListContext::Definition) {
        // When the new type parameter is invariant and is not part
        // of the definition, just propagate the variance.
        newTypeParam->setVariance(prevTypeParam->getVariance());
      } else if (prevTypeParam->getVariance()
                     == ObjCTypeParamVariance::Invariant &&
                 !(isa<ObjCInterfaceDecl>(prevTypeParam->getDeclContext()) &&
                   cast<ObjCInterfaceDecl>(prevTypeParam->getDeclContext())
                       ->getDefinition() == prevTypeParam->getDeclContext())) {
        // When the old parameter is invariant and was not part of the
        // definition, just ignore the difference because it doesn't
        // matter.
      } else {
        {
          // Diagnose the conflict and update the second declaration.
          SourceLocation diagLoc = newTypeParam->getVarianceLoc();
          if (diagLoc.isInvalid())
            diagLoc = newTypeParam->getLocStart();

          auto diag = S.Diag(diagLoc,
                             diag::err_objc_type_param_variance_conflict)
                        << static_cast<unsigned>(newTypeParam->getVariance())
                        << newTypeParam->getDeclName()
                        << static_cast<unsigned>(prevTypeParam->getVariance())
                        << prevTypeParam->getDeclName();
          switch (prevTypeParam->getVariance()) {
          case ObjCTypeParamVariance::Invariant:
            diag << FixItHint::CreateRemoval(newTypeParam->getVarianceLoc());
            break;

          case ObjCTypeParamVariance::Covariant:
          case ObjCTypeParamVariance::Contravariant: {
            StringRef newVarianceStr =
                prevTypeParam->getVariance() == ObjCTypeParamVariance::Covariant
                    ? "__covariant"
                    : "__contravariant";
            if (newTypeParam->getVariance() ==
                  ObjCTypeParamVariance::Invariant) {
              diag << FixItHint::CreateInsertion(newTypeParam->getLocStart(),
                                                 (newVarianceStr + " ").str());
            } else {
              diag << FixItHint::CreateReplacement(
                          newTypeParam->getVarianceLoc(), newVarianceStr);
            }
          }
          }
        }

        S.Diag(prevTypeParam->getLocation(), diag::note_objc_type_param_here)
            << prevTypeParam->getDeclName();

        // Override the variance.
        newTypeParam->setVariance(prevTypeParam->getVariance());
      }
    }

    // If the bound types match, there's nothing to do.
    if (S.Context.hasSameType(prevTypeParam->getUnderlyingType(),
                              newTypeParam->getUnderlyingType()))
      continue;

    // If the new type parameter's bound was explicit, complain about it being
    // different from the original.
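    // For example (hypothetical names):
    //   @class MyList<T : NSObject *>;
    //   @interface MyList<T : NSString *> : NSObject  // error: conflicting
    //   @end                                          // explicit bound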
    if (newTypeParam->hasExplicitBound()) {
      SourceRange newBoundRange = newTypeParam->getTypeSourceInfo()
                                      ->getTypeLoc().getSourceRange();
      S.Diag(newBoundRange.getBegin(), diag::err_objc_type_param_bound_conflict)
          << newTypeParam->getUnderlyingType()
          << newTypeParam->getDeclName()
          << prevTypeParam->hasExplicitBound()
          << prevTypeParam->getUnderlyingType()
          << (newTypeParam->getDeclName() == prevTypeParam->getDeclName())
          << prevTypeParam->getDeclName()
          << FixItHint::CreateReplacement(
                 newBoundRange,
                 prevTypeParam->getUnderlyingType().getAsString(
                     S.Context.getPrintingPolicy()));

      S.Diag(prevTypeParam->getLocation(), diag::note_objc_type_param_here)
          << prevTypeParam->getDeclName();

      // Override the new type parameter's bound type with the previous type,
      // so that it's consistent.
      newTypeParam->setTypeSourceInfo(
          S.Context.getTrivialTypeSourceInfo(
              prevTypeParam->getUnderlyingType()));
      continue;
    }

    // The new type parameter got the implicit bound of 'id'. That's okay for
    // categories and extensions (overwrite it later), but not for forward
    // declarations and @interfaces, because those must be standalone.
    if (newContext == TypeParamListContext::ForwardDeclaration ||
        newContext == TypeParamListContext::Definition) {
      // Diagnose this problem for forward declarations and definitions.
      SourceLocation insertionLoc =
          S.PP.getLocForEndOfToken(newTypeParam->getLocation());
      std::string newCode =
          " : " + prevTypeParam->getUnderlyingType().getAsString(
                      S.Context.getPrintingPolicy());
      S.Diag(newTypeParam->getLocation(),
             diag::err_objc_type_param_bound_missing)
          << prevTypeParam->getUnderlyingType()
          << newTypeParam->getDeclName()
          << (newContext == TypeParamListContext::ForwardDeclaration)
          << FixItHint::CreateInsertion(insertionLoc, newCode);

      S.Diag(prevTypeParam->getLocation(), diag::note_objc_type_param_here)
          << prevTypeParam->getDeclName();
    }

    // Update the new type parameter's bound to match the previous one.
    newTypeParam->setTypeSourceInfo(
        S.Context.getTrivialTypeSourceInfo(
            prevTypeParam->getUnderlyingType()));
  }

  return false;
}

Decl *Sema::
ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
                         IdentifierInfo *ClassName, SourceLocation ClassLoc,
                         ObjCTypeParamList *typeParamList,
                         IdentifierInfo *SuperName, SourceLocation SuperLoc,
                         ArrayRef<ParsedType> SuperTypeArgs,
                         SourceRange SuperTypeArgsRange,
                         Decl * const *ProtoRefs, unsigned NumProtoRefs,
                         const SourceLocation *ProtoLocs,
                         SourceLocation EndProtoLoc, AttributeList *AttrList) {
  assert(ClassName && "Missing class identifier");

  // Check for another declaration kind with the same name.
  NamedDecl *PrevDecl =
      LookupSingleName(TUScope, ClassName, ClassLoc, LookupOrdinaryName,
                       ForRedeclaration);

  if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
    Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
    Diag(PrevDecl->getLocation(), diag::note_previous_definition);
  }

  // Create a declaration to describe this @interface.
  ObjCInterfaceDecl *PrevIDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);

  if (PrevIDecl && PrevIDecl->getIdentifier() != ClassName) {
    // A previous decl with a different name is because of
    // @compatibility_alias, for example:
    // \code
    //   @class NewImage;
    //   @compatibility_alias OldImage NewImage;
    // \endcode
    // A lookup for 'OldImage' will return the 'NewImage' decl.
    //
    // In such a case use the real declaration name, instead of the alias one,
    // otherwise we will break IdentifierResolver and redecls-chain invariants.
    // FIXME: If necessary, add a bit to indicate that this ObjCInterfaceDecl
    // has been aliased.
    ClassName = PrevIDecl->getIdentifier();
  }

  // If there was a forward declaration with type parameters, check
  // for consistency.
  if (PrevIDecl) {
    if (ObjCTypeParamList *prevTypeParamList = PrevIDecl->getTypeParamList()) {
      if (typeParamList) {
        // Both have type parameter lists; check for consistency.
        if (checkTypeParamListConsistency(*this, prevTypeParamList,
                                          typeParamList,
                                          TypeParamListContext::Definition)) {
          typeParamList = nullptr;
        }
      } else {
        Diag(ClassLoc, diag::err_objc_parameterized_forward_class_first)
            << ClassName;
        Diag(prevTypeParamList->getLAngleLoc(), diag::note_previous_decl)
            << ClassName;

        // Clone the type parameter list.
        SmallVector<ObjCTypeParamDecl *, 4> clonedTypeParams;
        for (auto typeParam : *prevTypeParamList) {
          clonedTypeParams.push_back(
              ObjCTypeParamDecl::Create(
                  Context, CurContext, typeParam->getVariance(),
                  SourceLocation(), typeParam->getIndex(), SourceLocation(),
                  typeParam->getIdentifier(), SourceLocation(),
                  Context.getTrivialTypeSourceInfo(
                      typeParam->getUnderlyingType())));
        }

        typeParamList = ObjCTypeParamList::create(Context,
                                                  SourceLocation(),
                                                  clonedTypeParams,
                                                  SourceLocation());
      }
    }
  }

  ObjCInterfaceDecl *IDecl =
      ObjCInterfaceDecl::Create(Context, CurContext, AtInterfaceLoc, ClassName,
                                typeParamList, PrevIDecl, ClassLoc);

  if (PrevIDecl) {
    // Class already seen. Was it a definition?
    if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
      Diag(AtInterfaceLoc, diag::err_duplicate_class_def)
          << PrevIDecl->getDeclName();
      Diag(Def->getLocation(), diag::note_previous_definition);
      IDecl->setInvalidDecl();
    }
  }

  if (AttrList)
    ProcessDeclAttributeList(TUScope, IDecl, AttrList);
  PushOnScopeChains(IDecl, TUScope);

  // Start the definition of this class. If we're in a redefinition case, there
  // may already be a definition, so we'll end up adding to it.
  if (!IDecl->hasDefinition())
    IDecl->startDefinition();

  if (SuperName) {
    // Diagnose availability in the context of the @interface.
    ContextRAII SavedContext(*this, IDecl);

    ActOnSuperClassOfClassInterface(S, AtInterfaceLoc, IDecl,
                                    ClassName, ClassLoc,
                                    SuperName, SuperLoc, SuperTypeArgs,
                                    SuperTypeArgsRange);
  } else {
    // We have a root class.
    IDecl->setEndOfDefinitionLoc(ClassLoc);
  }

  // Check then save referenced protocols.
  if (NumProtoRefs) {
    diagnoseUseOfProtocols(*this, IDecl, (ObjCProtocolDecl *const *)ProtoRefs,
                           NumProtoRefs, ProtoLocs);
    IDecl->setProtocolList((ObjCProtocolDecl *const *)ProtoRefs, NumProtoRefs,
                           ProtoLocs, Context);
    IDecl->setEndOfDefinitionLoc(EndProtoLoc);
  }

  CheckObjCDeclScope(IDecl);
  return ActOnObjCContainerStartDefinition(IDecl);
}

/// ActOnTypedefedProtocols - this action finds the protocol list as part of
/// the typedef'ed use for a qualified super class and adds those protocols to
/// the list.
void Sema::ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                                   IdentifierInfo *SuperName,
                                   SourceLocation SuperLoc) {
  if (!SuperName)
    return;
  NamedDecl *IDecl = LookupSingleName(TUScope, SuperName, SuperLoc,
                                      LookupOrdinaryName);
  if (!IDecl)
    return;

  if (const TypedefNameDecl *TDecl = dyn_cast_or_null<TypedefNameDecl>(IDecl)) {
    QualType T = TDecl->getUnderlyingType();
    if (T->isObjCObjectType())
      if (const ObjCObjectType *OPT = T->getAs<ObjCObjectType>())
        ProtocolRefs.append(OPT->qual_begin(), OPT->qual_end());
  }
}

/// ActOnCompatibilityAlias - this action is called after complete parsing of
/// a \@compatibility_alias declaration. It sets up the alias relationships.
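///
/// For example:
/// \code
///   @interface NewImage @end
///   @compatibility_alias OldImage NewImage;
/// \endcode
/// After this, 'OldImage' can be used wherever 'NewImage' is expected.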
Decl *Sema::ActOnCompatibilityAlias(SourceLocation AtLoc,
                                    IdentifierInfo *AliasName,
                                    SourceLocation AliasLocation,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLocation) {
  // Look for a previous declaration of the alias name.
  NamedDecl *ADecl = LookupSingleName(TUScope, AliasName, AliasLocation,
                                      LookupOrdinaryName, ForRedeclaration);
  if (ADecl) {
    Diag(AliasLocation, diag::err_conflicting_aliasing_type) << AliasName;
    Diag(ADecl->getLocation(), diag::note_previous_declaration);
    return nullptr;
  }
  // Check for a class declaration.
  NamedDecl *CDeclU = LookupSingleName(TUScope, ClassName, ClassLocation,
                                       LookupOrdinaryName, ForRedeclaration);
  if (const TypedefNameDecl *TDecl =
          dyn_cast_or_null<TypedefNameDecl>(CDeclU)) {
    QualType T = TDecl->getUnderlyingType();
    if (T->isObjCObjectType()) {
      if (NamedDecl *IDecl = T->getAs<ObjCObjectType>()->getInterface()) {
        ClassName = IDecl->getIdentifier();
        CDeclU = LookupSingleName(TUScope, ClassName, ClassLocation,
                                  LookupOrdinaryName, ForRedeclaration);
      }
    }
  }
  ObjCInterfaceDecl *CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDeclU);
  if (!CDecl) {
    Diag(ClassLocation, diag::warn_undef_interface) << ClassName;
    if (CDeclU)
      Diag(CDeclU->getLocation(), diag::note_previous_declaration);
    return nullptr;
  }

  // Everything checked out, instantiate a new alias declaration AST.
  ObjCCompatibleAliasDecl *AliasDecl =
      ObjCCompatibleAliasDecl::Create(Context, CurContext, AtLoc,
                                      AliasName, CDecl);

  if (!CheckObjCDeclScope(AliasDecl))
    PushOnScopeChains(AliasDecl, TUScope);

  return AliasDecl;
}

bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName,
    SourceLocation &Ploc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList) {

  bool res = false;
  for (ObjCList<ObjCProtocolDecl>::iterator I = PList.begin(),
       E = PList.end(); I != E; ++I) {
    if (ObjCProtocolDecl *PDecl = LookupProtocol((*I)->getIdentifier(),
                                                 Ploc)) {
      if (PDecl->getIdentifier() == PName) {
        Diag(Ploc, diag::err_protocol_has_circular_dependency);
        Diag(PrevLoc, diag::note_previous_definition);
        res = true;
      }

      if (!PDecl->hasDefinition())
        continue;

      if (CheckForwardProtocolDeclarationForCircularDependency(
              PName, Ploc, PDecl->getLocation(),
              PDecl->getReferencedProtocols()))
        res = true;
    }
  }
  return res;
}

Decl *
Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
                                  IdentifierInfo *ProtocolName,
                                  SourceLocation ProtocolLoc,
                                  Decl * const *ProtoRefs,
                                  unsigned NumProtoRefs,
                                  const SourceLocation *ProtoLocs,
                                  SourceLocation EndProtoLoc,
                                  AttributeList *AttrList) {
  bool err = false;
  // FIXME: Deal with AttrList.
  assert(ProtocolName && "Missing protocol identifier");
  ObjCProtocolDecl *PrevDecl = LookupProtocol(ProtocolName, ProtocolLoc,
                                              ForRedeclaration);
  ObjCProtocolDecl *PDecl = nullptr;
  if (ObjCProtocolDecl *Def = PrevDecl ? PrevDecl->getDefinition() : nullptr) {
    // If we already have a definition, complain.
    Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
    Diag(Def->getLocation(), diag::note_previous_definition);

    // Create a new protocol that is completely distinct from previous
    // declarations, and do not make this protocol available for name lookup.
    // That way, we'll end up completely ignoring the duplicate.
    // FIXME: Can we turn this into an error?
    PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
                                     ProtocolLoc, AtProtoInterfaceLoc,
                                     /*PrevDecl=*/nullptr);
    PDecl->startDefinition();
  } else {
    if (PrevDecl) {
      // Check for circular dependencies among protocol declarations. This
      // can only happen if this protocol was forward-declared.
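      // For example (hypothetical names):
      //   @protocol B;
      //   @protocol A <B> @end
      //   @protocol B <A> @end   // error: circular dependency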
      ObjCList<ObjCProtocolDecl> PList;
      PList.set((ObjCProtocolDecl *const *)ProtoRefs, NumProtoRefs, Context);
      err = CheckForwardProtocolDeclarationForCircularDependency(
              ProtocolName, ProtocolLoc, PrevDecl->getLocation(), PList);
    }

    // Create the new declaration.
    PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
                                     ProtocolLoc, AtProtoInterfaceLoc,
                                     /*PrevDecl=*/PrevDecl);

    PushOnScopeChains(PDecl, TUScope);
    PDecl->startDefinition();
  }

  if (AttrList)
    ProcessDeclAttributeList(TUScope, PDecl, AttrList);

  // Merge attributes from previous declarations.
  if (PrevDecl)
    mergeDeclAttributes(PDecl, PrevDecl);

  if (!err && NumProtoRefs) {
    /// Check then save referenced protocols.
    diagnoseUseOfProtocols(*this, PDecl, (ObjCProtocolDecl *const *)ProtoRefs,
                           NumProtoRefs, ProtoLocs);
    PDecl->setProtocolList((ObjCProtocolDecl *const *)ProtoRefs, NumProtoRefs,
                           ProtoLocs, Context);
  }

  CheckObjCDeclScope(PDecl);
  return ActOnObjCContainerStartDefinition(PDecl);
}

static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
                                          ObjCProtocolDecl *&UndefinedProtocol) {
  if (!PDecl->hasDefinition() || PDecl->getDefinition()->isHidden()) {
    UndefinedProtocol = PDecl;
    return true;
  }

  for (auto *PI : PDecl->protocols())
    if (NestedProtocolHasNoDefinition(PI, UndefinedProtocol)) {
      UndefinedProtocol = PI;
      return true;
    }
  return false;
}

/// FindProtocolDeclaration - This routine looks up protocols and
/// issues an error if they are not declared. It returns the list of
/// protocol declarations in its 'Protocols' argument.
void
Sema::FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                              const IdentifierLocPair *ProtocolId,
                              unsigned NumProtocols,
                              SmallVectorImpl<Decl *> &Protocols) {
  for (unsigned i = 0; i != NumProtocols; ++i) {
    ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolId[i].first,
                                             ProtocolId[i].second);
    if (!PDecl) {
      TypoCorrection Corrected = CorrectTypo(
          DeclarationNameInfo(ProtocolId[i].first, ProtocolId[i].second),
          LookupObjCProtocolName, TUScope, nullptr,
          llvm::make_unique<DeclFilterCCC<ObjCProtocolDecl>>(),
          CTK_ErrorRecovery);
      if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>()))
        diagnoseTypo(Corrected, PDiag(diag::err_undeclared_protocol_suggest)
                                    << ProtocolId[i].first);
    }

    if (!PDecl) {
      Diag(ProtocolId[i].second, diag::err_undeclared_protocol)
          << ProtocolId[i].first;
      continue;
    }

    // If this is a forward protocol declaration, get its definition.
    if (!PDecl->isThisDeclarationADefinition() && PDecl->getDefinition())
      PDecl = PDecl->getDefinition();

    // For an objc container, delay protocol reference checking until after we
    // can set the objc decl as the availability context, otherwise check now.
    if (!ForObjCContainer) {
      (void)DiagnoseUseOfDecl(PDecl, ProtocolId[i].second);
    }

    // If this is a forward declaration and we are supposed to warn in this
    // case, do it.
    // FIXME: Recover nicely in the hidden case.
    ObjCProtocolDecl *UndefinedProtocol;

    if (WarnOnDeclarations &&
        NestedProtocolHasNoDefinition(PDecl, UndefinedProtocol)) {
      Diag(ProtocolId[i].second, diag::warn_undef_protocolref)
          << ProtocolId[i].first;
      Diag(UndefinedProtocol->getLocation(),
           diag::note_protocol_decl_undefined)
          << UndefinedProtocol;
    }

    Protocols.push_back(PDecl);
  }
}

namespace {
// Callback to only accept typo corrections that are either
// Objective-C protocols or valid Objective-C type arguments.
class ObjCTypeArgOrProtocolValidatorCCC : public CorrectionCandidateCallback {
  ASTContext &Context;
  Sema::LookupNameKind LookupKind;

public:
  ObjCTypeArgOrProtocolValidatorCCC(ASTContext &context,
                                    Sema::LookupNameKind lookupKind)
      : Context(context), LookupKind(lookupKind) { }

  bool ValidateCandidate(const TypoCorrection &candidate) override {
    // If we're allowed to find protocols and we have a protocol, accept it.
    if (LookupKind != Sema::LookupOrdinaryName) {
      if (candidate.getCorrectionDeclAs<ObjCProtocolDecl>())
        return true;
    }

    // If we're allowed to find type names and we have one, accept it.
    if (LookupKind != Sema::LookupObjCProtocolName) {
      // If we have a type declaration, we might accept this result.
      if (auto typeDecl = candidate.getCorrectionDeclAs<TypeDecl>()) {
        // If we found a tag declaration outside of C++, skip it. This
        // can happen because we look for any name when there is no
        // bias to protocol or type names.
        if (isa<RecordDecl>(typeDecl) && !Context.getLangOpts().CPlusPlus)
          return false;

        // Make sure the type is something we would accept as a type
        // argument.
        auto type = Context.getTypeDeclType(typeDecl);
        if (type->isObjCObjectPointerType() ||
            type->isBlockPointerType() ||
            type->isDependentType() ||
            type->isObjCObjectType())
          return true;

        return false;
      }

      // If we have an Objective-C class type, accept it; there will
      // be another fix to add the '*'.
      if (candidate.getCorrectionDeclAs<ObjCInterfaceDecl>())
        return true;

      return false;
    }

    return false;
  }
};
} // end anonymous namespace

void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
       Scope *S,
       ParsedType baseType,
       SourceLocation lAngleLoc,
       ArrayRef<IdentifierInfo *> identifiers,
       ArrayRef<SourceLocation> identifierLocs,
       SourceLocation rAngleLoc,
       SourceLocation &typeArgsLAngleLoc,
       SmallVectorImpl<ParsedType> &typeArgs,
       SourceLocation &typeArgsRAngleLoc,
       SourceLocation &protocolLAngleLoc,
       SmallVectorImpl<Decl *> &protocols,
       SourceLocation &protocolRAngleLoc,
       bool warnOnIncompleteProtocols) {
  // Local function that updates the declaration specifiers with
  // protocol information.
  unsigned numProtocolsResolved = 0;
  auto resolvedAsProtocols = [&] {
    assert(numProtocolsResolved == identifiers.size() &&
           "Unresolved protocols");

    // Determine whether the base type is a parameterized class, in
    // which case we want to warn about typos such as
    // "NSArray<NSObject>" (that should be NSArray<NSObject *>).
    ObjCInterfaceDecl *baseClass = nullptr;
    QualType base = GetTypeFromParser(baseType, nullptr);
    bool allAreTypeNames = false;
    SourceLocation firstClassNameLoc;
    if (!base.isNull()) {
      if (const auto *objcObjectType = base->getAs<ObjCObjectType>()) {
        baseClass = objcObjectType->getInterface();
        if (baseClass) {
          if (auto typeParams = baseClass->getTypeParamList()) {
            if (typeParams->size() == numProtocolsResolved) {
              // Note that we should be looking for type names, too.
              allAreTypeNames = true;
            }
          }
        }
      }
    }

    for (unsigned i = 0, n = protocols.size(); i != n; ++i) {
      ObjCProtocolDecl *&proto =
          reinterpret_cast<ObjCProtocolDecl *&>(protocols[i]);
      // For an objc container, delay protocol reference checking until after
      // we can set the objc decl as the availability context, otherwise check
      // now.
      if (!warnOnIncompleteProtocols) {
        (void)DiagnoseUseOfDecl(proto, identifierLocs[i]);
      }

      // If this is a forward protocol declaration, get its definition.
      if (!proto->isThisDeclarationADefinition() && proto->getDefinition())
        proto = proto->getDefinition();

      // If this is a forward declaration and we are supposed to warn in this
      // case, do it.
      // FIXME: Recover nicely in the hidden case.
      ObjCProtocolDecl *forwardDecl = nullptr;
      if (warnOnIncompleteProtocols &&
          NestedProtocolHasNoDefinition(proto, forwardDecl)) {
        Diag(identifierLocs[i], diag::warn_undef_protocolref)
            << proto->getDeclName();
        Diag(forwardDecl->getLocation(), diag::note_protocol_decl_undefined)
            << forwardDecl;
      }

      // If everything this far has been a type name (and we care
      // about such things), check whether this name refers to a type
      // as well.
      if (allAreTypeNames) {
        if (auto *decl = LookupSingleName(S, identifiers[i], identifierLocs[i],
                                          LookupOrdinaryName)) {
          if (isa<ObjCInterfaceDecl>(decl)) {
            if (firstClassNameLoc.isInvalid())
              firstClassNameLoc = identifierLocs[i];
          } else if (!isa<TypeDecl>(decl)) {
            // Not a type.
            allAreTypeNames = false;
          }
        } else {
          allAreTypeNames = false;
        }
      }
    }

    // All of the protocols listed also have type names, and at least
    // one is an Objective-C class name. Check whether all of the
    // protocol conformances are declared by the base class itself, in
    // which case we warn.
    if (allAreTypeNames && firstClassNameLoc.isValid()) {
      llvm::SmallPtrSet<ObjCProtocolDecl *, 8> knownProtocols;
      Context.CollectInheritedProtocols(baseClass, knownProtocols);
      bool allProtocolsDeclared = true;
      for (auto proto : protocols) {
        if (knownProtocols.count(static_cast<ObjCProtocolDecl *>(proto)) == 0) {
          allProtocolsDeclared = false;
          break;
        }
      }

      if (allProtocolsDeclared) {
        Diag(firstClassNameLoc, diag::warn_objc_redundant_qualified_class_type)
            << baseClass->getDeclName() << SourceRange(lAngleLoc, rAngleLoc)
            << FixItHint::CreateInsertion(
                   PP.getLocForEndOfToken(firstClassNameLoc), " *");
      }
    }

    protocolLAngleLoc = lAngleLoc;
    protocolRAngleLoc = rAngleLoc;
    assert(protocols.size() == identifierLocs.size());
  };

  // Attempt to resolve all of the identifiers as protocols.
  for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
    ObjCProtocolDecl *proto = LookupProtocol(identifiers[i], identifierLocs[i]);
    protocols.push_back(proto);
    if (proto)
      ++numProtocolsResolved;
  }

  // If all of the names were protocols, these were protocol qualifiers.
  if (numProtocolsResolved == identifiers.size())
    return resolvedAsProtocols();

  // Attempt to resolve all of the identifiers as type names or
  // Objective-C class names. The latter is technically ill-formed,
  // but is probably something like \c NSArray<NSView *> missing the
  // \c *.
  typedef llvm::PointerUnion<TypeDecl *, ObjCInterfaceDecl *> TypeOrClassDecl;
  SmallVector<TypeOrClassDecl, 4> typeDecls;
  unsigned numTypeDeclsResolved = 0;
  for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
    NamedDecl *decl = LookupSingleName(S, identifiers[i], identifierLocs[i],
                                       LookupOrdinaryName);
    if (!decl) {
      typeDecls.push_back(TypeOrClassDecl());
      continue;
    }

    if (auto typeDecl = dyn_cast<TypeDecl>(decl)) {
      typeDecls.push_back(typeDecl);
      ++numTypeDeclsResolved;
      continue;
    }

    if (auto objcClass = dyn_cast<ObjCInterfaceDecl>(decl)) {
      typeDecls.push_back(objcClass);
      ++numTypeDeclsResolved;
      continue;
    }

    typeDecls.push_back(TypeOrClassDecl());
  }

  AttributeFactory attrFactory;

  // Local function that forms a reference to the given type or
  // Objective-C class declaration.
  auto resolveTypeReference = [&](TypeOrClassDecl typeDecl, SourceLocation loc)
                                  -> TypeResult {
    // Form declaration specifiers. They simply refer to the type.
    DeclSpec DS(attrFactory);
    const char *prevSpec; // unused
    unsigned diagID;      // unused
    QualType type;
    if (auto *actualTypeDecl = typeDecl.dyn_cast<TypeDecl *>())
      type = Context.getTypeDeclType(actualTypeDecl);
    else
      type = Context.getObjCInterfaceType(typeDecl.get<ObjCInterfaceDecl *>());
    TypeSourceInfo *parsedTSInfo = Context.getTrivialTypeSourceInfo(type, loc);
    ParsedType parsedType = CreateParsedType(type, parsedTSInfo);
    DS.SetTypeSpecType(DeclSpec::TST_typename, loc, prevSpec, diagID,
                       parsedType, Context.getPrintingPolicy());
    // Use the identifier location for the type source range.
    DS.SetRangeStart(loc);
    DS.SetRangeEnd(loc);

    // Form the declarator.
    Declarator D(DS, Declarator::TypeNameContext);

    // If we have a typedef of an Objective-C class type that is missing a '*',
    // add the '*'.
    if (type->getAs<ObjCInterfaceType>()) {
      SourceLocation starLoc = PP.getLocForEndOfToken(loc);
      ParsedAttributes parsedAttrs(attrFactory);
      D.AddTypeInfo(DeclaratorChunk::getPointer(/*typeQuals=*/0, starLoc,
                                                SourceLocation(),
                                                SourceLocation(),
                                                SourceLocation(),
                                                SourceLocation()),
                    parsedAttrs,
                    starLoc);

      // Diagnose the missing '*'.
      Diag(loc, diag::err_objc_type_arg_missing_star)
          << type << FixItHint::CreateInsertion(starLoc, " *");
    }

    // Convert this to a type.
    return ActOnTypeName(S, D);
  };

  // Local function that updates the declaration specifiers with
  // type argument information.
  auto resolvedAsTypeDecls = [&] {
    // We did not resolve these as protocols.
    protocols.clear();

    assert(numTypeDeclsResolved == identifiers.size() &&
           "Unresolved type decl");
    // Map type declarations to type arguments.
    for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
      // Map type reference to a type.
      TypeResult type = resolveTypeReference(typeDecls[i], identifierLocs[i]);
      if (!type.isUsable()) {
        typeArgs.clear();
        return;
      }

      typeArgs.push_back(type.get());
    }

    typeArgsLAngleLoc = lAngleLoc;
    typeArgsRAngleLoc = rAngleLoc;
  };

  // If all of the identifiers can be resolved as type names or
  // Objective-C class names, we have type arguments.
  if (numTypeDeclsResolved == identifiers.size())
    return resolvedAsTypeDecls();

  // Error recovery: some names weren't found, or we have a mix of
  // type and protocol names. Go resolve all of the unresolved names
  // and complain if we can't find a consistent answer.
  LookupNameKind lookupKind = LookupAnyName;
  for (unsigned i = 0, n = identifiers.size(); i != n; ++i) {
    // If we already have a protocol or type, check whether it is the
    // right thing.
    if (protocols[i] || typeDecls[i]) {
      // If we haven't figured out whether we want types or protocols
      // yet, try to figure it out from this name.
      if (lookupKind == LookupAnyName) {
        // If this name refers to both a protocol and a type (e.g., \c
        // NSObject), don't conclude anything yet.
        if (protocols[i] && typeDecls[i])
          continue;

        // Otherwise, let this name decide whether we'll be correcting
        // toward types or protocols.
        lookupKind = protocols[i] ? LookupObjCProtocolName
                                  : LookupOrdinaryName;
        continue;
      }

      // If we want protocols and we have a protocol, there's nothing
      // more to do.
      if (lookupKind == LookupObjCProtocolName && protocols[i])
        continue;

      // If we want types and we have a type declaration, there's
      // nothing more to do.
      if (lookupKind == LookupOrdinaryName && typeDecls[i])
        continue;

      // We have a conflict: some names refer to protocols and others
      // refer to types.
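      // For example (hypothetical names), given a protocol P and a type T:
      //   NSArray<P, T> *x;   // error: angle brackets mix a protocol
      //                       // qualifier with a type argument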
      Diag(identifierLocs[i], diag::err_objc_type_args_and_protocols)
          << (protocols[i] != nullptr) << identifiers[i] << identifiers[0]
          << SourceRange(identifierLocs[0]);

      protocols.clear();
      typeArgs.clear();
      return;
    }

    // Perform typo correction on the name.
    TypoCorrection corrected = CorrectTypo(
        DeclarationNameInfo(identifiers[i], identifierLocs[i]), lookupKind, S,
        nullptr,
        llvm::make_unique<ObjCTypeArgOrProtocolValidatorCCC>(Context,
                                                             lookupKind),
        CTK_ErrorRecovery);
    if (corrected) {
      // Did we find a protocol?
      if (auto proto = corrected.getCorrectionDeclAs<ObjCProtocolDecl>()) {
        diagnoseTypo(corrected,
                     PDiag(diag::err_undeclared_protocol_suggest)
                         << identifiers[i]);
        lookupKind = LookupObjCProtocolName;
        protocols[i] = proto;
        ++numProtocolsResolved;
        continue;
      }

      // Did we find a type?
      if (auto typeDecl = corrected.getCorrectionDeclAs<TypeDecl>()) {
        diagnoseTypo(corrected,
                     PDiag(diag::err_unknown_typename_suggest)
                         << identifiers[i]);
        lookupKind = LookupOrdinaryName;
        typeDecls[i] = typeDecl;
        ++numTypeDeclsResolved;
        continue;
      }

      // Did we find an Objective-C class?
      if (auto objcClass = corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
        diagnoseTypo(corrected,
                     PDiag(diag::err_unknown_type_or_class_name_suggest)
                         << identifiers[i] << true);
        lookupKind = LookupOrdinaryName;
        typeDecls[i] = objcClass;
        ++numTypeDeclsResolved;
        continue;
      }
    }

    // We couldn't find anything.
    Diag(identifierLocs[i],
         (lookupKind == LookupAnyName
              ? diag::err_objc_type_arg_missing
              : lookupKind == LookupObjCProtocolName
                    ? diag::err_undeclared_protocol
                    : diag::err_unknown_typename))
        << identifiers[i];
    protocols.clear();
    typeArgs.clear();
    return;
  }

  // If all of the names were (corrected to) protocols, these were
  // protocol qualifiers.
  if (numProtocolsResolved == identifiers.size())
    return resolvedAsProtocols();

  // Otherwise, all of the names were (corrected to) types.
  assert(numTypeDeclsResolved == identifiers.size() && "Not all types?");
  return resolvedAsTypeDecls();
}

/// DiagnoseClassExtensionDupMethods - Check for duplicate declaration of
/// a class method in its extension.
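/// For example:
/// \code
///   @interface I
///   - (void)f;
///   @end
///   @interface I ()
///   - (char)f;  // error: duplicate declaration with a conflicting type
///   @end
/// \endcode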
///
void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                            ObjCInterfaceDecl *ID) {
  if (!ID)
    return; // Possibly due to previous error

  llvm::DenseMap<Selector, const ObjCMethodDecl *> MethodMap;
  for (auto *MD : ID->methods())
    MethodMap[MD->getSelector()] = MD;

  if (MethodMap.empty())
    return;
  for (const auto *Method : CAT->methods()) {
    const ObjCMethodDecl *&PrevMethod = MethodMap[Method->getSelector()];
    if (PrevMethod &&
        (PrevMethod->isInstanceMethod() == Method->isInstanceMethod()) &&
        !MatchTwoMethodDeclarations(Method, PrevMethod)) {
      Diag(Method->getLocation(), diag::err_duplicate_method_decl)
          << Method->getDeclName();
      Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
    }
  }
}

/// ActOnForwardProtocolDeclaration - Handle \@protocol foo;
Sema::DeclGroupPtrTy
Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                      const IdentifierLocPair *IdentList,
                                      unsigned NumElts,
                                      AttributeList *attrList) {
  SmallVector<Decl *, 8> DeclsInGroup;
  for (unsigned i = 0; i != NumElts; ++i) {
    IdentifierInfo *Ident = IdentList[i].first;
    ObjCProtocolDecl *PrevDecl = LookupProtocol(Ident, IdentList[i].second,
                                                ForRedeclaration);
    ObjCProtocolDecl *PDecl =
        ObjCProtocolDecl::Create(Context, CurContext, Ident,
                                 IdentList[i].second, AtProtocolLoc,
                                 PrevDecl);

    PushOnScopeChains(PDecl, TUScope);
    CheckObjCDeclScope(PDecl);

    if (attrList)
      ProcessDeclAttributeList(TUScope, PDecl, attrList);

    if (PrevDecl)
      mergeDeclAttributes(PDecl, PrevDecl);

    DeclsInGroup.push_back(PDecl);
  }

  return BuildDeclaratorGroup(DeclsInGroup, false);
}

Decl *Sema::
ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
                            IdentifierInfo *ClassName, SourceLocation ClassLoc,
                            ObjCTypeParamList *typeParamList,
                            IdentifierInfo *CategoryName,
                            SourceLocation CategoryLoc,
                            Decl * const *ProtoRefs,
                            unsigned NumProtoRefs,
                            const SourceLocation *ProtoLocs,
                            SourceLocation EndProtoLoc) {
  ObjCCategoryDecl *CDecl;
  ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);

  /// Check that class of this category is already completely declared.
  if (!IDecl ||
      RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
                          diag::err_category_forward_interface,
                          CategoryName == nullptr)) {
    // Create an invalid ObjCCategoryDecl to serve as context for
    // the enclosing method declarations. We mark the decl invalid
    // to make it clear that this isn't a valid AST.
    CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
                                     ClassLoc, CategoryLoc, CategoryName,
                                     IDecl, typeParamList);
    CDecl->setInvalidDecl();
    CurContext->addDecl(CDecl);

    if (!IDecl)
      Diag(ClassLoc, diag::err_undef_interface) << ClassName;
    return ActOnObjCContainerStartDefinition(CDecl);
  }

  if (!CategoryName && IDecl->getImplementation()) {
    Diag(ClassLoc, diag::err_class_extension_after_impl) << ClassName;
    Diag(IDecl->getImplementation()->getLocation(),
         diag::note_implementation_declared);
  }

  if (CategoryName) {
    /// Check for duplicate interface declaration for this category
    if (ObjCCategoryDecl *Previous
            = IDecl->FindCategoryDeclaration(CategoryName)) {
      // Class extensions can be declared multiple times, categories cannot.
      Diag(CategoryLoc, diag::warn_dup_category_def)
          << ClassName << CategoryName;
      Diag(Previous->getLocation(), diag::note_previous_definition);
    }
  }

  // If we have a type parameter list, check it.
  if (typeParamList) {
    if (auto prevTypeParamList = IDecl->getTypeParamList()) {
      if (checkTypeParamListConsistency(*this, prevTypeParamList,
                                        typeParamList,
                                        CategoryName ?
                                            TypeParamListContext::Category :
                                            TypeParamListContext::Extension))
        typeParamList = nullptr;
    } else {
      Diag(typeParamList->getLAngleLoc(),
           diag::err_objc_parameterized_category_nonclass)
          << (CategoryName != nullptr)
          << ClassName
          << typeParamList->getSourceRange();

      typeParamList = nullptr;
    }
  }

  CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
                                   ClassLoc, CategoryLoc, CategoryName, IDecl,
                                   typeParamList);
  // FIXME: PushOnScopeChains?
  CurContext->addDecl(CDecl);

  if (NumProtoRefs) {
    diagnoseUseOfProtocols(*this, CDecl, (ObjCProtocolDecl *const *)ProtoRefs,
                           NumProtoRefs, ProtoLocs);
    CDecl->setProtocolList((ObjCProtocolDecl *const *)ProtoRefs, NumProtoRefs,
                           ProtoLocs, Context);
    // Protocols in the class extension belong to the class.
    if (CDecl->IsClassExtension())
      IDecl->mergeClassExtensionProtocolList(
          (ObjCProtocolDecl *const *)ProtoRefs, NumProtoRefs, Context);
  }

  CheckObjCDeclScope(CDecl);
  return ActOnObjCContainerStartDefinition(CDecl);
}

/// ActOnStartCategoryImplementation - Perform semantic checks on the
/// category implementation declaration and build an ObjCCategoryImplDecl
/// object.
Decl *Sema::ActOnStartCategoryImplementation(
                      SourceLocation AtCatImplLoc,
                      IdentifierInfo *ClassName, SourceLocation ClassLoc,
                      IdentifierInfo *CatName, SourceLocation CatLoc) {
  ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
  ObjCCategoryDecl *CatIDecl = nullptr;
  if (IDecl && IDecl->hasDefinition()) {
    CatIDecl = IDecl->FindCategoryDeclaration(CatName);
    if (!CatIDecl) {
      // Category @implementation with no corresponding @interface.
      // Create and install one.
      CatIDecl = ObjCCategoryDecl::Create(Context, CurContext, AtCatImplLoc,
                                          ClassLoc, CatLoc,
                                          CatName, IDecl,
                                          /*typeParamList=*/nullptr);
      CatIDecl->setImplicit();
    }
  }

  ObjCCategoryImplDecl *CDecl =
      ObjCCategoryImplDecl::Create(Context, CurContext, CatName, IDecl,
                                   ClassLoc, AtCatImplLoc, CatLoc);
  /// Check that class of this category is already completely declared.
  if (!IDecl) {
    Diag(ClassLoc, diag::err_undef_interface) << ClassName;
    CDecl->setInvalidDecl();
  } else if (RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
                                 diag::err_undef_interface)) {
    CDecl->setInvalidDecl();
  }

  // FIXME: PushOnScopeChains?
  CurContext->addDecl(CDecl);

  // If the interface is deprecated/unavailable, warn/error about it.
  if (IDecl)
    DiagnoseUseOfDecl(IDecl, ClassLoc);

  /// Check that CatName, category name, is not used in another implementation.
  if (CatIDecl) {
    if (CatIDecl->getImplementation()) {
      Diag(ClassLoc, diag::err_dup_implementation_category)
          << ClassName << CatName;
      Diag(CatIDecl->getImplementation()->getLocation(),
           diag::note_previous_definition);
      CDecl->setInvalidDecl();
    } else {
      CatIDecl->setImplementation(CDecl);
      // Warn about implementing a category of a deprecated class, under
      // the -Wdeprecated-implementations flag.
      DiagnoseObjCImplementedDeprecations(*this,
                                          dyn_cast<NamedDecl>(IDecl),
                                          CDecl->getLocation(), 2);
    }
  }

  CheckObjCDeclScope(CDecl);
  return ActOnObjCContainerStartDefinition(CDecl);
}

Decl *Sema::ActOnStartClassImplementation(
                      SourceLocation AtClassImplLoc,
                      IdentifierInfo *ClassName, SourceLocation ClassLoc,
                      IdentifierInfo *SuperClassname,
                      SourceLocation SuperClassLoc) {
  ObjCInterfaceDecl *IDecl = nullptr;
  // Check for another declaration kind with the same name.
  NamedDecl *PrevDecl =
      LookupSingleName(TUScope, ClassName, ClassLoc, LookupOrdinaryName,
                       ForRedeclaration);
  if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
    Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
    Diag(PrevDecl->getLocation(), diag::note_previous_definition);
  } else if ((IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl))) {
    RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
                        diag::warn_undef_interface);
  } else {
    // We did not find anything with the name ClassName; try to correct for
    // typos in the class name.
    TypoCorrection Corrected = CorrectTypo(
        DeclarationNameInfo(ClassName, ClassLoc), LookupOrdinaryName, TUScope,
        nullptr, llvm::make_unique<ObjCInterfaceValidatorCCC>(), CTK_NonError);
    if (Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
      // Suggest the (potentially) correct interface name. Don't provide a
      // code-modification hint or use the typo name for recovery, because
      // this is just a warning. The program may actually be correct.
      diagnoseTypo(Corrected,
                   PDiag(diag::warn_undef_interface_suggest) << ClassName,
                   /*ErrorRecovery*/false);
    } else {
      Diag(ClassLoc, diag::warn_undef_interface) << ClassName;
    }
  }

  // Check that the superclass name is a valid class name.
  ObjCInterfaceDecl *SDecl = nullptr;
  if (SuperClassname) {
    // Check if a different kind of symbol is declared in this scope.
    PrevDecl = LookupSingleName(TUScope, SuperClassname, SuperClassLoc,
                                LookupOrdinaryName);
    if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
      Diag(SuperClassLoc, diag::err_redefinition_different_kind)
          << SuperClassname;
      Diag(PrevDecl->getLocation(), diag::note_previous_definition);
    } else {
      SDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
      if (SDecl && !SDecl->hasDefinition())
        SDecl = nullptr;
      if (!SDecl)
        Diag(SuperClassLoc, diag::err_undef_superclass)
            << SuperClassname << ClassName;
      else if (IDecl && !declaresSameEntity(IDecl->getSuperClass(), SDecl)) {
        // This implementation and its interface do not have the same
        // super class.
        Diag(SuperClassLoc, diag::err_conflicting_super_class)
            << SDecl->getDeclName();
        Diag(SDecl->getLocation(), diag::note_previous_definition);
      }
    }
  }

  if (!IDecl) {
    // Legacy case of @implementation with no corresponding @interface.
    // Build, chain & install the interface decl into the identifier.

    // FIXME: Do we support attributes on the @implementation? If so we should
    // copy them over.
    IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassImplLoc,
                                      ClassName, /*typeParamList=*/nullptr,
                                      /*PrevDecl=*/nullptr, ClassLoc,
                                      true);
    IDecl->startDefinition();
    if (SDecl) {
      IDecl->setSuperClass(Context.getTrivialTypeSourceInfo(
                               Context.getObjCInterfaceType(SDecl),
                               SuperClassLoc));
      IDecl->setEndOfDefinitionLoc(SuperClassLoc);
    } else {
      IDecl->setEndOfDefinitionLoc(ClassLoc);
    }

    PushOnScopeChains(IDecl, TUScope);
  } else {
    // Mark the interface as being completed, even if it was just as
    //   @class ....;
    // declaration; the user cannot reopen it.
    if (!IDecl->hasDefinition())
      IDecl->startDefinition();
  }

  ObjCImplementationDecl *IMPDecl =
      ObjCImplementationDecl::Create(Context, CurContext, IDecl, SDecl,
                                     ClassLoc, AtClassImplLoc, SuperClassLoc);

  if (CheckObjCDeclScope(IMPDecl))
    return ActOnObjCContainerStartDefinition(IMPDecl);

  // Check that there is no duplicate implementation of this class.
  if (IDecl->getImplementation()) {
    // FIXME: Don't leak everything!
    Diag(ClassLoc, diag::err_dup_implementation_class) << ClassName;
    Diag(IDecl->getImplementation()->getLocation(),
         diag::note_previous_definition);
    IMPDecl->setInvalidDecl();
  } else {
    // Add it to the list.
    IDecl->setImplementation(IMPDecl);
    PushOnScopeChains(IMPDecl, TUScope);
    // Warn about implementing a deprecated class, under the
    // -Wdeprecated-implementations flag.
    DiagnoseObjCImplementedDeprecations(*this,
                                        dyn_cast<NamedDecl>(IDecl),
                                        IMPDecl->getLocation(), 1);
  }

  return ActOnObjCContainerStartDefinition(IMPDecl);
}

Sema::DeclGroupPtrTy
Sema::ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                    ArrayRef<Decl *> Decls) {
  SmallVector<Decl *, 64> DeclsInGroup;
  DeclsInGroup.reserve(Decls.size() + 1);

  for (unsigned i = 0, e = Decls.size(); i != e; ++i) {
    Decl *Dcl = Decls[i];
    if (!Dcl)
      continue;
    if (Dcl->getDeclContext()->isFileContext())
      Dcl->setTopLevelDeclInObjCContainer();
    DeclsInGroup.push_back(Dcl);
  }

  DeclsInGroup.push_back(ObjCImpDecl);

  return BuildDeclaratorGroup(DeclsInGroup, false);
}

void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                                    ObjCIvarDecl **ivars, unsigned numIvars,
                                    SourceLocation RBrace) {
  assert(ImpDecl && "missing implementation decl");
  ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface();
  if (!IDecl)
    return;
  /// Check case of non-existing \@interface decl.
  /// (legacy objective-c \@implementation decl without an \@interface decl).
  /// Add implementations's ivar to the synthesize class's ivar list.
  if (IDecl->isImplicitInterfaceDecl()) {
    IDecl->setEndOfDefinitionLoc(RBrace);
    // Add ivar's to class's DeclContext.
    for (unsigned i = 0, e = numIvars; i != e; ++i) {
      ivars[i]->setLexicalDeclContext(ImpDecl);
      IDecl->makeDeclVisibleInContext(ivars[i]);
      ImpDecl->addDecl(ivars[i]);
    }

    return;
  }
  // If implementation has empty ivar list, just return.
  if (numIvars == 0)
    return;

  assert(ivars && "missing @implementation ivars");
  if (LangOpts.ObjCRuntime.isNonFragile()) {
    if (ImpDecl->getSuperClass())
      Diag(ImpDecl->getLocation(), diag::warn_on_superclass_use);
    for (unsigned i = 0; i < numIvars; i++) {
      ObjCIvarDecl *ImplIvar = ivars[i];
      if (const ObjCIvarDecl *ClsIvar =
              IDecl->getIvarDecl(ImplIvar->getIdentifier())) {
        Diag(ImplIvar->getLocation(), diag::err_duplicate_ivar_declaration);
        Diag(ClsIvar->getLocation(), diag::note_previous_definition);
        continue;
      }
      // Check class extensions (unnamed categories) for duplicate ivars.
      for (const auto *CDecl : IDecl->visible_extensions()) {
        if (const ObjCIvarDecl *ClsExtIvar =
                CDecl->getIvarDecl(ImplIvar->getIdentifier())) {
          Diag(ImplIvar->getLocation(), diag::err_duplicate_ivar_declaration);
          Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
          continue;
        }
      }
      // Instance ivar to Implementation's DeclContext.
      ImplIvar->setLexicalDeclContext(ImpDecl);
      IDecl->makeDeclVisibleInContext(ImplIvar);
      ImpDecl->addDecl(ImplIvar);
    }
    return;
  }
  // Check interface's Ivar list against those in the implementation.
  // names and types must match.
  //
  unsigned j = 0;
  ObjCInterfaceDecl::ivar_iterator
      IVI = IDecl->ivar_begin(), IVE = IDecl->ivar_end();
  for (; numIvars > 0 && IVI != IVE; ++IVI) {
    ObjCIvarDecl *ImplIvar = ivars[j++];
    ObjCIvarDecl *ClsIvar = *IVI;
    assert(ImplIvar && "missing implementation ivar");
    assert(ClsIvar && "missing class ivar");

    // First, make sure the types match.
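    // For example (hypothetical names):
    //   @interface I { int ivar; } @end
    //   @implementation I { float ivar; } @end   // error: conflicting
    //                                            // instance variable type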
    if (!Context.hasSameType(ImplIvar->getType(), ClsIvar->getType())) {
      Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_type)
          << ImplIvar->getIdentifier() << ImplIvar->getType()
          << ClsIvar->getType();
      Diag(ClsIvar->getLocation(), diag::note_previous_definition);
    } else if (ImplIvar->isBitField() && ClsIvar->isBitField() &&
               ImplIvar->getBitWidthValue(Context) !=
                   ClsIvar->getBitWidthValue(Context)) {
      Diag(ImplIvar->getBitWidth()->getLocStart(),
           diag::err_conflicting_ivar_bitwidth)
          << ImplIvar->getIdentifier();
      Diag(ClsIvar->getBitWidth()->getLocStart(),
           diag::note_previous_definition);
    }
    // Make sure the names are identical.
    if (ImplIvar->getIdentifier() != ClsIvar->getIdentifier()) {
      Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_name)
          << ImplIvar->getIdentifier() << ClsIvar->getIdentifier();
      Diag(ClsIvar->getLocation(), diag::note_previous_definition);
    }
    --numIvars;
  }

  if (numIvars > 0)
    Diag(ivars[j]->getLocation(), diag::err_inconsistent_ivar_count);
  else if (IVI != IVE)
    Diag(IVI->getLocation(), diag::err_inconsistent_ivar_count);
}

static void WarnUndefinedMethod(Sema &S, SourceLocation ImpLoc,
                                ObjCMethodDecl *method,
                                bool &IncompleteImpl,
                                unsigned DiagID,
                                NamedDecl *NeededFor = nullptr) {
  // No point warning about a missing definition of a method that is
  // 'unavailable'.
  switch (method->getAvailability()) {
  case AR_Available:
  case AR_Deprecated:
    break;

  // Don't warn about unavailable or not-yet-introduced methods.
  case AR_NotYetIntroduced:
  case AR_Unavailable:
    return;
  }

  // FIXME: For now ignore 'IncompleteImpl'.
  // Previously we grouped all unimplemented methods under a single
  // warning, but some users strongly voiced that they would prefer
  // separate warnings. We will give that approach a try, as that
  // matches what we do with protocols.
  {
    const Sema::SemaDiagnosticBuilder &B = S.Diag(ImpLoc, DiagID);
    B << method;
    if (NeededFor)
      B << NeededFor;
  }

  // Issue a note to the original declaration.
  SourceLocation MethodLoc = method->getLocStart();
  if (MethodLoc.isValid())
    S.Diag(MethodLoc, diag::note_method_declared_at) << method;
}

/// Determines if type B can be substituted for type A. Returns true if we can
/// guarantee that anything that the user will do to an object of type A can
/// also be done to an object of type B. This is trivially true if the two
/// types are the same, or if B is a subclass of A. It becomes more complex
/// in cases where protocols are involved.
///
/// Object types in Objective-C describe the minimum requirements for an
/// object, rather than providing a complete description of a type. For
/// example, if A is a subclass of B, then B* may refer to an instance of A.
/// The principle of substitutability means that we may use an instance of A
/// anywhere that we may use an instance of B - it will implement all of the
/// ivars of B and all of the methods of B.
///
/// This substitutability is important when type checking methods, because
/// the implementation may have stricter type definitions than the interface.
/// The interface specifies minimum requirements, but the implementation may
/// have more accurate ones. For example, a method may privately accept
/// instances of B, but only publish that it accepts instances of A. Any
/// object passed to it will be type checked against B, and so will implicitly
/// be a valid A*. Similarly, a method may return a subclass of the class that
/// it is declared as returning.
///
/// This is most important when considering subclassing.
/// A method in a subclass must accept any object as an argument that its
/// superclass's implementation accepts. It may, however, accept a more
/// general type without breaking substitutability (i.e. you can still use
/// the subclass anywhere that you can use the superclass, but not vice
/// versa). The converse requirement applies to return types: the return
/// type for a subclass method must be a valid object of the kind that the
/// superclass advertises, but it may be specified more accurately. This
/// avoids the need for explicit down-casting by callers.
///
/// Note: This is a stricter requirement than for assignment.
static bool isObjCTypeSubstitutable(ASTContext &Context,
                                    const ObjCObjectPointerType *A,
                                    const ObjCObjectPointerType *B,
                                    bool rejectId) {
  // Reject a protocol-unqualified id.
  if (rejectId && B->isObjCIdType())
    return false;

  // If B is a qualified id, then A must also be a qualified id and it must
  // implement all of the protocols in B. It may not be a qualified class.
  // For example, MyClass<A> can be assigned to id<A>, but MyClass<A> is a
  // stricter definition so it is not substitutable for id<A>.
  if (B->isObjCQualifiedIdType()) {
    return A->isObjCQualifiedIdType() &&
           Context.ObjCQualifiedIdTypesAreCompatible(QualType(A, 0),
                                                     QualType(B, 0),
                                                     false);
  }

  /*
  // id is a special type that bypasses type checking completely. We want a
  // warning when it is used in one place but not another.
  if (C.isObjCIdType(A) || C.isObjCIdType(B)) return false;

  // If B is a qualified id, then A must also be a qualified id (which it
  // isn't if we've got this far).
  if (B->isObjCQualifiedIdType()) return false;
  */

  // Now we know that A and B are (potentially-qualified) class types. The
  // normal rules for assignment apply.
  return Context.canAssignObjCInterfaces(A, B);
}

static SourceRange getTypeRange(TypeSourceInfo *TSI) {
  return (TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange());
}

/// Determine whether two sets of Objective-C declaration qualifiers conflict.
static bool objcModifiersConflict(Decl::ObjCDeclQualifier x,
                                  Decl::ObjCDeclQualifier y) {
  return (x & ~Decl::OBJC_TQ_CSNullability) !=
         (y & ~Decl::OBJC_TQ_CSNullability);
}

static bool CheckMethodOverrideReturn(Sema &S,
                                      ObjCMethodDecl *MethodImpl,
                                      ObjCMethodDecl *MethodDecl,
                                      bool IsProtocolMethodDecl,
                                      bool IsOverridingMode,
                                      bool Warn) {
  if (IsProtocolMethodDecl &&
      objcModifiersConflict(MethodDecl->getObjCDeclQualifier(),
                            MethodImpl->getObjCDeclQualifier())) {
    if (Warn) {
      S.Diag(MethodImpl->getLocation(),
             (IsOverridingMode ?
              diag::warn_conflicting_overriding_ret_type_modifiers :
              diag::warn_conflicting_ret_type_modifiers))
          << MethodImpl->getDeclName()
          << MethodImpl->getReturnTypeSourceRange();
      S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration)
          << MethodDecl->getReturnTypeSourceRange();
    } else
      return false;
  }
  if (Warn && IsOverridingMode &&
      !isa<ObjCImplementationDecl>(MethodImpl->getDeclContext()) &&
      !S.Context.hasSameNullabilityTypeQualifier(MethodImpl->getReturnType(),
                                                 MethodDecl->getReturnType(),
                                                 false)) {
    auto nullabilityMethodImpl =
        *MethodImpl->getReturnType()->getNullability(S.Context);
    auto nullabilityMethodDecl =
        *MethodDecl->getReturnType()->getNullability(S.Context);
    S.Diag(MethodImpl->getLocation(),
           diag::warn_conflicting_nullability_attr_overriding_ret_types)
        << DiagNullabilityKind(
               nullabilityMethodImpl,
               ((MethodImpl->getObjCDeclQualifier() &
                 Decl::OBJC_TQ_CSNullability) != 0))
        << DiagNullabilityKind(
               nullabilityMethodDecl,
               ((MethodDecl->getObjCDeclQualifier() &
                 Decl::OBJC_TQ_CSNullability) != 0));
    S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration);
  }

  if (S.Context.hasSameUnqualifiedType(MethodImpl->getReturnType(),
                                       MethodDecl->getReturnType()))
    return true;
  if (!Warn)
    return false;

  unsigned DiagID =
      IsOverridingMode ? diag::warn_conflicting_overriding_ret_types
                       : diag::warn_conflicting_ret_types;

  // Mismatches between ObjC pointers go into a different warning
  // category, and sometimes they're even completely whitelisted.
  if (const ObjCObjectPointerType *ImplPtrTy =
          MethodImpl->getReturnType()->getAs<ObjCObjectPointerType>()) {
    if (const ObjCObjectPointerType *IfacePtrTy =
            MethodDecl->getReturnType()->getAs<ObjCObjectPointerType>()) {
      // Allow non-matching return types as long as they don't violate
      // the principle of substitutability. Specifically, we permit
      // return types that are subclasses of the declared return type,
      // or that are more-qualified versions of the declared type.
      if (isObjCTypeSubstitutable(S.Context, IfacePtrTy, ImplPtrTy, false))
        return false;

      DiagID =
          IsOverridingMode ? diag::warn_non_covariant_overriding_ret_types
                           : diag::warn_non_covariant_ret_types;
    }
  }

  S.Diag(MethodImpl->getLocation(), DiagID)
      << MethodImpl->getDeclName()
      << MethodDecl->getReturnType()
      << MethodImpl->getReturnType()
      << MethodImpl->getReturnTypeSourceRange();
  S.Diag(MethodDecl->getLocation(), IsOverridingMode ?
             diag::note_previous_declaration :
             diag::note_previous_definition)
      << MethodDecl->getReturnTypeSourceRange();
  return false;
}

static bool CheckMethodOverrideParam(Sema &S,
                                     ObjCMethodDecl *MethodImpl,
                                     ObjCMethodDecl *MethodDecl,
                                     ParmVarDecl *ImplVar,
                                     ParmVarDecl *IfaceVar,
                                     bool IsProtocolMethodDecl,
                                     bool IsOverridingMode,
                                     bool Warn) {
  if (IsProtocolMethodDecl &&
      objcModifiersConflict(ImplVar->getObjCDeclQualifier(),
                            IfaceVar->getObjCDeclQualifier())) {
    if (Warn) {
      if (IsOverridingMode)
        S.Diag(ImplVar->getLocation(),
               diag::warn_conflicting_overriding_param_modifiers)
            << getTypeRange(ImplVar->getTypeSourceInfo())
            << MethodImpl->getDeclName();
      else
        S.Diag(ImplVar->getLocation(),
               diag::warn_conflicting_param_modifiers)
            << getTypeRange(ImplVar->getTypeSourceInfo())
            << MethodImpl->getDeclName();
      S.Diag(IfaceVar->getLocation(), diag::note_previous_declaration)
          << getTypeRange(IfaceVar->getTypeSourceInfo());
    } else
      return false;
  }

  QualType ImplTy = ImplVar->getType();
  QualType IfaceTy = IfaceVar->getType();
  if (Warn && IsOverridingMode &&
      !isa<ObjCImplementationDecl>(MethodImpl->getDeclContext()) &&
      !S.Context.hasSameNullabilityTypeQualifier(ImplTy, IfaceTy, true)) {
    S.Diag(ImplVar->getLocation(),
           diag::warn_conflicting_nullability_attr_overriding_param_types)
        << DiagNullabilityKind(
               *ImplTy->getNullability(S.Context),
               ((ImplVar->getObjCDeclQualifier() &
                 Decl::OBJC_TQ_CSNullability) != 0))
        << DiagNullabilityKind(
               *IfaceTy->getNullability(S.Context),
               ((IfaceVar->getObjCDeclQualifier() &
                 Decl::OBJC_TQ_CSNullability) != 0));
    S.Diag(IfaceVar->getLocation(), diag::note_previous_declaration);
  }
  if (S.Context.hasSameUnqualifiedType(ImplTy, IfaceTy))
    return true;

  if (!Warn)
    return false;
  unsigned DiagID =
      IsOverridingMode ? diag::warn_conflicting_overriding_param_types
                       : diag::warn_conflicting_param_types;

  // Mismatches between ObjC pointers go into a different warning
  // category, and sometimes they're even completely whitelisted.
  if (const ObjCObjectPointerType *ImplPtrTy =
          ImplTy->getAs<ObjCObjectPointerType>()) {
    if (const ObjCObjectPointerType *IfacePtrTy =
            IfaceTy->getAs<ObjCObjectPointerType>()) {
      // Allow non-matching argument types as long as they don't
      // violate the principle of substitutability. Specifically, the
      // implementation must accept any objects that the superclass
      // accepts, however it may also accept others.
      if (isObjCTypeSubstitutable(S.Context, ImplPtrTy, IfacePtrTy, true))
        return false;

      DiagID =
          IsOverridingMode ? diag::warn_non_contravariant_overriding_param_types
                           : diag::warn_non_contravariant_param_types;
    }
  }

  S.Diag(ImplVar->getLocation(), DiagID)
      << getTypeRange(ImplVar->getTypeSourceInfo())
      << MethodImpl->getDeclName() << IfaceTy << ImplTy;
  S.Diag(IfaceVar->getLocation(),
         (IsOverridingMode ? diag::note_previous_declaration
                           : diag::note_previous_definition))
      << getTypeRange(IfaceVar->getTypeSourceInfo());
  return false;
}

/// In ARC, check whether the conventional meanings of the two methods
/// match. If they don't, it's a hard error.
static bool checkMethodFamilyMismatch(Sema &S, ObjCMethodDecl *impl,
                                      ObjCMethodDecl *decl) {
  ObjCMethodFamily implFamily = impl->getMethodFamily();
  ObjCMethodFamily declFamily = decl->getMethodFamily();
  if (implFamily == declFamily)
    return false;

  // Since conventions are sorted by selector, the only possibility is
  // that the types differ enough to cause one selector or the other
  // to fall out of the family.
  assert(implFamily == OMF_None || declFamily == OMF_None);
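  // For example, under ARC (hypothetical selector):
  //   - (instancetype)initWithX:(id)x;   // declaration: 'init' family
  //   - (int)initWithX:(id)x { ... }     // implementation: the non-object
  //                                      // result type drops it out of the
  //                                      // 'init' family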
if (impl->isInvalidDecl() || decl->isInvalidDecl()) return true; const ObjCMethodDecl *unmatched = impl; ObjCMethodFamily family = declFamily; unsigned errorID = diag::err_arc_lost_method_convention; unsigned noteID = diag::note_arc_lost_method_convention; if (declFamily == OMF_None) { unmatched = decl; family = implFamily; errorID = diag::err_arc_gained_method_convention; noteID = diag::note_arc_gained_method_convention; } // Indexes into a %select clause in the diagnostic. enum FamilySelector { F_alloc, F_copy, F_mutableCopy = F_copy, F_init, F_new }; FamilySelector familySelector = FamilySelector(); switch (family) { case OMF_None: llvm_unreachable("logic error, no method convention"); case OMF_retain: case OMF_release: case OMF_autorelease: case OMF_dealloc: case OMF_finalize: case OMF_retainCount: case OMF_self: case OMF_initialize: case OMF_performSelector: // Mismatches for these methods don't change ownership // conventions, so we don't care. return false; case OMF_init: familySelector = F_init; break; case OMF_alloc: familySelector = F_alloc; break; case OMF_copy: familySelector = F_copy; break; case OMF_mutableCopy: familySelector = F_mutableCopy; break; case OMF_new: familySelector = F_new; break; } enum ReasonSelector { R_NonObjectReturn, R_UnrelatedReturn }; ReasonSelector reasonSelector; // The only reason these methods don't fall within their families is // due to unusual result types. if (unmatched->getReturnType()->isObjCObjectPointerType()) { reasonSelector = R_UnrelatedReturn; } else { reasonSelector = R_NonObjectReturn; } S.Diag(impl->getLocation(), errorID) << int(familySelector) << int(reasonSelector); S.Diag(decl->getLocation(), noteID) << int(familySelector) << int(reasonSelector); return true; } void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl) { if (getLangOpts().ObjCAutoRefCount && checkMethodFamilyMismatch(*this, ImpMethodDecl, MethodDecl)) return; CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl, IsProtocolMethodDecl, false, true); for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(), IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(), EF = MethodDecl->param_end(); IM != EM && IF != EF; ++IM, ++IF) { CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl, *IM, *IF, IsProtocolMethodDecl, false, true); } if (ImpMethodDecl->isVariadic() != MethodDecl->isVariadic()) { Diag(ImpMethodDecl->getLocation(), diag::warn_conflicting_variadic); Diag(MethodDecl->getLocation(), diag::note_previous_declaration); } } void Sema::CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl) { CheckMethodOverrideReturn(*this, Method, Overridden, IsProtocolMethodDecl, true, true); for (ObjCMethodDecl::param_iterator IM = Method->param_begin(), IF = Overridden->param_begin(), EM = Method->param_end(), EF = Overridden->param_end(); IM != EM && IF != EF; ++IM, ++IF) { CheckMethodOverrideParam(*this, Method, Overridden, *IM, *IF, IsProtocolMethodDecl, true, true); } if (Method->isVariadic() != Overridden->isVariadic()) { Diag(Method->getLocation(), diag::warn_conflicting_overriding_variadic); Diag(Overridden->getLocation(), diag::note_previous_declaration); } } /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
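/// A purely illustrative sketch of the situation this diagnoses; the names
/// below are hypothetical and not taken from this file:
/// \code
/// @interface Foo : NSObject
/// - (void)reset;
/// @end
/// @implementation Foo (Extras)
/// - (void)reset { } // re-implements -reset with the exact declared signature
/// @end
/// \endcode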
void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl) { // don't issue warning when protocol method is optional because primary // class is not required to implement it and it is safe for protocol // to implement it. if (MethodDecl->getImplementationControl() == ObjCMethodDecl::Optional) return; // don't issue warning when primary class's method is // deprecated/unavailable. if (MethodDecl->hasAttr<UnavailableAttr>() || MethodDecl->hasAttr<DeprecatedAttr>()) return; bool match = CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl, IsProtocolMethodDecl, false, false); if (match) for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(), IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end(), EF = MethodDecl->param_end(); IM != EM && IF != EF; ++IM, ++IF) { match = CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl, *IM, *IF, IsProtocolMethodDecl, false, false); if (!match) break; } if (match) match = (ImpMethodDecl->isVariadic() == MethodDecl->isVariadic()); if (match) match = !(MethodDecl->isClassMethod() && MethodDecl->getSelector() == GetNullarySelector("load", Context)); if (match) { Diag(ImpMethodDecl->getLocation(), diag::warn_category_method_impl_match); Diag(MethodDecl->getLocation(), diag::note_method_declared_at) << MethodDecl->getDeclName(); } } /// FIXME: Type hierarchies in Objective-C can be deep. We could most likely /// improve the efficiency of selector lookups and type checking by associating /// with each protocol / interface / category the flattened instance tables. If /// we used an immutable set to keep the table then it wouldn't add significant /// memory cost and it would be handy for lookups. typedef llvm::DenseSet<IdentifierInfo*> ProtocolNameSet; typedef std::unique_ptr<ProtocolNameSet> LazyProtocolNameSet; static void findProtocolsWithExplicitImpls(const ObjCProtocolDecl *PDecl, ProtocolNameSet &PNS) { if (PDecl->hasAttr<ObjCExplicitProtocolImplAttr>()) PNS.insert(PDecl->getIdentifier()); for (const auto *PI : PDecl->protocols()) findProtocolsWithExplicitImpls(PI, PNS); } /// Recursively populates a set with all conformed protocols in a class /// hierarchy that have the 'objc_protocol_requires_explicit_implementation' /// attribute. static void findProtocolsWithExplicitImpls(const ObjCInterfaceDecl *Super, ProtocolNameSet &PNS) { if (!Super) return; for (const auto *I : Super->all_referenced_protocols()) findProtocolsWithExplicitImpls(I, PNS); findProtocolsWithExplicitImpls(Super->getSuperClass(), PNS); } /// CheckProtocolMethodDefs - This routine checks unimplemented methods /// declared in a protocol, and those referenced by it. static void CheckProtocolMethodDefs(Sema &S, SourceLocation ImpLoc, ObjCProtocolDecl *PDecl, bool& IncompleteImpl, const Sema::SelectorSet &InsMap, const Sema::SelectorSet &ClsMap, ObjCContainerDecl *CDecl, LazyProtocolNameSet &ProtocolsExplictImpl) { ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl); ObjCInterfaceDecl *IDecl = C ? C->getClassInterface() : dyn_cast<ObjCInterfaceDecl>(CDecl); assert (IDecl && "CheckProtocolMethodDefs - IDecl is null"); ObjCInterfaceDecl *Super = IDecl->getSuperClass(); ObjCInterfaceDecl *NSIDecl = nullptr; // If this protocol is marked 'objc_protocol_requires_explicit_implementation' // then we should check if any class in the super class hierarchy also // conforms to this protocol, either directly or via protocol inheritance.
// If so, we can skip checking this protocol completely because we // know that a parent class already satisfies this protocol. // // Note: we could generalize this logic for all protocols, and merely // add the limit on looking at the super class chain for just // specially marked protocols. This may be a good optimization. This // change is restricted to 'objc_protocol_requires_explicit_implementation' // protocols for now for controlled evaluation. if (PDecl->hasAttr<ObjCExplicitProtocolImplAttr>()) { if (!ProtocolsExplictImpl) { ProtocolsExplictImpl.reset(new ProtocolNameSet); findProtocolsWithExplicitImpls(Super, *ProtocolsExplictImpl); } if (ProtocolsExplictImpl->find(PDecl->getIdentifier()) != ProtocolsExplictImpl->end()) return; // If no super class conforms to the protocol, we should not search // for methods in the super class to implicitly satisfy the protocol. Super = nullptr; } if (S.getLangOpts().ObjCRuntime.isNeXTFamily()) { // check to see if class implements forwardInvocation method and objects // of this class are derived from 'NSProxy' so as to forward requests // from one object to another. // Under such conditions, which means that every method possible is // implemented in the class, we should not issue "Method definition not // found" warnings. // FIXME: Use a general GetUnarySelector method for this. IdentifierInfo* II = &S.Context.Idents.get("forwardInvocation"); Selector fISelector = S.Context.Selectors.getSelector(1, &II); if (InsMap.count(fISelector)) // Is IDecl derived from 'NSProxy'? If so, no instance methods // need be implemented in the implementation. NSIDecl = IDecl->lookupInheritedClass(&S.Context.Idents.get("NSProxy")); } // If this is a forward protocol declaration, get its definition. if (!PDecl->isThisDeclarationADefinition() && PDecl->getDefinition()) PDecl = PDecl->getDefinition(); // If a method lookup fails locally we still need to look and see if // the method was implemented by a base class or an inherited // protocol. This lookup is slow, but occurs rarely in correct code // and otherwise would terminate in a warning. // check unimplemented instance methods. if (!NSIDecl) for (auto *method : PDecl->instance_methods()) { if (method->getImplementationControl() != ObjCMethodDecl::Optional && !method->isPropertyAccessor() && !InsMap.count(method->getSelector()) && (!Super || !Super->lookupMethod(method->getSelector(), true /* instance */, false /* shallowCategory */, true /* followsSuper */, nullptr /* category */))) { // If a method is not implemented in the category implementation but // has been declared in its primary class, superclass, // or in one of their protocols, no need to issue the warning. // This is because the method will be implemented in the primary class // or one of its super class implementations. // Ugly, but necessary. A method declared in a protocol might // have been synthesized due to a property declared in the class which // uses the protocol.
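// A purely illustrative sketch of that case (hypothetical names):
// \code
// @protocol P
// @property (strong) id value; // implies -value and -setValue:
// @end
// @interface C : NSObject <P>
// @end // C's synthesized accessors can satisfy P's requirements
// \endcode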
if (ObjCMethodDecl *MethodInClass = IDecl->lookupMethod(method->getSelector(), true /* instance */, true /* shallowCategoryLookup */, false /* followSuper */)) if (C || MethodInClass->isPropertyAccessor()) continue; unsigned DIAG = diag::warn_unimplemented_protocol_method; if (!S.Diags.isIgnored(DIAG, ImpLoc)) { WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG, PDecl); } } } // check unimplemented class methods for (auto *method : PDecl->class_methods()) { if (method->getImplementationControl() != ObjCMethodDecl::Optional && !ClsMap.count(method->getSelector()) && (!Super || !Super->lookupMethod(method->getSelector(), false /* class method */, false /* shallowCategoryLookup */, true /* followSuper */, nullptr /* category */))) { // See above comment for instance method lookups. if (C && IDecl->lookupMethod(method->getSelector(), false /* class */, true /* shallowCategoryLookup */, false /* followSuper */)) continue; unsigned DIAG = diag::warn_unimplemented_protocol_method; if (!S.Diags.isIgnored(DIAG, ImpLoc)) { WarnUndefinedMethod(S, ImpLoc, method, IncompleteImpl, DIAG, PDecl); } } } // Check on this protocol's referenced protocols, recursively. for (auto *PI : PDecl->protocols()) CheckProtocolMethodDefs(S, ImpLoc, PI, IncompleteImpl, InsMap, ClsMap, CDecl, ProtocolsExplictImpl); } /// MatchAllMethodDeclarations - Check methods declared in interface /// or protocol against those declared in their implementations. /// void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* CDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl) { // Check and see if instance methods in class interface have been // implemented in the implementation class. If so, their types match. for (auto *I : CDecl->instance_methods()) { if (!InsMapSeen.insert(I->getSelector()).second) continue; if (!I->isPropertyAccessor() && !InsMap.count(I->getSelector())) { if (ImmediateClass) WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl, diag::warn_undef_method_impl); continue; } else { ObjCMethodDecl *ImpMethodDecl = IMPDecl->getInstanceMethod(I->getSelector()); assert(CDecl->getInstanceMethod(I->getSelector()) && "Expected to find the method through lookup as well"); // ImpMethodDecl may be null as in a @dynamic property. if (ImpMethodDecl) { if (!WarnCategoryMethodImpl) WarnConflictingTypedMethods(ImpMethodDecl, I, isa<ObjCProtocolDecl>(CDecl)); else if (!I->isPropertyAccessor()) WarnExactTypedMethods(ImpMethodDecl, I, isa<ObjCProtocolDecl>(CDecl)); } } } // Check and see if class methods in class interface have been // implemented in the implementation class. If so, their types match.
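// For instance (hypothetical): a +defaultStore declared in the @interface but
// missing from the corresponding @implementation is flagged by the loop below
// with warn_undef_method_impl when ImmediateClass is true.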
for (auto *I : CDecl->class_methods()) { if (!ClsMapSeen.insert(I->getSelector()).second) continue; if (!ClsMap.count(I->getSelector())) { if (ImmediateClass) WarnUndefinedMethod(*this, IMPDecl->getLocation(), I, IncompleteImpl, diag::warn_undef_method_impl); } else { ObjCMethodDecl *ImpMethodDecl = IMPDecl->getClassMethod(I->getSelector()); assert(CDecl->getClassMethod(I->getSelector()) && "Expected to find the method through lookup as well"); if (!WarnCategoryMethodImpl) WarnConflictingTypedMethods(ImpMethodDecl, I, isa<ObjCProtocolDecl>(CDecl)); else WarnExactTypedMethods(ImpMethodDecl, I, isa<ObjCProtocolDecl>(CDecl)); } } if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl> (CDecl)) { // Also, check for methods declared in protocols inherited by // this protocol. for (auto *PI : PD->protocols()) MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, IMPDecl, PI, IncompleteImpl, false, WarnCategoryMethodImpl); } if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) { // when checking that methods in implementation match their declaration, // i.e. when WarnCategoryMethodImpl is false, check declarations in class // extension; as well as those in categories. if (!WarnCategoryMethodImpl) { for (auto *Cat : I->visible_categories()) MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, IMPDecl, Cat, IncompleteImpl, false, WarnCategoryMethodImpl); } else { // Methods in class extensions also need to be looked at next. for (auto *Ext : I->visible_extensions()) MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, IMPDecl, Ext, IncompleteImpl, false, WarnCategoryMethodImpl); } // Check for any implementation of methods declared in protocols. for (auto *PI : I->all_referenced_protocols()) MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, IMPDecl, PI, IncompleteImpl, false, WarnCategoryMethodImpl); // FIXME. For now, we are not checking for exact match of methods // in category implementation and its primary class's super class. if (!WarnCategoryMethodImpl && I->getSuperClass()) MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, IMPDecl, I->getSuperClass(), IncompleteImpl, false); } } /// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in /// a category match those implemented in its primary class and /// warns each time an exact match is found. void Sema::CheckCategoryVsClassMethodMatches( ObjCCategoryImplDecl *CatIMPDecl) { // Get category's primary class. ObjCCategoryDecl *CatDecl = CatIMPDecl->getCategoryDecl(); if (!CatDecl) return; ObjCInterfaceDecl *IDecl = CatDecl->getClassInterface(); if (!IDecl) return; ObjCInterfaceDecl *SuperIDecl = IDecl->getSuperClass(); SelectorSet InsMap, ClsMap; for (const auto *I : CatIMPDecl->instance_methods()) { Selector Sel = I->getSelector(); // When checking for methods implemented in the category, skip over // those declared in category class's super class. This is because // the super class must implement the method.
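// Illustrative sketch (hypothetical names): if Base implements -description
// and a category on a subclass of Base also implements it, that selector is
// skipped here so only methods the category genuinely introduces are compared
// against the primary class.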
if (SuperIDecl && SuperIDecl->lookupMethod(Sel, true)) continue; InsMap.insert(Sel); } for (const auto *I : CatIMPDecl->class_methods()) { Selector Sel = I->getSelector(); if (SuperIDecl && SuperIDecl->lookupMethod(Sel, false)) continue; ClsMap.insert(Sel); } if (InsMap.empty() && ClsMap.empty()) return; SelectorSet InsMapSeen, ClsMapSeen; bool IncompleteImpl = false; MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, CatIMPDecl, IDecl, IncompleteImpl, false, true /*WarnCategoryMethodImpl*/); } void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* CDecl, bool IncompleteImpl) { SelectorSet InsMap; // Check and see if instance methods in class interface have been // implemented in the implementation class. for (const auto *I : IMPDecl->instance_methods()) InsMap.insert(I->getSelector()); // Check that properties declared in the interface either 1) have an // implementation or 2) are covered by a @synthesize/@dynamic implementation // of the property in the @implementation. if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) { bool SynthesizeProperties = LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCRuntime.isNonFragile() && !IDecl->isObjCRequiresPropertyDefs(); DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, SynthesizeProperties); } // Diagnose null-resettable synthesized setters. diagnoseNullResettableSynthesizedSetters(IMPDecl); SelectorSet ClsMap; for (const auto *I : IMPDecl->class_methods()) ClsMap.insert(I->getSelector()); // Check for type conflict of methods declared in a class/protocol and // its implementation, if any. SelectorSet InsMapSeen, ClsMapSeen; MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen, IMPDecl, CDecl, IncompleteImpl, true); // check all methods implemented in category against those declared // in its primary class. if (ObjCCategoryImplDecl *CatDecl = dyn_cast<ObjCCategoryImplDecl>(IMPDecl)) CheckCategoryVsClassMethodMatches(CatDecl); // Check the protocol list for unimplemented methods in the @implementation // class. // Check and see if class methods in class interface have been // implemented in the implementation class. LazyProtocolNameSet ExplicitImplProtocols; if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) { for (auto *PI : I->all_referenced_protocols()) CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), PI, IncompleteImpl, InsMap, ClsMap, I, ExplicitImplProtocols); // Check class extensions (unnamed categories) for (auto *Ext : I->visible_extensions()) ImplMethodsVsClassMethods(S, IMPDecl, Ext, IncompleteImpl); } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) { // For extended class, unimplemented methods in its protocols will // be reported in the primary class. if (!C->IsClassExtension()) { for (auto *P : C->protocols()) CheckProtocolMethodDefs(*this, IMPDecl->getLocation(), P, IncompleteImpl, InsMap, ClsMap, CDecl, ExplicitImplProtocols); DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, /*SynthesizeProperties=*/false); } } else llvm_unreachable("invalid ObjCContainerDecl type."); } Sema::DeclGroupPtrTy Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts) { SmallVector<Decl *, 8> DeclsInGroup; for (unsigned i = 0; i != NumElts; ++i) { // Check for another declaration kind with the same name.
NamedDecl *PrevDecl = LookupSingleName(TUScope, IdentList[i], IdentLocs[i], LookupOrdinaryName, ForRedeclaration); if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) { // GCC apparently allows the following idiom: // // typedef NSObject < XCElementTogglerP > XCElementToggler; // @class XCElementToggler; // // Here we have chosen to ignore the forward class declaration // with a warning, since this is the implied behavior. TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(PrevDecl); if (!TDD || !TDD->getUnderlyingType()->isObjCObjectType()) { Diag(AtClassLoc, diag::err_redefinition_different_kind) << IdentList[i]; Diag(PrevDecl->getLocation(), diag::note_previous_definition); } else { // A forward class declaration matching a typedef name of a class refers // to the underlying class. Just ignore the forward class with a warning // as this will force the intended behavior which is to look up the // typedef name. if (isa<ObjCObjectType>(TDD->getUnderlyingType())) { Diag(AtClassLoc, diag::warn_forward_class_redefinition) << IdentList[i]; Diag(PrevDecl->getLocation(), diag::note_previous_definition); continue; } } } // Create a declaration to describe this forward declaration. ObjCInterfaceDecl *PrevIDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl); IdentifierInfo *ClassName = IdentList[i]; if (PrevIDecl && PrevIDecl->getIdentifier() != ClassName) { // A previous decl with a different name is because of // @compatibility_alias, for example: // \code // @class NewImage; // @compatibility_alias OldImage NewImage; // \endcode // A lookup for 'OldImage' will return the 'NewImage' decl. // // In such a case use the real declaration name, instead of the alias one, // otherwise we will break IdentifierResolver and redecls-chain invariants. // FIXME: If necessary, add a bit to indicate that this ObjCInterfaceDecl // has been aliased. ClassName = PrevIDecl->getIdentifier(); } // If this forward declaration has type parameters, compare them with the // type parameters of the previous declaration. ObjCTypeParamList *TypeParams = TypeParamLists[i]; if (PrevIDecl && TypeParams) { if (ObjCTypeParamList *PrevTypeParams = PrevIDecl->getTypeParamList()) { // Check for consistency with the previous declaration. if (checkTypeParamListConsistency( *this, PrevTypeParams, TypeParams, TypeParamListContext::ForwardDeclaration)) { TypeParams = nullptr; } } else if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) { // The @interface does not have type parameters. Complain. Diag(IdentLocs[i], diag::err_objc_parameterized_forward_class) << ClassName << TypeParams->getSourceRange(); Diag(Def->getLocation(), diag::note_defined_here) << ClassName; TypeParams = nullptr; } } ObjCInterfaceDecl *IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassLoc, ClassName, TypeParams, PrevIDecl, IdentLocs[i]); IDecl->setAtEndRange(IdentLocs[i]); PushOnScopeChains(IDecl, TUScope); CheckObjCDeclScope(IDecl); DeclsInGroup.push_back(IDecl); } return BuildDeclaratorGroup(DeclsInGroup, false); } static bool tryMatchRecordTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy, const Type *left, const Type *right); static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy, QualType leftQT, QualType rightQT) { const Type *left = Context.getCanonicalType(leftQT).getUnqualifiedType().getTypePtr(); const Type *right = Context.getCanonicalType(rightQT).getUnqualifiedType().getTypePtr(); if (left == right) return true; // If we're doing a strict match, the types have to match exactly.
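// Illustrative examples for the loose strategy below (hypothetical, not
// exhaustive): '_Bool' vs. 'char' both collapse to STK_Integral, and 'id'
// vs. a block pointer both collapse to STK_ObjCObjectPointer, so each pair
// is considered compatible once the size and alignment checks pass.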
if (strategy == Sema::MMS_strict) return false; if (left->isIncompleteType() || right->isIncompleteType()) return false; // Otherwise, use this absurdly complicated algorithm to try to // validate the basic, low-level compatibility of the two types. // As a minimum, require the sizes and alignments to match. TypeInfo LeftTI = Context.getTypeInfo(left); TypeInfo RightTI = Context.getTypeInfo(right); if (LeftTI.Width != RightTI.Width) return false; if (LeftTI.Align != RightTI.Align) return false; // Consider all the kinds of non-dependent canonical types: // - functions and arrays aren't possible as return and parameter types // - vector types of equal size can be arbitrarily mixed if (isa<VectorType>(left)) return isa<VectorType>(right); if (isa<VectorType>(right)) return false; // - references should only match references of identical type // - structs, unions, and Objective-C objects must match more-or-less // exactly // - everything else should be a scalar if (!left->isScalarType() || !right->isScalarType()) return tryMatchRecordTypes(Context, strategy, left, right); // Make scalars agree in kind, except count bools as chars, and group // all non-member pointers together. Type::ScalarTypeKind leftSK = left->getScalarTypeKind(); Type::ScalarTypeKind rightSK = right->getScalarTypeKind(); if (leftSK == Type::STK_Bool) leftSK = Type::STK_Integral; if (rightSK == Type::STK_Bool) rightSK = Type::STK_Integral; if (leftSK == Type::STK_CPointer || leftSK == Type::STK_BlockPointer) leftSK = Type::STK_ObjCObjectPointer; if (rightSK == Type::STK_CPointer || rightSK == Type::STK_BlockPointer) rightSK = Type::STK_ObjCObjectPointer; // Note that data member pointers and function member pointers don't // intermix because of the size differences. return (leftSK == rightSK); } static bool tryMatchRecordTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy, const Type *lt, const Type *rt) { assert(lt && rt && lt != rt); if (!isa<RecordType>(lt) || !isa<RecordType>(rt)) return false; RecordDecl *left = cast<RecordType>(lt)->getDecl(); RecordDecl *right = cast<RecordType>(rt)->getDecl(); // Require union-hood to match. if (left->isUnion() != right->isUnion()) return false; // Require an exact match if either is non-POD. if ((isa<CXXRecordDecl>(left) && !cast<CXXRecordDecl>(left)->isPOD()) || (isa<CXXRecordDecl>(right) && !cast<CXXRecordDecl>(right)->isPOD())) return false; // Require size and alignment to match. TypeInfo LeftTI = Context.getTypeInfo(lt); TypeInfo RightTI = Context.getTypeInfo(rt); if (LeftTI.Width != RightTI.Width) return false; if (LeftTI.Align != RightTI.Align) return false; // Require fields to match. RecordDecl::field_iterator li = left->field_begin(), le = left->field_end(); RecordDecl::field_iterator ri = right->field_begin(), re = right->field_end(); for (; li != le && ri != re; ++li, ++ri) { if (!matchTypes(Context, strategy, li->getType(), ri->getType())) return false; } return (li == le && ri == re); } /// MatchTwoMethodDeclarations - Checks that two methods have matching type and /// returns true, or false, accordingly. /// TODO: Handle protocol list; such as id<p1,p2> in type comparisons bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left, const ObjCMethodDecl *right, MethodMatchStrategy strategy) { if (!matchTypes(Context, strategy, left->getReturnType(), right->getReturnType())) return false; // If either is hidden, it is not considered to match. 
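// ('Hidden' here refers to declarations not currently visible to name
// lookup, e.g. because the owning module has not been imported.)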
if (left->isHidden() || right->isHidden()) return false; if (getLangOpts().ObjCAutoRefCount && (left->hasAttr<NSReturnsRetainedAttr>() != right->hasAttr<NSReturnsRetainedAttr>() || left->hasAttr<NSConsumesSelfAttr>() != right->hasAttr<NSConsumesSelfAttr>())) return false; ObjCMethodDecl::param_const_iterator li = left->param_begin(), le = left->param_end(), ri = right->param_begin(), re = right->param_end(); for (; li != le && ri != re; ++li, ++ri) { assert(ri != right->param_end() && "Param mismatch"); const ParmVarDecl *lparm = *li, *rparm = *ri; if (!matchTypes(Context, strategy, lparm->getType(), rparm->getType())) return false; if (getLangOpts().ObjCAutoRefCount && lparm->hasAttr<NSConsumedAttr>() != rparm->hasAttr<NSConsumedAttr>()) return false; } return true; } void Sema::addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method) { // Record at the head of the list whether there were 0, 1, or >= 2 methods // inside categories. if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(Method->getDeclContext())) if (!CD->IsClassExtension() && List->getBits() < 2) List->setBits(List->getBits() + 1); // If the list is empty, make it a singleton list. if (List->getMethod() == nullptr) { List->setMethod(Method); List->setNext(nullptr); return; } // We've seen a method with this name, see if we have already seen this type // signature. ObjCMethodList *Previous = List; for (; List; Previous = List, List = List->getNext()) { // If we are building a module, keep all of the methods. if (getLangOpts().Modules && !getLangOpts().CurrentModule.empty()) continue; if (!MatchTwoMethodDeclarations(Method, List->getMethod())) { // Even if two method types do not match, we would like to say // there is more than one declaration so unavailability/deprecated // warning is not too noisy. if (!Method->isDefined()) List->setHasMoreThanOneDecl(true); continue; } ObjCMethodDecl *PrevObjCMethod = List->getMethod(); // Propagate the 'defined' bit. if (Method->isDefined()) PrevObjCMethod->setDefined(true); else { // Objective-C doesn't allow an @interface for a class after its // @implementation. So if Method is not defined and there already is // an entry for this type signature, Method has to be for a different // class than PrevObjCMethod. List->setHasMoreThanOneDecl(true); } // If a method is deprecated, push it in the global pool. // This is used for better diagnostics. if (Method->isDeprecated()) { if (!PrevObjCMethod->isDeprecated()) List->setMethod(Method); } // If the new method is unavailable, push it into global pool // unless previous one is deprecated. if (Method->isUnavailable()) { if (PrevObjCMethod->getAvailability() < AR_Deprecated) List->setMethod(Method); } return; } // We have a new signature for an existing method - add it. // This is extremely rare. Only 1% of Cocoa selectors are "overloaded". ObjCMethodList *Mem = BumpAlloc.Allocate<ObjCMethodList>(); Previous->setNext(new (Mem) ObjCMethodList(Method)); } /// \brief Read the contents of the method pool for a given selector from /// external storage. void Sema::ReadMethodPool(Selector Sel) { assert(ExternalSource && "We need an external AST source"); ExternalSource->ReadMethodPool(Sel); } void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance) { // Ignore methods of invalid containers. 
if (cast<Decl>(Method->getDeclContext())->isInvalidDecl()) return; if (ExternalSource) ReadMethodPool(Method->getSelector()); GlobalMethodPool::iterator Pos = MethodPool.find(Method->getSelector()); if (Pos == MethodPool.end()) Pos = MethodPool.insert(std::make_pair(Method->getSelector(), GlobalMethods())).first; Method->setDefined(impl); ObjCMethodList &Entry = instance ? Pos->second.first : Pos->second.second; addMethodToGlobalList(&Entry, Method); } /// Determines if this is an "acceptable" loose mismatch in the global /// method pool. This exists mostly as a hack to get around certain /// global mismatches which we can't afford to make warnings / errors. /// Really, what we want is a way to take a method out of the global /// method pool. static bool isAcceptableMethodMismatch(ObjCMethodDecl *chosen, ObjCMethodDecl *other) { if (!chosen->isInstanceMethod()) return false; Selector sel = chosen->getSelector(); if (!sel.isUnarySelector() || sel.getNameForSlot(0) != "length") return false; // Don't complain about mismatches for -length if the method we // chose has an integral result type. return (chosen->getReturnType()->isIntegerType()); } bool Sema::CollectMultipleMethodsInGlobalPool( Selector Sel, SmallVectorImpl<ObjCMethodDecl *> &Methods, bool instance) { if (ExternalSource) ReadMethodPool(Sel); GlobalMethodPool::iterator Pos = MethodPool.find(Sel); if (Pos == MethodPool.end()) return false; // Gather the non-hidden methods. ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second; for (ObjCMethodList *M = &MethList; M; M = M->getNext()) if (M->getMethod() && !M->getMethod()->isHidden()) Methods.push_back(M->getMethod()); return Methods.size() > 1; } bool Sema::AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass) { GlobalMethodPool::iterator Pos = MethodPool.find(Sel); // If there is no method in the pool, the caller should not issue any // warning. if (Pos == MethodPool.end()) return true; ObjCMethodList &MethList = BestMethod->isInstanceMethod() ? Pos->second.first : Pos->second.second; // Diagnose finding more than one method in the global pool. SmallVector<ObjCMethodDecl *, 4> Methods; Methods.push_back(BestMethod); for (ObjCMethodList *ML = &MethList; ML; ML = ML->getNext()) if (ObjCMethodDecl *M = ML->getMethod()) if (!M->isHidden() && M != BestMethod && !M->hasAttr<UnavailableAttr>()) Methods.push_back(M); if (Methods.size() > 1) DiagnoseMultipleMethodInGlobalPool(Methods, Sel, R, receiverIdOrClass); return MethList.hasMoreThanOneDecl(); } ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance) { if (ExternalSource) ReadMethodPool(Sel); GlobalMethodPool::iterator Pos = MethodPool.find(Sel); if (Pos == MethodPool.end()) return nullptr; // Return the first of the non-hidden methods. ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second; for (ObjCMethodList *M = &MethList; M; M = M->getNext()) { if (M->getMethod() && !M->getMethod()->isHidden()) return M->getMethod(); } return nullptr; } void Sema::DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass) { // We found multiple methods, so we may have to complain. bool issueDiagnostic = false, issueError = false; // We support a warning which complains about *any* difference in // method signature.
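// A purely illustrative sketch (hypothetical interfaces) of what the strict
// check can flag for an 'id' receiver:
// \code
// @interface A : NSObject
// - (int)count;
// @end
// @interface B : NSObject
// - (double)count;
// @end
// // ... [receiverOfTypeId count] ... which -count was intended?
// \endcode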
bool strictSelectorMatch = receiverIdOrClass && !Diags.isIgnored(diag::warn_strict_multiple_method_decl, R.getBegin()); if (strictSelectorMatch) { for (unsigned I = 1, N = Methods.size(); I != N; ++I) { if (!MatchTwoMethodDeclarations(Methods[0], Methods[I], MMS_strict)) { issueDiagnostic = true; break; } } } // If we didn't see any strict differences, we won't see any loose // differences. In ARC, however, we also need to check for loose // mismatches, because most of them are errors. if (!strictSelectorMatch || (issueDiagnostic && getLangOpts().ObjCAutoRefCount)) for (unsigned I = 1, N = Methods.size(); I != N; ++I) { // Check whether the method types mismatch under loose matching. if (!MatchTwoMethodDeclarations(Methods[0], Methods[I], MMS_loose) && !isAcceptableMethodMismatch(Methods[0], Methods[I])) { issueDiagnostic = true; if (getLangOpts().ObjCAutoRefCount) issueError = true; break; } } if (issueDiagnostic) { if (issueError) Diag(R.getBegin(), diag::err_arc_multiple_method_decl) << Sel << R; else if (strictSelectorMatch) Diag(R.getBegin(), diag::warn_strict_multiple_method_decl) << Sel << R; else Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R; Diag(Methods[0]->getLocStart(), issueError ? diag::note_possibility : diag::note_using) << Methods[0]->getSourceRange(); for (unsigned I = 1, N = Methods.size(); I != N; ++I) { Diag(Methods[I]->getLocStart(), diag::note_also_found) << Methods[I]->getSourceRange(); } } } ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) { GlobalMethodPool::iterator Pos = MethodPool.find(Sel); if (Pos == MethodPool.end()) return nullptr; GlobalMethods &Methods = Pos->second; for (const ObjCMethodList *Method = &Methods.first; Method; Method = Method->getNext()) if (Method->getMethod() && (Method->getMethod()->isDefined() || Method->getMethod()->isPropertyAccessor())) return Method->getMethod(); for (const ObjCMethodList *Method = &Methods.second; Method; Method = Method->getNext()) if (Method->getMethod() && (Method->getMethod()->isDefined() || Method->getMethod()->isPropertyAccessor())) return Method->getMethod(); return nullptr; } static void HelperSelectorsForTypoCorrection( SmallVectorImpl<const ObjCMethodDecl *> &BestMethod, StringRef Typo, const ObjCMethodDecl * Method) { const unsigned MaxEditDistance = 1; unsigned BestEditDistance = MaxEditDistance + 1; std::string MethodName = Method->getSelector().getAsString(); unsigned MinPossibleEditDistance = abs((int)MethodName.size() - (int)Typo.size()); if (MinPossibleEditDistance > 0 && Typo.size() / MinPossibleEditDistance < 1) return; unsigned EditDistance = Typo.edit_distance(MethodName, true, MaxEditDistance); if (EditDistance > MaxEditDistance) return; if (EditDistance == BestEditDistance) BestMethod.push_back(Method); else if (EditDistance < BestEditDistance) { BestMethod.clear(); BestMethod.push_back(Method); } } static bool HelperIsMethodInObjCType(Sema &S, Selector Sel, QualType ObjectType) { if (ObjectType.isNull()) return true; if (S.LookupMethodInObjectType(Sel, ObjectType, true/*Instance method*/)) return true; return S.LookupMethodInObjectType(Sel, ObjectType, false/*Class method*/) != nullptr; } const ObjCMethodDecl * Sema::SelectorsForTypoCorrection(Selector Sel, QualType ObjectType) { unsigned NumArgs = Sel.getNumArgs(); SmallVector<const ObjCMethodDecl *, 8> Methods; bool ObjectIsId = true, ObjectIsClass = true; if (ObjectType.isNull()) ObjectIsId = ObjectIsClass = false; else if (!ObjectType->isObjCObjectPointerType()) return nullptr; else if (const
ObjCObjectPointerType *ObjCPtr = ObjectType->getAsObjCInterfacePointerType()) { ObjectType = QualType(ObjCPtr->getInterfaceType(), 0); ObjectIsId = ObjectIsClass = false; } else if (ObjectType->isObjCIdType() || ObjectType->isObjCQualifiedIdType()) ObjectIsClass = false; else if (ObjectType->isObjCClassType() || ObjectType->isObjCQualifiedClassType()) ObjectIsId = false; else return nullptr; for (GlobalMethodPool::iterator b = MethodPool.begin(), e = MethodPool.end(); b != e; b++) { // instance methods for (ObjCMethodList *M = &b->second.first; M; M=M->getNext()) if (M->getMethod() && (M->getMethod()->getSelector().getNumArgs() == NumArgs) && (M->getMethod()->getSelector() != Sel)) { if (ObjectIsId) Methods.push_back(M->getMethod()); else if (!ObjectIsClass && HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(), ObjectType)) Methods.push_back(M->getMethod()); } // class methods for (ObjCMethodList *M = &b->second.second; M; M=M->getNext()) if (M->getMethod() && (M->getMethod()->getSelector().getNumArgs() == NumArgs) && (M->getMethod()->getSelector() != Sel)) { if (ObjectIsClass) Methods.push_back(M->getMethod()); else if (!ObjectIsId && HelperIsMethodInObjCType(*this, M->getMethod()->getSelector(), ObjectType)) Methods.push_back(M->getMethod()); } } SmallVector<const ObjCMethodDecl *, 8> SelectedMethods; for (unsigned i = 0, e = Methods.size(); i < e; i++) { HelperSelectorsForTypoCorrection(SelectedMethods, Sel.getAsString(), Methods[i]); } return (SelectedMethods.size() == 1) ? SelectedMethods[0] : nullptr; } /// DiagnoseDuplicateIvars - /// Check for duplicate ivars in the entire class at the start of /// \@implementation. This becomes necessary because class extensions can /// add ivars to a class in random order which will not be known until /// class's \@implementation is seen. void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID) { for (auto *Ivar : ID->ivars()) { if (Ivar->isInvalidDecl()) continue; if (IdentifierInfo *II = Ivar->getIdentifier()) { ObjCIvarDecl* prevIvar = SID->lookupInstanceVariable(II); if (prevIvar) { Diag(Ivar->getLocation(), diag::err_duplicate_member) << II; Diag(prevIvar->getLocation(), diag::note_previous_declaration); Ivar->setInvalidDecl(); } } } } Sema::ObjCContainerKind Sema::getObjCContainerKind() const { switch (CurContext->getDeclKind()) { case Decl::ObjCInterface: return Sema::OCK_Interface; case Decl::ObjCProtocol: return Sema::OCK_Protocol; case Decl::ObjCCategory: if (cast<ObjCCategoryDecl>(CurContext)->IsClassExtension()) return Sema::OCK_ClassExtension; return Sema::OCK_Category; case Decl::ObjCImplementation: return Sema::OCK_Implementation; case Decl::ObjCCategoryImpl: return Sema::OCK_CategoryImplementation; default: return Sema::OCK_None; } } // Note: For class/category implementations, allMethods is always null.
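// In this HLSL build the Objective-C body below is compiled out by the
// '#if 1 // HLSL Change Starts' guard, so ActOnAtEnd simply returns null.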
Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods, ArrayRef<DeclGroupPtrTy> allTUVars) { #if 1 // HLSL Change Starts (void)(S); (void)(AtEnd); (void)(allMethods); (void)(allTUVars); return nullptr; #else if (getObjCContainerKind() == Sema::OCK_None) return nullptr; assert(AtEnd.isValid() && "Invalid location for '@end'"); ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext); Decl *ClassDecl = cast<Decl>(OCD); bool isInterfaceDeclKind = isa<ObjCInterfaceDecl>(ClassDecl) || isa<ObjCCategoryDecl>(ClassDecl) || isa<ObjCProtocolDecl>(ClassDecl); bool checkIdenticalMethods = isa<ObjCImplementationDecl>(ClassDecl); // FIXME: Remove these and use the ObjCContainerDecl/DeclContext. llvm::DenseMap<Selector, const ObjCMethodDecl*> InsMap; llvm::DenseMap<Selector, const ObjCMethodDecl*> ClsMap; for (unsigned i = 0, e = allMethods.size(); i != e; i++ ) { ObjCMethodDecl *Method = cast_or_null<ObjCMethodDecl>(allMethods[i]); if (!Method) continue; // Already issued a diagnostic. if (Method->isInstanceMethod()) { /// Check for instance method of the same name with incompatible types const ObjCMethodDecl *&PrevMethod = InsMap[Method->getSelector()]; bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod) : false; if ((isInterfaceDeclKind && PrevMethod && !match) || (checkIdenticalMethods && match)) { Diag(Method->getLocation(), diag::err_duplicate_method_decl) << Method->getDeclName(); Diag(PrevMethod->getLocation(), diag::note_previous_declaration); Method->setInvalidDecl(); } else { if (PrevMethod) { Method->setAsRedeclaration(PrevMethod); if (!Context.getSourceManager().isInSystemHeader( Method->getLocation())) Diag(Method->getLocation(), diag::warn_duplicate_method_decl) << Method->getDeclName(); Diag(PrevMethod->getLocation(), diag::note_previous_declaration); } InsMap[Method->getSelector()] = Method; /// The following allows us to typecheck messages to "id". AddInstanceMethodToGlobalPool(Method); } } else { /// Check for class method of the same name with incompatible types const ObjCMethodDecl *&PrevMethod = ClsMap[Method->getSelector()]; bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod) : false; if ((isInterfaceDeclKind && PrevMethod && !match) || (checkIdenticalMethods && match)) { Diag(Method->getLocation(), diag::err_duplicate_method_decl) << Method->getDeclName(); Diag(PrevMethod->getLocation(), diag::note_previous_declaration); Method->setInvalidDecl(); } else { if (PrevMethod) { Method->setAsRedeclaration(PrevMethod); if (!Context.getSourceManager().isInSystemHeader( Method->getLocation())) Diag(Method->getLocation(), diag::warn_duplicate_method_decl) << Method->getDeclName(); Diag(PrevMethod->getLocation(), diag::note_previous_declaration); } ClsMap[Method->getSelector()] = Method; AddFactoryMethodToGlobalPool(Method); } } } if (isa<ObjCInterfaceDecl>(ClassDecl)) { // Nothing to do here. } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(ClassDecl)) { // Categories are used to extend the class by declaring new methods. // By the same token, they are also used to add new properties. No // need to compare the added property to those in the class. if (C->IsClassExtension()) { ObjCInterfaceDecl *CCPrimary = C->getClassInterface(); DiagnoseClassExtensionDupMethods(C, CCPrimary); } } if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(ClassDecl)) { if (CDecl->getIdentifier()) // ProcessPropertyDecl is responsible for diagnosing conflicts with any // user-defined setter/getter. 
// It also synthesizes setter/getter methods // and adds them to the DeclContext and global method pools. for (auto *I : CDecl->properties()) ProcessPropertyDecl(I, CDecl); CDecl->setAtEndRange(AtEnd); } if (ObjCImplementationDecl *IC=dyn_cast<ObjCImplementationDecl>(ClassDecl)) { IC->setAtEndRange(AtEnd); if (ObjCInterfaceDecl* IDecl = IC->getClassInterface()) { // Any property declared in a class extension might have user // declared setter or getter in current class extension or one // of the other class extensions. Mark them as synthesized as // property will be synthesized when property with same name is // seen in the @implementation. for (const auto *Ext : IDecl->visible_extensions()) { for (const auto *Property : Ext->properties()) { // Skip over properties declared @dynamic if (const ObjCPropertyImplDecl *PIDecl = IC->FindPropertyImplDecl(Property->getIdentifier())) if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) continue; for (const auto *Ext : IDecl->visible_extensions()) { if (ObjCMethodDecl *GetterMethod = Ext->getInstanceMethod(Property->getGetterName())) GetterMethod->setPropertyAccessor(true); if (!Property->isReadOnly()) if (ObjCMethodDecl *SetterMethod = Ext->getInstanceMethod(Property->getSetterName())) SetterMethod->setPropertyAccessor(true); } } } ImplMethodsVsClassMethods(S, IC, IDecl); AtomicPropertySetterGetterRules(IC, IDecl); DiagnoseOwningPropertyGetterSynthesis(IC); DiagnoseUnusedBackingIvarInAccessor(S, IC); if (IDecl->hasDesignatedInitializers()) DiagnoseMissingDesignatedInitOverrides(IC, IDecl); bool HasRootClassAttr = IDecl->hasAttr<ObjCRootClassAttr>(); if (IDecl->getSuperClass() == nullptr) { // This class has no superclass, so check that it has been marked with // __attribute((objc_root_class)). if (!HasRootClassAttr) { SourceLocation DeclLoc(IDecl->getLocation()); SourceLocation SuperClassLoc(getLocForEndOfToken(DeclLoc)); Diag(DeclLoc, diag::warn_objc_root_class_missing) << IDecl->getIdentifier(); // See if NSObject is in the current scope, and if it is, suggest // adding " : NSObject " to the class declaration. NamedDecl *IF = LookupSingleName(TUScope, NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject), DeclLoc, LookupOrdinaryName); ObjCInterfaceDecl *NSObjectDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF); if (NSObjectDecl && NSObjectDecl->getDefinition()) { Diag(SuperClassLoc, diag::note_objc_needs_superclass) << FixItHint::CreateInsertion(SuperClassLoc, " : NSObject "); } else { Diag(SuperClassLoc, diag::note_objc_needs_superclass); } } } else if (HasRootClassAttr) { // Complain that only root classes may have this attribute. Diag(IDecl->getLocation(), diag::err_objc_root_class_subclass); } if (LangOpts.ObjCRuntime.isNonFragile()) { while (IDecl->getSuperClass()) { DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass()); IDecl = IDecl->getSuperClass(); } } } SetIvarInitializers(IC); } else if (ObjCCategoryImplDecl* CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassDecl)) { CatImplClass->setAtEndRange(AtEnd); // Find category interface decl and then check that all methods declared // in this interface are implemented in the category @implementation. if (ObjCInterfaceDecl* IDecl = CatImplClass->getClassInterface()) { if (ObjCCategoryDecl *Cat = IDecl->FindCategoryDeclaration(CatImplClass->getIdentifier())) { ImplMethodsVsClassMethods(S, CatImplClass, Cat); } } } if (isInterfaceDeclKind) { // Reject invalid vardecls.
for (unsigned i = 0, e = allTUVars.size(); i != e; i++) { DeclGroupRef DG = allTUVars[i].get(); for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) if (VarDecl *VDecl = dyn_cast<VarDecl>(*I)) { if (!VDecl->hasExternalStorage()) Diag(VDecl->getLocation(), diag::err_objc_var_decl_inclass); } } } ActOnObjCContainerFinishDefinition(); for (unsigned i = 0, e = allTUVars.size(); i != e; i++) { DeclGroupRef DG = allTUVars[i].get(); for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) (*I)->setTopLevelDeclInObjCContainer(); Consumer.HandleTopLevelDeclInObjCContainer(DG); } ActOnDocumentableDecl(ClassDecl); return ClassDecl; #endif // HLSL Change Ends } #if 0 // HLSL Change Starts /// CvtQTToAstBitMask - utility routine to produce an AST bitmask for /// objective-c's type qualifier from the parser version of the same info. static Decl::ObjCDeclQualifier CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) { return (Decl::ObjCDeclQualifier) (unsigned) PQTVal; } /// \brief Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. /// static Sema::ResultTypeCompatibilityKind CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method, ObjCInterfaceDecl *CurrentClass) { QualType ResultType = Method->getReturnType(); // If an Objective-C method inherits its related result type, then its // declared result type must be compatible with its own class type. The // declared result type is compatible if: if (const ObjCObjectPointerType *ResultObjectType = ResultType->getAs<ObjCObjectPointerType>()) { // - it is id or qualified id, or if (ResultObjectType->isObjCIdType() || ResultObjectType->isObjCQualifiedIdType()) return Sema::RTC_Compatible; if (CurrentClass) { if (ObjCInterfaceDecl *ResultClass = ResultObjectType->getInterfaceDecl()) { // - it is the same as the method's class type, or if (declaresSameEntity(CurrentClass, ResultClass)) return Sema::RTC_Compatible; // - it is a superclass of the method's class type if (ResultClass->isSuperClassOf(CurrentClass)) return Sema::RTC_Compatible; } } else { // Any Objective-C pointer type might be acceptable for a protocol // method; we just don't know. return Sema::RTC_Unknown; } } return Sema::RTC_Incompatible; } #endif // HLSL Change Ends namespace { /// A helper class for searching for methods which a particular method /// overrides. class OverrideSearch { public: Sema &S; ObjCMethodDecl *Method; llvm::SmallPtrSet<ObjCMethodDecl*, 4> Overridden; bool Recursive; public: OverrideSearch(Sema &S, ObjCMethodDecl *method) : S(S), Method(method) { Selector selector = method->getSelector(); // Bypass this search if we've never seen an instance/class method // with this selector before. Sema::GlobalMethodPool::iterator it = S.MethodPool.find(selector); if (it == S.MethodPool.end()) { if (!S.getExternalSource()) return; S.ReadMethodPool(selector); it = S.MethodPool.find(selector); if (it == S.MethodPool.end()) return; } ObjCMethodList &list = method->isInstanceMethod() ? it->second.first : it->second.second; if (!list.getMethod()) return; ObjCContainerDecl *container = cast<ObjCContainerDecl>(method->getDeclContext()); // Prevent the search from reaching this container again. This is // important with categories, which override methods from the // interface and each other. 
if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(container)) { searchFromContainer(container); if (ObjCInterfaceDecl *Interface = Category->getClassInterface()) searchFromContainer(Interface); } else { searchFromContainer(container); } } typedef llvm::SmallPtrSet<ObjCMethodDecl*, 128>::iterator iterator; iterator begin() const { return Overridden.begin(); } iterator end() const { return Overridden.end(); } private: void searchFromContainer(ObjCContainerDecl *container) { if (container->isInvalidDecl()) return; switch (container->getDeclKind()) { #define OBJCCONTAINER(type, base) \ case Decl::type: \ searchFrom(cast<type##Decl>(container)); \ break; #define ABSTRACT_DECL(expansion) #define DECL(type, base) \ case Decl::type: #include "clang/AST/DeclNodes.inc" llvm_unreachable("not an ObjC container!"); } } void searchFrom(ObjCProtocolDecl *protocol) { if (!protocol->hasDefinition()) return; // A method in a protocol declaration overrides declarations from // referenced ("parent") protocols. search(protocol->getReferencedProtocols()); } void searchFrom(ObjCCategoryDecl *category) { // A method in a category declaration overrides declarations from // the main class and from protocols the category references. // The main class is handled in the constructor. search(category->getReferencedProtocols()); } void searchFrom(ObjCCategoryImplDecl *impl) { // A method in a category definition that has a category // declaration overrides declarations from the category // declaration. if (ObjCCategoryDecl *category = impl->getCategoryDecl()) { search(category); if (ObjCInterfaceDecl *Interface = category->getClassInterface()) search(Interface); // Otherwise it overrides declarations from the class. } else if (ObjCInterfaceDecl *Interface = impl->getClassInterface()) { search(Interface); } } void searchFrom(ObjCInterfaceDecl *iface) { // A method in a class declaration overrides declarations from if (!iface->hasDefinition()) return; // - categories, for (auto *Cat : iface->known_categories()) search(Cat); // - the super class, and if (ObjCInterfaceDecl *super = iface->getSuperClass()) search(super); // - any referenced protocols. search(iface->getReferencedProtocols()); } void searchFrom(ObjCImplementationDecl *impl) { // A method in a class implementation overrides declarations from // the class interface. if (ObjCInterfaceDecl *Interface = impl->getClassInterface()) search(Interface); } void search(const ObjCProtocolList &protocols) { for (ObjCProtocolList::iterator i = protocols.begin(), e = protocols.end(); i != e; ++i) search(*i); } void search(ObjCContainerDecl *container) { // Check for a method in this container which matches this selector. ObjCMethodDecl *meth = container->getMethod(Method->getSelector(), Method->isInstanceMethod(), /*AllowHidden=*/true); // If we find one, record it and bail out. if (meth) { Overridden.insert(meth); return; } // Otherwise, search for methods that a hypothetical method here // would have overridden. // Note that we're now in a recursive case. Recursive = true; searchFromContainer(container); } }; } void Sema::CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC) { // Search for overridden methods and merge information down from them. OverrideSearch overrides(*this, ObjCMethod); // Keep track if the method overrides any method in the class's base classes, // its protocols, or its categories' protocols; we will keep that info // in the ObjCMethodDecl. 
// For this info, a method in an implementation is not considered as // overriding the same method in the interface or its categories. bool hasOverriddenMethodsInBaseOrProtocol = false; for (OverrideSearch::iterator i = overrides.begin(), e = overrides.end(); i != e; ++i) { ObjCMethodDecl *overridden = *i; if (!hasOverriddenMethodsInBaseOrProtocol) { if (isa<ObjCProtocolDecl>(overridden->getDeclContext()) || CurrentClass != overridden->getClassInterface() || overridden->isOverriding()) { hasOverriddenMethodsInBaseOrProtocol = true; } else if (isa<ObjCImplDecl>(ObjCMethod->getDeclContext())) { // OverrideSearch will return as "overridden" the same method in the // interface. For hasOverriddenMethodsInBaseOrProtocol, we need to // check whether a category of a base class introduced a method with the // same selector, after the interface method declaration. // To avoid unnecessary lookups in the majority of cases, we use the // extra info bits in GlobalMethodPool to check whether there were any // category methods with this selector. GlobalMethodPool::iterator It = MethodPool.find(ObjCMethod->getSelector()); if (It != MethodPool.end()) { ObjCMethodList &List = ObjCMethod->isInstanceMethod()? It->second.first: It->second.second; unsigned CategCount = List.getBits(); if (CategCount > 0) { // If the method is in a category we'll do lookup if there were at // least 2 category methods recorded, otherwise only one will do. if (CategCount > 1 || !isa<ObjCCategoryImplDecl>(overridden->getDeclContext())) { OverrideSearch overrides(*this, overridden); for (OverrideSearch::iterator OI= overrides.begin(), OE= overrides.end(); OI!=OE; ++OI) { ObjCMethodDecl *SuperOverridden = *OI; if (isa<ObjCProtocolDecl>(SuperOverridden->getDeclContext()) || CurrentClass != SuperOverridden->getClassInterface()) { hasOverriddenMethodsInBaseOrProtocol = true; overridden->setOverriding(true); break; } } } } } } } // Propagate down the 'related result type' bit from overridden methods. if (RTC != Sema::RTC_Incompatible && overridden->hasRelatedResultType()) ObjCMethod->SetRelatedResultType(); // Then merge the declarations. mergeObjCMethodDecls(ObjCMethod, overridden); if (ObjCMethod->isImplicit() && overridden->isImplicit()) continue; // Conflicting properties are detected elsewhere. 
// Check for overriding methods if (isa<ObjCInterfaceDecl>(ObjCMethod->getDeclContext()) || isa<ObjCImplementationDecl>(ObjCMethod->getDeclContext())) CheckConflictingOverridingMethod(ObjCMethod, overridden, isa<ObjCProtocolDecl>(overridden->getDeclContext())); if (CurrentClass && overridden->getDeclContext() != CurrentClass && isa<ObjCInterfaceDecl>(overridden->getDeclContext()) && !overridden->isImplicit() /* not meant for properties */) { ObjCMethodDecl::param_iterator ParamI = ObjCMethod->param_begin(), E = ObjCMethod->param_end(); ObjCMethodDecl::param_iterator PrevI = overridden->param_begin(), PrevE = overridden->param_end(); for (; ParamI != E && PrevI != PrevE; ++ParamI, ++PrevI) { assert(PrevI != overridden->param_end() && "Param mismatch"); QualType T1 = Context.getCanonicalType((*ParamI)->getType()); QualType T2 = Context.getCanonicalType((*PrevI)->getType()); // If the type of an argument of the method in this class does not match // its respective argument type in the super class method, issue a // warning. if (!Context.typesAreCompatible(T1, T2)) { Diag((*ParamI)->getLocation(), diag::ext_typecheck_base_super) << T1 << T2; Diag(overridden->getLocation(), diag::note_previous_declaration); break; } } } } ObjCMethod->setOverriding(hasOverriddenMethodsInBaseOrProtocol); } #if 0 // HLSL Change Starts - HLSL does not support ObjC constructs /// Merge type nullability for a redeclaration of the same entity, /// producing the updated type of the redeclared entity. static QualType mergeTypeNullabilityForRedecl(Sema &S, SourceLocation loc, QualType type, bool usesCSKeyword, SourceLocation prevLoc, QualType prevType, bool prevUsesCSKeyword) { // Determine the nullability of both types. auto nullability = type->getNullability(S.Context); auto prevNullability = prevType->getNullability(S.Context); // Easy case: both have nullability. if (nullability.hasValue() == prevNullability.hasValue()) { // Neither has nullability; continue. if (!nullability) return type; // The nullabilities are equivalent; do nothing. if (*nullability == *prevNullability) return type; // Complain about mismatched nullability. S.Diag(loc, diag::err_nullability_conflicting) << DiagNullabilityKind(*nullability, usesCSKeyword) << DiagNullabilityKind(*prevNullability, prevUsesCSKeyword); return type; } // If it's the redeclaration that has nullability, don't change anything. if (nullability) return type; // Otherwise, provide the result with the same nullability. return S.Context.getAttributedType( AttributedType::getNullabilityAttrKind(*prevNullability), type, type); } /// Merge information from the declaration of a method in the \@interface /// (or a category/extension) into the corresponding method in the /// @implementation (for a class or category). static void mergeInterfaceMethodToImpl(Sema &S, ObjCMethodDecl *method, ObjCMethodDecl *prevMethod) { // Merge the objc_requires_super attribute. if (prevMethod->hasAttr<ObjCRequiresSuperAttr>() && !method->hasAttr<ObjCRequiresSuperAttr>()) { // merge the attribute into implementation. method->addAttr( ObjCRequiresSuperAttr::CreateImplicit(S.Context, method->getLocation())); } // Merge nullability of the result type.
QualType newReturnType = mergeTypeNullabilityForRedecl( S, method->getReturnTypeSourceRange().getBegin(), method->getReturnType(), method->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability, prevMethod->getReturnTypeSourceRange().getBegin(), prevMethod->getReturnType(), prevMethod->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability); method->setReturnType(newReturnType); // Handle each of the parameters. unsigned numParams = method->param_size(); unsigned numPrevParams = prevMethod->param_size(); for (unsigned i = 0, n = std::min(numParams, numPrevParams); i != n; ++i) { ParmVarDecl *param = method->param_begin()[i]; ParmVarDecl *prevParam = prevMethod->param_begin()[i]; // Merge nullability. QualType newParamType = mergeTypeNullabilityForRedecl( S, param->getLocation(), param->getType(), param->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability, prevParam->getLocation(), prevParam->getType(), prevParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability); param->setType(newParamType); } } #endif // HLSL Change Ends - HLSL does not support ObjC constructs Decl *Sema::ActOnMethodDeclaration( Scope *S, SourceLocation MethodLoc, SourceLocation EndLoc, tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args AttributeList *AttrList, tok::ObjCKeywordKind MethodDeclKind, bool isVariadic, bool MethodDefinition) { // HLSL Change Starts llvm_unreachable("HLSL does not support ObjC constructs"); #if 0 // Make sure we can establish a context for the method. if (!CurContext->isObjCContainer()) { Diag(MethodLoc, diag::error_missing_method_context); return nullptr; } ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext); Decl *ClassDecl = cast<Decl>(OCD); QualType resultDeclType; bool HasRelatedResultType = false; TypeSourceInfo *ReturnTInfo = nullptr; if (ReturnType) { resultDeclType = GetTypeFromParser(ReturnType, &ReturnTInfo); if (CheckFunctionReturnType(resultDeclType, MethodLoc)) return nullptr; QualType bareResultType = resultDeclType; (void)AttributedType::stripOuterNullability(bareResultType); HasRelatedResultType = (bareResultType == Context.getObjCInstanceType()); } else { // get the type for "id". resultDeclType = Context.getObjCIdType(); Diag(MethodLoc, diag::warn_missing_method_return_type) << FixItHint::CreateInsertion(SelectorLocs.front(), "(id)"); } ObjCMethodDecl *ObjCMethod = ObjCMethodDecl::Create( Context, MethodLoc, EndLoc, Sel, resultDeclType, ReturnTInfo, CurContext, MethodType == tok::minus, isVariadic, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/false, /*isDefined=*/false, MethodDeclKind == tok::objc_optional ? ObjCMethodDecl::Optional : ObjCMethodDecl::Required, HasRelatedResultType); SmallVector<ParmVarDecl*, 16> Params; for (unsigned i = 0, e = Sel.getNumArgs(); i != e; ++i) { QualType ArgType; TypeSourceInfo *DI; if (!ArgInfo[i].Type) { ArgType = Context.getObjCIdType(); DI = nullptr; } else { ArgType = GetTypeFromParser(ArgInfo[i].Type, &DI); } LookupResult R(*this, ArgInfo[i].Name, ArgInfo[i].NameLoc, LookupOrdinaryName, ForRedeclaration); LookupName(R, S); if (R.isSingleResult()) { NamedDecl *PrevDecl = R.getFoundDecl(); if (S->isDeclScope(PrevDecl)) { Diag(ArgInfo[i].NameLoc, (MethodDefinition ? 
diag::warn_method_param_redefinition : diag::warn_method_param_declaration)) << ArgInfo[i].Name; Diag(PrevDecl->getLocation(), diag::note_previous_declaration); } } SourceLocation StartLoc = DI ? DI->getTypeLoc().getBeginLoc() : ArgInfo[i].NameLoc; ParmVarDecl* Param = CheckParameter(ObjCMethod, StartLoc, ArgInfo[i].NameLoc, ArgInfo[i].Name, ArgType, DI, SC_None); Param->setObjCMethodScopeInfo(i); Param->setObjCDeclQualifier( CvtQTToAstBitMask(ArgInfo[i].DeclSpec.getObjCDeclQualifier())); // Apply the attributes to the parameter. ProcessDeclAttributeList(TUScope, Param, ArgInfo[i].ArgAttrs); if (Param->hasAttr<BlocksAttr>()) { Diag(Param->getLocation(), diag::err_block_on_nonlocal); Param->setInvalidDecl(); } S->AddDecl(Param); IdResolver.AddDecl(Param); Params.push_back(Param); } for (unsigned i = 0, e = CNumArgs; i != e; ++i) { ParmVarDecl *Param = cast<ParmVarDecl>(CParamInfo[i].Param); QualType ArgType = Param->getType(); if (ArgType.isNull()) ArgType = Context.getObjCIdType(); else // Perform the default array/function conversions (C99 6.7.5.3p[7,8]). ArgType = Context.getAdjustedParameterType(ArgType); Param->setDeclContext(ObjCMethod); Params.push_back(Param); } ObjCMethod->setMethodParams(Context, Params, SelectorLocs); ObjCMethod->setObjCDeclQualifier( CvtQTToAstBitMask(ReturnQT.getObjCDeclQualifier())); if (AttrList) ProcessDeclAttributeList(TUScope, ObjCMethod, AttrList); // Add the method now. const ObjCMethodDecl *PrevMethod = nullptr; if (ObjCImplDecl *ImpDecl = dyn_cast<ObjCImplDecl>(ClassDecl)) { if (MethodType == tok::minus) { PrevMethod = ImpDecl->getInstanceMethod(Sel); ImpDecl->addInstanceMethod(ObjCMethod); } else { PrevMethod = ImpDecl->getClassMethod(Sel); ImpDecl->addClassMethod(ObjCMethod); } // Merge information from the @interface declaration into the // @implementation. if (ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface()) { if (auto *IMD = IDecl->lookupMethod(ObjCMethod->getSelector(), ObjCMethod->isInstanceMethod())) { mergeInterfaceMethodToImpl(*this, ObjCMethod, IMD); // Warn about defining -dealloc in a category. if (isa<ObjCCategoryImplDecl>(ImpDecl) && IMD->isOverriding() && ObjCMethod->getSelector().getMethodFamily() == OMF_dealloc) { Diag(ObjCMethod->getLocation(), diag::warn_dealloc_in_category) << ObjCMethod->getDeclName(); } } } } else { cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod); } if (PrevMethod) { // You can never have two method definitions with the same name. Diag(ObjCMethod->getLocation(), diag::err_duplicate_method_decl) << ObjCMethod->getDeclName(); Diag(PrevMethod->getLocation(), diag::note_previous_declaration); ObjCMethod->setInvalidDecl(); return ObjCMethod; } // If this Objective-C method does not have a related result type, but we // are allowed to infer related result types, try to do so based on the // method family. 
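// Hypothetical example of the inference performed below: in a class Foo,
//   - (id)init;
// belongs to the OMF_init family of instance methods, so its result is
// treated as a related result type (effectively 'instancetype', i.e. Foo *).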
ObjCInterfaceDecl *CurrentClass = dyn_cast<ObjCInterfaceDecl>(ClassDecl); if (!CurrentClass) { if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl)) CurrentClass = Cat->getClassInterface(); else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(ClassDecl)) CurrentClass = Impl->getClassInterface(); else if (ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(ClassDecl)) CurrentClass = CatImpl->getClassInterface(); } ResultTypeCompatibilityKind RTC = CheckRelatedResultTypeCompatibility(*this, ObjCMethod, CurrentClass); CheckObjCMethodOverrides(ObjCMethod, CurrentClass, RTC); bool ARCError = false; if (getLangOpts().ObjCAutoRefCount) ARCError = CheckARCMethodDecl(ObjCMethod); // Infer the related result type when possible. if (!ARCError && RTC == Sema::RTC_Compatible && !ObjCMethod->hasRelatedResultType() && LangOpts.ObjCInferRelatedResultType) { bool InferRelatedResultType = false; switch (ObjCMethod->getMethodFamily()) { case OMF_None: case OMF_copy: case OMF_dealloc: case OMF_finalize: case OMF_mutableCopy: case OMF_release: case OMF_retainCount: case OMF_initialize: case OMF_performSelector: break; case OMF_alloc: case OMF_new: InferRelatedResultType = ObjCMethod->isClassMethod(); break; case OMF_init: case OMF_autorelease: case OMF_retain: case OMF_self: InferRelatedResultType = ObjCMethod->isInstanceMethod(); break; } if (InferRelatedResultType && !ObjCMethod->getReturnType()->isObjCIndependentClassType()) ObjCMethod->SetRelatedResultType(); } ActOnDocumentableDecl(ObjCMethod); return ObjCMethod; #endif // HLSL Change Ends } bool Sema::CheckObjCDeclScope(Decl *D) { // Following is also an error. But it is caused by a missing @end // and diagnostic is issued elsewhere. if (isa<ObjCContainerDecl>(CurContext->getRedeclContext())) return false; // If we switched context to translation unit while we are still lexically in // an objc container, it means the parser missed emitting an error. if (isa<TranslationUnitDecl>(getCurLexicalContext()->getRedeclContext())) return false; Diag(D->getLocation(), diag::err_objc_decls_may_only_appear_in_global_scope); D->setInvalidDecl(); return true; } /// Called whenever \@defs(ClassName) is encountered in the source. Inserts the /// instance variables of ClassName into Decls. void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl*> &Decls) { // Check that ClassName is a valid class ObjCInterfaceDecl *Class = getObjCInterfaceDecl(ClassName, DeclStart); if (!Class) { Diag(DeclStart, diag::err_undef_interface) << ClassName; return; } if (LangOpts.ObjCRuntime.isNonFragile()) { Diag(DeclStart, diag::err_atdef_nonfragile_interface); return; } // Collect the instance variables SmallVector<const ObjCIvarDecl*, 32> Ivars; Context.DeepCollectObjCIvars(Class, true, Ivars); // For each ivar, create a fresh ObjCAtDefsFieldDecl. for (unsigned i = 0; i < Ivars.size(); i++) { const FieldDecl* ID = cast<FieldDecl>(Ivars[i]); RecordDecl *Record = dyn_cast<RecordDecl>(TagD); Decl *FD = ObjCAtDefsFieldDecl::Create(Context, Record, /*FIXME: StartL=*/ID->getLocation(), ID->getLocation(), ID->getIdentifier(), ID->getType(), ID->getBitWidth()); Decls.push_back(FD); } // Introduce all of these fields into the appropriate scope. 
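// (Hypothetical example of @defs: given 'struct S { @defs(MyClass) };',
// each ivar of MyClass was turned into an ObjCAtDefsFieldDecl above and is
// now pushed into S's scope here.)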
for (SmallVectorImpl<Decl*>::iterator D = Decls.begin(); D != Decls.end(); ++D) { FieldDecl *FD = cast<FieldDecl>(*D); if (getLangOpts().CPlusPlus) PushOnScopeChains(cast<FieldDecl>(FD), S); else if (RecordDecl *Record = dyn_cast<RecordDecl>(TagD)) Record->addDecl(FD); } } /// \brief Build and type-check a new Objective-C exception variable declaration. VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid) { // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage // duration shall not be qualified by an address-space qualifier." // Since all parameters have automatic storage duration, they cannot have // an address space. if (T.getAddressSpace() != 0) { Diag(IdLoc, diag::err_arg_with_address_space); Invalid = true; } // An @catch parameter must be an unqualified object pointer type; // FIXME: Recover from "NSObject foo" by inserting the * in "NSObject *foo"? if (Invalid) { // Don't do any further checking. } else if (T->isDependentType()) { // Okay: we don't know what this type will instantiate to. } else if (!T->isObjCObjectPointerType()) { Invalid = true; Diag(IdLoc, diag::err_catch_param_not_objc_type); } else if (T->isObjCQualifiedIdType()) { Invalid = true; Diag(IdLoc, diag::err_illegal_qualifiers_on_catch_parm); } VarDecl *New = VarDecl::Create(Context, CurContext, StartLoc, IdLoc, Id, T, TInfo, SC_None); New->setExceptionVariable(true); // In ARC, infer 'retaining' for variables of retainable type. if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(New)) Invalid = true; if (Invalid) New->setInvalidDecl(); return New; } Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) { const DeclSpec &DS = D.getDeclSpec(); // We allow the "register" storage class on exception variables because // GCC did, but we drop it completely. Any other storage class is an error. if (DS.getStorageClassSpec() == DeclSpec::SCS_register) { Diag(DS.getStorageClassSpecLoc(), diag::warn_register_objc_catch_parm) << FixItHint::CreateRemoval(SourceRange(DS.getStorageClassSpecLoc())); } else if (DeclSpec::SCS SCS = DS.getStorageClassSpec()) { Diag(DS.getStorageClassSpecLoc(), diag::err_storage_spec_on_catch_parm) << DeclSpec::getSpecifierName(SCS); } if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), diag::err_invalid_thread) << DeclSpec::getSpecifierName(TSCS); D.getMutableDeclSpec().ClearStorageClassSpecs(); DiagnoseFunctionSpecifiers(D.getDeclSpec()); // Check that there are no default arguments inside the type of this // exception object (C++ only). if (getLangOpts().CPlusPlus) CheckExtraCXXDefaultArguments(D); TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); QualType ExceptionType = TInfo->getType(); VarDecl *New = BuildObjCExceptionDecl(TInfo, ExceptionType, D.getSourceRange().getBegin(), D.getIdentifierLoc(), D.getIdentifier(), D.isInvalidType()); // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1). if (D.getCXXScopeSpec().isSet()) { Diag(D.getIdentifierLoc(), diag::err_qualified_objc_catch_parm) << D.getCXXScopeSpec().getRange(); New->setInvalidDecl(); } // Add the parameter declaration into this scope. S->AddDecl(New); if (D.getIdentifier()) IdResolver.AddDecl(New); ProcessDeclAttributes(S, New, D); if (New->hasAttr<BlocksAttr>()) Diag(New->getLocation(), diag::err_block_on_nonlocal); return New; } /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization.
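/// (For instance, a hypothetical C++ ivar 'std::string Name;' has record
/// type after stripping array dimensions, so it is collected below for
/// construction/destruction.)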
void Sema::CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars) { for (ObjCIvarDecl *Iv = OI->all_declared_ivar_begin(); Iv; Iv = Iv->getNextIvar()) { QualType QT = Context.getBaseElementType(Iv->getType()); if (QT->isRecordType()) Ivars.push_back(Iv); } } void Sema::DiagnoseUseOfUnimplementedSelectors() { // Load referenced selectors from the external source. if (ExternalSource) { SmallVector<std::pair<Selector, SourceLocation>, 4> Sels; ExternalSource->ReadReferencedSelectors(Sels); for (unsigned I = 0, N = Sels.size(); I != N; ++I) ReferencedSelectors[Sels[I].first] = Sels[I].second; } // Warning will be issued only when selector table is // generated (which means there is at least one implementation // in the TU). This is to match gcc's behavior. if (ReferencedSelectors.empty() || !Context.AnyObjCImplementation()) return; for (auto &SelectorAndLocation : ReferencedSelectors) { Selector Sel = SelectorAndLocation.first; SourceLocation Loc = SelectorAndLocation.second; if (!LookupImplementedMethodInGlobalPool(Sel)) Diag(Loc, diag::warn_unimplemented_selector) << Sel; } return; } ObjCIvarDecl * Sema::GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const { if (Method->isClassMethod()) return nullptr; const ObjCInterfaceDecl *IDecl = Method->getClassInterface(); if (!IDecl) return nullptr; Method = IDecl->lookupMethod(Method->getSelector(), /*isInstance=*/true, /*shallowCategoryLookup=*/false, /*followSuper=*/false); if (!Method || !Method->isPropertyAccessor()) return nullptr; if ((PDecl = Method->findPropertyDecl())) if (ObjCIvarDecl *IV = PDecl->getPropertyIvarDecl()) { // property backing ivar must belong to property's class // or be a private ivar in class's implementation. // FIXME. fix the const-ness issue. IV = const_cast<ObjCInterfaceDecl *>(IDecl)->lookupInstanceVariable( IV->getIdentifier()); return IV; } return nullptr; } namespace { /// Used by Sema::DiagnoseUnusedBackingIvarInAccessor to check if a property /// accessor references the backing ivar.
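/// (Hypothetical case: a getter '- (NSString *)name { return _other; }'
/// never touches the backing ivar '_name'; this visitor records that no
/// ivar access happened so the caller can warn.)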
class UnusedBackingIvarChecker : public DataRecursiveASTVisitor<UnusedBackingIvarChecker> { public: Sema &S; const ObjCMethodDecl *Method; const ObjCIvarDecl *IvarD; bool AccessedIvar; bool InvokedSelfMethod; UnusedBackingIvarChecker(Sema &S, const ObjCMethodDecl *Method, const ObjCIvarDecl *IvarD) : S(S), Method(Method), IvarD(IvarD), AccessedIvar(false), InvokedSelfMethod(false) { assert(IvarD); } bool VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { if (E->getDecl() == IvarD) { AccessedIvar = true; return false; } return true; } bool VisitObjCMessageExpr(ObjCMessageExpr *E) { if (E->getReceiverKind() == ObjCMessageExpr::Instance && S.isSelfExpr(E->getInstanceReceiver(), Method)) { InvokedSelfMethod = true; } return true; } }; } void Sema::DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD) { #if 0 // HLSL Change if (S->hasUnrecoverableErrorOccurred()) return; for (const auto *CurMethod : ImplD->instance_methods()) { unsigned DIAG = diag::warn_unused_property_backing_ivar; SourceLocation Loc = CurMethod->getLocation(); if (Diags.isIgnored(DIAG, Loc)) continue; const ObjCPropertyDecl *PDecl; const ObjCIvarDecl *IV = GetIvarBackingPropertyAccessor(CurMethod, PDecl); if (!IV) continue; UnusedBackingIvarChecker Checker(*this, CurMethod, IV); Checker.TraverseStmt(CurMethod->getBody()); if (Checker.AccessedIvar) continue; // Do not issue this warning if backing ivar is used somewhere and accessor // implementation makes a self call. This is to prevent false positive in // cases where the ivar is accessed by another method that the accessor // delegates to. if (!IV->isReferenced() || !Checker.InvokedSelfMethod) { Diag(Loc, DIAG) << IV; Diag(PDecl->getLocation(), diag::note_property_declare); } } #endif // HLSL Change }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaObjCProperty.cpp
//===--- SemaObjCProperty.cpp - Semantic Analysis for ObjC @property ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for Objective C @property and // @synthesize declarations. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Lexer.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SmallString.h" using namespace clang; // HLSL Change Starts // No ObjC parse/sema support, so simply skip all of this compilation. // Here are enough stubs to link the current targets. #if 1 Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *isOverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC) { llvm_unreachable("HLSL does not support ObjC constructs"); } ObjCPropertyDecl * Sema::HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind) { llvm_unreachable("HLSL does not support ObjC constructs"); } ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TInfo, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC) { llvm_unreachable("HLSL does not support ObjC constructs"); } Decl *Sema::ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool Synthesize, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *inheritedName, bool OverridingProtocolProperty) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property, ObjCMethodDecl *GetterMethod, SourceLocation Loc) { llvm_unreachable("HLSL does not support ObjC constructs"); } bool Sema::IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties) { 
llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty, ObjCContainerDecl *lexicalDC) { llvm_unreachable("HLSL does not support ObjC constructs"); } void Sema::CheckObjCPropertyAttributes(Decl *PDecl, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass) { llvm_unreachable("HLSL does not support ObjC constructs"); } // HLSL Change Ends #else // HLSL Change Ends //===----------------------------------------------------------------------===// // Grammar actions. //===----------------------------------------------------------------------===// /// getImpliedARCOwnership - Given a set of property attributes and a /// type, infer an expected lifetime. The type's ownership qualification /// is not considered. /// /// Returns OCL_None if the attributes as stated do not imply an ownership. /// Never returns OCL_Autoreleasing. static Qualifiers::ObjCLifetime getImpliedARCOwnership( ObjCPropertyDecl::PropertyAttributeKind attrs, QualType type) { // retain, strong, copy, weak, and unsafe_unretained are only legal // on properties of retainable pointer type. if (attrs & (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong | ObjCPropertyDecl::OBJC_PR_copy)) { return Qualifiers::OCL_Strong; } else if (attrs & ObjCPropertyDecl::OBJC_PR_weak) { return Qualifiers::OCL_Weak; } else if (attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) { return Qualifiers::OCL_ExplicitNone; } // assign can appear on other types, so we have to check the // property type. if (attrs & ObjCPropertyDecl::OBJC_PR_assign && type->isObjCRetainableType()) { return Qualifiers::OCL_ExplicitNone; } return Qualifiers::OCL_None; } /// Check the internal consistency of a property declaration. static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) { if (property->isInvalidDecl()) return; ObjCPropertyDecl::PropertyAttributeKind propertyKind = property->getPropertyAttributes(); Qualifiers::ObjCLifetime propertyLifetime = property->getType().getObjCLifetime(); // Nothing to do if we don't have a lifetime. if (propertyLifetime == Qualifiers::OCL_None) return; Qualifiers::ObjCLifetime expectedLifetime = getImpliedARCOwnership(propertyKind, property->getType()); if (!expectedLifetime) { // We have a lifetime qualifier but no dominating property // attribute. That's okay, but restore reasonable invariants by // setting the property attribute according to the lifetime // qualifier. 
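// Sketch of the mapping applied below (hypothetical declaration): a property
// such as
//   @property (readonly) __strong id delegate;
// carries an explicit OCL_Strong lifetime but no ownership attribute, so
// OBJC_PR_strong is synthesized here to keep attributes and lifetime in sync.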
ObjCPropertyDecl::PropertyAttributeKind attr; if (propertyLifetime == Qualifiers::OCL_Strong) { attr = ObjCPropertyDecl::OBJC_PR_strong; } else if (propertyLifetime == Qualifiers::OCL_Weak) { attr = ObjCPropertyDecl::OBJC_PR_weak; } else { assert(propertyLifetime == Qualifiers::OCL_ExplicitNone); attr = ObjCPropertyDecl::OBJC_PR_unsafe_unretained; } property->setPropertyAttributes(attr); return; } if (propertyLifetime == expectedLifetime) return; property->setInvalidDecl(); S.Diag(property->getLocation(), diag::err_arc_inconsistent_property_ownership) << property->getDeclName() << expectedLifetime << propertyLifetime; } /// \brief Check this Objective-C property against a property declared in the /// given protocol. static void CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop, ObjCProtocolDecl *Proto, llvm::SmallPtrSetImpl<ObjCProtocolDecl *> &Known) { // Have we seen this protocol before? if (!Known.insert(Proto).second) return; // Look for a property with the same name. DeclContext::lookup_result R = Proto->lookup(Prop->getDeclName()); for (unsigned I = 0, N = R.size(); I != N; ++I) { if (ObjCPropertyDecl *ProtoProp = dyn_cast<ObjCPropertyDecl>(R[I])) { S.DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(), true); return; } } // Check this property against any protocols we inherit. for (auto *P : Proto->protocols()) CheckPropertyAgainstProtocol(S, Prop, P, Known); } Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, bool *isOverridingProperty, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC) { unsigned Attributes = ODS.getPropertyAttributes(); FD.D.setObjCWeakProperty((Attributes & ObjCDeclSpec::DQ_PR_weak) != 0); TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S); QualType T = TSI->getType(); Attributes |= deduceWeakPropertyFromType(T); bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) || // default is readwrite! !(Attributes & ObjCDeclSpec::DQ_PR_readonly)); // property is defaulted to 'assign' if it is readwrite and is // not retain or copy bool isAssign = ((Attributes & ObjCDeclSpec::DQ_PR_assign) || (isReadWrite && !(Attributes & ObjCDeclSpec::DQ_PR_retain) && !(Attributes & ObjCDeclSpec::DQ_PR_strong) && !(Attributes & ObjCDeclSpec::DQ_PR_copy) && !(Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) && !(Attributes & ObjCDeclSpec::DQ_PR_weak))); // Proceed with constructing the ObjCPropertyDecls. ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext); ObjCPropertyDecl *Res = nullptr; if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl)) { if (CDecl->IsClassExtension()) { Res = HandlePropertyInClassExtension(S, AtLoc, LParenLoc, FD, GetterSel, SetterSel, isAssign, isReadWrite, Attributes, ODS.getPropertyAttributes(), isOverridingProperty, T, TSI, MethodImplKind); if (!Res) return nullptr; } } if (!Res) { Res = CreatePropertyDecl(S, ClassDecl, AtLoc, LParenLoc, FD, GetterSel, SetterSel, isAssign, isReadWrite, Attributes, ODS.getPropertyAttributes(), T, TSI, MethodImplKind); if (lexicalDC) Res->setLexicalDeclContext(lexicalDC); } // Validate the attributes on the @property. 
CheckObjCPropertyAttributes(Res, AtLoc, Attributes, (isa<ObjCInterfaceDecl>(ClassDecl) || isa<ObjCProtocolDecl>(ClassDecl))); if (getLangOpts().ObjCAutoRefCount) checkARCPropertyDecl(*this, Res); llvm::SmallPtrSet<ObjCProtocolDecl *, 16> KnownProtos; if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) { // For a class, compare the property against a property in our superclass. bool FoundInSuper = false; ObjCInterfaceDecl *CurrentInterfaceDecl = IFace; while (ObjCInterfaceDecl *Super = CurrentInterfaceDecl->getSuperClass()) { DeclContext::lookup_result R = Super->lookup(Res->getDeclName()); for (unsigned I = 0, N = R.size(); I != N; ++I) { if (ObjCPropertyDecl *SuperProp = dyn_cast<ObjCPropertyDecl>(R[I])) { DiagnosePropertyMismatch(Res, SuperProp, Super->getIdentifier(), false); FoundInSuper = true; break; } } if (FoundInSuper) break; else CurrentInterfaceDecl = Super; } if (FoundInSuper) { // Also compare the property against a property in our protocols. for (auto *P : CurrentInterfaceDecl->protocols()) { CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos); } } else { // Slower path: look in all protocols we referenced. for (auto *P : IFace->all_referenced_protocols()) { CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos); } } } else if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl)) { for (auto *P : Cat->protocols()) CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos); } else { ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(ClassDecl); for (auto *P : Proto->protocols()) CheckPropertyAgainstProtocol(*this, Res, P, KnownProtos); } ActOnDocumentableDecl(Res); return Res; } static ObjCPropertyDecl::PropertyAttributeKind makePropertyAttributesAsWritten(unsigned Attributes) { unsigned attributesAsWritten = 0; if (Attributes & ObjCDeclSpec::DQ_PR_readonly) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly; if (Attributes & ObjCDeclSpec::DQ_PR_readwrite) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite; if (Attributes & ObjCDeclSpec::DQ_PR_getter) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter; if (Attributes & ObjCDeclSpec::DQ_PR_setter) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter; if (Attributes & ObjCDeclSpec::DQ_PR_assign) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign; if (Attributes & ObjCDeclSpec::DQ_PR_retain) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain; if (Attributes & ObjCDeclSpec::DQ_PR_strong) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong; if (Attributes & ObjCDeclSpec::DQ_PR_weak) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak; if (Attributes & ObjCDeclSpec::DQ_PR_copy) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy; if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained; if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic; if (Attributes & ObjCDeclSpec::DQ_PR_atomic) attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic; return (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten; } static bool LocPropertyAttribute( ASTContext &Context, const char *attrName, SourceLocation LParenLoc, SourceLocation &Loc) { if (LParenLoc.isMacroID()) return false; SourceManager &SM = Context.getSourceManager(); std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(LParenLoc); // Try to load the file buffer. 
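// What follows re-lexes the raw attribute list. E.g., for the hypothetical
//   @property (readonly) int count;
// with attrName == "readonly", the loop below reports the source location of
// the 'readonly' token inside the parentheses (used for fix-it ranges).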
bool invalidTemp = false; StringRef file = SM.getBufferData(locInfo.first, &invalidTemp); if (invalidTemp) return false; const char *tokenBegin = file.data() + locInfo.second; // Lex from the start of the given location. Lexer lexer(SM.getLocForStartOfFile(locInfo.first), Context.getLangOpts(), file.begin(), tokenBegin, file.end()); Token Tok; do { lexer.LexFromRawLexer(Tok); if (Tok.is(tok::raw_identifier) && Tok.getRawIdentifier() == attrName) { Loc = Tok.getLocation(); return true; } } while (Tok.isNot(tok::r_paren)); return false; } static unsigned getOwnershipRule(unsigned attr) { return attr & (ObjCPropertyDecl::OBJC_PR_assign | ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_copy | ObjCPropertyDecl::OBJC_PR_weak | ObjCPropertyDecl::OBJC_PR_strong | ObjCPropertyDecl::OBJC_PR_unsafe_unretained); } ObjCPropertyDecl * Sema::HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind) { ObjCCategoryDecl *CDecl = cast<ObjCCategoryDecl>(CurContext); // Diagnose if this property is already in continuation class. DeclContext *DC = CurContext; IdentifierInfo *PropertyId = FD.D.getIdentifier(); ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface(); if (CCPrimary) { // Check for duplicate declaration of this property in current and // other class extensions. for (const auto *Ext : CCPrimary->known_extensions()) { if (ObjCPropertyDecl *prevDecl = ObjCPropertyDecl::findPropertyDecl(Ext, PropertyId)) { Diag(AtLoc, diag::err_duplicate_property); Diag(prevDecl->getLocation(), diag::note_property_declare); return nullptr; } } } // Create a new ObjCPropertyDecl with the DeclContext being // the class extension. // FIXME. We should really be using CreatePropertyDecl for this. ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC, FD.D.getIdentifierLoc(), PropertyId, AtLoc, LParenLoc, T, TSI); PDecl->setPropertyAttributesAsWritten( makePropertyAttributesAsWritten(AttributesAsWritten)); if (Attributes & ObjCDeclSpec::DQ_PR_readonly) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly); if (Attributes & ObjCDeclSpec::DQ_PR_readwrite) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite); if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic); if (Attributes & ObjCDeclSpec::DQ_PR_atomic) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic); if (Attributes & ObjCDeclSpec::DQ_PR_nullability) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nullability); if (Attributes & ObjCDeclSpec::DQ_PR_null_resettable) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_null_resettable); // Set setter/getter selector name. Needed later. PDecl->setGetterName(GetterSel); PDecl->setSetterName(SetterSel); ProcessDeclAttributes(S, PDecl, FD.D); DC->addDecl(PDecl); // We need to look in the @interface to see if the @property was // already declared. if (!CCPrimary) { Diag(CDecl->getLocation(), diag::err_continuation_class); *isOverridingProperty = true; return nullptr; } // Find the property in continuation class's primary class only. ObjCPropertyDecl *PIDecl = CCPrimary->FindPropertyVisibleInPrimaryClass(PropertyId); if (!PIDecl) { // No matching property found in the primary class. 
Just fall thru // and add property to continuation class's primary class. ObjCPropertyDecl *PrimaryPDecl = CreatePropertyDecl(S, CCPrimary, AtLoc, LParenLoc, FD, GetterSel, SetterSel, isAssign, isReadWrite, Attributes,AttributesAsWritten, T, TSI, MethodImplKind, DC); // A case of continuation class adding a new property in the class. This // is not what it was meant for. However, gcc supports it and so should we. // Make sure setter/getters are declared here. ProcessPropertyDecl(PrimaryPDecl, CCPrimary, /* redeclaredProperty = */ nullptr, /* lexicalDC = */ CDecl); PDecl->setGetterMethodDecl(PrimaryPDecl->getGetterMethodDecl()); PDecl->setSetterMethodDecl(PrimaryPDecl->getSetterMethodDecl()); if (ASTMutationListener *L = Context.getASTMutationListener()) L->AddedObjCPropertyInClassExtension(PrimaryPDecl, /*OrigProp=*/nullptr, CDecl); return PrimaryPDecl; } if (!Context.hasSameType(PIDecl->getType(), PDecl->getType())) { bool IncompatibleObjC = false; QualType ConvertedType; // Relax the strict type matching for property type in continuation class. // Allow property object type of continuation class to be different as long // as it narrows the object type in its primary class property. Note that // this conversion is safe only because the wider type is for a 'readonly' // property in primary class and 'narrowed' type for a 'readwrite' property // in continuation class. QualType PrimaryClassPropertyT = Context.getCanonicalType(PIDecl->getType()); QualType ClassExtPropertyT = Context.getCanonicalType(PDecl->getType()); if (!isa<ObjCObjectPointerType>(PrimaryClassPropertyT) || !isa<ObjCObjectPointerType>(ClassExtPropertyT) || (!isObjCPointerConversion(ClassExtPropertyT, PrimaryClassPropertyT, ConvertedType, IncompatibleObjC)) || IncompatibleObjC) { Diag(AtLoc, diag::err_type_mismatch_continuation_class) << PDecl->getType(); Diag(PIDecl->getLocation(), diag::note_property_declare); return nullptr; } } // The property 'PIDecl's readonly attribute will be over-ridden // with continuation class's readwrite property attribute! unsigned PIkind = PIDecl->getPropertyAttributesAsWritten(); if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) { PIkind &= ~ObjCPropertyDecl::OBJC_PR_readonly; PIkind |= ObjCPropertyDecl::OBJC_PR_readwrite; PIkind |= deduceWeakPropertyFromType(PIDecl->getType()); unsigned ClassExtensionMemoryModel = getOwnershipRule(Attributes); unsigned PrimaryClassMemoryModel = getOwnershipRule(PIkind); if (PrimaryClassMemoryModel && ClassExtensionMemoryModel && (PrimaryClassMemoryModel != ClassExtensionMemoryModel)) { Diag(AtLoc, diag::warn_property_attr_mismatch); Diag(PIDecl->getLocation(), diag::note_property_declare); } else if (getLangOpts().ObjCAutoRefCount) { QualType PrimaryPropertyQT = Context.getCanonicalType(PIDecl->getType()).getUnqualifiedType(); if (isa<ObjCObjectPointerType>(PrimaryPropertyQT)) { bool PropertyIsWeak = ((PIkind & ObjCPropertyDecl::OBJC_PR_weak) != 0); Qualifiers::ObjCLifetime PrimaryPropertyLifeTime = PrimaryPropertyQT.getObjCLifetime(); if (PrimaryPropertyLifeTime == Qualifiers::OCL_None && (Attributes & ObjCDeclSpec::DQ_PR_weak) && !PropertyIsWeak) { Diag(AtLoc, diag::warn_property_implicitly_mismatched); Diag(PIDecl->getLocation(), diag::note_property_declare); } } } DeclContext *DC = cast<DeclContext>(CCPrimary); if (!ObjCPropertyDecl::findPropertyDecl(DC, PIDecl->getDeclName().getAsIdentifierInfo())) { // In mrr mode, 'readwrite' property must have an explicit // memory attribute. If none specified, select the default (assign). 
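// Hypothetical example of the MRR defaulting below: a primary-class
//   @property (readonly) id value;
// redeclared in a class extension as
//   @property (readwrite) id value;
// names no ownership attribute, so OBJC_PR_assign is selected.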
if (!getLangOpts().ObjCAutoRefCount) { if (!(PIkind & (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong | ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_unsafe_unretained | ObjCDeclSpec::DQ_PR_weak))) PIkind |= ObjCPropertyDecl::OBJC_PR_assign; } // Protocol is not in the primary class. Must build one for it. ObjCDeclSpec ProtocolPropertyODS; // FIXME. Assuming that ObjCDeclSpec::ObjCPropertyAttributeKind // and ObjCPropertyDecl::PropertyAttributeKind have identical // values. Should consolidate both into one enum type. ProtocolPropertyODS. setPropertyAttributes((ObjCDeclSpec::ObjCPropertyAttributeKind) PIkind); // Must re-establish the context from class extension to primary // class context. ContextRAII SavedContext(*this, CCPrimary); Decl *ProtocolPtrTy = ActOnProperty(S, AtLoc, LParenLoc, FD, ProtocolPropertyODS, PIDecl->getGetterName(), PIDecl->getSetterName(), isOverridingProperty, MethodImplKind, /* lexicalDC = */ CDecl); PIDecl = cast<ObjCPropertyDecl>(ProtocolPtrTy); } PIDecl->makeitReadWriteAttribute(); if (Attributes & ObjCDeclSpec::DQ_PR_retain) PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain); if (Attributes & ObjCDeclSpec::DQ_PR_strong) PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong); if (Attributes & ObjCDeclSpec::DQ_PR_copy) PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy); PIDecl->setSetterName(SetterSel); } else { // Tailor the diagnostics for the common case where a readwrite // property is declared both in the @interface and the continuation. // This is a common error where the user often intended the original // declaration to be readonly. unsigned diag = (Attributes & ObjCDeclSpec::DQ_PR_readwrite) && (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite) ? diag::err_use_continuation_class_redeclaration_readwrite : diag::err_use_continuation_class; Diag(AtLoc, diag) << CCPrimary->getDeclName(); Diag(PIDecl->getLocation(), diag::note_property_declare); return nullptr; } *isOverridingProperty = true; // Make sure setter decl is synthesized, and added to primary class's list. 
ProcessPropertyDecl(PIDecl, CCPrimary, PDecl, CDecl); PDecl->setGetterMethodDecl(PIDecl->getGetterMethodDecl()); PDecl->setSetterMethodDecl(PIDecl->getSetterMethodDecl()); if (ASTMutationListener *L = Context.getASTMutationListener()) L->AddedObjCPropertyInClassExtension(PDecl, PIDecl, CDecl); return PDecl; } ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TInfo, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC){ IdentifierInfo *PropertyId = FD.D.getIdentifier(); // Issue a warning if property is 'assign' as default and its object, which is // gc'able conforms to NSCopying protocol if (getLangOpts().getGC() != LangOptions::NonGC && isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign)) if (const ObjCObjectPointerType *ObjPtrTy = T->getAs<ObjCObjectPointerType>()) { ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface(); if (IDecl) if (ObjCProtocolDecl* PNSCopying = LookupProtocol(&Context.Idents.get("NSCopying"), AtLoc)) if (IDecl->ClassImplementsProtocol(PNSCopying, true)) Diag(AtLoc, diag::warn_implements_nscopying) << PropertyId; } if (T->isObjCObjectType()) { SourceLocation StarLoc = TInfo->getTypeLoc().getLocEnd(); StarLoc = getLocForEndOfToken(StarLoc); Diag(FD.D.getIdentifierLoc(), diag::err_statically_allocated_object) << FixItHint::CreateInsertion(StarLoc, "*"); T = Context.getObjCObjectPointerType(T); SourceLocation TLoc = TInfo->getTypeLoc().getLocStart(); TInfo = Context.getTrivialTypeSourceInfo(T, TLoc); } DeclContext *DC = cast<DeclContext>(CDecl); ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC, FD.D.getIdentifierLoc(), PropertyId, AtLoc, LParenLoc, T, TInfo); if (ObjCPropertyDecl *prevDecl = ObjCPropertyDecl::findPropertyDecl(DC, PropertyId)) { Diag(PDecl->getLocation(), diag::err_duplicate_property); Diag(prevDecl->getLocation(), diag::note_property_declare); PDecl->setInvalidDecl(); } else { DC->addDecl(PDecl); if (lexicalDC) PDecl->setLexicalDeclContext(lexicalDC); } if (T->isArrayType() || T->isFunctionType()) { Diag(AtLoc, diag::err_property_type) << T; PDecl->setInvalidDecl(); } ProcessDeclAttributes(S, PDecl, FD.D); // Regardless of setter/getter attribute, we save the default getter/setter // selector names in anticipation of declaration of setter/getter methods. 
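// (For example, a hypothetical '@property int count;' records the default
// selectors 'count' and 'setCount:' here, before any explicit getter= or
// setter= attribute is taken into account.)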
PDecl->setGetterName(GetterSel); PDecl->setSetterName(SetterSel); PDecl->setPropertyAttributesAsWritten( makePropertyAttributesAsWritten(AttributesAsWritten)); if (Attributes & ObjCDeclSpec::DQ_PR_readonly) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly); if (Attributes & ObjCDeclSpec::DQ_PR_getter) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_getter); if (Attributes & ObjCDeclSpec::DQ_PR_setter) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_setter); if (isReadWrite) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite); if (Attributes & ObjCDeclSpec::DQ_PR_retain) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain); if (Attributes & ObjCDeclSpec::DQ_PR_strong) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong); if (Attributes & ObjCDeclSpec::DQ_PR_weak) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak); if (Attributes & ObjCDeclSpec::DQ_PR_copy) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy); if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained); if (isAssign) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign); // In the semantic attributes, one of nonatomic or atomic is always set. if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic); else PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic); // 'unsafe_unretained' is an alias for 'assign'. if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign); if (isAssign) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained); if (MethodImplKind == tok::objc_required) PDecl->setPropertyImplementation(ObjCPropertyDecl::Required); else if (MethodImplKind == tok::objc_optional) PDecl->setPropertyImplementation(ObjCPropertyDecl::Optional); if (Attributes & ObjCDeclSpec::DQ_PR_nullability) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nullability); if (Attributes & ObjCDeclSpec::DQ_PR_null_resettable) PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_null_resettable); return PDecl; } static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc, ObjCPropertyDecl *property, ObjCIvarDecl *ivar) { if (property->isInvalidDecl() || ivar->isInvalidDecl()) return; QualType ivarType = ivar->getType(); Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime(); // The lifetime implied by the property's attributes. Qualifiers::ObjCLifetime propertyLifetime = getImpliedARCOwnership(property->getPropertyAttributes(), property->getType()); // We're fine if they match. if (propertyLifetime == ivarLifetime) return; // These aren't valid lifetimes for object ivars; don't diagnose twice. if (ivarLifetime == Qualifiers::OCL_None || ivarLifetime == Qualifiers::OCL_Autoreleasing) return; // If the ivar is private, and it's implicitly __unsafe_unretained // because of its type, then pretend it was actually implicitly // __strong. This is only sound because we're processing the // property implementation before parsing any method bodies.
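// Example of the mismatch diagnosed below (hypothetical): synthesizing
//   @property (strong) id object;
// onto an ivar declared '__weak id _object;' is rejected with
// err_arc_strong_property_ownership.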
if (ivarLifetime == Qualifiers::OCL_ExplicitNone && propertyLifetime == Qualifiers::OCL_Strong && ivar->getAccessControl() == ObjCIvarDecl::Private) { SplitQualType split = ivarType.split(); if (split.Quals.hasObjCLifetime()) { assert(ivarType->isObjCARCImplicitlyUnretainedType()); split.Quals.setObjCLifetime(Qualifiers::OCL_Strong); ivarType = S.Context.getQualifiedType(split); ivar->setType(ivarType); return; } } switch (propertyLifetime) { case Qualifiers::OCL_Strong: S.Diag(ivar->getLocation(), diag::err_arc_strong_property_ownership) << property->getDeclName() << ivar->getDeclName() << ivarLifetime; break; case Qualifiers::OCL_Weak: S.Diag(ivar->getLocation(), diag::error_weak_property) << property->getDeclName() << ivar->getDeclName(); break; case Qualifiers::OCL_ExplicitNone: S.Diag(ivar->getLocation(), diag::err_arc_assign_property_ownership) << property->getDeclName() << ivar->getDeclName() << ((property->getPropertyAttributesAsWritten() & ObjCPropertyDecl::OBJC_PR_assign) != 0); break; case Qualifiers::OCL_Autoreleasing: llvm_unreachable("properties cannot be autoreleasing"); case Qualifiers::OCL_None: // Any other property should be ignored. return; } S.Diag(property->getLocation(), diag::note_property_declare); if (propertyImplLoc.isValid()) S.Diag(propertyImplLoc, diag::note_property_synthesize); } /// setImpliedPropertyAttributeForReadOnlyProperty - /// This routine evaluates lifetime attributes for a 'readonly' /// property with no known lifetime of its own, using the backing /// 'ivar's attribute, if any. If there is no backing 'ivar', the property's /// lifetime is assumed 'strong'. static void setImpliedPropertyAttributeForReadOnlyProperty( ObjCPropertyDecl *property, ObjCIvarDecl *ivar) { Qualifiers::ObjCLifetime propertyLifetime = getImpliedARCOwnership(property->getPropertyAttributes(), property->getType()); if (propertyLifetime != Qualifiers::OCL_None) return; if (!ivar) { // if no backing ivar, make property 'strong'. property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong); return; } // property assumes ownership of the backing ivar. QualType ivarType = ivar->getType(); Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime(); if (ivarLifetime == Qualifiers::OCL_Strong) property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong); else if (ivarLifetime == Qualifiers::OCL_Weak) property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak); return; } /// DiagnosePropertyMismatchDeclInProtocols - diagnose properties declared /// in inherited protocols with mismatched types, since any of them can /// be a candidate for synthesis.
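/// (Hypothetical case: protocols P1 and P2 both declare a property 'value',
/// one as 'NSString *' and one as 'NSNumber *'; a class adopting both gets
/// warn_protocol_property_mismatch plus a note for each conflicting decl.)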
static void DiagnosePropertyMismatchDeclInProtocols(Sema &S, SourceLocation AtLoc, ObjCInterfaceDecl *ClassDecl, ObjCPropertyDecl *Property) { ObjCInterfaceDecl::ProtocolPropertyMap PropMap; for (const auto *PI : ClassDecl->all_referenced_protocols()) { if (const ObjCProtocolDecl *PDecl = PI->getDefinition()) PDecl->collectInheritedProtocolProperties(Property, PropMap); } if (ObjCInterfaceDecl *SDecl = ClassDecl->getSuperClass()) while (SDecl) { for (const auto *PI : SDecl->all_referenced_protocols()) { if (const ObjCProtocolDecl *PDecl = PI->getDefinition()) PDecl->collectInheritedProtocolProperties(Property, PropMap); } SDecl = SDecl->getSuperClass(); } if (PropMap.empty()) return; QualType RHSType = S.Context.getCanonicalType(Property->getType()); bool FirsTime = true; for (ObjCInterfaceDecl::ProtocolPropertyMap::iterator I = PropMap.begin(), E = PropMap.end(); I != E; I++) { ObjCPropertyDecl *Prop = I->second; QualType LHSType = S.Context.getCanonicalType(Prop->getType()); if (!S.Context.propertyTypesAreCompatible(LHSType, RHSType)) { bool IncompatibleObjC = false; QualType ConvertedType; if (!S.isObjCPointerConversion(RHSType, LHSType, ConvertedType, IncompatibleObjC) || IncompatibleObjC) { if (FirsTime) { S.Diag(Property->getLocation(), diag::warn_protocol_property_mismatch) << Property->getType(); FirsTime = false; } S.Diag(Prop->getLocation(), diag::note_protocol_property_declare) << Prop->getType(); } } } if (!FirsTime && AtLoc.isValid()) S.Diag(AtLoc, diag::note_property_synthesize); } /// ActOnPropertyImplDecl - This routine performs semantic checks and /// builds the AST node for a property implementation declaration; declared /// as \@synthesize or \@dynamic. /// Decl *Sema::ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool Synthesize, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc) { ObjCContainerDecl *ClassImpDecl = dyn_cast<ObjCContainerDecl>(CurContext); // Make sure we have a context for the property implementation declaration. if (!ClassImpDecl) { Diag(AtLoc, diag::error_missing_property_context); return nullptr; } if (PropertyIvarLoc.isInvalid()) PropertyIvarLoc = PropertyLoc; SourceLocation PropertyDiagLoc = PropertyLoc; if (PropertyDiagLoc.isInvalid()) PropertyDiagLoc = ClassImpDecl->getLocStart(); ObjCPropertyDecl *property = nullptr; ObjCInterfaceDecl *IDecl = nullptr; // Find the class or category class where this property must have // a declaration. ObjCImplementationDecl *IC = nullptr; ObjCCategoryImplDecl *CatImplClass = nullptr; if ((IC = dyn_cast<ObjCImplementationDecl>(ClassImpDecl))) { IDecl = IC->getClassInterface(); // We always synthesize an interface for an implementation // without an interface decl. So, IDecl is always non-zero. 
assert(IDecl && "ActOnPropertyImplDecl - @implementation without @interface"); // Look for this property declaration in the @implementation's @interface property = IDecl->FindPropertyDeclaration(PropertyId); if (!property) { Diag(PropertyLoc, diag::error_bad_property_decl) << IDecl->getDeclName(); return nullptr; } unsigned PIkind = property->getPropertyAttributesAsWritten(); if ((PIkind & (ObjCPropertyDecl::OBJC_PR_atomic | ObjCPropertyDecl::OBJC_PR_nonatomic) ) == 0) { if (AtLoc.isValid()) Diag(AtLoc, diag::warn_implicit_atomic_property); else Diag(IC->getLocation(), diag::warn_auto_implicit_atomic_property); Diag(property->getLocation(), diag::note_property_declare); } if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(property->getDeclContext())) { if (!CD->IsClassExtension()) { Diag(PropertyLoc, diag::error_category_property) << CD->getDeclName(); Diag(property->getLocation(), diag::note_property_declare); return nullptr; } } if (Synthesize && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly) && property->hasAttr<IBOutletAttr>() && !AtLoc.isValid()) { bool ReadWriteProperty = false; // Search into the class extensions and see if the 'readonly' property is // redeclared 'readwrite'; if so, no warning is to be issued. for (auto *Ext : IDecl->known_extensions()) { DeclContext::lookup_result R = Ext->lookup(property->getDeclName()); if (!R.empty()) if (ObjCPropertyDecl *ExtProp = dyn_cast<ObjCPropertyDecl>(R[0])) { PIkind = ExtProp->getPropertyAttributesAsWritten(); if (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite) { ReadWriteProperty = true; break; } } } if (!ReadWriteProperty) { Diag(property->getLocation(), diag::warn_auto_readonly_iboutlet_property) << property; SourceLocation readonlyLoc; if (LocPropertyAttribute(Context, "readonly", property->getLParenLoc(), readonlyLoc)) { SourceLocation endLoc = readonlyLoc.getLocWithOffset(strlen("readonly")-1); SourceRange ReadonlySourceRange(readonlyLoc, endLoc); Diag(property->getLocation(), diag::note_auto_readonly_iboutlet_fixup_suggest) << FixItHint::CreateReplacement(ReadonlySourceRange, "readwrite"); } } } if (Synthesize && isa<ObjCProtocolDecl>(property->getDeclContext())) DiagnosePropertyMismatchDeclInProtocols(*this, AtLoc, IDecl, property); } else if ((CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassImpDecl))) { if (Synthesize) { Diag(AtLoc, diag::error_synthesize_category_decl); return nullptr; } IDecl = CatImplClass->getClassInterface(); if (!IDecl) { Diag(AtLoc, diag::error_missing_property_interface); return nullptr; } ObjCCategoryDecl *Category = IDecl->FindCategoryDeclaration(CatImplClass->getIdentifier()); // If the category for this implementation is not found, it is an error which // has already been reported earlier.
if (!Category) return nullptr; // Look for this property declaration in @implementation's category property = Category->FindPropertyDeclaration(PropertyId); if (!property) { Diag(PropertyLoc, diag::error_bad_category_property_decl) << Category->getDeclName(); return nullptr; } } else { Diag(AtLoc, diag::error_bad_property_context); return nullptr; } ObjCIvarDecl *Ivar = nullptr; bool CompleteTypeErr = false; bool compat = true; // Check that we have a valid, previously declared ivar for @synthesize if (Synthesize) { // @synthesize if (!PropertyIvar) PropertyIvar = PropertyId; // Check that this is a previously declared 'ivar' in 'IDecl' interface ObjCInterfaceDecl *ClassDeclared; Ivar = IDecl->lookupInstanceVariable(PropertyIvar, ClassDeclared); QualType PropType = property->getType(); QualType PropertyIvarType = PropType.getNonReferenceType(); if (RequireCompleteType(PropertyDiagLoc, PropertyIvarType, diag::err_incomplete_synthesized_property, property->getDeclName())) { Diag(property->getLocation(), diag::note_property_declare); CompleteTypeErr = true; } if (getLangOpts().ObjCAutoRefCount && (property->getPropertyAttributesAsWritten() & ObjCPropertyDecl::OBJC_PR_readonly) && PropertyIvarType->isObjCRetainableType()) { setImpliedPropertyAttributeForReadOnlyProperty(property, Ivar); } ObjCPropertyDecl::PropertyAttributeKind kind = property->getPropertyAttributes(); // Add GC __weak to the ivar type if the property is weak. if ((kind & ObjCPropertyDecl::OBJC_PR_weak) && getLangOpts().getGC() != LangOptions::NonGC) { assert(!getLangOpts().ObjCAutoRefCount); if (PropertyIvarType.isObjCGCStrong()) { Diag(PropertyDiagLoc, diag::err_gc_weak_property_strong_type); Diag(property->getLocation(), diag::note_property_declare); } else { PropertyIvarType = Context.getObjCGCQualType(PropertyIvarType, Qualifiers::Weak); } } if (AtLoc.isInvalid()) { // Check when default synthesizing a property that there is // an ivar matching property name and issue warning; since this // is the most common case of not using an ivar used for backing // property in non-default synthesis case. ObjCInterfaceDecl *ClassDeclared=nullptr; ObjCIvarDecl *originalIvar = IDecl->lookupInstanceVariable(property->getIdentifier(), ClassDeclared); if (originalIvar) { Diag(PropertyDiagLoc, diag::warn_autosynthesis_property_ivar_match) << PropertyId << (Ivar == nullptr) << PropertyIvar << originalIvar->getIdentifier(); Diag(property->getLocation(), diag::note_property_declare); Diag(originalIvar->getLocation(), diag::note_ivar_decl); } } if (!Ivar) { // In ARC, give the ivar a lifetime qualifier based on the // property attributes. if (getLangOpts().ObjCAutoRefCount && !PropertyIvarType.getObjCLifetime() && PropertyIvarType->isObjCRetainableType()) { // It's an error if we have to do this and the user didn't // explicitly write an ownership attribute on the property. 
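// Hypothetical trigger for the error below: under ARC,
//   @property id thing;
// (readwrite by default, no ownership attribute written) would default to
// 'assign' on an object type, which is diagnosed instead of accepted.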
if (!property->hasWrittenStorageAttribute() && !(kind & ObjCPropertyDecl::OBJC_PR_strong)) { Diag(PropertyDiagLoc, diag::err_arc_objc_property_default_assign_on_object); Diag(property->getLocation(), diag::note_property_declare); } else { Qualifiers::ObjCLifetime lifetime = getImpliedARCOwnership(kind, PropertyIvarType); assert(lifetime && "no lifetime for property?"); if (lifetime == Qualifiers::OCL_Weak) { bool err = false; if (const ObjCObjectPointerType *ObjT = PropertyIvarType->getAs<ObjCObjectPointerType>()) { const ObjCInterfaceDecl *ObjI = ObjT->getInterfaceDecl(); if (ObjI && ObjI->isArcWeakrefUnavailable()) { Diag(property->getLocation(), diag::err_arc_weak_unavailable_property) << PropertyIvarType; Diag(ClassImpDecl->getLocation(), diag::note_implemented_by_class) << ClassImpDecl->getName(); err = true; } } if (!err && !getLangOpts().ObjCARCWeak) { Diag(PropertyDiagLoc, diag::err_arc_weak_no_runtime); Diag(property->getLocation(), diag::note_property_declare); } } Qualifiers qs; qs.addObjCLifetime(lifetime); PropertyIvarType = Context.getQualifiedType(PropertyIvarType, qs); } } if (kind & ObjCPropertyDecl::OBJC_PR_weak && !getLangOpts().ObjCAutoRefCount && getLangOpts().getGC() == LangOptions::NonGC) { Diag(PropertyDiagLoc, diag::error_synthesize_weak_non_arc_or_gc); Diag(property->getLocation(), diag::note_property_declare); } Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl, PropertyIvarLoc, PropertyIvarLoc, PropertyIvar, PropertyIvarType, /*Dinfo=*/nullptr, ObjCIvarDecl::Private, (Expr *)nullptr, true); if (RequireNonAbstractType(PropertyIvarLoc, PropertyIvarType, diag::err_abstract_type_in_decl, AbstractSynthesizedIvarType)) { Diag(property->getLocation(), diag::note_property_declare); Ivar->setInvalidDecl(); } else if (CompleteTypeErr) Ivar->setInvalidDecl(); ClassImpDecl->addDecl(Ivar); IDecl->makeDeclVisibleInContext(Ivar); if (getLangOpts().ObjCRuntime.isFragile()) Diag(PropertyDiagLoc, diag::error_missing_property_ivar_decl) << PropertyId; // Note! I deliberately want it to fall thru so we have a // property implementation and avoid future warnings. } else if (getLangOpts().ObjCRuntime.isNonFragile() && !declaresSameEntity(ClassDeclared, IDecl)) { Diag(PropertyDiagLoc, diag::error_ivar_in_superclass_use) << property->getDeclName() << Ivar->getDeclName() << ClassDeclared->getDeclName(); Diag(Ivar->getLocation(), diag::note_previous_access_declaration) << Ivar << Ivar->getName(); // Note! I deliberately want it to fall thru so more errors are caught. } property->setPropertyIvarDecl(Ivar); QualType IvarType = Context.getCanonicalType(Ivar->getType()); // Check that type of property and its ivar are type compatible. if (!Context.hasSameType(PropertyIvarType, IvarType)) { if (isa<ObjCObjectPointerType>(PropertyIvarType) && isa<ObjCObjectPointerType>(IvarType)) compat = Context.canAssignObjCInterfaces( PropertyIvarType->getAs<ObjCObjectPointerType>(), IvarType->getAs<ObjCObjectPointerType>()); else { compat = (CheckAssignmentConstraints(PropertyIvarLoc, PropertyIvarType, IvarType) == Compatible); } if (!compat) { Diag(PropertyDiagLoc, diag::error_property_ivar_type) << property->getDeclName() << PropType << Ivar->getDeclName() << IvarType; Diag(Ivar->getLocation(), diag::note_ivar_decl); // Note! I deliberately want it to fall thru so we have a // property implementation and avoid future warnings. } else { // FIXME! Rules for properties are somewhat different than those // for assignments.
Use a new routine to consolidate all cases; // specifically for property redeclarations as well as for ivars. QualType lhsType =Context.getCanonicalType(PropertyIvarType).getUnqualifiedType(); QualType rhsType =Context.getCanonicalType(IvarType).getUnqualifiedType(); if (lhsType != rhsType && lhsType->isArithmeticType()) { Diag(PropertyDiagLoc, diag::error_property_ivar_type) << property->getDeclName() << PropType << Ivar->getDeclName() << IvarType; Diag(Ivar->getLocation(), diag::note_ivar_decl); // Fall thru - see previous comment } } // __weak is explicit. So it works on Canonical type. if ((PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() && getLangOpts().getGC() != LangOptions::NonGC)) { Diag(PropertyDiagLoc, diag::error_weak_property) << property->getDeclName() << Ivar->getDeclName(); Diag(Ivar->getLocation(), diag::note_ivar_decl); // Fall thru - see previous comment } // Fall thru - see previous comment if ((property->getType()->isObjCObjectPointerType() || PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() && getLangOpts().getGC() != LangOptions::NonGC) { Diag(PropertyDiagLoc, diag::error_strong_property) << property->getDeclName() << Ivar->getDeclName(); // Fall thru - see previous comment } } if (getLangOpts().ObjCAutoRefCount) checkARCPropertyImpl(*this, PropertyLoc, property, Ivar); } else if (PropertyIvar) // @dynamic Diag(PropertyDiagLoc, diag::error_dynamic_property_ivar_decl); assert (property && "ActOnPropertyImplDecl - property declaration missing"); ObjCPropertyImplDecl *PIDecl = ObjCPropertyImplDecl::Create(Context, CurContext, AtLoc, PropertyLoc, property, (Synthesize ? ObjCPropertyImplDecl::Synthesize : ObjCPropertyImplDecl::Dynamic), Ivar, PropertyIvarLoc); if (CompleteTypeErr || !compat) PIDecl->setInvalidDecl(); if (ObjCMethodDecl *getterMethod = property->getGetterMethodDecl()) { getterMethod->createImplicitParams(Context, IDecl); if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr && Ivar->getType()->isRecordType()) { // For Objective-C++, need to synthesize the AST for the IVAR object to be // returned by the getter as it must conform to C++'s copy-return rules. // FIXME. Eventually we want to do this for Objective-C as well. 
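// Conceptual sketch of the AST synthesized below (illustrative names, not from this file): for '@property MyCxxValue obj;' the getter body amounts to 'return self->_obj;', built as a DeclRefExpr for 'self', an lvalue-to-rvalue cast, and an ObjCIvarRefExpr, then copy-initialized to the getter's return type so the C++ copy constructor and any cleanups run.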
SynthesizedFunctionScope Scope(*this, getterMethod); ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl(); DeclRefExpr *SelfExpr = new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(), VK_LValue, PropertyDiagLoc); MarkDeclRefReferenced(SelfExpr); Expr *LoadSelfExpr = ImplicitCastExpr::Create(Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr, VK_RValue); Expr *IvarRefExpr = new (Context) ObjCIvarRefExpr(Ivar, Ivar->getUsageType(SelfDecl->getType()), PropertyDiagLoc, Ivar->getLocation(), LoadSelfExpr, true, true); ExprResult Res = PerformCopyInitialization( InitializedEntity::InitializeResult(PropertyDiagLoc, getterMethod->getReturnType(), /*NRVO=*/false), PropertyDiagLoc, IvarRefExpr); if (!Res.isInvalid()) { Expr *ResExpr = Res.getAs<Expr>(); if (ResExpr) ResExpr = MaybeCreateExprWithCleanups(ResExpr); PIDecl->setGetterCXXConstructor(ResExpr); } } if (property->hasAttr<NSReturnsNotRetainedAttr>() && !getterMethod->hasAttr<NSReturnsNotRetainedAttr>()) { Diag(getterMethod->getLocation(), diag::warn_property_getter_owning_mismatch); Diag(property->getLocation(), diag::note_property_declare); } if (getLangOpts().ObjCAutoRefCount && Synthesize) switch (getterMethod->getMethodFamily()) { case OMF_retain: case OMF_retainCount: case OMF_release: case OMF_autorelease: Diag(getterMethod->getLocation(), diag::err_arc_illegal_method_def) << 1 << getterMethod->getSelector(); break; default: break; } } if (ObjCMethodDecl *setterMethod = property->getSetterMethodDecl()) { setterMethod->createImplicitParams(Context, IDecl); if (getLangOpts().CPlusPlus && Synthesize && !CompleteTypeErr && Ivar->getType()->isRecordType()) { // FIXME. Eventually we want to do this for Objective-C as well. SynthesizedFunctionScope Scope(*this, setterMethod); ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl(); DeclRefExpr *SelfExpr = new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(), VK_LValue, PropertyDiagLoc); MarkDeclRefReferenced(SelfExpr); Expr *LoadSelfExpr = ImplicitCastExpr::Create(Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr, VK_RValue); Expr *lhs = new (Context) ObjCIvarRefExpr(Ivar, Ivar->getUsageType(SelfDecl->getType()), PropertyDiagLoc, Ivar->getLocation(), LoadSelfExpr, true, true); ObjCMethodDecl::param_iterator P = setterMethod->param_begin(); ParmVarDecl *Param = (*P); QualType T = Param->getType().getNonReferenceType(); DeclRefExpr *rhs = new (Context) DeclRefExpr(Param, false, T, VK_LValue, PropertyDiagLoc); MarkDeclRefReferenced(rhs); ExprResult Res = BuildBinOp(S, PropertyDiagLoc, BO_Assign, lhs, rhs); if (property->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic) { Expr *callExpr = Res.getAs<Expr>(); if (const CXXOperatorCallExpr *CXXCE = dyn_cast_or_null<CXXOperatorCallExpr>(callExpr)) if (const FunctionDecl *FuncDecl = CXXCE->getDirectCallee()) if (!FuncDecl->isTrivial()) if (property->getType()->isReferenceType()) { Diag(PropertyDiagLoc, diag::err_atomic_property_nontrivial_assign_op) << property->getType(); Diag(FuncDecl->getLocStart(), diag::note_callee_decl) << FuncDecl; } } PIDecl->setSetterCXXAssignment(Res.getAs<Expr>()); } } if (IC) { if (Synthesize) if (ObjCPropertyImplDecl *PPIDecl = IC->FindPropertyImplIvarDecl(PropertyIvar)) { Diag(PropertyLoc, diag::error_duplicate_ivar_use) << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier() << PropertyIvar; Diag(PPIDecl->getLocation(), diag::note_previous_use); } if (ObjCPropertyImplDecl *PPIDecl = IC->FindPropertyImplDecl(PropertyId)) { Diag(PropertyLoc, 
diag::error_property_implemented) << PropertyId; Diag(PPIDecl->getLocation(), diag::note_previous_declaration); return nullptr; } IC->addPropertyImplementation(PIDecl); if (getLangOpts().ObjCDefaultSynthProperties && getLangOpts().ObjCRuntime.isNonFragile() && !IDecl->isObjCRequiresPropertyDefs()) { // Diagnose if an ivar was lazily synthesized due to a previous // use and if 1) property is @dynamic or 2) property is synthesized // but it requires an ivar of a different name. ObjCInterfaceDecl *ClassDeclared = nullptr; ObjCIvarDecl *Ivar = nullptr; if (!Synthesize) Ivar = IDecl->lookupInstanceVariable(PropertyId, ClassDeclared); else { if (PropertyIvar && PropertyIvar != PropertyId) Ivar = IDecl->lookupInstanceVariable(PropertyId, ClassDeclared); } // Issue diagnostics only if Ivar belongs to current class. if (Ivar && Ivar->getSynthesize() && declaresSameEntity(IC->getClassInterface(), ClassDeclared)) { Diag(Ivar->getLocation(), diag::err_undeclared_var_use) << PropertyId; Ivar->setInvalidDecl(); } } } else { if (Synthesize) if (ObjCPropertyImplDecl *PPIDecl = CatImplClass->FindPropertyImplIvarDecl(PropertyIvar)) { Diag(PropertyDiagLoc, diag::error_duplicate_ivar_use) << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier() << PropertyIvar; Diag(PPIDecl->getLocation(), diag::note_previous_use); } if (ObjCPropertyImplDecl *PPIDecl = CatImplClass->FindPropertyImplDecl(PropertyId)) { Diag(PropertyDiagLoc, diag::error_property_implemented) << PropertyId; Diag(PPIDecl->getLocation(), diag::note_previous_declaration); return nullptr; } CatImplClass->addPropertyImplementation(PIDecl); } return PIDecl; } //===----------------------------------------------------------------------===// // Helper methods. //===----------------------------------------------------------------------===// /// DiagnosePropertyMismatch - Compares two properties for their /// attributes and types and warns on a variety of inconsistencies. /// void Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *inheritedName, bool OverridingProtocolProperty) { ObjCPropertyDecl::PropertyAttributeKind CAttr = Property->getPropertyAttributes(); ObjCPropertyDecl::PropertyAttributeKind SAttr = SuperProperty->getPropertyAttributes(); // We allow readonly properties without an explicit ownership // (assign/unsafe_unretained/weak/retain/strong/copy) in super class // to be overridden by a property with any explicit ownership in the subclass.
if (!OverridingProtocolProperty && !getOwnershipRule(SAttr) && getOwnershipRule(CAttr)) ; else { if ((CAttr & ObjCPropertyDecl::OBJC_PR_readonly) && (SAttr & ObjCPropertyDecl::OBJC_PR_readwrite)) Diag(Property->getLocation(), diag::warn_readonly_property) << Property->getDeclName() << inheritedName; if ((CAttr & ObjCPropertyDecl::OBJC_PR_copy) != (SAttr & ObjCPropertyDecl::OBJC_PR_copy)) Diag(Property->getLocation(), diag::warn_property_attribute) << Property->getDeclName() << "copy" << inheritedName; else if (!(SAttr & ObjCPropertyDecl::OBJC_PR_readonly)){ unsigned CAttrRetain = (CAttr & (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong)); unsigned SAttrRetain = (SAttr & (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong)); bool CStrong = (CAttrRetain != 0); bool SStrong = (SAttrRetain != 0); if (CStrong != SStrong) Diag(Property->getLocation(), diag::warn_property_attribute) << Property->getDeclName() << "retain (or strong)" << inheritedName; } } if ((CAttr & ObjCPropertyDecl::OBJC_PR_nonatomic) != (SAttr & ObjCPropertyDecl::OBJC_PR_nonatomic)) { Diag(Property->getLocation(), diag::warn_property_attribute) << Property->getDeclName() << "atomic" << inheritedName; Diag(SuperProperty->getLocation(), diag::note_property_declare); } if (Property->getSetterName() != SuperProperty->getSetterName()) { Diag(Property->getLocation(), diag::warn_property_attribute) << Property->getDeclName() << "setter" << inheritedName; Diag(SuperProperty->getLocation(), diag::note_property_declare); } if (Property->getGetterName() != SuperProperty->getGetterName()) { Diag(Property->getLocation(), diag::warn_property_attribute) << Property->getDeclName() << "getter" << inheritedName; Diag(SuperProperty->getLocation(), diag::note_property_declare); } QualType LHSType = Context.getCanonicalType(SuperProperty->getType()); QualType RHSType = Context.getCanonicalType(Property->getType()); if (!Context.propertyTypesAreCompatible(LHSType, RHSType)) { // Do cases not handled in above. // FIXME. For future support of covariant property types, revisit this. 
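// e.g. (illustrative): redeclaring an inherited '@property NSObject *value;' as '@property NSString *value;' passes the pointer-conversion check below, while an unrelated pointee type triggers warn_property_types_are_incompatible.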
bool IncompatibleObjC = false; QualType ConvertedType; if (!isObjCPointerConversion(RHSType, LHSType, ConvertedType, IncompatibleObjC) || IncompatibleObjC) { Diag(Property->getLocation(), diag::warn_property_types_are_incompatible) << Property->getType() << SuperProperty->getType() << inheritedName; Diag(SuperProperty->getLocation(), diag::note_property_declare); } } } bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property, ObjCMethodDecl *GetterMethod, SourceLocation Loc) { if (!GetterMethod) return false; QualType GetterType = GetterMethod->getReturnType().getNonReferenceType(); QualType PropertyIvarType = property->getType().getNonReferenceType(); bool compat = Context.hasSameType(PropertyIvarType, GetterType); if (!compat) { if (isa<ObjCObjectPointerType>(PropertyIvarType) && isa<ObjCObjectPointerType>(GetterType)) compat = Context.canAssignObjCInterfaces( GetterType->getAs<ObjCObjectPointerType>(), PropertyIvarType->getAs<ObjCObjectPointerType>()); else if (CheckAssignmentConstraints(Loc, GetterType, PropertyIvarType) != Compatible) { Diag(Loc, diag::error_property_accessor_type) << property->getDeclName() << PropertyIvarType << GetterMethod->getSelector() << GetterType; Diag(GetterMethod->getLocation(), diag::note_declared_at); return true; } else { compat = true; QualType lhsType =Context.getCanonicalType(PropertyIvarType).getUnqualifiedType(); QualType rhsType =Context.getCanonicalType(GetterType).getUnqualifiedType(); if (lhsType != rhsType && lhsType->isArithmeticType()) compat = false; } } if (!compat) { Diag(Loc, diag::warn_accessor_property_type_mismatch) << property->getDeclName() << GetterMethod->getSelector(); Diag(GetterMethod->getLocation(), diag::note_declared_at); return true; } return false; } /// CollectImmediateProperties - This routine collects all properties in /// the class and its conforming protocols; but not those in its super class. static void CollectImmediateProperties(ObjCContainerDecl *CDecl, ObjCContainerDecl::PropertyMap &PropMap, ObjCContainerDecl::PropertyMap &SuperPropMap, bool IncludeProtocols = true) { if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) { for (auto *Prop : IDecl->properties()) PropMap[Prop->getIdentifier()] = Prop; if (IncludeProtocols) { // Scan through class's protocols. for (auto *PI : IDecl->all_referenced_protocols()) CollectImmediateProperties(PI, PropMap, SuperPropMap); } } if (ObjCCategoryDecl *CATDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) { if (!CATDecl->IsClassExtension()) for (auto *Prop : CATDecl->properties()) PropMap[Prop->getIdentifier()] = Prop; if (IncludeProtocols) { // Scan through class's protocols. for (auto *PI : CATDecl->protocols()) CollectImmediateProperties(PI, PropMap, SuperPropMap); } } else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) { for (auto *Prop : PDecl->properties()) { ObjCPropertyDecl *PropertyFromSuper = SuperPropMap[Prop->getIdentifier()]; // Exclude property for protocols which conform to class's super-class, // as super-class has to implement the property. if (!PropertyFromSuper || PropertyFromSuper->getIdentifier() != Prop->getIdentifier()) { ObjCPropertyDecl *&PropEntry = PropMap[Prop->getIdentifier()]; if (!PropEntry) PropEntry = Prop; } } // scan through protocol's protocols. 
for (auto *PI : PDecl->protocols()) CollectImmediateProperties(PI, PropMap, SuperPropMap); } } /// CollectSuperClassPropertyImplementations - This routine collects the list /// of properties to be implemented in the super class(es), including those /// coming from their conforming protocols. static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl, ObjCInterfaceDecl::PropertyMap &PropMap) { if (ObjCInterfaceDecl *SDecl = CDecl->getSuperClass()) { ObjCInterfaceDecl::PropertyDeclOrder PO; while (SDecl) { SDecl->collectPropertiesToImplement(PropMap, PO); SDecl = SDecl->getSuperClass(); } } } /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool Sema::IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV) { if (!IV->getSynthesize()) return false; ObjCMethodDecl *IMD = IFace->lookupMethod(Method->getSelector(), Method->isInstanceMethod()); if (!IMD || !IMD->isPropertyAccessor()) return false; // Look up a property declaration, one of whose accessors is implemented // by this method. for (const auto *Property : IFace->properties()) { if ((Property->getGetterName() == IMD->getSelector() || Property->getSetterName() == IMD->getSelector()) && (Property->getPropertyIvarDecl() == IV)) return true; } return false; } static bool SuperClassImplementsProperty(ObjCInterfaceDecl *IDecl, ObjCPropertyDecl *Prop) { bool SuperClassImplementsGetter = false; bool SuperClassImplementsSetter = false; if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly) SuperClassImplementsSetter = true; while (IDecl->getSuperClass()) { ObjCInterfaceDecl *SDecl = IDecl->getSuperClass(); if (!SuperClassImplementsGetter && SDecl->getInstanceMethod(Prop->getGetterName())) SuperClassImplementsGetter = true; if (!SuperClassImplementsSetter && SDecl->getInstanceMethod(Prop->getSetterName())) SuperClassImplementsSetter = true; if (SuperClassImplementsGetter && SuperClassImplementsSetter) return true; IDecl = IDecl->getSuperClass(); } return false; } /// \brief Default synthesizes all properties which must be synthesized /// in class's \@implementation. void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl) { ObjCInterfaceDecl::PropertyMap PropMap; ObjCInterfaceDecl::PropertyDeclOrder PropertyOrder; IDecl->collectPropertiesToImplement(PropMap, PropertyOrder); if (PropMap.empty()) return; ObjCInterfaceDecl::PropertyMap SuperPropMap; CollectSuperClassPropertyImplementations(IDecl, SuperPropMap); for (unsigned i = 0, e = PropertyOrder.size(); i != e; i++) { ObjCPropertyDecl *Prop = PropertyOrder[i]; // Is there a matching property synthesize/dynamic? if (Prop->isInvalidDecl() || Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional) continue; // Property may have been synthesized by user.
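// e.g. (illustrative): an explicit '@synthesize name;' or '@dynamic name;' in the @implementation produces a property-impl decl, so the lookup below skips default synthesis for that property.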
if (IMPDecl->FindPropertyImplDecl(Prop->getIdentifier())) continue; if (IMPDecl->getInstanceMethod(Prop->getGetterName())) { if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly) continue; if (IMPDecl->getInstanceMethod(Prop->getSetterName())) continue; } if (ObjCPropertyImplDecl *PID = IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier())) { Diag(Prop->getLocation(), diag::warn_no_autosynthesis_shared_ivar_property) << Prop->getIdentifier(); if (!PID->getLocation().isInvalid()) Diag(PID->getLocation(), diag::note_property_synthesize); continue; } ObjCPropertyDecl *PropInSuperClass = SuperPropMap[Prop->getIdentifier()]; if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(Prop->getDeclContext())) { // We won't auto-synthesize properties declared in protocols. // Suppress the warning if class's superclass implements property's // getter and implements property's setter (if readwrite property). // Or, if property is going to be implemented in its super class. if (!SuperClassImplementsProperty(IDecl, Prop) && !PropInSuperClass) { Diag(IMPDecl->getLocation(), diag::warn_auto_synthesizing_protocol_property) << Prop << Proto; Diag(Prop->getLocation(), diag::note_property_declare); } continue; } // If the property is to be implemented in the super class, ignore. if (PropInSuperClass) { if ((Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readwrite) && (PropInSuperClass->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly) && !IMPDecl->getInstanceMethod(Prop->getSetterName()) && !IDecl->HasUserDeclaredSetterMethod(Prop)) { Diag(Prop->getLocation(), diag::warn_no_autosynthesis_property) << Prop->getIdentifier(); Diag(PropInSuperClass->getLocation(), diag::note_property_declare); } else { Diag(Prop->getLocation(), diag::warn_autosynthesis_property_in_superclass) << Prop->getIdentifier(); Diag(PropInSuperClass->getLocation(), diag::note_property_declare); Diag(IMPDecl->getLocation(), diag::note_while_in_implementation); } continue; } // We use invalid SourceLocations for the synthesized ivars since they // aren't really synthesized at a particular location; they just exist. // Saying that they are located at the @implementation isn't really going // to help users. ObjCPropertyImplDecl *PIDecl = dyn_cast_or_null<ObjCPropertyImplDecl>( ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(), true, /* property = */ Prop->getIdentifier(), /* ivar = */ Prop->getDefaultSynthIvarName(Context), Prop->getLocation())); if (PIDecl) { Diag(Prop->getLocation(), diag::warn_missing_explicit_synthesis); Diag(IMPDecl->getLocation(), diag::note_while_in_implementation); } } } void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) { if (!LangOpts.ObjCDefaultSynthProperties || LangOpts.ObjCRuntime.isFragile()) return; ObjCImplementationDecl *IC = dyn_cast_or_null<ObjCImplementationDecl>(D); if (!IC) return; if (ObjCInterfaceDecl* IDecl = IC->getClassInterface()) if (!IDecl->isObjCRequiresPropertyDefs()) DefaultSynthesizeProperties(S, IC, IDecl); } static void DiagnoseUnimplementedAccessor(Sema &S, ObjCInterfaceDecl *PrimaryClass, Selector Method, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, ObjCCategoryDecl *C, ObjCPropertyDecl *Prop, Sema::SelectorSet &SMap) { // When reporting on missing property setter/getter implementation in // categories, do not report when they are declared in primary class, // class's protocol, or one of its super classes. This is because // the class is going to implement them.
if (!SMap.count(Method) && (PrimaryClass == nullptr || !PrimaryClass->lookupPropertyAccessor(Method, C))) { S.Diag(IMPDecl->getLocation(), isa<ObjCCategoryDecl>(CDecl) ? diag::warn_setter_getter_impl_required_in_category : diag::warn_setter_getter_impl_required) << Prop->getDeclName() << Method; S.Diag(Prop->getLocation(), diag::note_property_declare); if (S.LangOpts.ObjCDefaultSynthProperties && S.LangOpts.ObjCRuntime.isNonFragile()) if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl)) if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs()) S.Diag(RID->getLocation(), diag::note_suppressed_class_declare); } } void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties) { ObjCContainerDecl::PropertyMap PropMap; ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl); if (!SynthesizeProperties) { ObjCContainerDecl::PropertyMap NoNeedToImplPropMap; // Gather properties which need not be implemented in this class // or category. if (!IDecl) if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) { // For categories, no need to implement properties declared in // its primary class (and its super classes) if property is // declared in one of those containers. if ((IDecl = C->getClassInterface())) { ObjCInterfaceDecl::PropertyDeclOrder PO; IDecl->collectPropertiesToImplement(NoNeedToImplPropMap, PO); } } if (IDecl) CollectSuperClassPropertyImplementations(IDecl, NoNeedToImplPropMap); CollectImmediateProperties(CDecl, PropMap, NoNeedToImplPropMap); } // Scan the @interface to see if any of the protocols it adopts // require an explicit implementation, via attribute // 'objc_protocol_requires_explicit_implementation'. if (IDecl) { std::unique_ptr<ObjCContainerDecl::PropertyMap> LazyMap; for (auto *PDecl : IDecl->all_referenced_protocols()) { if (!PDecl->hasAttr<ObjCExplicitProtocolImplAttr>()) continue; // Lazily construct a set of all the properties in the @interface // of the class, without looking at the superclass. We cannot // use the call to CollectImmediateProperties() above as that // utilizes information from the super class's properties as well // as scans the adopted protocols. This work only triggers for protocols // with the attribute, which is very rare, and only occurs when // analyzing the @implementation. if (!LazyMap) { ObjCContainerDecl::PropertyMap NoNeedToImplPropMap; LazyMap.reset(new ObjCContainerDecl::PropertyMap()); CollectImmediateProperties(CDecl, *LazyMap, NoNeedToImplPropMap, /* IncludeProtocols */ false); } // Add the properties of 'PDecl' to the list of properties that // need to be implemented. for (auto *PropDecl : PDecl->properties()) { if ((*LazyMap)[PropDecl->getIdentifier()]) continue; PropMap[PropDecl->getIdentifier()] = PropDecl; } } } if (PropMap.empty()) return; llvm::DenseSet<ObjCPropertyDecl *> PropImplMap; for (const auto *I : IMPDecl->property_impls()) PropImplMap.insert(I->getPropertyDecl()); SelectorSet InsMap; // Collect property accessors implemented in current implementation. for (const auto *I : IMPDecl->instance_methods()) InsMap.insert(I->getSelector()); ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl); ObjCInterfaceDecl *PrimaryClass = nullptr; if (C && !C->IsClassExtension()) if ((PrimaryClass = C->getClassInterface())) // Report unimplemented properties in the category as well. 
if (ObjCImplDecl *IMP = PrimaryClass->getImplementation()) { // When reporting on missing setter/getters, do not report when // setter/getter is implemented in category's primary class // implementation. for (const auto *I : IMP->instance_methods()) InsMap.insert(I->getSelector()); } for (ObjCContainerDecl::PropertyMap::iterator P = PropMap.begin(), E = PropMap.end(); P != E; ++P) { ObjCPropertyDecl *Prop = P->second; // Is there a matching property synthesize/dynamic? if (Prop->isInvalidDecl() || Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional || PropImplMap.count(Prop) || Prop->getAvailability() == AR_Unavailable) continue; // Diagnose unimplemented getters and setters. DiagnoseUnimplementedAccessor(*this, PrimaryClass, Prop->getGetterName(), IMPDecl, CDecl, C, Prop, InsMap); if (!Prop->isReadOnly()) DiagnoseUnimplementedAccessor(*this, PrimaryClass, Prop->getSetterName(), IMPDecl, CDecl, C, Prop, InsMap); } } void Sema::diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl) { for (const auto *propertyImpl : impDecl->property_impls()) { const auto *property = propertyImpl->getPropertyDecl(); // Warn about null_resettable properties with synthesized setters, // because the setter won't properly handle nil. if (propertyImpl->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize && (property->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_null_resettable) && property->getGetterMethodDecl() && property->getSetterMethodDecl()) { auto *getterMethod = property->getGetterMethodDecl(); auto *setterMethod = property->getSetterMethodDecl(); if (!impDecl->getInstanceMethod(setterMethod->getSelector()) && !impDecl->getInstanceMethod(getterMethod->getSelector())) { SourceLocation loc = propertyImpl->getLocation(); if (loc.isInvalid()) loc = impDecl->getLocStart(); Diag(loc, diag::warn_null_resettable_setter) << setterMethod->getSelector() << property->getDeclName(); } } } } void Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl) { // Rules apply in non-GC mode only if (getLangOpts().getGC() != LangOptions::NonGC) return; for (const auto *Property : IDecl->properties()) { ObjCMethodDecl *GetterMethod = nullptr; ObjCMethodDecl *SetterMethod = nullptr; bool LookedUpGetterSetter = false; unsigned Attributes = Property->getPropertyAttributes(); unsigned AttributesAsWritten = Property->getPropertyAttributesAsWritten(); if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic) && !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_nonatomic)) { GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName()); SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName()); LookedUpGetterSetter = true; if (GetterMethod) { Diag(GetterMethod->getLocation(), diag::warn_default_atomic_custom_getter_setter) << Property->getIdentifier() << 0; Diag(Property->getLocation(), diag::note_property_declare); } if (SetterMethod) { Diag(SetterMethod->getLocation(), diag::warn_default_atomic_custom_getter_setter) << Property->getIdentifier() << 1; Diag(Property->getLocation(), diag::note_property_declare); } } // We only care about readwrite atomic properties.
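// e.g. (illustrative): '@property int count;' is atomic readwrite by default, so implementing only a custom '-setCount:' triggers the warning below, with a fix-it suggesting '@property (nonatomic) int count;'.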
if ((Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) || !(Attributes & ObjCPropertyDecl::OBJC_PR_readwrite)) continue; if (const ObjCPropertyImplDecl *PIDecl = IMPDecl->FindPropertyImplDecl(Property->getIdentifier())) { if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic) continue; if (!LookedUpGetterSetter) { GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName()); SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName()); } if ((GetterMethod && !SetterMethod) || (!GetterMethod && SetterMethod)) { SourceLocation MethodLoc = (GetterMethod ? GetterMethod->getLocation() : SetterMethod->getLocation()); Diag(MethodLoc, diag::warn_atomic_property_rule) << Property->getIdentifier() << (GetterMethod != nullptr) << (SetterMethod != nullptr); // fixit stuff. if (!AttributesAsWritten) { if (Property->getLParenLoc().isValid()) { // @property () ... case. SourceRange PropSourceRange(Property->getAtLoc(), Property->getLParenLoc()); Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) << FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic"); } else { //@property id etc. SourceLocation endLoc = Property->getTypeSourceInfo()->getTypeLoc().getBeginLoc(); endLoc = endLoc.getLocWithOffset(-1); SourceRange PropSourceRange(Property->getAtLoc(), endLoc); Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) << FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic) "); } } else if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) { // @property () ... case. SourceLocation endLoc = Property->getLParenLoc(); SourceRange PropSourceRange(Property->getAtLoc(), endLoc); Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) << FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic, "); } else Diag(MethodLoc, diag::note_atomic_property_fixup_suggest); Diag(Property->getLocation(), diag::note_property_declare); } } } } void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D) { if (getLangOpts().getGC() == LangOptions::GCOnly) return; for (const auto *PID : D->property_impls()) { const ObjCPropertyDecl *PD = PID->getPropertyDecl(); if (PD && !PD->hasAttr<NSReturnsNotRetainedAttr>() && !D->getInstanceMethod(PD->getGetterName())) { ObjCMethodDecl *method = PD->getGetterMethodDecl(); if (!method) continue; ObjCMethodFamily family = method->getMethodFamily(); if (family == OMF_alloc || family == OMF_copy || family == OMF_mutableCopy || family == OMF_new) { if (getLangOpts().ObjCAutoRefCount) Diag(PD->getLocation(), diag::err_cocoa_naming_owned_rule); else Diag(PD->getLocation(), diag::warn_cocoa_naming_owned_rule); // Look for a getter explicitly declared alongside the property. // If we find one, use its location for the note. 
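// e.g. (illustrative): the getter of '@property (strong) NSData *newData;' falls in the OMF_new selector family, so by Cocoa convention it would be expected to return an owned (+1) reference; the note emitted below suggests annotating it with __attribute__((objc_method_family(none))).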
SourceLocation noteLoc = PD->getLocation(); SourceLocation fixItLoc; for (auto *getterRedecl : method->redecls()) { if (getterRedecl->isImplicit()) continue; if (getterRedecl->getDeclContext() != PD->getDeclContext()) continue; noteLoc = getterRedecl->getLocation(); fixItLoc = getterRedecl->getLocEnd(); } Preprocessor &PP = getPreprocessor(); TokenValue tokens[] = { tok::kw___attribute, tok::l_paren, tok::l_paren, PP.getIdentifierInfo("objc_method_family"), tok::l_paren, PP.getIdentifierInfo("none"), tok::r_paren, tok::r_paren, tok::r_paren }; StringRef spelling = "__attribute__((objc_method_family(none)))"; StringRef macroName = PP.getLastMacroWithSpelling(noteLoc, tokens); if (!macroName.empty()) spelling = macroName; auto noteDiag = Diag(noteLoc, diag::note_cocoa_naming_declare_family) << method->getDeclName() << spelling; if (fixItLoc.isValid()) { SmallString<64> fixItText(" "); fixItText += spelling; noteDiag << FixItHint::CreateInsertion(fixItLoc, fixItText); } } } } } void Sema::DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD) { assert(IFD->hasDesignatedInitializers()); const ObjCInterfaceDecl *SuperD = IFD->getSuperClass(); if (!SuperD) return; SelectorSet InitSelSet; for (const auto *I : ImplD->instance_methods()) if (I->getMethodFamily() == OMF_init) InitSelSet.insert(I->getSelector()); SmallVector<const ObjCMethodDecl *, 8> DesignatedInits; SuperD->getDesignatedInitializers(DesignatedInits); for (SmallVector<const ObjCMethodDecl *, 8>::iterator I = DesignatedInits.begin(), E = DesignatedInits.end(); I != E; ++I) { const ObjCMethodDecl *MD = *I; if (!InitSelSet.count(MD->getSelector())) { Diag(ImplD->getLocation(), diag::warn_objc_implementation_missing_designated_init_override) << MD->getSelector(); Diag(MD->getLocation(), diag::note_objc_designated_init_marked_here); } } } /// AddPropertyAttrs - Propagates attributes from a property to the /// implicitly-declared getter or setter for that property. static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod, ObjCPropertyDecl *Property) { // Should we just clone all attributes over? for (const auto *A : Property->attrs()) { if (isa<DeprecatedAttr>(A) || isa<UnavailableAttr>(A) || isa<AvailabilityAttr>(A)) PropertyMethod->addAttr(A->clone(S.Context)); } } /// ProcessPropertyDecl - Make sure that any user-defined setter/getter methods /// have the property type and issue diagnostics if they don't. /// Also synthesize a getter/setter method if none exist (and update the /// appropriate lookup tables. FIXME: Should reconsider if adding synthesized /// methods is the "right" thing to do. 
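/// For example (illustrative): given '@property (copy) NSString *title;' with /// no user-declared accessors, this declares implicit '- (NSString *)title;' /// and '- (void)setTitle:(NSString *)title;' methods on the enclosing container.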
void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property, ObjCContainerDecl *CD, ObjCPropertyDecl *redeclaredProperty, ObjCContainerDecl *lexicalDC) { ObjCMethodDecl *GetterMethod, *SetterMethod; if (CD->isInvalidDecl()) return; GetterMethod = CD->getInstanceMethod(property->getGetterName()); SetterMethod = CD->getInstanceMethod(property->getSetterName()); DiagnosePropertyAccessorMismatch(property, GetterMethod, property->getLocation()); if (SetterMethod) { ObjCPropertyDecl::PropertyAttributeKind CAttr = property->getPropertyAttributes(); if ((!(CAttr & ObjCPropertyDecl::OBJC_PR_readonly)) && Context.getCanonicalType(SetterMethod->getReturnType()) != Context.VoidTy) Diag(SetterMethod->getLocation(), diag::err_setter_type_void); if (SetterMethod->param_size() != 1 || !Context.hasSameUnqualifiedType( (*SetterMethod->param_begin())->getType().getNonReferenceType(), property->getType().getNonReferenceType())) { Diag(property->getLocation(), diag::warn_accessor_property_type_mismatch) << property->getDeclName() << SetterMethod->getSelector(); Diag(SetterMethod->getLocation(), diag::note_declared_at); } } // Synthesize getter/setter methods if none exist. // Find the default getter and if one not found, add one. // FIXME: The synthesized property we set here is misleading. We almost always // synthesize these methods unless the user explicitly provided prototypes // (which is odd, but allowed). Sema should be typechecking that the // declarations jive in that situation (which it is not currently). if (!GetterMethod) { // No instance method of same name as property getter name was found. // Declare a getter method and add it to the list of methods // for this class. SourceLocation Loc = redeclaredProperty ? redeclaredProperty->getLocation() : property->getLocation(); // If the property is null_resettable, the getter returns nonnull. QualType resultTy = property->getType(); if (property->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_null_resettable) { QualType modifiedTy = resultTy; if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)) { if (*nullability == NullabilityKind::Unspecified) resultTy = Context.getAttributedType(AttributedType::attr_nonnull, modifiedTy, modifiedTy); } } GetterMethod = ObjCMethodDecl::Create(Context, Loc, Loc, property->getGetterName(), resultTy, nullptr, CD, /*isInstance=*/true, /*isVariadic=*/false, /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) ? ObjCMethodDecl::Optional : ObjCMethodDecl::Required); CD->addDecl(GetterMethod); AddPropertyAttrs(*this, GetterMethod, property); // FIXME: Eventually this shouldn't be needed, as the lexical context // and the real context should be the same. 
if (lexicalDC) GetterMethod->setLexicalDeclContext(lexicalDC); if (property->hasAttr<NSReturnsNotRetainedAttr>()) GetterMethod->addAttr(NSReturnsNotRetainedAttr::CreateImplicit(Context, Loc)); if (property->hasAttr<ObjCReturnsInnerPointerAttr>()) GetterMethod->addAttr( ObjCReturnsInnerPointerAttr::CreateImplicit(Context, Loc)); if (const SectionAttr *SA = property->getAttr<SectionAttr>()) GetterMethod->addAttr( SectionAttr::CreateImplicit(Context, SectionAttr::GNU_section, SA->getName(), Loc)); if (getLangOpts().ObjCAutoRefCount) CheckARCMethodDecl(GetterMethod); } else // A user declared getter will be synthesized when @synthesize of // the property with the same name is seen in the @implementation GetterMethod->setPropertyAccessor(true); property->setGetterMethodDecl(GetterMethod); // Skip setter if property is read-only. if (!property->isReadOnly()) { // Find the default setter and if one not found, add one. if (!SetterMethod) { // No instance method of same name as property setter name was found. // Declare a setter method and add it to the list of methods // for this class. SourceLocation Loc = redeclaredProperty ? redeclaredProperty->getLocation() : property->getLocation(); SetterMethod = ObjCMethodDecl::Create(Context, Loc, Loc, property->getSetterName(), Context.VoidTy, nullptr, CD, /*isInstance=*/true, /*isVariadic=*/false, /*isPropertyAccessor=*/true, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) ? ObjCMethodDecl::Optional : ObjCMethodDecl::Required); // If the property is null_resettable, the setter accepts a // nullable value. QualType paramTy = property->getType().getUnqualifiedType(); if (property->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_null_resettable) { QualType modifiedTy = paramTy; if (auto nullability = AttributedType::stripOuterNullability(modifiedTy)){ if (*nullability == NullabilityKind::Unspecified) paramTy = Context.getAttributedType(AttributedType::attr_nullable, modifiedTy, modifiedTy); } } // Invent the arguments for the setter. We don't bother making a // nice name for the argument. ParmVarDecl *Argument = ParmVarDecl::Create(Context, SetterMethod, Loc, Loc, property->getIdentifier(), paramTy, /*TInfo=*/nullptr, SC_None, nullptr); SetterMethod->setMethodParams(Context, Argument, None); AddPropertyAttrs(*this, SetterMethod, property); CD->addDecl(SetterMethod); // FIXME: Eventually this shouldn't be needed, as the lexical context // and the real context should be the same. if (lexicalDC) SetterMethod->setLexicalDeclContext(lexicalDC); if (const SectionAttr *SA = property->getAttr<SectionAttr>()) SetterMethod->addAttr( SectionAttr::CreateImplicit(Context, SectionAttr::GNU_section, SA->getName(), Loc)); // It's possible for the user to have set a very odd custom // setter selector that causes it to have a method family. if (getLangOpts().ObjCAutoRefCount) CheckARCMethodDecl(SetterMethod); } else // A user declared setter will be synthesized when @synthesize of // the property with the same name is seen in the @implementation SetterMethod->setPropertyAccessor(true); property->setSetterMethodDecl(SetterMethod); } // Add any synthesized methods to the global pool. This allows us to // handle the following, which is supported by GCC (and part of the design).
// // @interface Foo // @property double bar; // @end // // void thisIsUnfortunate() { // id foo; // double bar = [foo bar]; // } // if (GetterMethod) AddInstanceMethodToGlobalPool(GetterMethod); if (SetterMethod) AddInstanceMethodToGlobalPool(SetterMethod); ObjCInterfaceDecl *CurrentClass = dyn_cast<ObjCInterfaceDecl>(CD); if (!CurrentClass) { if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CD)) CurrentClass = Cat->getClassInterface(); else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(CD)) CurrentClass = Impl->getClassInterface(); } if (GetterMethod) CheckObjCMethodOverrides(GetterMethod, CurrentClass, Sema::RTC_Unknown); if (SetterMethod) CheckObjCMethodOverrides(SetterMethod, CurrentClass, Sema::RTC_Unknown); } void Sema::CheckObjCPropertyAttributes(Decl *PDecl, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass) { // FIXME: Improve the reported location. if (!PDecl || PDecl->isInvalidDecl()) return; if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) && (Attributes & ObjCDeclSpec::DQ_PR_readwrite)) Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "readonly" << "readwrite"; ObjCPropertyDecl *PropertyDecl = cast<ObjCPropertyDecl>(PDecl); QualType PropertyTy = PropertyDecl->getType(); unsigned PropertyOwnership = getOwnershipRule(Attributes); // A 'readonly' property with no obvious lifetime: // its lifetime will be determined by its backing ivar. if (getLangOpts().ObjCAutoRefCount && Attributes & ObjCDeclSpec::DQ_PR_readonly && PropertyTy->isObjCRetainableType() && !PropertyOwnership) return; // Check for copy or retain on non-object types. if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong)) && !PropertyTy->isObjCRetainableType() && !PropertyDecl->hasAttr<ObjCNSObjectAttr>()) { Diag(Loc, diag::err_objc_property_requires_object) << (Attributes & ObjCDeclSpec::DQ_PR_weak ? "weak" : Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)"); Attributes &= ~(ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong); PropertyDecl->setInvalidDecl(); } // Check for more than one of { assign, copy, retain }.
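// e.g. (illustrative): '@property (assign, copy) id x;' reports the mutual-exclusion error below and drops 'copy', keeping 'assign'.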
if (Attributes & ObjCDeclSpec::DQ_PR_assign) { if (Attributes & ObjCDeclSpec::DQ_PR_copy) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "assign" << "copy"; Attributes &= ~ObjCDeclSpec::DQ_PR_copy; } if (Attributes & ObjCDeclSpec::DQ_PR_retain) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "assign" << "retain"; Attributes &= ~ObjCDeclSpec::DQ_PR_retain; } if (Attributes & ObjCDeclSpec::DQ_PR_strong) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "assign" << "strong"; Attributes &= ~ObjCDeclSpec::DQ_PR_strong; } if (getLangOpts().ObjCAutoRefCount && (Attributes & ObjCDeclSpec::DQ_PR_weak)) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "assign" << "weak"; Attributes &= ~ObjCDeclSpec::DQ_PR_weak; } if (PropertyDecl->hasAttr<IBOutletCollectionAttr>()) Diag(Loc, diag::warn_iboutletcollection_property_assign); } else if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) { if (Attributes & ObjCDeclSpec::DQ_PR_copy) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "unsafe_unretained" << "copy"; Attributes &= ~ObjCDeclSpec::DQ_PR_copy; } if (Attributes & ObjCDeclSpec::DQ_PR_retain) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "unsafe_unretained" << "retain"; Attributes &= ~ObjCDeclSpec::DQ_PR_retain; } if (Attributes & ObjCDeclSpec::DQ_PR_strong) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "unsafe_unretained" << "strong"; Attributes &= ~ObjCDeclSpec::DQ_PR_strong; } if (getLangOpts().ObjCAutoRefCount && (Attributes & ObjCDeclSpec::DQ_PR_weak)) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "unsafe_unretained" << "weak"; Attributes &= ~ObjCDeclSpec::DQ_PR_weak; } } else if (Attributes & ObjCDeclSpec::DQ_PR_copy) { if (Attributes & ObjCDeclSpec::DQ_PR_retain) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "copy" << "retain"; Attributes &= ~ObjCDeclSpec::DQ_PR_retain; } if (Attributes & ObjCDeclSpec::DQ_PR_strong) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "copy" << "strong"; Attributes &= ~ObjCDeclSpec::DQ_PR_strong; } if (Attributes & ObjCDeclSpec::DQ_PR_weak) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "copy" << "weak"; Attributes &= ~ObjCDeclSpec::DQ_PR_weak; } } else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) && (Attributes & ObjCDeclSpec::DQ_PR_weak)) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "retain" << "weak"; Attributes &= ~ObjCDeclSpec::DQ_PR_retain; } else if ((Attributes & ObjCDeclSpec::DQ_PR_strong) && (Attributes & ObjCDeclSpec::DQ_PR_weak)) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "strong" << "weak"; Attributes &= ~ObjCDeclSpec::DQ_PR_weak; } if (Attributes & ObjCDeclSpec::DQ_PR_weak) { // 'weak' and 'nonnull' are mutually exclusive. 
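// e.g. (illustrative): '@property (weak, nonnull) id delegate;' is rejected below, while '@property (weak) id delegate;' with no written nullability is implicitly given the _Nullable attribute.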
if (auto nullability = PropertyTy->getNullability(Context)) { if (*nullability == NullabilityKind::NonNull) Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "nonnull" << "weak"; } else { PropertyTy = Context.getAttributedType( AttributedType::getNullabilityAttrKind(NullabilityKind::Nullable), PropertyTy, PropertyTy); TypeSourceInfo *TSInfo = PropertyDecl->getTypeSourceInfo(); PropertyDecl->setType(PropertyTy, TSInfo); } } if ((Attributes & ObjCDeclSpec::DQ_PR_atomic) && (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)) { Diag(Loc, diag::err_objc_property_attr_mutually_exclusive) << "atomic" << "nonatomic"; Attributes &= ~ObjCDeclSpec::DQ_PR_atomic; } // Warn if user supplied no assignment attribute, property is // readwrite, and this is an object type. if (!(Attributes & (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_copy | ObjCDeclSpec::DQ_PR_unsafe_unretained | ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong | ObjCDeclSpec::DQ_PR_weak)) && PropertyTy->isObjCObjectPointerType()) { if (getLangOpts().ObjCAutoRefCount) // With ARC, @property definitions should default to (strong) when // not specified, including when the property is 'readonly'. PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong); else if (!(Attributes & ObjCDeclSpec::DQ_PR_readonly)) { bool isAnyClassTy = (PropertyTy->isObjCClassType() || PropertyTy->isObjCQualifiedClassType()); // In non-gc, non-arc mode, 'Class' is treated as a 'void *'; no need to // issue any warning. if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC) ; else if (propertyInPrimaryClass) { // Don't issue warning on property with no lifetime in class // extension as it is inherited from property in primary class. // Skip this warning in gc-only mode. if (getLangOpts().getGC() != LangOptions::GCOnly) Diag(Loc, diag::warn_objc_property_no_assignment_attribute); // In non-gc code, warn that this is likely inappropriate. if (getLangOpts().getGC() == LangOptions::NonGC) Diag(Loc, diag::warn_objc_property_default_assign_on_object); } } // FIXME: Implement warning dependent on NSCopying being // implemented. See also: // <rdar://5168496&4855821&5607453&5096644&4947311&5698469&4947014&5168496> // (please trim this list while you are at it). } if (!(Attributes & ObjCDeclSpec::DQ_PR_copy) && !(Attributes & ObjCDeclSpec::DQ_PR_readonly) && getLangOpts().getGC() == LangOptions::GCOnly && PropertyTy->isBlockPointerType()) Diag(Loc, diag::warn_objc_property_copy_missing_on_block); else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) && !(Attributes & ObjCDeclSpec::DQ_PR_readonly) && !(Attributes & ObjCDeclSpec::DQ_PR_strong) && PropertyTy->isBlockPointerType()) Diag(Loc, diag::warn_objc_property_retain_of_block); if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) && (Attributes & ObjCDeclSpec::DQ_PR_setter)) Diag(Loc, diag::warn_objc_readonly_property_has_setter); } #endif // HLSL Change
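/* Illustrative summary (assumed examples): declarations the checks above diagnose include '@property (readonly, readwrite) id a;' (mutually exclusive), '@property (atomic, nonatomic) id b;' (mutually exclusive), '@property (copy) int c;' (ownership attribute on a non-object type), and '@property (weak, nonnull) id d;' (weak contradicts nonnull). */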
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/IdentifierResolver.cpp
//===- IdentifierResolver.cpp - Lexical Scope Name lookup -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the IdentifierResolver class, which is used for lexical // scoped lookup, based on declaration names. // //===----------------------------------------------------------------------===// #include "clang/Sema/IdentifierResolver.h" #include "clang/AST/Decl.h" #include "clang/Basic/LangOptions.h" #include "clang/Lex/ExternalPreprocessorSource.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Scope.h" using namespace clang; //===----------------------------------------------------------------------===// // IdDeclInfoMap class //===----------------------------------------------------------------------===// /// IdDeclInfoMap - Associates IdDeclInfos with declaration names. /// Allocates 'pools' (vectors of IdDeclInfos) to avoid allocating each /// individual IdDeclInfo to heap. class IdentifierResolver::IdDeclInfoMap { static const unsigned int POOL_SIZE = 512; /// We use our own linked-list implementation because it is sadly /// impossible to add something to a pre-C++0x STL container without /// a completely unnecessary copy. struct IdDeclInfoPool { IdDeclInfoPool(IdDeclInfoPool *Next) : Next(Next) {} IdDeclInfoPool *Next; IdDeclInfo Pool[POOL_SIZE]; }; IdDeclInfoPool *CurPool; unsigned int CurIndex; public: IdDeclInfoMap() : CurPool(nullptr), CurIndex(POOL_SIZE) {} ~IdDeclInfoMap() { IdDeclInfoPool *Cur = CurPool; while (IdDeclInfoPool *P = Cur) { Cur = Cur->Next; delete P; } } /// Returns the IdDeclInfo associated to the DeclarationName. /// It creates a new IdDeclInfo if one was not created before for this id. IdDeclInfo &operator[](DeclarationName Name); }; //===----------------------------------------------------------------------===// // IdDeclInfo Implementation //===----------------------------------------------------------------------===// /// RemoveDecl - Remove the decl from the scope chain. /// The decl must already be part of the decl chain. void IdentifierResolver::IdDeclInfo::RemoveDecl(NamedDecl *D) { for (DeclsTy::iterator I = Decls.end(); I != Decls.begin(); --I) { if (D == *(I-1)) { Decls.erase(I-1); return; } } llvm_unreachable("Didn't find this decl on its identifier's chain!"); } //===----------------------------------------------------------------------===// // IdentifierResolver Implementation //===----------------------------------------------------------------------===// IdentifierResolver::IdentifierResolver(Preprocessor &PP) : LangOpt(PP.getLangOpts()), PP(PP), IdDeclInfos(new IdDeclInfoMap) { } IdentifierResolver::~IdentifierResolver() { delete IdDeclInfos; } /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S, bool AllowInlineNamespace) const { Ctx = Ctx->getRedeclContext(); if (Ctx->isFunctionOrMethod() || (S && S->isFunctionPrototypeScope())) { // Ignore the scopes associated within transparent declaration contexts. 
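// e.g. (illustrative): 'extern "C" { ... }' linkage specifications and unscoped enums are transparent contexts; names declared inside them belong to the enclosing context, so such scopes are skipped here.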
while (S->getEntity() && S->getEntity()->isTransparentContext()) S = S->getParent(); if (S->isDeclScope(D)) return true; if (LangOpt.CPlusPlus) { // C++ 3.3.2p3: // The name declared in a catch exception-declaration is local to the // handler and shall not be redeclared in the outermost block of the // handler. // C++ 3.3.2p4: // Names declared in the for-init-statement, and in the condition of if, // while, for, and switch statements are local to the if, while, for, or // switch statement (including the controlled statement), and shall not be // redeclared in a subsequent condition of that statement nor in the // outermost block (or, for the if statement, any of the outermost blocks) // of the controlled statement. // assert(S->getParent() && "No TUScope?"); if (S->getParent()->getFlags() & Scope::ControlScope) { S = S->getParent(); if (S->isDeclScope(D)) return true; } if (S->getFlags() & Scope::FnTryCatchScope) return S->getParent()->isDeclScope(D); } return false; } // FIXME: If D is a local extern declaration, this check doesn't make sense; // we should be checking its lexical context instead in that case, because // that is its scope. DeclContext *DCtx = D->getDeclContext()->getRedeclContext(); return AllowInlineNamespace ? Ctx->InEnclosingNamespaceSetOf(DCtx) : Ctx->Equals(DCtx); } /// AddDecl - Link the decl to its shadowed decl chain. void IdentifierResolver::AddDecl(NamedDecl *D) { DeclarationName Name = D->getDeclName(); if (IdentifierInfo *II = Name.getAsIdentifierInfo()) updatingIdentifier(*II); void *Ptr = Name.getFETokenInfo<void>(); if (!Ptr) { Name.setFETokenInfo(D); return; } IdDeclInfo *IDI; if (isDeclPtr(Ptr)) { Name.setFETokenInfo(nullptr); IDI = &(*IdDeclInfos)[Name]; NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr); IDI->AddDecl(PrevD); } else IDI = toIdDeclInfo(Ptr); IDI->AddDecl(D); } void IdentifierResolver::InsertDeclAfter(iterator Pos, NamedDecl *D) { DeclarationName Name = D->getDeclName(); if (IdentifierInfo *II = Name.getAsIdentifierInfo()) updatingIdentifier(*II); void *Ptr = Name.getFETokenInfo<void>(); if (!Ptr) { AddDecl(D); return; } if (isDeclPtr(Ptr)) { // We only have a single declaration: insert before or after it, // as appropriate. if (Pos == iterator()) { // Add the new declaration before the existing declaration. NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr); RemoveDecl(PrevD); AddDecl(D); AddDecl(PrevD); } else { // Add new declaration after the existing declaration. AddDecl(D); } return; } // General case: insert the declaration at the appropriate point in the // list, which already has at least two elements. IdDeclInfo *IDI = toIdDeclInfo(Ptr); if (Pos.isIterator()) { IDI->InsertDecl(Pos.getIterator() + 1, D); } else IDI->InsertDecl(IDI->decls_begin(), D); } /// RemoveDecl - Unlink the decl from its shadowed decl chain. /// The decl must already be part of the decl chain. void IdentifierResolver::RemoveDecl(NamedDecl *D) { assert(D && "null param passed"); DeclarationName Name = D->getDeclName(); if (IdentifierInfo *II = Name.getAsIdentifierInfo()) updatingIdentifier(*II); void *Ptr = Name.getFETokenInfo<void>(); assert(Ptr && "Didn't find this decl on its identifier's chain!"); if (isDeclPtr(Ptr)) { assert(D == Ptr && "Didn't find this decl on its identifier's chain!"); Name.setFETokenInfo(nullptr); return; } return toIdDeclInfo(Ptr)->RemoveDecl(D); } /// begin - Returns an iterator for decls with name 'Name'. 
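/// (Illustrative note: iteration starts from the most recently pushed /// declaration, so a shadowing declaration in an inner scope is visited before /// the declarations it shadows.)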
IdentifierResolver::iterator IdentifierResolver::begin(DeclarationName Name) { if (IdentifierInfo *II = Name.getAsIdentifierInfo()) readingIdentifier(*II); void *Ptr = Name.getFETokenInfo<void>(); if (!Ptr) return end(); if (isDeclPtr(Ptr)) return iterator(static_cast<NamedDecl*>(Ptr)); IdDeclInfo *IDI = toIdDeclInfo(Ptr); IdDeclInfo::DeclsTy::iterator I = IDI->decls_end(); if (I != IDI->decls_begin()) return iterator(I-1); // No decls found. return end(); } namespace { enum DeclMatchKind { DMK_Different, DMK_Replace, DMK_Ignore }; } /// \brief Compare two declarations to see whether they are different or, /// if they are the same, whether the new declaration should replace the /// existing declaration. static DeclMatchKind compareDeclarations(NamedDecl *Existing, NamedDecl *New) { // If the declarations are identical, ignore the new one. if (Existing == New) return DMK_Ignore; // If the declarations have different kinds, they're obviously different. if (Existing->getKind() != New->getKind()) return DMK_Different; // If the declarations are redeclarations of each other, keep the newest one. if (Existing->getCanonicalDecl() == New->getCanonicalDecl()) { // If we're adding an imported declaration, don't replace another imported // declaration. if (Existing->isFromASTFile() && New->isFromASTFile()) return DMK_Different; // If either of these is the most recent declaration, use it. Decl *MostRecent = Existing->getMostRecentDecl(); if (Existing == MostRecent) return DMK_Ignore; if (New == MostRecent) return DMK_Replace; // If the existing declaration is somewhere in the previous declaration // chain of the new declaration, then prefer the new declaration. for (auto RD : New->redecls()) { if (RD == Existing) return DMK_Replace; if (RD->isCanonicalDecl()) break; } return DMK_Ignore; } return DMK_Different; } bool IdentifierResolver::tryAddTopLevelDecl(NamedDecl *D, DeclarationName Name){ if (IdentifierInfo *II = Name.getAsIdentifierInfo()) readingIdentifier(*II); void *Ptr = Name.getFETokenInfo<void>(); if (!Ptr) { Name.setFETokenInfo(D); return true; } IdDeclInfo *IDI; if (isDeclPtr(Ptr)) { NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr); switch (compareDeclarations(PrevD, D)) { case DMK_Different: break; case DMK_Ignore: return false; case DMK_Replace: Name.setFETokenInfo(D); return true; } Name.setFETokenInfo(nullptr); IDI = &(*IdDeclInfos)[Name]; // If the existing declaration is not visible in translation unit scope, // then add the new top-level declaration first. if (!PrevD->getDeclContext()->getRedeclContext()->isTranslationUnit()) { IDI->AddDecl(D); IDI->AddDecl(PrevD); } else { IDI->AddDecl(PrevD); IDI->AddDecl(D); } return true; } IDI = toIdDeclInfo(Ptr); // See whether this declaration is identical to any existing declarations. // If not, find the right place to insert it. for (IdDeclInfo::DeclsTy::iterator I = IDI->decls_begin(), IEnd = IDI->decls_end(); I != IEnd; ++I) { switch (compareDeclarations(*I, D)) { case DMK_Different: break; case DMK_Ignore: return false; case DMK_Replace: *I = D; return true; } if (!(*I)->getDeclContext()->getRedeclContext()->isTranslationUnit()) { // We've found a declaration that is not visible from the translation // unit (it's in an inner scope). Insert our declaration here. IDI->InsertDecl(I, D); return true; } } // Add the declaration to the end. 
IDI->AddDecl(D); return true; } void IdentifierResolver::readingIdentifier(IdentifierInfo &II) { if (II.isOutOfDate()) PP.getExternalSource()->updateOutOfDateIdentifier(II); } void IdentifierResolver::updatingIdentifier(IdentifierInfo &II) { if (II.isOutOfDate()) PP.getExternalSource()->updateOutOfDateIdentifier(II); if (II.isFromAST()) II.setChangedSinceDeserialization(); } //===----------------------------------------------------------------------===// // IdDeclInfoMap Implementation //===----------------------------------------------------------------------===// /// Returns the IdDeclInfo associated to the DeclarationName. /// It creates a new IdDeclInfo if one was not created before for this id. IdentifierResolver::IdDeclInfo & IdentifierResolver::IdDeclInfoMap::operator[](DeclarationName Name) { void *Ptr = Name.getFETokenInfo<void>(); if (Ptr) return *toIdDeclInfo(Ptr); if (CurIndex == POOL_SIZE) { CurPool = new IdDeclInfoPool(CurPool); CurIndex = 0; } IdDeclInfo *IDI = &CurPool->Pool[CurIndex]; Name.setFETokenInfo(reinterpret_cast<void*>( reinterpret_cast<uintptr_t>(IDI) | 0x1) ); ++CurIndex; return *IDI; } void IdentifierResolver::iterator::incrementSlowCase() { NamedDecl *D = **this; void *InfoPtr = D->getDeclName().getFETokenInfo<void>(); assert(!isDeclPtr(InfoPtr) && "Decl with wrong id ?"); IdDeclInfo *Info = toIdDeclInfo(InfoPtr); BaseIter I = getIterator(); if (I != Info->decls_begin()) *this = iterator(I-1); else // No more decls. *this = iterator(); }
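// Implementation note (restating what the code above does): IdDeclInfoMap stores its IdDeclInfo pointer in the DeclarationName's FETokenInfo slot with the low bit set (ptr | 0x1); that tag is how isDeclPtr()/toIdDeclInfo() distinguish a lone NamedDecl* from a declaration chain, and masking the bit off recovers the pool entry.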
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaChecking.cpp
//===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  This file implements extra semantic analysis beyond what is enforced
//  by the C type system.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/Analyses/FormatString.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaHLSL.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/raw_ostream.h"
#include <limits>

using namespace clang;
using namespace sema;

SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
                                                    unsigned ByteNo) const {
  return SL->getLocationOfByte(ByteNo, getSourceManager(), LangOpts,
                               Context.getTargetInfo());
}

/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking.  Returns true on error.
static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
  unsigned argCount = call->getNumArgs();
  if (argCount == desiredArgCount) return false;

  if (argCount < desiredArgCount)
    return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args)
        << 0 /*function call*/ << desiredArgCount << argCount
        << call->getSourceRange();

  // Highlight all the excess arguments.
  SourceRange range(call->getArg(desiredArgCount)->getLocStart(),
                    call->getArg(argCount - 1)->getLocEnd());

  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
    << 0 /*function call*/ << desiredArgCount << argCount
    << call->getArg(1)->getSourceRange();
}

/// Check that the first argument to __builtin_annotation is an integer
/// and the second argument is a non-wide string literal.
static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 2))
    return true;

  // First argument should be an integer.
  Expr *ValArg = TheCall->getArg(0);
  QualType Ty = ValArg->getType();
  if (!Ty->isIntegerType()) {
    S.Diag(ValArg->getLocStart(), diag::err_builtin_annotation_first_arg)
      << ValArg->getSourceRange();
    return true;
  }

  // Second argument should be a constant string.
  Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
  if (!Literal || !Literal->isAscii()) {
    S.Diag(StrArg->getLocStart(), diag::err_builtin_annotation_second_arg)
      << StrArg->getSourceRange();
    return true;
  }

  TheCall->setType(Ty);
  return false;
}

/// Check that the argument to __builtin_addressof is a glvalue, and set the
/// result type to the corresponding pointer type.
static bool SemaBuiltinAddressof(Sema &S, CallExpr *TheCall) {
  if (checkArgCount(S, TheCall, 1))
    return true;

  ExprResult Arg(TheCall->getArg(0));
  QualType ResultType = S.CheckAddressOfOperand(Arg, TheCall->getLocStart());
  if (ResultType.isNull())
    return true;

  TheCall->setArg(0, Arg.get());
  TheCall->setType(ResultType);
  return false;
}

static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
                                  CallExpr *TheCall, unsigned SizeIdx,
                                  unsigned DstSizeIdx) {
  if (TheCall->getNumArgs() <= SizeIdx ||
      TheCall->getNumArgs() <= DstSizeIdx)
    return;

  const Expr *SizeArg = TheCall->getArg(SizeIdx);
  const Expr *DstSizeArg = TheCall->getArg(DstSizeIdx);

  llvm::APSInt Size, DstSize;

  // find out if both sizes are known at compile time
  if (!SizeArg->EvaluateAsInt(Size, S.Context) ||
      !DstSizeArg->EvaluateAsInt(DstSize, S.Context))
    return;

  if (Size.ule(DstSize))
    return;

  // confirmed overflow so generate the diagnostic.
  IdentifierInfo *FnName = FDecl->getIdentifier();
  SourceLocation SL = TheCall->getLocStart();
  SourceRange SR = TheCall->getSourceRange();

  S.Diag(SL, diag::warn_memcpy_chk_overflow) << SR << FnName;
}

static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
  if (checkArgCount(S, BuiltinCall, 2))
    return true;

  SourceLocation BuiltinLoc = BuiltinCall->getLocStart();
  Expr *Builtin = BuiltinCall->getCallee()->IgnoreImpCasts();
  Expr *Call = BuiltinCall->getArg(0);
  Expr *Chain = BuiltinCall->getArg(1);

  if (Call->getStmtClass() != Stmt::CallExprClass) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_not_call)
        << Call->getSourceRange();
    return true;
  }

  auto CE = cast<CallExpr>(Call);
  if (CE->getCallee()->getType()->isBlockPointerType()) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_block_call)
        << Call->getSourceRange();
    return true;
  }

  const Decl *TargetDecl = CE->getCalleeDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
    if (FD->getBuiltinID()) {
      S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_builtin_call)
          << Call->getSourceRange();
      return true;
    }

  if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens())) {
    S.Diag(BuiltinLoc, diag::err_first_argument_to_cwsc_pdtor_call)
        << Call->getSourceRange();
    return true;
  }

  ExprResult ChainResult = S.UsualUnaryConversions(Chain);
  if (ChainResult.isInvalid())
    return true;
  if (!ChainResult.get()->getType()->isPointerType()) {
    S.Diag(BuiltinLoc, diag::err_second_argument_to_cwsc_not_pointer)
        << Chain->getSourceRange();
    return true;
  }

  QualType ReturnTy = CE->getCallReturnType(S.Context);
  QualType ArgTys[2] = { ReturnTy, ChainResult.get()->getType() };
  QualType BuiltinTy = S.Context.getFunctionType(
      ReturnTy, ArgTys, FunctionProtoType::ExtProtoInfo(),
      None); // HLSL Change - builtins are all in parameters

  QualType BuiltinPtrTy = S.Context.getPointerType(BuiltinTy);
  Builtin =
      S.ImpCastExprToType(Builtin, BuiltinPtrTy, CK_BuiltinFnToFnPtr).get();

  BuiltinCall->setType(CE->getType());
  BuiltinCall->setValueKind(CE->getValueKind());
  BuiltinCall->setObjectKind(CE->getObjectKind());
  BuiltinCall->setCallee(Builtin);
  BuiltinCall->setArg(1, ChainResult.get());

  return false;
}

static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
                                     Scope::ScopeFlags NeededScopeFlags,
                                     unsigned DiagID) {
  // Scopes aren't available during instantiation. Fortunately, builtin
  // functions cannot be template args so they cannot be formed through
  // template instantiation. Therefore checking once during the parse is
  // sufficient.
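  // Illustrative example (hypothetical user code, assuming MSVC-style SEH
  // support is enabled): the scope walk below is what rejects an SEH builtin
  // used outside its required scope.
  //
  //   unsigned long bad() {
  //     return _exception_code();                    // diagnosed: not inside
  //   }                                              // an __except block
  //   unsigned long ok() {
  //     __try { return 0; }
  //     __except (1) { return _exception_code(); }   // fine: SEH scope found
  //   }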
  if (!SemaRef.ActiveTemplateInstantiations.empty())
    return false;

  Scope *S = SemaRef.getCurScope();
  while (S && !S->isSEHExceptScope())
    S = S->getParent();
  if (!S || !(S->getFlags() & NeededScopeFlags)) {
    auto *DRE = cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
    SemaRef.Diag(TheCall->getExprLoc(), DiagID)
        << DRE->getDecl()->getIdentifier();
    return true;
  }

  return false;
}

ExprResult
Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                               CallExpr *TheCall) {
  ExprResult TheCallResult(TheCall);

  // Find out if any arguments are required to be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
  if (Error != ASTContext::GE_None)
    ICEArguments = 0;  // Don't diagnose previously diagnosed errors.

  // If any arguments are required to be ICE's, check and diagnose.
  for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
    // Skip arguments not required to be ICE's.
    if ((ICEArguments & (1 << ArgNo)) == 0) continue;

    llvm::APSInt Result;
    if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
      return true;
    ICEArguments &= ~(1 << ArgNo);
  }

  switch (BuiltinID) {
  case Builtin::BI__builtin___CFStringMakeConstantString:
    assert(TheCall->getNumArgs() == 1 &&
           "Wrong # arguments to builtin CFStringMakeConstantString");
    if (CheckObjCString(TheCall->getArg(0)))
      return ExprError();
    break;
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
    if (SemaBuiltinVAStart(TheCall))
      return ExprError();
    break;
  case Builtin::BI__va_start: {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::thumb:
      if (SemaBuiltinVAStartARM(TheCall))
        return ExprError();
      break;
    default:
      if (SemaBuiltinVAStart(TheCall))
        return ExprError();
      break;
    }
    break;
  }
  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered:
    if (SemaBuiltinUnorderedCompare(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_fpclassify:
    if (SemaBuiltinFPClassification(TheCall, 6))
      return ExprError();
    break;
  case Builtin::BI__builtin_isfinite:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isinf_sign:
  case Builtin::BI__builtin_isnan:
  case Builtin::BI__builtin_isnormal:
    if (SemaBuiltinFPClassification(TheCall, 1))
      return ExprError();
    break;
  case Builtin::BI__builtin_shufflevector:
    return SemaBuiltinShuffleVector(TheCall);
    // TheCall will be freed by the smart pointer here, but that's fine, since
    // SemaBuiltinShuffleVector guts it, but then doesn't release it.
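    // Illustrative example (hypothetical user code): __builtin_shufflevector
    // takes one or two vectors plus constant lane indices, so it cannot be
    // given a fixed prototype and needs the custom checking invoked above:
    //
    //   typedef float float4 __attribute__((ext_vector_type(4)));
    //   float4 reverse(float4 v) {
    //     return __builtin_shufflevector(v, v, 3, 2, 1, 0);
    //   }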
  case Builtin::BI__builtin_prefetch:
    if (SemaBuiltinPrefetch(TheCall))
      return ExprError();
    break;
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume:
    if (SemaBuiltinAssume(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_assume_aligned:
    if (SemaBuiltinAssumeAligned(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_object_size:
    if (SemaBuiltinConstantArgRange(TheCall, 1, 0, 3))
      return ExprError();
    break;
  case Builtin::BI__builtin_longjmp:
    if (SemaBuiltinLongjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_setjmp:
    if (SemaBuiltinSetjmp(TheCall))
      return ExprError();
    break;
  case Builtin::BI_setjmp:
  case Builtin::BI_setjmpex:
    if (checkArgCount(*this, TheCall, 1))
      return true;
    break;
  case Builtin::BI__builtin_classify_type:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__builtin_constant_p:
    if (checkArgCount(*this, TheCall, 1)) return true;
    TheCall->setType(Context.IntTy);
    break;
  case Builtin::BI__sync_fetch_and_add: case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2: case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8: case Builtin::BI__sync_fetch_and_add_16:
  case Builtin::BI__sync_fetch_and_sub: case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2: case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8: case Builtin::BI__sync_fetch_and_sub_16:
  case Builtin::BI__sync_fetch_and_or: case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2: case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8: case Builtin::BI__sync_fetch_and_or_16:
  case Builtin::BI__sync_fetch_and_and: case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2: case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8: case Builtin::BI__sync_fetch_and_and_16:
  case Builtin::BI__sync_fetch_and_xor: case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2: case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8: case Builtin::BI__sync_fetch_and_xor_16:
  case Builtin::BI__sync_fetch_and_nand: case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2: case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8: case Builtin::BI__sync_fetch_and_nand_16:
  case Builtin::BI__sync_add_and_fetch: case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2: case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8: case Builtin::BI__sync_add_and_fetch_16:
  case Builtin::BI__sync_sub_and_fetch: case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2: case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8: case Builtin::BI__sync_sub_and_fetch_16:
  case Builtin::BI__sync_and_and_fetch: case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2: case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8: case Builtin::BI__sync_and_and_fetch_16:
  case Builtin::BI__sync_or_and_fetch: case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2: case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8: case Builtin::BI__sync_or_and_fetch_16:
  case Builtin::BI__sync_xor_and_fetch: case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2: case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8: case Builtin::BI__sync_xor_and_fetch_16:
  case Builtin::BI__sync_nand_and_fetch: case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2: case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8: case Builtin::BI__sync_nand_and_fetch_16:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
  case Builtin::BI__sync_lock_test_and_set: case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2: case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8: case Builtin::BI__sync_lock_test_and_set_16:
  case Builtin::BI__sync_lock_release: case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2: case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8: case Builtin::BI__sync_lock_release_16:
  case Builtin::BI__sync_swap: case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2: case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8: case Builtin::BI__sync_swap_16:
    return SemaBuiltinAtomicOverloaded(TheCallResult);
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
  case Builtin::BI##ID: \
    return SemaAtomicOpsOverloaded(TheCallResult, AtomicExpr::AO##ID);
#include "clang/Basic/Builtins.def"
  case Builtin::BI__builtin_annotation:
    if (SemaBuiltinAnnotation(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_addressof:
    if (SemaBuiltinAddressof(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__builtin_operator_new:
  case Builtin::BI__builtin_operator_delete:
    if (!getLangOpts().CPlusPlus) {
      Diag(TheCall->getExprLoc(), diag::err_builtin_requires_language)
          << (BuiltinID == Builtin::BI__builtin_operator_new
                  ? "__builtin_operator_new"
                  : "__builtin_operator_delete")
          << "C++";
      return ExprError();
    }
    // CodeGen assumes it can find the global new and delete to call,
    // so ensure that they are declared.
    DeclareGlobalNewDelete();
    break;

  // check secure string manipulation functions where overflows
  // are detectable at compile time
  case Builtin::BI__builtin___memcpy_chk:
  case Builtin::BI__builtin___memmove_chk:
  case Builtin::BI__builtin___memset_chk:
  case Builtin::BI__builtin___strlcat_chk:
  case Builtin::BI__builtin___strlcpy_chk:
  case Builtin::BI__builtin___strncat_chk:
  case Builtin::BI__builtin___strncpy_chk:
  case Builtin::BI__builtin___stpncpy_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 2, 3);
    break;
  case Builtin::BI__builtin___memccpy_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 3, 4);
    break;
  case Builtin::BI__builtin___snprintf_chk:
  case Builtin::BI__builtin___vsnprintf_chk:
    SemaBuiltinMemChkCall(*this, FDecl, TheCall, 1, 3);
    break;
  case Builtin::BI__builtin_call_with_static_chain:
    if (SemaBuiltinCallWithStaticChain(*this, TheCall))
      return ExprError();
    break;
  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code: {
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHExceptScope,
                                 diag::err_seh___except_block))
      return ExprError();
    break;
  }
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info: {
    if (SemaBuiltinSEHScopeCheck(*this, TheCall, Scope::SEHFilterScope,
                                 diag::err_seh___except_filter))
      return ExprError();
    break;
  }
  case Builtin::BI__GetExceptionInfo:
    if (checkArgCount(*this, TheCall, 1))
      return ExprError();

    if (CheckCXXThrowOperand(
            TheCall->getLocStart(),
            Context.getExceptionObjectType(FDecl->getParamDecl(0)->getType()),
            TheCall))
      return ExprError();

    TheCall->setType(Context.VoidPtrTy);
    break;
  }

  // Since the target specific builtins for each arch overlap, only check those
  // of the arch we are compiling for.
#if 0 // HLSL Change Starts
  if (BuiltinID >= Builtin::FirstTSBuiltin) {
    switch (Context.getTargetInfo().getTriple().getArch()) {
    case llvm::Triple::arm:
    case llvm::Triple::armeb:
    case llvm::Triple::thumb:
    case llvm::Triple::thumbeb:
      if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::aarch64:
    case llvm::Triple::aarch64_be:
      if (CheckAArch64BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::mips:
    case llvm::Triple::mipsel:
    case llvm::Triple::mips64:
    case llvm::Triple::mips64el:
      if (CheckMipsBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::systemz:
      if (CheckSystemZBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      if (CheckX86BuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    case llvm::Triple::ppc:
    case llvm::Triple::ppc64:
    case llvm::Triple::ppc64le:
      if (CheckPPCBuiltinFunctionCall(BuiltinID, TheCall))
        return ExprError();
      break;
    default:
      break;
    }
  }
#endif // HLSL Change Ends

  return TheCallResult;
}

#if 0 // HLSL Change Starts
// Get the valid immediate range for the specified NEON type code.
static unsigned RFT(unsigned t, bool shift = false, bool ForceQuad = false) {
  NeonTypeFlags Type(t);
  int IsQuad = ForceQuad ? true : Type.isQuad();
  switch (Type.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return shift ? 7 : (8 << IsQuad) - 1;
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return shift ? 15 : (4 << IsQuad) - 1;
  case NeonTypeFlags::Int32:
    return shift ? 31 : (2 << IsQuad) - 1;
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return shift ? 63 : (1 << IsQuad) - 1;
  case NeonTypeFlags::Poly128:
    return shift ?
127 : (1 << IsQuad) - 1; case NeonTypeFlags::Float16: assert(!shift && "cannot shift float types!"); return (4 << IsQuad) - 1; case NeonTypeFlags::Float32: assert(!shift && "cannot shift float types!"); return (2 << IsQuad) - 1; case NeonTypeFlags::Float64: assert(!shift && "cannot shift float types!"); return (1 << IsQuad) - 1; } llvm_unreachable("Invalid NeonTypeFlag!"); } /// getNeonEltType - Return the QualType corresponding to the elements of /// the vector type specified by the NeonTypeFlags. This is used to check /// the pointer arguments for Neon load/store intrinsics. static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context, bool IsPolyUnsigned, bool IsInt64Long) { switch (Flags.getEltType()) { case NeonTypeFlags::Int8: return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy; case NeonTypeFlags::Int16: return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy; case NeonTypeFlags::Int32: return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy; case NeonTypeFlags::Int64: if (IsInt64Long) return Flags.isUnsigned() ? Context.UnsignedLongTy : Context.LongTy; else return Flags.isUnsigned() ? Context.UnsignedLongLongTy : Context.LongLongTy; case NeonTypeFlags::Poly8: return IsPolyUnsigned ? Context.UnsignedCharTy : Context.SignedCharTy; case NeonTypeFlags::Poly16: return IsPolyUnsigned ? Context.UnsignedShortTy : Context.ShortTy; case NeonTypeFlags::Poly64: if (IsInt64Long) return Context.UnsignedLongTy; else return Context.UnsignedLongLongTy; case NeonTypeFlags::Poly128: break; case NeonTypeFlags::Float16: return Context.HalfTy; case NeonTypeFlags::Float32: return Context.FloatTy; case NeonTypeFlags::Float64: return Context.DoubleTy; } llvm_unreachable("Invalid NeonTypeFlag!"); } bool Sema::CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { llvm::APSInt Result; uint64_t mask = 0; unsigned TV = 0; int PtrArgNum = -1; bool HasConstPtr = false; switch (BuiltinID) { #define GET_NEON_OVERLOAD_CHECK #include "clang/Basic/arm_neon.inc" #undef GET_NEON_OVERLOAD_CHECK } // For NEON intrinsics which are overloaded on vector element type, validate // the immediate which specifies which variant to emit. unsigned ImmArg = TheCall->getNumArgs()-1; if (mask) { if (SemaBuiltinConstantArg(TheCall, ImmArg, Result)) return true; TV = Result.getLimitedValue(64); if ((TV > 63) || (mask & (1ULL << TV)) == 0) return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code) << TheCall->getArg(ImmArg)->getSourceRange(); } if (PtrArgNum >= 0) { // Check that pointer arguments have the specified type. 
Expr *Arg = TheCall->getArg(PtrArgNum); if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) Arg = ICE->getSubExpr(); ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg); QualType RHSTy = RHS.get()->getType(); llvm::Triple::ArchType Arch = Context.getTargetInfo().getTriple().getArch(); bool IsPolyUnsigned = Arch == llvm::Triple::aarch64; bool IsInt64Long = Context.getTargetInfo().getInt64Type() == TargetInfo::SignedLong; QualType EltTy = getNeonEltType(NeonTypeFlags(TV), Context, IsPolyUnsigned, IsInt64Long); if (HasConstPtr) EltTy = EltTy.withConst(); QualType LHSTy = Context.getPointerType(EltTy); AssignConvertType ConvTy; ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); if (RHS.isInvalid()) return true; if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy, RHS.get(), AA_Assigning)) return true; } // For NEON intrinsics which take an immediate value as part of the // instruction, range check them here. unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { default: return false; #define GET_NEON_IMMEDIATE_CHECK #include "clang/Basic/arm_neon.inc" #undef GET_NEON_IMMEDIATE_CHECK } return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); } bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth) { assert((BuiltinID == ARM::BI__builtin_arm_ldrex || BuiltinID == ARM::BI__builtin_arm_ldaex || BuiltinID == ARM::BI__builtin_arm_strex || BuiltinID == ARM::BI__builtin_arm_stlex || BuiltinID == AArch64::BI__builtin_arm_ldrex || BuiltinID == AArch64::BI__builtin_arm_ldaex || BuiltinID == AArch64::BI__builtin_arm_strex || BuiltinID == AArch64::BI__builtin_arm_stlex) && "unexpected ARM builtin"); bool IsLdrex = BuiltinID == ARM::BI__builtin_arm_ldrex || BuiltinID == ARM::BI__builtin_arm_ldaex || BuiltinID == AArch64::BI__builtin_arm_ldrex || BuiltinID == AArch64::BI__builtin_arm_ldaex; DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); // Ensure that we have the proper number of arguments. if (checkArgCount(*this, TheCall, IsLdrex ? 1 : 2)) return true; // Inspect the pointer argument of the atomic builtin. This should always be // a pointer type, whose element is an integral scalar or pointer type. // Because it is a pointer type, we don't have to worry about any implicit // casts here. Expr *PointerArg = TheCall->getArg(IsLdrex ? 0 : 1); ExprResult PointerArgRes = DefaultFunctionArrayLvalueConversion(PointerArg); if (PointerArgRes.isInvalid()) return true; PointerArg = PointerArgRes.get(); const PointerType *pointerType = PointerArg->getType()->getAs<PointerType>(); if (!pointerType) { Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) << PointerArg->getType() << PointerArg->getSourceRange(); return true; } // ldrex takes a "const volatile T*" and strex takes a "volatile T*". Our next // task is to insert the appropriate casts into the AST. First work out just // what the appropriate type is. QualType ValType = pointerType->getPointeeType(); QualType AddrType = ValType.getUnqualifiedType().withVolatile(); if (IsLdrex) AddrType.addConst(); // Issue a warning if the cast is dodgy. CastKind CastNeeded = CK_NoOp; if (!AddrType.isAtLeastAsQualifiedAs(ValType)) { CastNeeded = CK_BitCast; Diag(DRE->getLocStart(), diag::ext_typecheck_convert_discards_qualifiers) << PointerArg->getType() << Context.getPointerType(AddrType) << AA_Passing << PointerArg->getSourceRange(); } // Finally, do the cast and replace the argument with the corrected version. 
AddrType = Context.getPointerType(AddrType); PointerArgRes = ImpCastExprToType(PointerArg, AddrType, CastNeeded); if (PointerArgRes.isInvalid()) return true; PointerArg = PointerArgRes.get(); TheCall->setArg(IsLdrex ? 0 : 1, PointerArg); // In general, we allow ints, floats and pointers to be loaded and stored. if (!ValType->isIntegerType() && !ValType->isAnyPointerType() && !ValType->isBlockPointerType() && !ValType->isFloatingType()) { Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intfltptr) << PointerArg->getType() << PointerArg->getSourceRange(); return true; } // But ARM doesn't have instructions to deal with 128-bit versions. if (Context.getTypeSize(ValType) > MaxWidth) { assert(MaxWidth == 64 && "Diagnostic unexpectedly inaccurate"); Diag(DRE->getLocStart(), diag::err_atomic_exclusive_builtin_pointer_size) << PointerArg->getType() << PointerArg->getSourceRange(); return true; } switch (ValType.getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: // okay break; case Qualifiers::OCL_Weak: case Qualifiers::OCL_Strong: case Qualifiers::OCL_Autoreleasing: Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) << ValType << PointerArg->getSourceRange(); return true; } if (IsLdrex) { TheCall->setType(ValType); return false; } // Initialize the argument to be stored. ExprResult ValArg = TheCall->getArg(0); InitializedEntity Entity = InitializedEntity::InitializeParameter( Context, ValType, /*consume*/ false); ValArg = PerformCopyInitialization(Entity, SourceLocation(), ValArg); if (ValArg.isInvalid()) return true; TheCall->setArg(0, ValArg.get()); // __builtin_arm_strex always returns an int. It's marked as such in the .def, // but the custom checker bypasses all default analysis. TheCall->setType(Context.IntTy); return false; } bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { llvm::APSInt Result; if (BuiltinID == ARM::BI__builtin_arm_ldrex || BuiltinID == ARM::BI__builtin_arm_ldaex || BuiltinID == ARM::BI__builtin_arm_strex || BuiltinID == ARM::BI__builtin_arm_stlex) { return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 64); } if (BuiltinID == ARM::BI__builtin_arm_prefetch) { return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || SemaBuiltinConstantArgRange(TheCall, 2, 0, 1); } if (BuiltinID == ARM::BI__builtin_arm_rsr64 || BuiltinID == ARM::BI__builtin_arm_wsr64) return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 3, false); if (BuiltinID == ARM::BI__builtin_arm_rsr || BuiltinID == ARM::BI__builtin_arm_rsrp || BuiltinID == ARM::BI__builtin_arm_wsr || BuiltinID == ARM::BI__builtin_arm_wsrp) return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) return true; // For intrinsics which take an immediate value as part of the instruction, // range check them here. unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { default: return false; case ARM::BI__builtin_arm_ssat: i = 1; l = 1; u = 31; break; case ARM::BI__builtin_arm_usat: i = 1; u = 31; break; case ARM::BI__builtin_arm_vcvtr_f: case ARM::BI__builtin_arm_vcvtr_d: i = 1; u = 1; break; case ARM::BI__builtin_arm_dmb: case ARM::BI__builtin_arm_dsb: case ARM::BI__builtin_arm_isb: case ARM::BI__builtin_arm_dbg: l = 0; u = 15; break; } // FIXME: VFP Intrinsics should error if VFP not present. 
return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); } bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { llvm::APSInt Result; if (BuiltinID == AArch64::BI__builtin_arm_ldrex || BuiltinID == AArch64::BI__builtin_arm_ldaex || BuiltinID == AArch64::BI__builtin_arm_strex || BuiltinID == AArch64::BI__builtin_arm_stlex) { return CheckARMBuiltinExclusiveCall(BuiltinID, TheCall, 128); } if (BuiltinID == AArch64::BI__builtin_arm_prefetch) { return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || SemaBuiltinConstantArgRange(TheCall, 2, 0, 2) || SemaBuiltinConstantArgRange(TheCall, 3, 0, 1) || SemaBuiltinConstantArgRange(TheCall, 4, 0, 1); } if (BuiltinID == AArch64::BI__builtin_arm_rsr64 || BuiltinID == AArch64::BI__builtin_arm_wsr64) return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, false); if (BuiltinID == AArch64::BI__builtin_arm_rsr || BuiltinID == AArch64::BI__builtin_arm_rsrp || BuiltinID == AArch64::BI__builtin_arm_wsr || BuiltinID == AArch64::BI__builtin_arm_wsrp) return SemaBuiltinARMSpecialReg(BuiltinID, TheCall, 0, 5, true); if (CheckNeonBuiltinFunctionCall(BuiltinID, TheCall)) return true; // For intrinsics which take an immediate value as part of the instruction, // range check them here. unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { default: return false; case AArch64::BI__builtin_arm_dmb: case AArch64::BI__builtin_arm_dsb: case AArch64::BI__builtin_arm_isb: l = 0; u = 15; break; } return SemaBuiltinConstantArgRange(TheCall, i, l, u + l); } bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { default: return false; case Mips::BI__builtin_mips_wrdsp: i = 1; l = 0; u = 63; break; case Mips::BI__builtin_mips_rddsp: i = 0; l = 0; u = 63; break; case Mips::BI__builtin_mips_append: i = 2; l = 0; u = 31; break; case Mips::BI__builtin_mips_balign: i = 2; l = 0; u = 3; break; case Mips::BI__builtin_mips_precr_sra_ph_w: i = 2; l = 0; u = 31; break; case Mips::BI__builtin_mips_precr_sra_r_ph_w: i = 2; l = 0; u = 31; break; case Mips::BI__builtin_mips_prepend: i = 2; l = 0; u = 31; break; } return SemaBuiltinConstantArgRange(TheCall, i, l, u); } bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { unsigned i = 0, l = 0, u = 0; bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde || BuiltinID == PPC::BI__builtin_divdeu || BuiltinID == PPC::BI__builtin_bpermd; bool IsTarget64Bit = Context.getTargetInfo() .getTypeWidth(Context .getTargetInfo() .getIntPtrType()) == 64; bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe || BuiltinID == PPC::BI__builtin_divweu || BuiltinID == PPC::BI__builtin_divde || BuiltinID == PPC::BI__builtin_divdeu; if (Is64BitBltin && !IsTarget64Bit) return Diag(TheCall->getLocStart(), diag::err_64_bit_builtin_32_bit_tgt) << TheCall->getSourceRange(); if ((IsBltinExtDiv && !Context.getTargetInfo().hasFeature("extdiv")) || (BuiltinID == PPC::BI__builtin_bpermd && !Context.getTargetInfo().hasFeature("bpermd"))) return Diag(TheCall->getLocStart(), diag::err_ppc_builtin_only_on_pwr7) << TheCall->getSourceRange(); switch (BuiltinID) { default: return false; case PPC::BI__builtin_altivec_crypto_vshasigmaw: case PPC::BI__builtin_altivec_crypto_vshasigmad: return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1) || SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); case PPC::BI__builtin_tbegin: case PPC::BI__builtin_tend: i = 0; l = 0; u = 1; break; case PPC::BI__builtin_tsr: i = 0; l = 0; u = 7; break; case 
PPC::BI__builtin_tabortwc: case PPC::BI__builtin_tabortdc: i = 0; l = 0; u = 31; break; case PPC::BI__builtin_tabortwci: case PPC::BI__builtin_tabortdci: return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) || SemaBuiltinConstantArgRange(TheCall, 2, 0, 31); } return SemaBuiltinConstantArgRange(TheCall, i, l, u); } bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { if (BuiltinID == SystemZ::BI__builtin_tabort) { Expr *Arg = TheCall->getArg(0); llvm::APSInt AbortCode(32); if (Arg->isIntegerConstantExpr(AbortCode, Context) && AbortCode.getSExtValue() >= 0 && AbortCode.getSExtValue() < 256) return Diag(Arg->getLocStart(), diag::err_systemz_invalid_tabort_code) << Arg->getSourceRange(); } // For intrinsics which take an immediate value as part of the instruction, // range check them here. unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { default: return false; case SystemZ::BI__builtin_s390_lcbb: i = 1; l = 0; u = 15; break; case SystemZ::BI__builtin_s390_verimb: case SystemZ::BI__builtin_s390_verimh: case SystemZ::BI__builtin_s390_verimf: case SystemZ::BI__builtin_s390_verimg: i = 3; l = 0; u = 255; break; case SystemZ::BI__builtin_s390_vfaeb: case SystemZ::BI__builtin_s390_vfaeh: case SystemZ::BI__builtin_s390_vfaef: case SystemZ::BI__builtin_s390_vfaebs: case SystemZ::BI__builtin_s390_vfaehs: case SystemZ::BI__builtin_s390_vfaefs: case SystemZ::BI__builtin_s390_vfaezb: case SystemZ::BI__builtin_s390_vfaezh: case SystemZ::BI__builtin_s390_vfaezf: case SystemZ::BI__builtin_s390_vfaezbs: case SystemZ::BI__builtin_s390_vfaezhs: case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break; case SystemZ::BI__builtin_s390_vfidb: return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) || SemaBuiltinConstantArgRange(TheCall, 2, 0, 15); case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break; case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break; case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break; case SystemZ::BI__builtin_s390_vsldb: i = 2; l = 0; u = 15; break; case SystemZ::BI__builtin_s390_vstrcb: case SystemZ::BI__builtin_s390_vstrch: case SystemZ::BI__builtin_s390_vstrcf: case SystemZ::BI__builtin_s390_vstrczb: case SystemZ::BI__builtin_s390_vstrczh: case SystemZ::BI__builtin_s390_vstrczf: case SystemZ::BI__builtin_s390_vstrcbs: case SystemZ::BI__builtin_s390_vstrchs: case SystemZ::BI__builtin_s390_vstrcfs: case SystemZ::BI__builtin_s390_vstrczbs: case SystemZ::BI__builtin_s390_vstrczhs: case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break; } return SemaBuiltinConstantArgRange(TheCall, i, l, u); } bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) { unsigned i = 0, l = 0, u = 0; switch (BuiltinID) { default: return false; case X86::BI__builtin_cpu_supports: return SemaBuiltinCpuSupports(TheCall); case X86::BI_mm_prefetch: i = 1; l = 0; u = 3; break; case X86::BI__builtin_ia32_sha1rnds4: i = 2, l = 0; u = 3; break; case X86::BI__builtin_ia32_vpermil2pd: case X86::BI__builtin_ia32_vpermil2pd256: case X86::BI__builtin_ia32_vpermil2ps: case X86::BI__builtin_ia32_vpermil2ps256: i = 3, l = 0; u = 3; break; case X86::BI__builtin_ia32_cmpb128_mask: case X86::BI__builtin_ia32_cmpw128_mask: case X86::BI__builtin_ia32_cmpd128_mask: case X86::BI__builtin_ia32_cmpq128_mask: case X86::BI__builtin_ia32_cmpb256_mask: case X86::BI__builtin_ia32_cmpw256_mask: case X86::BI__builtin_ia32_cmpd256_mask: case X86::BI__builtin_ia32_cmpq256_mask: case X86::BI__builtin_ia32_cmpb512_mask: case 
       X86::BI__builtin_ia32_cmpw512_mask:
  case X86::BI__builtin_ia32_cmpd512_mask: case X86::BI__builtin_ia32_cmpq512_mask:
  case X86::BI__builtin_ia32_ucmpb128_mask: case X86::BI__builtin_ia32_ucmpw128_mask:
  case X86::BI__builtin_ia32_ucmpd128_mask: case X86::BI__builtin_ia32_ucmpq128_mask:
  case X86::BI__builtin_ia32_ucmpb256_mask: case X86::BI__builtin_ia32_ucmpw256_mask:
  case X86::BI__builtin_ia32_ucmpd256_mask: case X86::BI__builtin_ia32_ucmpq256_mask:
  case X86::BI__builtin_ia32_ucmpb512_mask: case X86::BI__builtin_ia32_ucmpw512_mask:
  case X86::BI__builtin_ia32_ucmpd512_mask: case X86::BI__builtin_ia32_ucmpq512_mask:
    i = 2; l = 0; u = 7;
    break;
  case X86::BI__builtin_ia32_roundps: case X86::BI__builtin_ia32_roundpd:
  case X86::BI__builtin_ia32_roundps256: case X86::BI__builtin_ia32_roundpd256:
    i = 1, l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_roundss: case X86::BI__builtin_ia32_roundsd:
    i = 2, l = 0; u = 15;
    break;
  case X86::BI__builtin_ia32_cmpps: case X86::BI__builtin_ia32_cmpss:
  case X86::BI__builtin_ia32_cmppd: case X86::BI__builtin_ia32_cmpsd:
  case X86::BI__builtin_ia32_cmpps256: case X86::BI__builtin_ia32_cmppd256:
  case X86::BI__builtin_ia32_cmpps512_mask: case X86::BI__builtin_ia32_cmppd512_mask:
    i = 2; l = 0; u = 31;
    break;
  case X86::BI__builtin_ia32_vpcomub: case X86::BI__builtin_ia32_vpcomuw:
  case X86::BI__builtin_ia32_vpcomud: case X86::BI__builtin_ia32_vpcomuq:
  case X86::BI__builtin_ia32_vpcomb: case X86::BI__builtin_ia32_vpcomw:
  case X86::BI__builtin_ia32_vpcomd: case X86::BI__builtin_ia32_vpcomq:
    i = 2; l = 0; u = 7;
    break;
  }
  return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
#endif // HLSL Change Ends

/// Given a FunctionDecl's FormatAttr, attempts to populate the
/// FormatStringInfo parameter with the FormatAttr's correct format_idx and
/// firstDataArg. Returns true when the format fits the function and the
/// FormatStringInfo has been populated.
bool Sema::getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                               FormatStringInfo *FSI) {
  FSI->HasVAListArg = Format->getFirstArg() == 0;
  FSI->FormatIdx = Format->getFormatIdx() - 1;
  FSI->FirstDataArg = FSI->HasVAListArg ? 0 : Format->getFirstArg() - 1;

  // The way the format attribute works in GCC, the implicit this argument
  // of member functions is counted. However, it doesn't appear in our own
  // lists, so decrement format_idx in that case.
  if (IsCXXMember) {
    if (FSI->FormatIdx == 0)
      return false;
    --FSI->FormatIdx;
    if (FSI->FirstDataArg != 0)
      --FSI->FirstDataArg;
  }
  return true;
}

/// Checks if the given expression evaluates to null.
///
/// \brief Returns true if the value evaluates to null.
static bool CheckNonNullExpr(Sema &S, const Expr *Expr) {
  // If the expression has non-null type, it doesn't evaluate to null.
  if (auto nullability
        = Expr->IgnoreImplicit()->getType()->getNullability(S.Context)) {
    if (*nullability == NullabilityKind::NonNull)
      return false;
  }

  // As a special case, transparent unions initialized with zero are
  // considered null for the purposes of the nonnull attribute.
if (const RecordType *UT = Expr->getType()->getAsUnionType()) { if (UT->getDecl()->hasAttr<TransparentUnionAttr>()) if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(Expr)) if (const InitListExpr *ILE = dyn_cast<InitListExpr>(CLE->getInitializer())) Expr = ILE->getInit(0); } bool Result; return (!Expr->isValueDependent() && Expr->EvaluateAsBooleanCondition(Result, S.Context) && !Result); } static void CheckNonNullArgument(Sema &S, const Expr *ArgExpr, SourceLocation CallSiteLoc) { if (CheckNonNullExpr(S, ArgExpr)) S.Diag(CallSiteLoc, diag::warn_null_arg) << ArgExpr->getSourceRange(); } bool Sema::GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx) { #if 0 // HLSL Change Starts FormatStringInfo FSI; if ((GetFormatStringType(Format) == FST_NSString) && getFormatStringInfo(Format, false, &FSI)) { Idx = FSI.FormatIdx; return true; } #endif // HLSL Change Ends return false; } #if 0 // HLSL Change Starts /// \brief Diagnose use of %s directive in an NSString which is being passed /// as formatting string to formatting method. static void DiagnoseCStringFormatDirectiveInCFAPI(Sema &S, const NamedDecl *FDecl, Expr **Args, unsigned NumArgs) { unsigned Idx = 0; bool Format = false; ObjCStringFormatFamily SFFamily = FDecl->getObjCFStringFormattingFamily(); if (SFFamily == ObjCStringFormatFamily::SFF_CFString) { Idx = 2; Format = true; } else for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { if (S.GetFormatNSStringIdx(I, Idx)) { Format = true; break; } } if (!Format || NumArgs <= Idx) return; const Expr *FormatExpr = Args[Idx]; if (const CStyleCastExpr *CSCE = dyn_cast<CStyleCastExpr>(FormatExpr)) FormatExpr = CSCE->getSubExpr(); const StringLiteral *FormatString; if (const ObjCStringLiteral *OSL = dyn_cast<ObjCStringLiteral>(FormatExpr->IgnoreParenImpCasts())) FormatString = OSL->getString(); else FormatString = dyn_cast<StringLiteral>(FormatExpr->IgnoreParenImpCasts()); if (!FormatString) return; if (S.FormatStringHasSArg(FormatString)) { S.Diag(FormatExpr->getExprLoc(), diag::warn_objc_cdirective_format_string) << "%s" << 1 << 1; S.Diag(FDecl->getLocation(), diag::note_entity_declared_at) << FDecl->getDeclName(); } } #endif // HLSL Change Ends /// Determine whether the given type has a non-null nullability annotation. static bool isNonNullType(ASTContext &ctx, QualType type) { if (auto nullability = type->getNullability(ctx)) return *nullability == NullabilityKind::NonNull; return false; } static void CheckNonNullArguments(Sema &S, const NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, SourceLocation CallSiteLoc) { assert((FDecl || Proto) && "Need a function declaration or prototype"); // Check the attributes attached to the method/function itself. llvm::SmallBitVector NonNullArgs; if (FDecl) { // Handle the nonnull attribute on the function/method declaration itself. for (const auto *NonNull : FDecl->specific_attrs<NonNullAttr>()) { if (!NonNull->args_size()) { // Easy case: all pointer arguments are nonnull. for (const auto *Arg : Args) if (S.isValidPointerAttrType(Arg->getType())) CheckNonNullArgument(S, Arg, CallSiteLoc); return; } for (unsigned Val : NonNull->args()) { if (Val >= Args.size()) continue; if (NonNullArgs.empty()) NonNullArgs.resize(Args.size()); NonNullArgs.set(Val); } } } if (FDecl && (isa<FunctionDecl>(FDecl) || isa<ObjCMethodDecl>(FDecl))) { // Handle the nonnull attribute on the parameters of the // function/method. 
ArrayRef<ParmVarDecl*> parms; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FDecl)) parms = FD->parameters(); else parms = cast<ObjCMethodDecl>(FDecl)->parameters(); unsigned ParamIndex = 0; for (ArrayRef<ParmVarDecl*>::iterator I = parms.begin(), E = parms.end(); I != E; ++I, ++ParamIndex) { const ParmVarDecl *PVD = *I; if (PVD->hasAttr<NonNullAttr>() || isNonNullType(S.Context, PVD->getType())) { if (NonNullArgs.empty()) NonNullArgs.resize(Args.size()); NonNullArgs.set(ParamIndex); } } } else { // If we have a non-function, non-method declaration but no // function prototype, try to dig out the function prototype. if (!Proto) { if (const ValueDecl *VD = dyn_cast<ValueDecl>(FDecl)) { QualType type = VD->getType().getNonReferenceType(); if (auto pointerType = type->getAs<PointerType>()) type = pointerType->getPointeeType(); else if (auto blockType = type->getAs<BlockPointerType>()) type = blockType->getPointeeType(); // FIXME: data member pointers? // Dig out the function prototype, if there is one. Proto = type->getAs<FunctionProtoType>(); } } // Fill in non-null argument information from the nullability // information on the parameter types (if we have them). if (Proto) { unsigned Index = 0; for (auto paramType : Proto->getParamTypes()) { if (isNonNullType(S.Context, paramType)) { if (NonNullArgs.empty()) NonNullArgs.resize(Args.size()); NonNullArgs.set(Index); } ++Index; } } } // Check for non-null arguments. for (unsigned ArgIndex = 0, ArgIndexEnd = NonNullArgs.size(); ArgIndex != ArgIndexEnd; ++ArgIndex) { if (NonNullArgs[ArgIndex]) CheckNonNullArgument(S, Args[ArgIndex], CallSiteLoc); } } /// Handles the checks for format strings, non-POD arguments to vararg /// functions, and NULL arguments passed to non-NULL parameters. void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType) { // FIXME: We should check as much as we can in the template definition. if (CurContext->isDependentContext()) return; #if 1 // HLSL Change - no format string support (void)(IsMemberFunction); (void)(CallType); (void)(Range); #else // Printf and scanf checking. llvm::SmallBitVector CheckedVarArgs; if (FDecl) { for (const auto *I : FDecl->specific_attrs<FormatAttr>()) { // Only create vector if there are format attributes. CheckedVarArgs.resize(Args.size()); CheckFormatArguments(I, Args, IsMemberFunction, CallType, Loc, Range, CheckedVarArgs); } } // Refuse POD arguments that weren't caught by the format string // checks above. if (CallType != VariadicDoesNotApply) { unsigned NumParams = Proto ? Proto->getNumParams() : FDecl && isa<FunctionDecl>(FDecl) ? cast<FunctionDecl>(FDecl)->getNumParams() : FDecl && isa<ObjCMethodDecl>(FDecl) ? cast<ObjCMethodDecl>(FDecl)->param_size() : 0; for (unsigned ArgIdx = NumParams; ArgIdx < Args.size(); ++ArgIdx) { // Args[ArgIdx] can be null in malformed code. if (const Expr *Arg = Args[ArgIdx]) { if (CheckedVarArgs.empty() || !CheckedVarArgs[ArgIdx]) checkVariadicArgument(Arg, CallType); } } } #endif // HLSL Change - no format string support if (FDecl || Proto) { CheckNonNullArguments(*this, FDecl, Proto, Args, Loc); // Type safety checking. if (FDecl) { for (const auto *I : FDecl->specific_attrs<ArgumentWithTypeTagAttr>()) CheckArgumentWithTypeTag(I, Args.data()); } } } /// CheckConstructorCall - Check a constructor call for correctness and safety /// properties not enforced by the C type system. 
void Sema::CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc) { VariadicCallType CallType = Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply; checkCall(FDecl, Proto, Args, /*IsMemberFunction=*/true, Loc, SourceRange(), CallType); } /// CheckFunctionCall - Check a direct function call for various correctness /// and safety properties not strictly enforced by the C type system. bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto) { bool IsMemberOperatorCall = isa<CXXOperatorCallExpr>(TheCall) && isa<CXXMethodDecl>(FDecl); bool IsMemberFunction = isa<CXXMemberCallExpr>(TheCall) || IsMemberOperatorCall; VariadicCallType CallType = getVariadicCallType(FDecl, Proto, TheCall->getCallee()); Expr** Args = TheCall->getArgs(); unsigned NumArgs = TheCall->getNumArgs(); if (IsMemberOperatorCall) { // If this is a call to a member operator, hide the first argument // from checkCall. // FIXME: Our choice of AST representation here is less than ideal. ++Args; --NumArgs; } checkCall(FDecl, Proto, llvm::makeArrayRef(Args, NumArgs), IsMemberFunction, TheCall->getRParenLoc(), TheCall->getCallee()->getSourceRange(), CallType); IdentifierInfo *FnInfo = FDecl->getIdentifier(); // None of the checks below are needed for functions that don't have // simple names (e.g., C++ conversion functions). if (!FnInfo) return false; CheckAbsoluteValueFunction(TheCall, FDecl, FnInfo); #if 0 // HLSL Change Starts if (getLangOpts().ObjC1) DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs); unsigned CMId = FDecl->getMemoryFunctionKind(); if (CMId == 0) return false; // Handle memory setting and copying functions. if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat) CheckStrlcpycatArguments(TheCall, FnInfo); else if (CMId == Builtin::BIstrncat) CheckStrncatArguments(TheCall, FnInfo); else CheckMemaccessArguments(TheCall, CMId, FnInfo); #endif // HLSL Change Ends CheckHLSLFunctionCall(FDecl, TheCall, Proto); // HLSL Change return false; } bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac, ArrayRef<const Expr *> Args) { #if 0 // HLSL Change Starts VariadicCallType CallType = Method->isVariadic() ? VariadicMethod : VariadicDoesNotApply; checkCall(Method, nullptr, Args, /*IsMemberFunction=*/false, lbrac, Method->getSourceRange(), CallType); #endif // HLSL Change Ends return false; } bool Sema::CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto) { #if 0 // HLSL Change Starts QualType Ty; if (const auto *V = dyn_cast<VarDecl>(NDecl)) Ty = V->getType().getNonReferenceType(); else if (const auto *F = dyn_cast<FieldDecl>(NDecl)) Ty = F->getType().getNonReferenceType(); else return false; if (!Ty->isBlockPointerType() && !Ty->isFunctionPointerType() && !Ty->isFunctionProtoType()) return false; VariadicCallType CallType; if (!Proto || !Proto->isVariadic()) { CallType = VariadicDoesNotApply; } else if (Ty->isBlockPointerType()) { CallType = VariadicBlock; } else { // Ty->isFunctionPointerType() CallType = VariadicFunction; } checkCall(NDecl, Proto, llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()), /*IsMemberFunction=*/false, TheCall->getRParenLoc(), TheCall->getCallee()->getSourceRange(), CallType); #endif // HLSL Change Ends return false; } /// Checks function calls when a FunctionDecl or a NamedDecl is not available, /// such as function pointers returned from functions. 
bool Sema::CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto) {
  VariadicCallType CallType = getVariadicCallType(/*FDecl=*/nullptr, Proto,
                                                  TheCall->getCallee());
  checkCall(/*FDecl=*/nullptr, Proto,
            llvm::makeArrayRef(TheCall->getArgs(), TheCall->getNumArgs()),
            /*IsMemberFunction=*/false, TheCall->getRParenLoc(),
            TheCall->getCallee()->getSourceRange(), CallType);

  return false;
}

static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
  if (Ordering < AtomicExpr::AO_ABI_memory_order_relaxed ||
      Ordering > AtomicExpr::AO_ABI_memory_order_seq_cst)
    return false;

  switch (Op) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("There is no ordering argument for an init");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
    return Ordering != AtomicExpr::AO_ABI_memory_order_release &&
           Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    return Ordering != AtomicExpr::AO_ABI_memory_order_consume &&
           Ordering != AtomicExpr::AO_ABI_memory_order_acquire &&
           Ordering != AtomicExpr::AO_ABI_memory_order_acq_rel;

  default:
    return true;
  }
}

ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                         AtomicExpr::AtomicOp Op) {
  CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
  DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());

  // All these operations take one of the following forms:
  enum {
    // C    __c11_atomic_init(A *, C)
    Init,
    // C    __c11_atomic_load(A *, int)
    Load,
    // void __atomic_load(A *, CP, int)
    Copy,
    // C    __c11_atomic_add(A *, M, int)
    Arithmetic,
    // C    __atomic_exchange_n(A *, CP, int)
    Xchg,
    // void __atomic_exchange(A *, C *, CP, int)
    GNUXchg,
    // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
    C11CmpXchg,
    // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
    GNUCmpXchg
  } Form = Init;
  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 };
  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 };
  // where:
  //   C is an appropriate type,
  //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
  //   CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
  //   M is C if C is an integer, and ptrdiff_t if C is a pointer, and
  //   the int parameters are for orderings.
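  // Illustrative example (hypothetical user code, C11-style atomics):
  // isValidOrderingForOp above encodes the C11 rules that loads may not use
  // release orderings and stores may not use acquire/consume orderings:
  //
  //   _Atomic(int) a;
  //   int v = __c11_atomic_load(&a, __ATOMIC_RELEASE);  // warned: invalid
  //   __c11_atomic_store(&a, 1, __ATOMIC_ACQUIRE);      // warned: invalid
  //   int w = __c11_atomic_load(&a, __ATOMIC_ACQUIRE);  // OK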
static_assert(AtomicExpr::AO__c11_atomic_init == 0 && AtomicExpr::AO__c11_atomic_fetch_xor + 1 == AtomicExpr::AO__atomic_load, "need to update code for modified C11 atomics"); bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init && Op <= AtomicExpr::AO__c11_atomic_fetch_xor; bool IsN = Op == AtomicExpr::AO__atomic_load_n || Op == AtomicExpr::AO__atomic_store_n || Op == AtomicExpr::AO__atomic_exchange_n || Op == AtomicExpr::AO__atomic_compare_exchange_n; bool IsAddSub = false; switch (Op) { case AtomicExpr::AO__c11_atomic_init: Form = Init; break; case AtomicExpr::AO__c11_atomic_load: case AtomicExpr::AO__atomic_load_n: Form = Load; break; case AtomicExpr::AO__c11_atomic_store: case AtomicExpr::AO__atomic_load: case AtomicExpr::AO__atomic_store: case AtomicExpr::AO__atomic_store_n: Form = Copy; break; case AtomicExpr::AO__c11_atomic_fetch_add: case AtomicExpr::AO__c11_atomic_fetch_sub: case AtomicExpr::AO__atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_sub: case AtomicExpr::AO__atomic_add_fetch: case AtomicExpr::AO__atomic_sub_fetch: IsAddSub = true; LLVM_FALLTHROUGH; // HLSL Change case AtomicExpr::AO__c11_atomic_fetch_and: case AtomicExpr::AO__c11_atomic_fetch_or: case AtomicExpr::AO__c11_atomic_fetch_xor: case AtomicExpr::AO__atomic_fetch_and: case AtomicExpr::AO__atomic_fetch_or: case AtomicExpr::AO__atomic_fetch_xor: case AtomicExpr::AO__atomic_fetch_nand: case AtomicExpr::AO__atomic_and_fetch: case AtomicExpr::AO__atomic_or_fetch: case AtomicExpr::AO__atomic_xor_fetch: case AtomicExpr::AO__atomic_nand_fetch: Form = Arithmetic; break; case AtomicExpr::AO__c11_atomic_exchange: case AtomicExpr::AO__atomic_exchange_n: Form = Xchg; break; case AtomicExpr::AO__atomic_exchange: Form = GNUXchg; break; case AtomicExpr::AO__c11_atomic_compare_exchange_strong: case AtomicExpr::AO__c11_atomic_compare_exchange_weak: Form = C11CmpXchg; break; case AtomicExpr::AO__atomic_compare_exchange: case AtomicExpr::AO__atomic_compare_exchange_n: Form = GNUCmpXchg; break; } // Check we have the right number of arguments. if (TheCall->getNumArgs() < NumArgs[Form]) { Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args) << 0 << NumArgs[Form] << TheCall->getNumArgs() << TheCall->getCallee()->getSourceRange(); return ExprError(); } else if (TheCall->getNumArgs() > NumArgs[Form]) { Diag(TheCall->getArg(NumArgs[Form])->getLocStart(), diag::err_typecheck_call_too_many_args) << 0 << NumArgs[Form] << TheCall->getNumArgs() << TheCall->getCallee()->getSourceRange(); return ExprError(); } // Inspect the first argument of the atomic operation. Expr *Ptr = TheCall->getArg(0); Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get(); const PointerType *pointerType = Ptr->getType()->getAs<PointerType>(); if (!pointerType) { Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer) << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } // For a __c11 builtin, this should be a pointer to an _Atomic type. QualType AtomTy = pointerType->getPointeeType(); // 'A' QualType ValType = AtomTy; // 'C' if (IsC11) { if (!AtomTy->isAtomicType()) { Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic) << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } if (AtomTy.isConstQualified()) { Diag(DRE->getLocStart(), diag::err_atomic_op_needs_non_const_atomic) << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } ValType = AtomTy->getAs<AtomicType>()->getValueType(); } // For an arithmetic operation, the implied arithmetic must be well-formed. 
if (Form == Arithmetic) { // gcc does not enforce these rules for GNU atomics, but we do so for sanity. if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) { Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr) << IsC11 << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } if (!IsAddSub && !ValType->isIntegerType()) { Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int) << IsC11 << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } if (IsC11 && ValType->isPointerType() && RequireCompleteType(Ptr->getLocStart(), ValType->getPointeeType(), diag::err_incomplete_type)) { return ExprError(); } } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) { // For __atomic_*_n operations, the value type must be a scalar integral or // pointer type which is 1, 2, 4, 8 or 16 bytes in length. Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr) << IsC11 << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) && !AtomTy->isScalarType()) { // For GNU atomics, require a trivially-copyable type. This is not part of // the GNU atomics specification, but we enforce it for sanity. Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy) << Ptr->getType() << Ptr->getSourceRange(); return ExprError(); } // FIXME: For any builtin other than a load, the ValType must not be // const-qualified. switch (ValType.getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: // okay break; case Qualifiers::OCL_Weak: case Qualifiers::OCL_Strong: case Qualifiers::OCL_Autoreleasing: // FIXME: Can this happen? By this point, ValType should be known // to be trivially copyable. Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership) << ValType << Ptr->getSourceRange(); return ExprError(); } // atomic_fetch_or takes a pointer to a volatile 'A'. We shouldn't let the // volatile-ness of the pointee-type inject itself into the result or the // other operands. ValType.removeLocalVolatile(); QualType ResultType = ValType; if (Form == Copy || Form == GNUXchg || Form == Init) ResultType = Context.VoidTy; else if (Form == C11CmpXchg || Form == GNUCmpXchg) ResultType = Context.BoolTy; // The type of a parameter passed 'by value'. In the GNU atomics, such // arguments are actually passed as pointers. QualType ByValType = ValType; // 'CP' if (!IsC11 && !IsN) ByValType = Ptr->getType(); // The first argument --- the pointer --- has a fixed type; we // deduce the types of the rest of the arguments accordingly. Walk // the remaining arguments, converting them to the deduced value type. for (unsigned i = 1; i != NumArgs[Form]; ++i) { QualType Ty; if (i < NumVals[Form] + 1) { switch (i) { case 1: // The second argument is the non-atomic operand. For arithmetic, this // is always passed by value, and for a compare_exchange it is always // passed by address. For the rest, GNU uses by-address and C11 uses // by-value. assert(Form != Load); if (Form == Init || (Form == Arithmetic && ValType->isIntegerType())) Ty = ValType; else if (Form == Copy || Form == Xchg) Ty = ByValType; else if (Form == Arithmetic) Ty = Context.getPointerDiffType(); else Ty = Context.getPointerType(ValType.getUnqualifiedType()); break; case 2: // The third argument to compare_exchange / GNU exchange is a // (pointer to a) desired value. Ty = ByValType; break; case 3: // The fourth argument to GNU compare_exchange is a 'weak' flag. 
Ty = Context.BoolTy; break; } } else { // The order(s) are always converted to int. Ty = Context.IntTy; } InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, Ty, false); ExprResult Arg = TheCall->getArg(i); Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); if (Arg.isInvalid()) return true; TheCall->setArg(i, Arg.get()); } // Permute the arguments into a 'consistent' order. SmallVector<Expr*, 5> SubExprs; SubExprs.push_back(Ptr); switch (Form) { case Init: // Note, AtomicExpr::getVal1() has a special case for this atomic. SubExprs.push_back(TheCall->getArg(1)); // Val1 break; case Load: SubExprs.push_back(TheCall->getArg(1)); // Order break; case Copy: case Arithmetic: case Xchg: SubExprs.push_back(TheCall->getArg(2)); // Order SubExprs.push_back(TheCall->getArg(1)); // Val1 break; case GNUXchg: // Note, AtomicExpr::getVal2() has a special case for this atomic. SubExprs.push_back(TheCall->getArg(3)); // Order SubExprs.push_back(TheCall->getArg(1)); // Val1 SubExprs.push_back(TheCall->getArg(2)); // Val2 break; case C11CmpXchg: SubExprs.push_back(TheCall->getArg(3)); // Order SubExprs.push_back(TheCall->getArg(1)); // Val1 SubExprs.push_back(TheCall->getArg(4)); // OrderFail SubExprs.push_back(TheCall->getArg(2)); // Val2 break; case GNUCmpXchg: SubExprs.push_back(TheCall->getArg(4)); // Order SubExprs.push_back(TheCall->getArg(1)); // Val1 SubExprs.push_back(TheCall->getArg(5)); // OrderFail SubExprs.push_back(TheCall->getArg(2)); // Val2 SubExprs.push_back(TheCall->getArg(3)); // Weak break; } if (SubExprs.size() >= 2 && Form != Init) { llvm::APSInt Result(32); if (SubExprs[1]->isIntegerConstantExpr(Result, Context) && !isValidOrderingForOp(Result.getSExtValue(), Op)) Diag(SubExprs[1]->getLocStart(), diag::warn_atomic_op_has_invalid_memory_order) << SubExprs[1]->getSourceRange(); } AtomicExpr *AE = new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(), SubExprs, ResultType, Op, TheCall->getRParenLoc()); if ((Op == AtomicExpr::AO__c11_atomic_load || (Op == AtomicExpr::AO__c11_atomic_store)) && Context.AtomicUsesUnsupportedLibcall(AE)) Diag(AE->getLocStart(), diag::err_atomic_load_store_uses_lib) << ((Op == AtomicExpr::AO__c11_atomic_load) ? 0 : 1); return AE; } /// checkBuiltinArgument - Given a call to a builtin function, perform /// normal type-checking on the given argument, updating the call in /// place. This is useful when a builtin function requires custom /// type-checking for some of its arguments but not necessarily all of /// them. /// /// Returns true on error. static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) { FunctionDecl *Fn = E->getDirectCallee(); assert(Fn && "builtin call without direct callee!"); ParmVarDecl *Param = Fn->getParamDecl(ArgIndex); InitializedEntity Entity = InitializedEntity::InitializeParameter(S.Context, Param); ExprResult Arg = E->getArg(0); Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg); if (Arg.isInvalid()) return true; E->setArg(ArgIndex, Arg.get()); return false; } /// SemaBuiltinAtomicOverloaded - We have a call to a function like /// __sync_fetch_and_add, which is an overloaded function based on the pointer /// type of its first argument. The main ActOnCallExpr routines have already /// promoted the types of arguments because all of these calls are prototyped as /// void(...). 
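///
/// For example (illustrative only): given 'int I;', a source-level call
/// '__sync_fetch_and_add(&I, 1)' reaches this routine as a variadic call and
/// is rebound to the concrete builtin '__sync_fetch_and_add_4' on targets
/// where 'int' is 4 bytes.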
///
/// This function goes through and does final semantic checking for these
/// builtins and rewrites the call to target the matching size-specific
/// builtin.
ExprResult
Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
  CallExpr *TheCall = (CallExpr *)TheCallResult.get();
  DeclRefExpr *DRE =
      cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
  FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());

  // Ensure that we have at least one argument to do type inference from.
  if (TheCall->getNumArgs() < 1) {
    Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
      << 0 << 1 << TheCall->getNumArgs()
      << TheCall->getCallee()->getSourceRange();
    return ExprError();
  }

  // Inspect the first argument of the atomic builtin.  This should always be
  // a pointer type, whose element is an integral scalar or pointer type.
  // Because it is a pointer type, we don't have to worry about any implicit
  // casts here.
  // FIXME: We don't allow floating point scalars as input.
  Expr *FirstArg = TheCall->getArg(0);
  ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
  if (FirstArgResult.isInvalid())
    return ExprError();
  FirstArg = FirstArgResult.get();
  TheCall->setArg(0, FirstArg);

  const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
  if (!pointerType) {
    Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
      << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  QualType ValType = pointerType->getPointeeType();
  if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
      !ValType->isBlockPointerType()) {
    Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
      << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  switch (ValType.getObjCLifetime()) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
    // okay
    break;

  case Qualifiers::OCL_Weak:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
      << ValType << FirstArg->getSourceRange();
    return ExprError();
  }

  // Strip any qualifiers off ValType.
  ValType = ValType.getUnqualifiedType();

  // The majority of builtins return a value, but a few have special return
  // types, so allow them to override appropriately below.
  QualType ResultType = ValType;

  // We need to figure out which concrete builtin this maps onto.  For example,
  // __sync_fetch_and_add with a 2 byte object turns into
  // __sync_fetch_and_add_2.
#define BUILTIN_ROW(x) \
  { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
    Builtin::BI##x##_8, Builtin::BI##x##_16 }

  static const unsigned BuiltinIndices[][5] = {
    BUILTIN_ROW(__sync_fetch_and_add),
    BUILTIN_ROW(__sync_fetch_and_sub),
    BUILTIN_ROW(__sync_fetch_and_or),
    BUILTIN_ROW(__sync_fetch_and_and),
    BUILTIN_ROW(__sync_fetch_and_xor),
    BUILTIN_ROW(__sync_fetch_and_nand),

    BUILTIN_ROW(__sync_add_and_fetch),
    BUILTIN_ROW(__sync_sub_and_fetch),
    BUILTIN_ROW(__sync_and_and_fetch),
    BUILTIN_ROW(__sync_or_and_fetch),
    BUILTIN_ROW(__sync_xor_and_fetch),
    BUILTIN_ROW(__sync_nand_and_fetch),

    BUILTIN_ROW(__sync_val_compare_and_swap),
    BUILTIN_ROW(__sync_bool_compare_and_swap),
    BUILTIN_ROW(__sync_lock_test_and_set),
    BUILTIN_ROW(__sync_lock_release),
    BUILTIN_ROW(__sync_swap)
  };
#undef BUILTIN_ROW

  // Determine the index of the size.
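  // (Illustrative: a 4-byte 'int' produces SizeIndex 2 below, selecting the
  // "_4" column of BuiltinIndices above.)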
  unsigned SizeIndex;
  switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
  case 1: SizeIndex = 0; break;
  case 2: SizeIndex = 1; break;
  case 4: SizeIndex = 2; break;
  case 8: SizeIndex = 3; break;
  case 16: SizeIndex = 4; break;
  default:
    Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
      << FirstArg->getType() << FirstArg->getSourceRange();
    return ExprError();
  }

  // Each of these builtins has one pointer argument, followed by some number
  // of values (0, 1 or 2) followed by a potentially empty varargs list of
  // stuff that we ignore.  Find out which row of BuiltinIndices to read from
  // as well as the number of fixed args.
  unsigned BuiltinID = FDecl->getBuiltinID();
  unsigned BuiltinIndex, NumFixed = 1;
  bool WarnAboutSemanticsChange = false;
  switch (BuiltinID) {
  default: llvm_unreachable("Unknown overloaded atomic builtin!");
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    BuiltinIndex = 0;
    break;

  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    BuiltinIndex = 1;
    break;

  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    BuiltinIndex = 2;
    break;

  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    BuiltinIndex = 3;
    break;

  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    BuiltinIndex = 4;
    break;

  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    BuiltinIndex = 5;
    WarnAboutSemanticsChange = true;
    break;

  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    BuiltinIndex = 6;
    break;

  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    BuiltinIndex = 7;
    break;

  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    BuiltinIndex = 8;
    break;

  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    BuiltinIndex = 9;
    break;

  case Builtin::BI__sync_xor_and_fetch:
case Builtin::BI__sync_xor_and_fetch_1: case Builtin::BI__sync_xor_and_fetch_2: case Builtin::BI__sync_xor_and_fetch_4: case Builtin::BI__sync_xor_and_fetch_8: case Builtin::BI__sync_xor_and_fetch_16: BuiltinIndex = 10; break; case Builtin::BI__sync_nand_and_fetch: case Builtin::BI__sync_nand_and_fetch_1: case Builtin::BI__sync_nand_and_fetch_2: case Builtin::BI__sync_nand_and_fetch_4: case Builtin::BI__sync_nand_and_fetch_8: case Builtin::BI__sync_nand_and_fetch_16: BuiltinIndex = 11; WarnAboutSemanticsChange = true; break; case Builtin::BI__sync_val_compare_and_swap: case Builtin::BI__sync_val_compare_and_swap_1: case Builtin::BI__sync_val_compare_and_swap_2: case Builtin::BI__sync_val_compare_and_swap_4: case Builtin::BI__sync_val_compare_and_swap_8: case Builtin::BI__sync_val_compare_and_swap_16: BuiltinIndex = 12; NumFixed = 2; break; case Builtin::BI__sync_bool_compare_and_swap: case Builtin::BI__sync_bool_compare_and_swap_1: case Builtin::BI__sync_bool_compare_and_swap_2: case Builtin::BI__sync_bool_compare_and_swap_4: case Builtin::BI__sync_bool_compare_and_swap_8: case Builtin::BI__sync_bool_compare_and_swap_16: BuiltinIndex = 13; NumFixed = 2; ResultType = Context.BoolTy; break; case Builtin::BI__sync_lock_test_and_set: case Builtin::BI__sync_lock_test_and_set_1: case Builtin::BI__sync_lock_test_and_set_2: case Builtin::BI__sync_lock_test_and_set_4: case Builtin::BI__sync_lock_test_and_set_8: case Builtin::BI__sync_lock_test_and_set_16: BuiltinIndex = 14; break; case Builtin::BI__sync_lock_release: case Builtin::BI__sync_lock_release_1: case Builtin::BI__sync_lock_release_2: case Builtin::BI__sync_lock_release_4: case Builtin::BI__sync_lock_release_8: case Builtin::BI__sync_lock_release_16: BuiltinIndex = 15; NumFixed = 0; ResultType = Context.VoidTy; break; case Builtin::BI__sync_swap: case Builtin::BI__sync_swap_1: case Builtin::BI__sync_swap_2: case Builtin::BI__sync_swap_4: case Builtin::BI__sync_swap_8: case Builtin::BI__sync_swap_16: BuiltinIndex = 16; break; } // Now that we know how many fixed arguments we expect, first check that we // have at least that many. if (TheCall->getNumArgs() < 1+NumFixed) { Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) << 0 << 1+NumFixed << TheCall->getNumArgs() << TheCall->getCallee()->getSourceRange(); return ExprError(); } if (WarnAboutSemanticsChange) { Diag(TheCall->getLocEnd(), diag::warn_sync_fetch_and_nand_semantics_change) << TheCall->getCallee()->getSourceRange(); } // Get the decl for the concrete builtin from this, we can tell what the // concrete integer type we should convert to is. unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex]; const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID); FunctionDecl *NewBuiltinDecl; if (NewBuiltinID == BuiltinID) NewBuiltinDecl = FDecl; else { // Perform builtin lookup to avoid redeclaring it. DeclarationName DN(&Context.Idents.get(NewBuiltinName)); LookupResult Res(*this, DN, DRE->getLocStart(), LookupOrdinaryName); LookupName(Res, TUScope, /*AllowBuiltinCreation=*/true); assert(Res.getFoundDecl()); NewBuiltinDecl = dyn_cast<FunctionDecl>(Res.getFoundDecl()); if (!NewBuiltinDecl) return ExprError(); } // The first argument --- the pointer --- has a fixed type; we // deduce the types of the rest of the arguments accordingly. Walk // the remaining arguments, converting them to the deduced value type. 
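  // (Illustrative: for '__sync_fetch_and_add(&I, 1L)' with 'int I', the
  // 'long' operand is converted to the deduced value type 'int' here.)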
  for (unsigned i = 0; i != NumFixed; ++i) {
    ExprResult Arg = TheCall->getArg(i+1);

    // GCC does an implicit conversion to the pointer or integer ValType.  This
    // can fail in some cases (1i -> int**), check for this error case now.
    // Initialize the argument.
    InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
                                                   ValType, /*consume*/ false);
    Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
    if (Arg.isInvalid())
      return ExprError();

    // Okay, we have something that *can* be converted to the right type.
    // Check to see if there is a potentially weird extension going on here.
    // This can happen when you do an atomic operation on something like an
    // char* and pass in 42.  The 42 gets converted to char.  This is even more
    // strange for things like 45.123 -> char, etc.
    // FIXME: Do this check.
    TheCall->setArg(i+1, Arg.get());
  }

  ASTContext& Context = this->getASTContext();

  // Create a new DeclRefExpr to refer to the new decl.
  DeclRefExpr* NewDRE = DeclRefExpr::Create(
      Context,
      DRE->getQualifierLoc(),
      SourceLocation(),
      NewBuiltinDecl,
      /*enclosing*/ false,
      DRE->getLocation(),
      Context.BuiltinFnTy,
      DRE->getValueKind());

  // Set the callee in the CallExpr.
  // FIXME: This loses syntactic information.
  QualType CalleePtrTy = Context.getPointerType(NewBuiltinDecl->getType());
  ExprResult PromotedCall = ImpCastExprToType(NewDRE, CalleePtrTy,
                                              CK_BuiltinFnToFnPtr);
  TheCall->setCallee(PromotedCall.get());

  // Change the result type of the call to match the original value type. This
  // is arbitrary, but the codegen for these builtins is designed to handle it
  // gracefully.
  TheCall->setType(ResultType);

  return TheCallResult;
}

/// CheckObjCString - Checks that the argument to the builtin
/// CFString constructor is correct.
/// Note: It might also make sense to do the UTF-16 conversion here (would
/// simplify the backend).
bool Sema::CheckObjCString(Expr *Arg) {
#if 0 // HLSL Change Starts
  Arg = Arg->IgnoreParenCasts();
  StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);

  if (!Literal || !Literal->isAscii()) {
    Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
      << Arg->getSourceRange();
    return true;
  }

  if (Literal->containsNonAsciiOrNull()) {
    StringRef String = Literal->getString();
    unsigned NumBytes = String.size();
    SmallVector<UTF16, 128> ToBuf(NumBytes);
    const UTF8 *FromPtr = (const UTF8 *)String.data();
    UTF16 *ToPtr = &ToBuf[0];

    ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
                                                 &ToPtr, ToPtr + NumBytes,
                                                 strictConversion);
    // Check for conversion failure.
    if (Result != conversionOK)
      Diag(Arg->getLocStart(), diag::warn_cfstring_truncated)
        << Arg->getSourceRange();
  }
#endif // HLSL Change Ends
  return false;
}

/// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity.
/// Emit an error and return true on failure, return false on success.
bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
  Expr *Fn = TheCall->getCallee();
  if (TheCall->getNumArgs() > 2) {
    Diag(TheCall->getArg(2)->getLocStart(),
         diag::err_typecheck_call_too_many_args)
      << 0 /*function call*/ << 2 << TheCall->getNumArgs()
      << Fn->getSourceRange()
      << SourceRange(TheCall->getArg(2)->getLocStart(),
                     (*(TheCall->arg_end()-1))->getLocEnd());
    return true;
  }

  if (TheCall->getNumArgs() < 2) {
    return Diag(TheCall->getLocEnd(),
      diag::err_typecheck_call_too_few_args_at_least)
      << 0 /*function call*/ << 2 << TheCall->getNumArgs();
  }

  // Type-check the first argument normally.
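  // (Illustrative: in 'void f(int N, ...) { va_list Ap; va_start(Ap, N); }',
  // the first argument checked here is 'Ap'; the checks below then verify
  // that f is variadic and that N is f's last named parameter.)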
if (checkBuiltinArgument(*this, TheCall, 0)) return true; // Determine whether the current function is variadic or not. BlockScopeInfo *CurBlock = getCurBlock(); bool isVariadic; if (CurBlock) isVariadic = CurBlock->TheDecl->isVariadic(); else if (FunctionDecl *FD = getCurFunctionDecl()) isVariadic = FD->isVariadic(); else isVariadic = getCurMethodDecl()->isVariadic(); if (!isVariadic) { Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function); return true; } // Verify that the second argument to the builtin is the last argument of the // current function or method. bool SecondArgIsLastNamedArgument = false; const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts(); // These are valid if SecondArgIsLastNamedArgument is false after the next // block. QualType Type; SourceLocation ParamLoc; if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) { if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) { // FIXME: This isn't correct for methods (results in bogus warning). // Get the last formal in the current function. const ParmVarDecl *LastArg; if (CurBlock) LastArg = *(CurBlock->TheDecl->param_end()-1); else if (FunctionDecl *FD = getCurFunctionDecl()) LastArg = *(FD->param_end()-1); else LastArg = *(getCurMethodDecl()->param_end()-1); SecondArgIsLastNamedArgument = PV == LastArg; Type = PV->getType(); ParamLoc = PV->getLocation(); } } if (!SecondArgIsLastNamedArgument) Diag(TheCall->getArg(1)->getLocStart(), diag::warn_second_parameter_of_va_start_not_last_named_argument); else if (Type->isReferenceType()) { Diag(Arg->getLocStart(), diag::warn_va_start_of_reference_type_is_undefined); Diag(ParamLoc, diag::note_parameter_type) << Type; } TheCall->setType(Context.VoidTy); return false; } bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) { // void __va_start(va_list *ap, const char *named_addr, size_t slot_size, // const char *named_addr); Expr *Func = Call->getCallee(); if (Call->getNumArgs() < 3) return Diag(Call->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) << 0 /*function call*/ << 3 << Call->getNumArgs(); // Determine whether the current function is variadic or not. bool IsVariadic; if (BlockScopeInfo *CurBlock = getCurBlock()) IsVariadic = CurBlock->TheDecl->isVariadic(); else if (FunctionDecl *FD = getCurFunctionDecl()) IsVariadic = FD->isVariadic(); else if (ObjCMethodDecl *MD = getCurMethodDecl()) IsVariadic = MD->isVariadic(); else llvm_unreachable("unexpected statement type"); if (!IsVariadic) { Diag(Func->getLocStart(), diag::err_va_start_used_in_non_variadic_function); return true; } // Type-check the first argument normally. if (checkBuiltinArgument(*this, Call, 0)) return true; const struct { unsigned ArgNo; QualType Type; } ArgumentTypes[] = { { 1, Context.getPointerType(Context.CharTy.withConst()) }, { 2, Context.getSizeType() }, }; for (const auto &AT : ArgumentTypes) { const Expr *Arg = Call->getArg(AT.ArgNo)->IgnoreParens(); if (Arg->getType().getCanonicalType() == AT.Type.getCanonicalType()) continue; Diag(Arg->getLocStart(), diag::err_typecheck_convert_incompatible) << Arg->getType() << AT.Type << 1 /* different class */ << 0 /* qualifier difference */ << 3 /* parameter mismatch */ << AT.ArgNo + 1 << Arg->getType() << AT.Type; } return false; } /// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and /// friends. This is declared to take (...), so we have to check everything. 
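///
/// For example (illustrative), '__builtin_isgreater(1.0f, 2.0)' is accepted:
/// the operands undergo the usual arithmetic conversions to a common real
/// floating type ('double' here); a non-floating common type is diagnosed.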
bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
  if (TheCall->getNumArgs() < 2)
    return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
      << 0 << 2 << TheCall->getNumArgs()/*function call*/;
  if (TheCall->getNumArgs() > 2)
    return Diag(TheCall->getArg(2)->getLocStart(),
                diag::err_typecheck_call_too_many_args)
      << 0 /*function call*/ << 2 << TheCall->getNumArgs()
      << SourceRange(TheCall->getArg(2)->getLocStart(),
                     (*(TheCall->arg_end()-1))->getLocEnd());

  ExprResult OrigArg0 = TheCall->getArg(0);
  ExprResult OrigArg1 = TheCall->getArg(1);

  // Do standard promotions between the two arguments, returning their common
  // type.
  QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
  if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
    return true;

  // Make sure any conversions are pushed back into the call; this is
  // type safe since unordered compare builtins are declared as "_Bool
  // foo(...)".
  TheCall->setArg(0, OrigArg0.get());
  TheCall->setArg(1, OrigArg1.get());

  if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
    return false;

  // If the common type isn't a real floating type, then the arguments were
  // invalid for this operation.
  if (Res.isNull() || !Res->isRealFloatingType())
    return Diag(OrigArg0.get()->getLocStart(),
                diag::err_typecheck_call_invalid_ordered_compare)
      << OrigArg0.get()->getType() << OrigArg1.get()->getType()
      << SourceRange(OrigArg0.get()->getLocStart(),
                     OrigArg1.get()->getLocEnd());

  return false;
}

/// SemaBuiltinFPClassification - Handle functions like __builtin_isnan and
/// friends.  This is declared to take (...), so we have to check everything.
/// We expect the last argument to be a floating point value.
bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
  if (TheCall->getNumArgs() < NumArgs)
    return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
      << 0 << NumArgs << TheCall->getNumArgs()/*function call*/;
  if (TheCall->getNumArgs() > NumArgs)
    return Diag(TheCall->getArg(NumArgs)->getLocStart(),
                diag::err_typecheck_call_too_many_args)
      << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
      << SourceRange(TheCall->getArg(NumArgs)->getLocStart(),
                     (*(TheCall->arg_end()-1))->getLocEnd());

  Expr *OrigArg = TheCall->getArg(NumArgs-1);

  if (OrigArg->isTypeDependent())
    return false;

  // This operation requires a non-_Complex floating-point number.
  if (!OrigArg->getType()->isRealFloatingType())
    return Diag(OrigArg->getLocStart(),
                diag::err_typecheck_call_invalid_unary_fp)
      << OrigArg->getType() << OrigArg->getSourceRange();

  // If this is an implicit conversion from float -> double, remove it.
  if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
    Expr *CastArg = Cast->getSubExpr();
    if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
      assert(Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) &&
             "promotion from float to double is the only expected cast here");
      Cast->setSubExpr(nullptr);
      TheCall->setArg(NumArgs-1, CastArg);
    }
  }

  return false;
}

/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
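//
// Illustrative forms accepted here, assuming 4-element vector arguments:
//   __builtin_shufflevector(a, mask)           // unary, vector mask
//   __builtin_shufflevector(a, b, 0, 4, 1, 5)  // binary, constant indices
// Each scalar index must be a constant in [0, 2*N-1], or -1 for an
// undefined result element.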
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) { if (TheCall->getNumArgs() < 2) return ExprError(Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least) << 0 /*function call*/ << 2 << TheCall->getNumArgs() << TheCall->getSourceRange()); // Determine which of the following types of shufflevector we're checking: // 1) unary, vector mask: (lhs, mask) // 2) binary, vector mask: (lhs, rhs, mask) // 3) binary, scalar mask: (lhs, rhs, index, ..., index) QualType resType = TheCall->getArg(0)->getType(); unsigned numElements = 0; if (!TheCall->getArg(0)->isTypeDependent() && !TheCall->getArg(1)->isTypeDependent()) { QualType LHSType = TheCall->getArg(0)->getType(); QualType RHSType = TheCall->getArg(1)->getType(); if (!LHSType->isVectorType() || !RHSType->isVectorType()) return ExprError(Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector) << SourceRange(TheCall->getArg(0)->getLocStart(), TheCall->getArg(1)->getLocEnd())); numElements = LHSType->getAs<VectorType>()->getNumElements(); unsigned numResElements = TheCall->getNumArgs() - 2; // Check to see if we have a call with 2 vector arguments, the unary shuffle // with mask. If so, verify that RHS is an integer vector type with the // same number of elts as lhs. if (TheCall->getNumArgs() == 2) { if (!RHSType->hasIntegerRepresentation() || RHSType->getAs<VectorType>()->getNumElements() != numElements) return ExprError(Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector) << SourceRange(TheCall->getArg(1)->getLocStart(), TheCall->getArg(1)->getLocEnd())); } else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) { return ExprError(Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector) << SourceRange(TheCall->getArg(0)->getLocStart(), TheCall->getArg(1)->getLocEnd())); } else if (numElements != numResElements) { QualType eltType = LHSType->getAs<VectorType>()->getElementType(); resType = Context.getVectorType(eltType, numResElements, VectorType::GenericVector); } } for (unsigned i = 2; i < TheCall->getNumArgs(); i++) { if (TheCall->getArg(i)->isTypeDependent() || TheCall->getArg(i)->isValueDependent()) continue; llvm::APSInt Result(32); if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context)) return ExprError(Diag(TheCall->getLocStart(), diag::err_shufflevector_nonconstant_argument) << TheCall->getArg(i)->getSourceRange()); // Allow -1 which will be translated to undef in the IR. 
if (Result.isSigned() && Result.isAllOnesValue()) continue; if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2) return ExprError(Diag(TheCall->getLocStart(), diag::err_shufflevector_argument_too_large) << TheCall->getArg(i)->getSourceRange()); } SmallVector<Expr*, 32> exprs; for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) { exprs.push_back(TheCall->getArg(i)); TheCall->setArg(i, nullptr); } return new (Context) ShuffleVectorExpr(Context, exprs, resType, TheCall->getCallee()->getLocStart(), TheCall->getRParenLoc()); } /// SemaConvertVectorExpr - Handle __builtin_convertvector ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc) { ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; QualType DstTy = TInfo->getType(); QualType SrcTy = E->getType(); if (!SrcTy->isVectorType() && !SrcTy->isDependentType()) return ExprError(Diag(BuiltinLoc, diag::err_convertvector_non_vector) << E->getSourceRange()); if (!DstTy->isVectorType() && !DstTy->isDependentType()) return ExprError(Diag(BuiltinLoc, diag::err_convertvector_non_vector_type)); if (!SrcTy->isDependentType() && !DstTy->isDependentType()) { unsigned SrcElts = SrcTy->getAs<VectorType>()->getNumElements(); unsigned DstElts = DstTy->getAs<VectorType>()->getNumElements(); if (SrcElts != DstElts) return ExprError(Diag(BuiltinLoc, diag::err_convertvector_incompatible_vector) << E->getSourceRange()); } return new (Context) ConvertVectorExpr(E, TInfo, DstTy, VK, OK, BuiltinLoc, RParenLoc); } /// SemaBuiltinPrefetch - Handle __builtin_prefetch. // This is declared to take (const void*, ...) and can take two // optional constant int args. bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) { unsigned NumArgs = TheCall->getNumArgs(); if (NumArgs > 3) return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_many_args_at_most) << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); // Argument 0 is checked for us and the remaining arguments must be // constant integers. for (unsigned i = 1; i != NumArgs; ++i) if (SemaBuiltinConstantArgRange(TheCall, i, 0, i == 1 ? 1 : 3)) return true; return false; } /// SemaBuiltinAssume - Handle __assume (MS Extension). // __assume does not evaluate its arguments, and should warn if its argument // has side effects. bool Sema::SemaBuiltinAssume(CallExpr *TheCall) { Expr *Arg = TheCall->getArg(0); if (Arg->isInstantiationDependent()) return false; if (Arg->HasSideEffects(Context)) Diag(Arg->getLocStart(), diag::warn_assume_side_effects) << Arg->getSourceRange() << cast<FunctionDecl>(TheCall->getCalleeDecl())->getIdentifier(); return false; } /// Handle __builtin_assume_aligned. This is declared /// as (const void*, size_t, ...) and can take one optional constant int arg. bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) { unsigned NumArgs = TheCall->getNumArgs(); if (NumArgs > 3) return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_many_args_at_most) << 0 /*function call*/ << 3 << NumArgs << TheCall->getSourceRange(); // The alignment must be a constant integer. Expr *Arg = TheCall->getArg(1); // We can't check the value of a dependent argument. 
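  // (Illustrative: '__builtin_assume_aligned(P, 32)' passes the check below,
  // while an alignment of 24 is rejected as not a power of two.)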
if (!Arg->isTypeDependent() && !Arg->isValueDependent()) { llvm::APSInt Result; if (SemaBuiltinConstantArg(TheCall, 1, Result)) return true; if (!Result.isPowerOf2()) return Diag(TheCall->getLocStart(), diag::err_alignment_not_power_of_two) << Arg->getSourceRange(); } if (NumArgs > 2) { ExprResult Arg(TheCall->getArg(2)); InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, Context.getSizeType(), false); Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg); if (Arg.isInvalid()) return true; TheCall->setArg(2, Arg.get()); } return false; } /// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr /// TheCall is a constant expression. bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result) { Expr *Arg = TheCall->getArg(ArgNum); DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts()); FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl()); if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; if (!Arg->isIntegerConstantExpr(Result, Context)) return Diag(TheCall->getLocStart(), diag::err_constant_integer_arg_type) << FDecl->getDeclName() << Arg->getSourceRange(); return false; } /// SemaBuiltinConstantArgRange - Handle a check if argument ArgNum of CallExpr /// TheCall is a constant expression in the range [Low, High]. bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High) { llvm::APSInt Result; // We can't check the value of a dependent argument. Expr *Arg = TheCall->getArg(ArgNum); if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; // Check constant-ness first. if (SemaBuiltinConstantArg(TheCall, ArgNum, Result)) return true; if (Result.getSExtValue() < Low || Result.getSExtValue() > High) return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range) << Low << High << Arg->getSourceRange(); return false; } /// SemaBuiltinARMSpecialReg - Handle a check if argument ArgNum of CallExpr /// TheCall is an ARM/AArch64 special register string literal. bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName) { #if 1 // HLSL Change - remove unsupported builtins return false; #else bool IsARMBuiltin = BuiltinID == ARM::BI__builtin_arm_rsr64 || BuiltinID == ARM::BI__builtin_arm_wsr64 || BuiltinID == ARM::BI__builtin_arm_rsr || BuiltinID == ARM::BI__builtin_arm_rsrp || BuiltinID == ARM::BI__builtin_arm_wsr || BuiltinID == ARM::BI__builtin_arm_wsrp; bool IsAArch64Builtin = BuiltinID == AArch64::BI__builtin_arm_rsr64 || BuiltinID == AArch64::BI__builtin_arm_wsr64 || BuiltinID == AArch64::BI__builtin_arm_rsr || BuiltinID == AArch64::BI__builtin_arm_rsrp || BuiltinID == AArch64::BI__builtin_arm_wsr || BuiltinID == AArch64::BI__builtin_arm_wsrp; assert((IsARMBuiltin || IsAArch64Builtin) && "Unexpected ARM builtin."); // We can't check the value of a dependent argument. Expr *Arg = TheCall->getArg(ArgNum); if (Arg->isTypeDependent() || Arg->isValueDependent()) return false; // Check if the argument is a string literal. if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal) << Arg->getSourceRange(); // Check the type of special register given. 
StringRef Reg = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); SmallVector<StringRef, 6> Fields; Reg.split(Fields, ":"); if (Fields.size() != ExpectedFieldNum && !(AllowName && Fields.size() == 1)) return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg) << Arg->getSourceRange(); // If the string is the name of a register then we cannot check that it is // valid here but if the string is of one the forms described in ACLE then we // can check that the supplied fields are integers and within the valid // ranges. if (Fields.size() > 1) { bool FiveFields = Fields.size() == 5; bool ValidString = true; if (IsARMBuiltin) { ValidString &= Fields[0].startswith_lower("cp") || Fields[0].startswith_lower("p"); if (ValidString) Fields[0] = Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1); ValidString &= Fields[2].startswith_lower("c"); if (ValidString) Fields[2] = Fields[2].drop_front(1); if (FiveFields) { ValidString &= Fields[3].startswith_lower("c"); if (ValidString) Fields[3] = Fields[3].drop_front(1); } } SmallVector<int, 5> Ranges; if (FiveFields) Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 7, 15, 15}); else Ranges.append({15, 7, 15}); for (unsigned i=0; i<Fields.size(); ++i) { int IntField; ValidString &= !Fields[i].getAsInteger(10, IntField); ValidString &= (IntField >= 0 && IntField <= Ranges[i]); } if (!ValidString) return Diag(TheCall->getLocStart(), diag::err_arm_invalid_specialreg) << Arg->getSourceRange(); } else if (IsAArch64Builtin && Fields.size() == 1) { // If the register name is one of those that appear in the condition below // and the special register builtin being used is one of the write builtins, // then we require that the argument provided for writing to the register // is an integer constant expression. This is because it will be lowered to // an MSR (immediate) instruction, so we need to know the immediate at // compile time. if (TheCall->getNumArgs() != 2) return false; std::string RegLower = Reg.lower(); if (RegLower != "spsel" && RegLower != "daifset" && RegLower != "daifclr" && RegLower != "pan" && RegLower != "uao") return false; return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15); } return false; #endif // HLSL Change Ends } /// SemaBuiltinCpuSupports - Handle __builtin_cpu_supports(char *). /// This checks that the target supports __builtin_cpu_supports and /// that the string argument is constant and valid. bool Sema::SemaBuiltinCpuSupports(CallExpr *TheCall) { Expr *Arg = TheCall->getArg(0); // Check if the argument is a string literal. if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) return Diag(TheCall->getLocStart(), diag::err_expr_not_string_literal) << Arg->getSourceRange(); // Check the contents of the string. StringRef Feature = cast<StringLiteral>(Arg->IgnoreParenImpCasts())->getString(); if (!Context.getTargetInfo().validateCpuSupports(Feature)) return Diag(TheCall->getLocStart(), diag::err_invalid_cpu_supports) << Arg->getSourceRange(); return false; } /// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val). /// This checks that the target supports __builtin_longjmp and /// that val is a constant 1. bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) { if (!Context.getTargetInfo().hasSjLjLowering()) return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_unsupported) << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd()); Expr *Arg = TheCall->getArg(1); llvm::APSInt Result; // TODO: This is less than ideal. Overload this to take a value. 
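  // (Illustrative: only '__builtin_longjmp(Env, 1)' is accepted; any constant
  // value other than 1 is rejected just below.)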
if (SemaBuiltinConstantArg(TheCall, 1, Result)) return true; if (Result != 1) return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val) << SourceRange(Arg->getLocStart(), Arg->getLocEnd()); return false; } /// SemaBuiltinSetjmp - Handle __builtin_setjmp(void *env[5]). /// This checks that the target supports __builtin_setjmp. bool Sema::SemaBuiltinSetjmp(CallExpr *TheCall) { if (!Context.getTargetInfo().hasSjLjLowering()) return Diag(TheCall->getLocStart(), diag::err_builtin_setjmp_unsupported) << SourceRange(TheCall->getLocStart(), TheCall->getLocEnd()); return false; } #if 1 // HLSL Change Starts - no support for format strings bool Sema::hasCStrMethod(const Expr *) { return false; } #else namespace { enum StringLiteralCheckType { SLCT_NotALiteral, SLCT_UncheckedLiteral, SLCT_CheckedLiteral }; } // Determine if an expression is a string literal or constant string. // If this function returns false on the arguments to a function expecting a // format string, we will usually need to emit a warning. // True string literals are then checked by CheckFormatString. static StringLiteralCheckType checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, Sema::FormatStringType Type, Sema::VariadicCallType CallType, bool InFunctionCall, llvm::SmallBitVector &CheckedVarArgs) { tryAgain: if (E->isTypeDependent() || E->isValueDependent()) return SLCT_NotALiteral; E = E->IgnoreParenCasts(); if (E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)) // Technically -Wformat-nonliteral does not warn about this case. // The behavior of printf and friends in this case is implementation // dependent. Ideally if the format string cannot be null then // it should have a 'nonnull' attribute in the function prototype. return SLCT_UncheckedLiteral; switch (E->getStmtClass()) { case Stmt::BinaryConditionalOperatorClass: case Stmt::ConditionalOperatorClass: { // The expression is a literal if both sub-expressions were, and it was // completely checked only if both sub-expressions were checked. const AbstractConditionalOperator *C = cast<AbstractConditionalOperator>(E); StringLiteralCheckType Left = checkFormatStringExpr(S, C->getTrueExpr(), Args, HasVAListArg, format_idx, firstDataArg, Type, CallType, InFunctionCall, CheckedVarArgs); if (Left == SLCT_NotALiteral) return SLCT_NotALiteral; StringLiteralCheckType Right = checkFormatStringExpr(S, C->getFalseExpr(), Args, HasVAListArg, format_idx, firstDataArg, Type, CallType, InFunctionCall, CheckedVarArgs); return Left < Right ? Left : Right; } case Stmt::ImplicitCastExprClass: { E = cast<ImplicitCastExpr>(E)->getSubExpr(); goto tryAgain; } case Stmt::OpaqueValueExprClass: if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) { E = src; goto tryAgain; } return SLCT_NotALiteral; case Stmt::PredefinedExprClass: // While __func__, etc., are technically not string literals, they // cannot contain format specifiers and thus are not a security // liability. return SLCT_UncheckedLiteral; case Stmt::DeclRefExprClass: { const DeclRefExpr *DR = cast<DeclRefExpr>(E); // As an exception, do not flag errors for variables binding to // const string literals. 
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) { bool isConstant = false; QualType T = DR->getType(); if (const ArrayType *AT = S.Context.getAsArrayType(T)) { isConstant = AT->getElementType().isConstant(S.Context); } else if (const PointerType *PT = T->getAs<PointerType>()) { isConstant = T.isConstant(S.Context) && PT->getPointeeType().isConstant(S.Context); } else if (T->isObjCObjectPointerType()) { // In ObjC, there is usually no "const ObjectPointer" type, // so don't check if the pointee type is constant. isConstant = T.isConstant(S.Context); } if (isConstant) { if (const Expr *Init = VD->getAnyInitializer()) { // Look through initializers like const char c[] = { "foo" } if (const InitListExpr *InitList = dyn_cast<InitListExpr>(Init)) { if (InitList->isStringLiteralInit()) Init = InitList->getInit(0)->IgnoreParenImpCasts(); } return checkFormatStringExpr(S, Init, Args, HasVAListArg, format_idx, firstDataArg, Type, CallType, /*InFunctionCall*/false, CheckedVarArgs); } } // For vprintf* functions (i.e., HasVAListArg==true), we add a // special check to see if the format string is a function parameter // of the function calling the printf function. If the function // has an attribute indicating it is a printf-like function, then we // should suppress warnings concerning non-literals being used in a call // to a vprintf function. For example: // // void // logmessage(char const *fmt __attribute__ (format (printf, 1, 2)), ...){ // va_list ap; // va_start(ap, fmt); // vprintf(fmt, ap); // Do NOT emit a warning about "fmt". // ... // } if (HasVAListArg) { if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) { if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) { int PVIndex = PV->getFunctionScopeIndex() + 1; for (const auto *PVFormat : ND->specific_attrs<FormatAttr>()) { // adjust for implicit parameter if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) if (MD->isInstance()) ++PVIndex; // We also check if the formats are compatible. // We can't pass a 'scanf' string to a 'printf' function. 
if (PVIndex == PVFormat->getFormatIdx() && Type == S.GetFormatStringType(PVFormat)) return SLCT_UncheckedLiteral; } } } } } return SLCT_NotALiteral; } case Stmt::CallExprClass: case Stmt::CXXMemberCallExprClass: { const CallExpr *CE = cast<CallExpr>(E); if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) { if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) { unsigned ArgIndex = FA->getFormatIdx(); if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND)) if (MD->isInstance()) --ArgIndex; const Expr *Arg = CE->getArg(ArgIndex - 1); return checkFormatStringExpr(S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, CallType, InFunctionCall, CheckedVarArgs); } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { unsigned BuiltinID = FD->getBuiltinID(); if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString || BuiltinID == Builtin::BI__builtin___NSStringMakeConstantString) { const Expr *Arg = CE->getArg(0); return checkFormatStringExpr(S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type, CallType, InFunctionCall, CheckedVarArgs); } } } return SLCT_NotALiteral; } case Stmt::ObjCStringLiteralClass: case Stmt::StringLiteralClass: { const StringLiteral *StrE = nullptr; if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E)) StrE = ObjCFExpr->getString(); else StrE = cast<StringLiteral>(E); if (StrE) { S.CheckFormatString(StrE, E, Args, HasVAListArg, format_idx, firstDataArg, Type, InFunctionCall, CallType, CheckedVarArgs); return SLCT_CheckedLiteral; } return SLCT_NotALiteral; } default: return SLCT_NotALiteral; } } Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) { return llvm::StringSwitch<FormatStringType>(Format->getType()->getName()) .Case("scanf", FST_Scanf) .Cases("printf", "printf0", FST_Printf) .Cases("NSString", "CFString", FST_NSString) .Case("strftime", FST_Strftime) .Case("strfmon", FST_Strfmon) .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf) .Case("freebsd_kprintf", FST_FreeBSDKPrintf) .Case("os_trace", FST_OSTrace) .Default(FST_Unknown); } /// CheckFormatArguments - Check calls to printf and scanf (and similar /// functions) for correct use of format strings. /// Returns true if a format string has been fully checked. bool Sema::CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs) { FormatStringInfo FSI; if (getFormatStringInfo(Format, IsCXXMember, &FSI)) return CheckFormatArguments(Args, FSI.HasVAListArg, FSI.FormatIdx, FSI.FirstDataArg, GetFormatStringType(Format), CallType, Loc, Range, CheckedVarArgs); return false; } bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs) { // CHECK: printf/scanf-like function is called with no format string. if (format_idx >= Args.size()) { Diag(Loc, diag::warn_missing_format_string) << Range; return false; } const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts(); // CHECK: format string is not a string literal. // // Dynamically generated format strings are difficult to // automatically vet at compile time. 
Requiring that format strings // are string literals: (1) permits the checking of format strings by // the compiler and thereby (2) can practically remove the source of // many format string exploits. // Format string can be either ObjC string (e.g. @"%d") or // C string (e.g. "%d") // ObjC string uses the same format specifiers as C string, so we can use // the same format string checking logic for both ObjC and C strings. StringLiteralCheckType CT = checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg, format_idx, firstDataArg, Type, CallType, /*IsFunctionCall*/true, CheckedVarArgs); if (CT != SLCT_NotALiteral) // Literal format string found, check done! return CT == SLCT_CheckedLiteral; // Strftime is particular as it always uses a single 'time' argument, // so it is safe to pass a non-literal string. if (Type == FST_Strftime) return false; // Do not emit diag when the string param is a macro expansion and the // format is either NSString or CFString. This is a hack to prevent // diag when using the NSLocalizedString and CFCopyLocalizedString macros // which are usually used in place of NS and CF string literals. if (Type == FST_NSString && SourceMgr.isInSystemMacro(Args[format_idx]->getLocStart())) return false; // If there are no arguments specified, warn with -Wformat-security, otherwise // warn only with -Wformat-nonliteral. if (Args.size() == firstDataArg) Diag(Args[format_idx]->getLocStart(), diag::warn_format_nonliteral_noargs) << OrigFormatExpr->getSourceRange(); else Diag(Args[format_idx]->getLocStart(), diag::warn_format_nonliteral) << OrigFormatExpr->getSourceRange(); return false; } namespace { class CheckFormatHandler : public analyze_format_string::FormatStringHandler { protected: Sema &S; const StringLiteral *FExpr; const Expr *OrigFormatExpr; const unsigned FirstDataArg; const unsigned NumDataArgs; const char *Beg; // Start of format string. 
const bool HasVAListArg; ArrayRef<const Expr *> Args; unsigned FormatIdx; llvm::SmallBitVector CoveredArgs; bool usesPositionalArgs; bool atFirstArg; bool inFunctionCall; Sema::VariadicCallType CallType; llvm::SmallBitVector &CheckedVarArgs; public: CheckFormatHandler(Sema &s, const StringLiteral *fexpr, const Expr *origFormatExpr, unsigned firstDataArg, unsigned numDataArgs, const char *beg, bool hasVAListArg, ArrayRef<const Expr *> Args, unsigned formatIdx, bool inFunctionCall, Sema::VariadicCallType callType, llvm::SmallBitVector &CheckedVarArgs) : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg), HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx), usesPositionalArgs(false), atFirstArg(true), inFunctionCall(inFunctionCall), CallType(callType), CheckedVarArgs(CheckedVarArgs) { CoveredArgs.resize(numDataArgs); CoveredArgs.reset(); } void DoneProcessing(); void HandleIncompleteSpecifier(const char *startSpecifier, unsigned specifierLen) override; void HandleInvalidLengthModifier( const analyze_format_string::FormatSpecifier &FS, const analyze_format_string::ConversionSpecifier &CS, const char *startSpecifier, unsigned specifierLen, unsigned DiagID); void HandleNonStandardLengthModifier( const analyze_format_string::FormatSpecifier &FS, const char *startSpecifier, unsigned specifierLen); void HandleNonStandardConversionSpecifier( const analyze_format_string::ConversionSpecifier &CS, const char *startSpecifier, unsigned specifierLen); void HandlePosition(const char *startPos, unsigned posLen) override; void HandleInvalidPosition(const char *startSpecifier, unsigned specifierLen, analyze_format_string::PositionContext p) override; void HandleZeroPosition(const char *startPos, unsigned posLen) override; void HandleNullChar(const char *nullCharacter) override; template <typename Range> static void EmitFormatDiagnostic(Sema &S, bool inFunctionCall, const Expr *ArgumentExpr, PartialDiagnostic PDiag, SourceLocation StringLoc, bool IsStringLocation, Range StringRange, ArrayRef<FixItHint> Fixit = None); protected: bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc, const char *startSpec, unsigned specifierLen, const char *csStart, unsigned csLen); void HandlePositionalNonpositionalArgs(SourceLocation Loc, const char *startSpec, unsigned specifierLen); SourceRange getFormatStringRange(); CharSourceRange getSpecifierRange(const char *startSpecifier, unsigned specifierLen); SourceLocation getLocationOfByte(const char *x); const Expr *getDataArg(unsigned i) const; bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS, const analyze_format_string::ConversionSpecifier &CS, const char *startSpecifier, unsigned specifierLen, unsigned argIndex); template <typename Range> void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, bool IsStringLocation, Range StringRange, ArrayRef<FixItHint> Fixit = None); }; } SourceRange CheckFormatHandler::getFormatStringRange() { return OrigFormatExpr->getSourceRange(); } CharSourceRange CheckFormatHandler:: getSpecifierRange(const char *startSpecifier, unsigned specifierLen) { SourceLocation Start = getLocationOfByte(startSpecifier); SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1); // Advance the end SourceLocation by one due to half-open ranges. 
End = End.getLocWithOffset(1); return CharSourceRange::getCharRange(Start, End); } SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) { return S.getLocationOfStringLiteralByte(FExpr, x - Beg); } void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier, unsigned specifierLen){ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier), getLocationOfByte(startSpecifier), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen)); } void CheckFormatHandler::HandleInvalidLengthModifier( const analyze_format_string::FormatSpecifier &FS, const analyze_format_string::ConversionSpecifier &CS, const char *startSpecifier, unsigned specifierLen, unsigned DiagID) { using namespace analyze_format_string; const LengthModifier &LM = FS.getLengthModifier(); CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); // See if we know how to fix this length modifier. Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); if (FixedLM) { EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), getLocationOfByte(LM.getStart()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen)); S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) << FixedLM->toString() << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); } else { FixItHint Hint; if (DiagID == diag::warn_format_nonsensical_length) Hint = FixItHint::CreateRemoval(LMRange); EmitFormatDiagnostic(S.PDiag(DiagID) << LM.toString() << CS.toString(), getLocationOfByte(LM.getStart()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen), Hint); } } void CheckFormatHandler::HandleNonStandardLengthModifier( const analyze_format_string::FormatSpecifier &FS, const char *startSpecifier, unsigned specifierLen) { using namespace analyze_format_string; const LengthModifier &LM = FS.getLengthModifier(); CharSourceRange LMRange = getSpecifierRange(LM.getStart(), LM.getLength()); // See if we know how to fix this length modifier. Optional<LengthModifier> FixedLM = FS.getCorrectedLengthModifier(); if (FixedLM) { EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << LM.toString() << 0, getLocationOfByte(LM.getStart()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen)); S.Diag(getLocationOfByte(LM.getStart()), diag::note_format_fix_specifier) << FixedLM->toString() << FixItHint::CreateReplacement(LMRange, FixedLM->toString()); } else { EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << LM.toString() << 0, getLocationOfByte(LM.getStart()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen)); } } void CheckFormatHandler::HandleNonStandardConversionSpecifier( const analyze_format_string::ConversionSpecifier &CS, const char *startSpecifier, unsigned specifierLen) { using namespace analyze_format_string; // See if we know how to fix this conversion specifier. 
  Optional<ConversionSpecifier> FixedCS = CS.getStandardSpecifier();
  if (FixedCS) {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));

    CharSourceRange CSRange = getSpecifierRange(CS.getStart(), CS.getLength());
    S.Diag(getLocationOfByte(CS.getStart()), diag::note_format_fix_specifier)
      << FixedCS->toString()
      << FixItHint::CreateReplacement(CSRange, FixedCS->toString());
  } else {
    EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard)
                          << CS.toString() << /*conversion specifier*/1,
                         getLocationOfByte(CS.getStart()),
                         /*IsStringLocation*/true,
                         getSpecifierRange(startSpecifier, specifierLen));
  }
}

void CheckFormatHandler::HandlePosition(const char *startPos,
                                        unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void
CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
                                     analyze_format_string::PositionContext p) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
                         << (unsigned) p,
                       getLocationOfByte(startPos), /*IsStringLocation*/true,
                       getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleZeroPosition(const char *startPos,
                                            unsigned posLen) {
  EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
                               getLocationOfByte(startPos),
                               /*IsStringLocation*/true,
                               getSpecifierRange(startPos, posLen));
}

void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
  if (!isa<ObjCStringLiteral>(OrigFormatExpr)) {
    // The presence of a null character is likely an error.
    EmitFormatDiagnostic(
      S.PDiag(diag::warn_printf_format_string_contains_null_char),
      getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
      getFormatStringRange());
  }
}

// Note that this may return NULL if there was an error parsing or building
// one of the argument expressions.
const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
  return Args[FirstDataArg + i];
}

void CheckFormatHandler::DoneProcessing() {
  // Does the number of data arguments exceed the number of
  // format conversions in the format string?
  if (!HasVAListArg) {
    // Find any arguments that weren't covered.
    CoveredArgs.flip();
    signed notCoveredArg = CoveredArgs.find_first();
    if (notCoveredArg >= 0) {
      assert((unsigned)notCoveredArg < NumDataArgs);
      if (const Expr *E = getDataArg((unsigned) notCoveredArg)) {
        SourceLocation Loc = E->getLocStart();
        if (!S.getSourceManager().isInSystemMacro(Loc)) {
          EmitFormatDiagnostic(S.PDiag(diag::warn_printf_data_arg_not_used),
                               Loc, /*IsStringLocation*/false,
                               getFormatStringRange());
        }
      }
    }
  }
}

bool
CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
                                                     SourceLocation Loc,
                                                     const char *startSpec,
                                                     unsigned specifierLen,
                                                     const char *csStart,
                                                     unsigned csLen) {
  bool keepGoing = true;
  if (argIndex < NumDataArgs) {
    // Consider the argument covered, even though the specifier doesn't
    // make sense.
    CoveredArgs.set(argIndex);
  } else {
    // If argIndex exceeds the number of data arguments we
    // don't issue a warning because that is just a cascade of warnings (and
    // they may have intended '%%' anyway). We don't want to continue
    // processing the format string after this point, however, as we will
    // likely just get gibberish when trying to match arguments.
    keepGoing = false;
  }

  EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_conversion)
                         << StringRef(csStart, csLen),
                       Loc, /*IsStringLocation*/true,
                       getSpecifierRange(startSpec, specifierLen));

  return keepGoing;
}

void
CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
                                                      const char *startSpec,
                                                      unsigned specifierLen) {
  EmitFormatDiagnostic(
    S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
    Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
}

bool
CheckFormatHandler::CheckNumArgs(
  const analyze_format_string::FormatSpecifier &FS,
  const analyze_format_string::ConversionSpecifier &CS,
  const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
  if (argIndex >= NumDataArgs) {
    PartialDiagnostic PDiag = FS.usesPositionalArg()
      ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
           << (argIndex+1) << NumDataArgs)
      : S.PDiag(diag::warn_printf_insufficient_data_args);
    EmitFormatDiagnostic(
      PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
      getSpecifierRange(startSpecifier, specifierLen));
    return false;
  }
  return true;
}

template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
                       Loc, IsStringLocation, StringRange, FixIt);
}

/// \brief If the format string is not within the function call, emit a note
/// so that the function call and string are in diagnostic messages.
///
/// \param InFunctionCall if true, the format string is within the function
/// call and only one diagnostic message will be produced.  Otherwise, an
/// extra note will be emitted pointing to location of the format string.
///
/// \param ArgumentExpr the expression that is passed as the format string
/// argument in the function call.  Used for getting locations when two
/// diagnostics are emitted.
///
/// \param PDiag the callee should already have provided any strings for the
/// diagnostic message.  This function only adds locations and fixits
/// to diagnostics.
///
/// \param Loc primary location for diagnostic.  If two diagnostics are
/// required, one will be at Loc and a new SourceLocation will be created for
/// the other one.
///
/// \param IsStringLocation if true, Loc points to the format string and
/// should be used for the note.  Otherwise, Loc points to the argument list
/// and will be used with PDiag.
///
/// \param StringRange some or all of the string to highlight.  This is
/// templated so it can accept either a CharSourceRange or a SourceRange.
///
/// \param FixIt optional fix it hint for the format string.
template<typename Range>
void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall,
                                              const Expr *ArgumentExpr,
                                              PartialDiagnostic PDiag,
                                              SourceLocation Loc,
                                              bool IsStringLocation,
                                              Range StringRange,
                                              ArrayRef<FixItHint> FixIt) {
  if (InFunctionCall) {
    const Sema::SemaDiagnosticBuilder &D = S.Diag(Loc, PDiag);
    D << StringRange;
    D << FixIt;
  } else {
    S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
      << ArgumentExpr->getSourceRange();
    const Sema::SemaDiagnosticBuilder &Note =
      S.Diag(IsStringLocation ?
Loc : StringRange.getBegin(), diag::note_format_string_defined); Note << StringRange; Note << FixIt; } } //===--- CHECK: Printf format string checking ------------------------------===// namespace { class CheckPrintfHandler : public CheckFormatHandler { bool ObjCContext; public: CheckPrintfHandler(Sema &s, const StringLiteral *fexpr, const Expr *origFormatExpr, unsigned firstDataArg, unsigned numDataArgs, bool isObjC, const char *beg, bool hasVAListArg, ArrayRef<const Expr *> Args, unsigned formatIdx, bool inFunctionCall, Sema::VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs) : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg, numDataArgs, beg, hasVAListArg, Args, formatIdx, inFunctionCall, CallType, CheckedVarArgs), ObjCContext(isObjC) {} bool HandleInvalidPrintfConversionSpecifier( const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) override; bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) override; bool checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, const char *StartSpecifier, unsigned SpecifierLen, const Expr *E); bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k, const char *startSpecifier, unsigned specifierLen); void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalAmount &Amt, unsigned type, const char *startSpecifier, unsigned specifierLen); void HandleFlag(const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalFlag &flag, const char *startSpecifier, unsigned specifierLen); void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalFlag &ignoredFlag, const analyze_printf::OptionalFlag &flag, const char *startSpecifier, unsigned specifierLen); bool checkForCStrMembers(const analyze_printf::ArgType &AT, const Expr *E); void HandleEmptyObjCModifierFlag(const char *startFlag, unsigned flagLen) override; void HandleInvalidObjCModifierFlag(const char *startFlag, unsigned flagLen) override; void HandleObjCFlagsWithNonObjCConversion(const char *flagsStart, const char *flagsEnd, const char *conversionPosition) override; }; } bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier( const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) { const analyze_printf::PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); return HandleInvalidConversionSpecifier(FS.getArgIndex(), getLocationOfByte(CS.getStart()), startSpecifier, specifierLen, CS.getStart(), CS.getLength()); } bool CheckPrintfHandler::HandleAmount( const analyze_format_string::OptionalAmount &Amt, unsigned k, const char *startSpecifier, unsigned specifierLen) { if (Amt.hasDataArgument()) { if (!HasVAListArg) { unsigned argIndex = Amt.getArgIndex(); if (argIndex >= NumDataArgs) { EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg) << k, getLocationOfByte(Amt.getStart()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen)); // Don't do any more checking. We will just emit // spurious errors. return false; } // Type check the data argument. It should be an 'int'. // Although not in conformance with C99, we also allow the argument to be // an 'unsigned int' as that is a reasonably safe case. GCC also // doesn't emit a warning for that case. 
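// For example, in printf("%*d", width, x) the '*' consumes 'width', which
// is expected to be an 'int' (or, per the above, an 'unsigned int').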
CoveredArgs.set(argIndex); const Expr *Arg = getDataArg(argIndex); if (!Arg) return false; QualType T = Arg->getType(); const analyze_printf::ArgType &AT = Amt.getArgType(S.Context); assert(AT.isValid()); if (!AT.matchesType(S.Context, T)) { EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type) << k << AT.getRepresentativeTypeName(S.Context) << T << Arg->getSourceRange(), getLocationOfByte(Amt.getStart()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen)); // Don't do any more checking. We will just emit // spurious errors. return false; } } } return true; } void CheckPrintfHandler::HandleInvalidAmount( const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalAmount &Amt, unsigned type, const char *startSpecifier, unsigned specifierLen) { const analyze_printf::PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); FixItHint fixit = Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(), Amt.getConstantLength())) : FixItHint(); EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount) << type << CS.toString(), getLocationOfByte(Amt.getStart()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen), fixit); } void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalFlag &flag, const char *startSpecifier, unsigned specifierLen) { // Warn about pointless flag with a fixit removal. const analyze_printf::PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag) << flag.toString() << CS.toString(), getLocationOfByte(flag.getPosition()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen), FixItHint::CreateRemoval( getSpecifierRange(flag.getPosition(), 1))); } void CheckPrintfHandler::HandleIgnoredFlag( const analyze_printf::PrintfSpecifier &FS, const analyze_printf::OptionalFlag &ignoredFlag, const analyze_printf::OptionalFlag &flag, const char *startSpecifier, unsigned specifierLen) { // Warn about ignored flag with a fixit removal. EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag) << ignoredFlag.toString() << flag.toString(), getLocationOfByte(ignoredFlag.getPosition()), /*IsStringLocation*/true, getSpecifierRange(startSpecifier, specifierLen), FixItHint::CreateRemoval( getSpecifierRange(ignoredFlag.getPosition(), 1))); } // void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc, // bool IsStringLocation, Range StringRange, // ArrayRef<FixItHint> Fixit = None); void CheckPrintfHandler::HandleEmptyObjCModifierFlag(const char *startFlag, unsigned flagLen) { // Warn about an empty flag. EmitFormatDiagnostic(S.PDiag(diag::warn_printf_empty_objc_flag), getLocationOfByte(startFlag), /*IsStringLocation*/true, getSpecifierRange(startFlag, flagLen)); } void CheckPrintfHandler::HandleInvalidObjCModifierFlag(const char *startFlag, unsigned flagLen) { // Warn about an invalid flag. auto Range = getSpecifierRange(startFlag, flagLen); StringRef flag(startFlag, flagLen); EmitFormatDiagnostic(S.PDiag(diag::warn_printf_invalid_objc_flag) << flag, getLocationOfByte(startFlag), /*IsStringLocation*/true, Range, FixItHint::CreateRemoval(Range)); } void CheckPrintfHandler::HandleObjCFlagsWithNonObjCConversion( const char *flagsStart, const char *flagsEnd, const char *conversionPosition) { // Warn about using '[...]' without a '@' conversion. 
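// For example, a format such as "%[ts]d" (hypothetical flag letters shown)
// applies Objective-C modifier flags to a 'd' conversion, where they have
// no effect; the fixit below removes the flag group.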
auto Range = getSpecifierRange(flagsStart, flagsEnd - flagsStart + 1); auto diag = diag::warn_printf_ObjCflags_without_ObjCConversion; EmitFormatDiagnostic(S.PDiag(diag) << StringRef(conversionPosition, 1), getLocationOfByte(conversionPosition), /*IsStringLocation*/true, Range, FixItHint::CreateRemoval(Range)); } // Determines if the specified type is a C++ class or struct containing // a member with the specified name and kind (e.g. a CXXMethodDecl named // "c_str()"). template<typename MemberKind> static llvm::SmallPtrSet<MemberKind*, 1> CXXRecordMembersNamed(StringRef Name, Sema &S, QualType Ty) { const RecordType *RT = Ty->getAs<RecordType>(); llvm::SmallPtrSet<MemberKind*, 1> Results; if (!RT) return Results; const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()); if (!RD || !RD->getDefinition()) return Results; LookupResult R(S, &S.Context.Idents.get(Name), SourceLocation(), Sema::LookupMemberName); R.suppressDiagnostics(); // We just need to include all members of the right kind turned up by the // filter, at this point. if (S.LookupQualifiedName(R, RT->getDecl())) for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { NamedDecl *decl = (*I)->getUnderlyingDecl(); if (MemberKind *FK = dyn_cast<MemberKind>(decl)) Results.insert(FK); } return Results; } /// Check if we could call '.c_str()' on an object. /// /// FIXME: This returns the wrong results in some cases (if cv-qualifiers don't /// allow the call, or if it would be ambiguous). bool Sema::hasCStrMethod(const Expr *E) { typedef llvm::SmallPtrSet<CXXMethodDecl*, 1> MethodSet; MethodSet Results = CXXRecordMembersNamed<CXXMethodDecl>("c_str", *this, E->getType()); for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); MI != ME; ++MI) if ((*MI)->getMinRequiredArguments() == 0) return true; return false; } // Check if a (w)string was passed when a (w)char* was needed, and offer a // better diagnostic if so. AT is assumed to be valid. // Returns true when a c_str() conversion method is found. bool CheckPrintfHandler::checkForCStrMembers( const analyze_printf::ArgType &AT, const Expr *E) { typedef llvm::SmallPtrSet<CXXMethodDecl*, 1> MethodSet; MethodSet Results = CXXRecordMembersNamed<CXXMethodDecl>("c_str", S, E->getType()); for (MethodSet::iterator MI = Results.begin(), ME = Results.end(); MI != ME; ++MI) { const CXXMethodDecl *Method = *MI; if (Method->getMinRequiredArguments() == 0 && AT.matchesType(S.Context, Method->getReturnType())) { // FIXME: Suggest parens if the expression needs them. SourceLocation EndLoc = S.getLocForEndOfToken(E->getLocEnd()); S.Diag(E->getLocStart(), diag::note_printf_c_str) << "c_str()" << FixItHint::CreateInsertion(EndLoc, ".c_str()"); return true; } } return false; } bool CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) { using namespace analyze_format_string; using namespace analyze_printf; const PrintfConversionSpecifier &CS = FS.getConversionSpecifier(); if (FS.consumesDataArgument()) { if (atFirstArg) { atFirstArg = false; usesPositionalArgs = FS.usesPositionalArg(); } else if (usesPositionalArgs != FS.usesPositionalArg()) { HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), startSpecifier, specifierLen); return false; } } // First check if the field width, precision, and conversion specifier // have matching data arguments.
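// For example, printf("%*.*f", w, p, x) must match 'w' and 'p' as ints for
// the field width and precision before 'x' is checked against '%f'.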
if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0, startSpecifier, specifierLen)) { return false; } if (!HandleAmount(FS.getPrecision(), /* precision */ 1, startSpecifier, specifierLen)) { return false; } if (!CS.consumesDataArgument()) { // FIXME: Technically specifying a precision or field width here // makes no sense. Worth issuing a warning at some point. return true; } // Consume the argument. unsigned argIndex = FS.getArgIndex(); if (argIndex < NumDataArgs) { // The check to see if the argIndex is valid will come later. // We set the bit here because we may exit early from this // function if we encounter some other error. CoveredArgs.set(argIndex); } // FreeBSD kernel extensions. if (CS.getKind() == ConversionSpecifier::FreeBSDbArg || CS.getKind() == ConversionSpecifier::FreeBSDDArg) { // We need at least two arguments. if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex + 1)) return false; // Claim the second argument. CoveredArgs.set(argIndex + 1); // Type check the first argument (int for %b, pointer for %D) const Expr *Ex = getDataArg(argIndex); const analyze_printf::ArgType &AT = (CS.getKind() == ConversionSpecifier::FreeBSDbArg) ? ArgType(S.Context.IntTy) : ArgType::CPointerTy; if (AT.isValid() && !AT.matchesType(S.Context, Ex->getType())) EmitFormatDiagnostic( S.PDiag(diag::warn_format_conversion_argument_type_mismatch) << AT.getRepresentativeTypeName(S.Context) << Ex->getType() << false << Ex->getSourceRange(), Ex->getLocStart(), /*IsStringLocation*/false, getSpecifierRange(startSpecifier, specifierLen)); // Type check the second argument (char * for both %b and %D) Ex = getDataArg(argIndex + 1); const analyze_printf::ArgType &AT2 = ArgType::CStrTy; if (AT2.isValid() && !AT2.matchesType(S.Context, Ex->getType())) EmitFormatDiagnostic( S.PDiag(diag::warn_format_conversion_argument_type_mismatch) << AT2.getRepresentativeTypeName(S.Context) << Ex->getType() << false << Ex->getSourceRange(), Ex->getLocStart(), /*IsStringLocation*/false, getSpecifierRange(startSpecifier, specifierLen)); return true; } // Check for using an Objective-C specific conversion specifier // in a non-ObjC literal. if (!ObjCContext && CS.isObjCArg()) { return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier, specifierLen); } // Check for invalid use of field width if (!FS.hasValidFieldWidth()) { HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0, startSpecifier, specifierLen); } // Check for invalid use of precision if (!FS.hasValidPrecision()) { HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1, startSpecifier, specifierLen); } // Check each flag does not conflict with any other component. 
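// For example, "%#d" applies the alternative-form flag to a conversion that
// does not accept it, and in "%-05d" the '0' flag is ignored because of '-'.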
if (!FS.hasValidThousandsGroupingPrefix()) HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen); if (!FS.hasValidLeadingZeros()) HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen); if (!FS.hasValidPlusPrefix()) HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen); if (!FS.hasValidSpacePrefix()) HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen); if (!FS.hasValidAlternativeForm()) HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen); if (!FS.hasValidLeftJustified()) HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen); // Check that flags are not ignored by another flag if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+' HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(), startSpecifier, specifierLen); if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-' HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(), startSpecifier, specifierLen); // Check the length modifier is valid with the given conversion specifier. if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo())) HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, diag::warn_format_nonsensical_length); else if (!FS.hasStandardLengthModifier()) HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); else if (!FS.hasStandardLengthConversionCombination()) HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, diag::warn_format_non_standard_conversion_spec); if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); // The remaining checks depend on the data arguments. if (HasVAListArg) return true; if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) return false; const Expr *Arg = getDataArg(argIndex); if (!Arg) return true; return checkFormatExpr(FS, startSpecifier, specifierLen, Arg); } static bool requiresParensToAddCast(const Expr *E) { // FIXME: We should have a general way to reason about operator // precedence and whether parens are actually needed here. // Take care of a few common cases where they aren't. const Expr *Inside = E->IgnoreImpCasts(); if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(Inside)) Inside = POE->getSyntacticForm()->IgnoreImpCasts(); switch (Inside->getStmtClass()) { case Stmt::ArraySubscriptExprClass: case Stmt::CallExprClass: case Stmt::CharacterLiteralClass: case Stmt::CXXBoolLiteralExprClass: case Stmt::DeclRefExprClass: case Stmt::FloatingLiteralClass: case Stmt::IntegerLiteralClass: case Stmt::MemberExprClass: case Stmt::ObjCArrayLiteralClass: case Stmt::ObjCBoolLiteralExprClass: case Stmt::ObjCBoxedExprClass: case Stmt::ObjCDictionaryLiteralClass: case Stmt::ObjCEncodeExprClass: case Stmt::ObjCIvarRefExprClass: case Stmt::ObjCMessageExprClass: case Stmt::ObjCPropertyRefExprClass: case Stmt::ObjCStringLiteralClass: case Stmt::ObjCSubscriptRefExprClass: case Stmt::ParenExprClass: case Stmt::StringLiteralClass: case Stmt::UnaryOperatorClass: return false; default: return true; } } static std::pair<QualType, StringRef> shouldNotPrintDirectly(const ASTContext &Context, QualType IntendedTy, const Expr *E) { // Use a 'while' to peel off layers of typedefs. 
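// For example, NSInteger maps to 'long' here, so a value of that type is
// expected to be cast to 'long' and printed with '%ld' rather than printed
// directly.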
QualType TyTy = IntendedTy; while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) { StringRef Name = UserTy->getDecl()->getName(); QualType CastTy = llvm::StringSwitch<QualType>(Name) .Case("NSInteger", Context.LongTy) .Case("NSUInteger", Context.UnsignedLongTy) .Case("SInt32", Context.IntTy) .Case("UInt32", Context.UnsignedIntTy) .Default(QualType()); if (!CastTy.isNull()) return std::make_pair(CastTy, Name); TyTy = UserTy->desugar(); } // Strip parens if necessary. if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) return shouldNotPrintDirectly(Context, PE->getSubExpr()->getType(), PE->getSubExpr()); // If this is a conditional expression, then its result type is constructed // via usual arithmetic conversions and thus there might be no necessary // typedef sugar there. Recurse to operands to check for NSInteger & // Co. usage condition. if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { QualType TrueTy, FalseTy; StringRef TrueName, FalseName; std::tie(TrueTy, TrueName) = shouldNotPrintDirectly(Context, CO->getTrueExpr()->getType(), CO->getTrueExpr()); std::tie(FalseTy, FalseName) = shouldNotPrintDirectly(Context, CO->getFalseExpr()->getType(), CO->getFalseExpr()); if (TrueTy == FalseTy) return std::make_pair(TrueTy, TrueName); else if (TrueTy.isNull()) return std::make_pair(FalseTy, FalseName); else if (FalseTy.isNull()) return std::make_pair(TrueTy, TrueName); } return std::make_pair(QualType(), StringRef()); } bool CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS, const char *StartSpecifier, unsigned SpecifierLen, const Expr *E) { using namespace analyze_format_string; using namespace analyze_printf; // Now type check the data expression that matches the // format specifier. const analyze_printf::ArgType &AT = FS.getArgType(S.Context, ObjCContext); if (!AT.isValid()) return true; QualType ExprTy = E->getType(); while (const TypeOfExprType *TET = dyn_cast<TypeOfExprType>(ExprTy)) { ExprTy = TET->getUnderlyingExpr()->getType(); } analyze_printf::ArgType::MatchKind match = AT.matchesType(S.Context, ExprTy); if (match == analyze_printf::ArgType::Match) { return true; } // Look through argument promotions for our error message's reported type. // This includes the integral and floating promotions, but excludes array // and function pointer decay; seeing that an argument intended to be a // string has type 'char [6]' is probably more confusing than 'char *'. if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { if (ICE->getCastKind() == CK_IntegralCast || ICE->getCastKind() == CK_FloatingCast) { E = ICE->getSubExpr(); ExprTy = E->getType(); // Check if we didn't match because of an implicit cast from a 'char' // or 'short' to an 'int'. This is done because printf is a varargs // function. if (ICE->getType() == S.Context.IntTy || ICE->getType() == S.Context.UnsignedIntTy) { // All further checking is done on the subexpression. if (AT.matchesType(S.Context, ExprTy)) return true; } } } else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) { // Special case for 'a', which has type 'int' in C. // Note, however, that we do /not/ want to treat multibyte constants like // 'MooV' as characters! This form is deprecated but still exists. if (ExprTy == S.Context.IntTy) if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue())) ExprTy = S.Context.CharTy; } // Look through enums to their underlying type. 
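// For example, an argument of enumeration type is checked against the
// specifier using the enum's underlying integer type, and IsEnum is
// recorded for diagnostic wording.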
bool IsEnum = false; if (auto EnumTy = ExprTy->getAs<EnumType>()) { ExprTy = EnumTy->getDecl()->getIntegerType(); IsEnum = true; } // %C in an Objective-C context prints a unichar, not a wchar_t. // If the argument is an integer of some kind, believe the %C and suggest // a cast instead of changing the conversion specifier. QualType IntendedTy = ExprTy; if (ObjCContext && FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) { if (ExprTy->isIntegralOrUnscopedEnumerationType() && !ExprTy->isCharType()) { // 'unichar' is defined as a typedef of unsigned short, but we should // prefer using the typedef if it is visible. IntendedTy = S.Context.UnsignedShortTy; // While we are here, check if the value is an IntegerLiteral that happens // to be within the valid range. if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E)) { const llvm::APInt &V = IL->getValue(); if (V.getActiveBits() <= S.Context.getTypeSize(IntendedTy)) return true; } LookupResult Result(S, &S.Context.Idents.get("unichar"), E->getLocStart(), Sema::LookupOrdinaryName); if (S.LookupName(Result, S.getCurScope())) { NamedDecl *ND = Result.getFoundDecl(); if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(ND)) if (TD->getUnderlyingType() == IntendedTy) IntendedTy = S.Context.getTypedefType(TD); } } } // Special-case some of Darwin's platform-independence types by suggesting // casts to primitive types that are known to be large enough. bool ShouldNotPrintDirectly = false; StringRef CastTyName; if (S.Context.getTargetInfo().getTriple().isOSDarwin()) { QualType CastTy; std::tie(CastTy, CastTyName) = shouldNotPrintDirectly(S.Context, IntendedTy, E); if (!CastTy.isNull()) { IntendedTy = CastTy; ShouldNotPrintDirectly = true; } } // We may be able to offer a FixItHint if it is a supported type. PrintfSpecifier fixedFS = FS; bool success = fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, ObjCContext); if (success) { // Get the fix string from the fixed format specifier SmallString<16> buf; llvm::raw_svector_ostream os(buf); fixedFS.toString(os); CharSourceRange SpecRange = getSpecifierRange(StartSpecifier, SpecifierLen); if (IntendedTy == ExprTy && !ShouldNotPrintDirectly) { unsigned diag = diag::warn_format_conversion_argument_type_mismatch; if (match == analyze_format_string::ArgType::NoMatchPedantic) { diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; } // In this case, the specifier is wrong and should be changed to match // the argument. EmitFormatDiagnostic(S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) << IntendedTy << IsEnum << E->getSourceRange(), E->getLocStart(), /*IsStringLocation*/ false, SpecRange, FixItHint::CreateReplacement(SpecRange, os.str())); } else { // The canonical type for formatting this value is different from the // actual type of the expression. (This occurs, for example, with Darwin's // NSInteger on 32-bit platforms, where it is typedef'd as 'int', but // should be printed as 'long' for 64-bit compatibility.) // Rather than emitting a normal format/argument mismatch, we want to // add a cast to the recommended type (and correct the format string // if necessary). 
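// For example, printing an NSInteger with '%d' would suggest inserting a
// '(long)' cast and, if needed, rewriting the specifier to '%ld'.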
SmallString<16> CastBuf; llvm::raw_svector_ostream CastFix(CastBuf); CastFix << "("; IntendedTy.print(CastFix, S.Context.getPrintingPolicy()); CastFix << ")"; SmallVector<FixItHint,4> Hints; if (!AT.matchesType(S.Context, IntendedTy)) Hints.push_back(FixItHint::CreateReplacement(SpecRange, os.str())); if (const CStyleCastExpr *CCast = dyn_cast<CStyleCastExpr>(E)) { // If there's already a cast present, just replace it. SourceRange CastRange(CCast->getLParenLoc(), CCast->getRParenLoc()); Hints.push_back(FixItHint::CreateReplacement(CastRange, CastFix.str())); } else if (!requiresParensToAddCast(E)) { // If the expression has high enough precedence, // just write the C-style cast. Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(), CastFix.str())); } else { // Otherwise, add parens around the expression as well as the cast. CastFix << "("; Hints.push_back(FixItHint::CreateInsertion(E->getLocStart(), CastFix.str())); SourceLocation After = S.getLocForEndOfToken(E->getLocEnd()); Hints.push_back(FixItHint::CreateInsertion(After, ")")); } if (ShouldNotPrintDirectly) { // The expression has a type that should not be printed directly. // We extract the name from the typedef because we don't want to show // the underlying type in the diagnostic. StringRef Name; if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(ExprTy)) Name = TypedefTy->getDecl()->getName(); else Name = CastTyName; EmitFormatDiagnostic(S.PDiag(diag::warn_format_argument_needs_cast) << Name << IntendedTy << IsEnum << E->getSourceRange(), E->getLocStart(), /*IsStringLocation=*/false, SpecRange, Hints); } else { // In this case, the expression could be printed using a different // specifier, but we've decided that the specifier is probably correct // and we should cast instead. Just use the normal warning message. EmitFormatDiagnostic( S.PDiag(diag::warn_format_conversion_argument_type_mismatch) << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum << E->getSourceRange(), E->getLocStart(), /*IsStringLocation*/false, SpecRange, Hints); } } } else { const CharSourceRange &CSR = getSpecifierRange(StartSpecifier, SpecifierLen); // Since the warning for passing non-POD types to variadic functions // was deferred until now, we emit a warning for non-POD // arguments here. switch (S.isValidVarArgType(ExprTy)) { case Sema::VAK_Valid: case Sema::VAK_ValidInCXX11: { unsigned diag = diag::warn_format_conversion_argument_type_mismatch; if (match == analyze_printf::ArgType::NoMatchPedantic) { diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; } EmitFormatDiagnostic( S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) << ExprTy << IsEnum << CSR << E->getSourceRange(), E->getLocStart(), /*IsStringLocation*/ false, CSR); break; } case Sema::VAK_Undefined: case Sema::VAK_MSVCUndefined: EmitFormatDiagnostic( S.PDiag(diag::warn_non_pod_vararg_with_format_string) << S.getLangOpts().CPlusPlus11 << ExprTy << CallType << AT.getRepresentativeTypeName(S.Context) << CSR << E->getSourceRange(), E->getLocStart(), /*IsStringLocation*/false, CSR); checkForCStrMembers(AT, E); break; case Sema::VAK_Invalid: if (ExprTy->isObjCObjectType()) EmitFormatDiagnostic( S.PDiag(diag::err_cannot_pass_objc_interface_to_vararg_format) << S.getLangOpts().CPlusPlus11 << ExprTy << CallType << AT.getRepresentativeTypeName(S.Context) << CSR << E->getSourceRange(), E->getLocStart(), /*IsStringLocation*/false, CSR); else // FIXME: If this is an initializer list, suggest removing the braces // or inserting a cast to the target type. 
S.Diag(E->getLocStart(), diag::err_cannot_pass_to_vararg_format) << isa<InitListExpr>(E) << ExprTy << CallType << AT.getRepresentativeTypeName(S.Context) << E->getSourceRange(); break; } assert(FirstDataArg + FS.getArgIndex() < CheckedVarArgs.size() && "format string specifier index out of range"); CheckedVarArgs[FirstDataArg + FS.getArgIndex()] = true; } return true; } //===--- CHECK: Scanf format string checking ------------------------------===// namespace { class CheckScanfHandler : public CheckFormatHandler { public: CheckScanfHandler(Sema &s, const StringLiteral *fexpr, const Expr *origFormatExpr, unsigned firstDataArg, unsigned numDataArgs, const char *beg, bool hasVAListArg, ArrayRef<const Expr *> Args, unsigned formatIdx, bool inFunctionCall, Sema::VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs) : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg, numDataArgs, beg, hasVAListArg, Args, formatIdx, inFunctionCall, CallType, CheckedVarArgs) {} bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) override; bool HandleInvalidScanfConversionSpecifier( const analyze_scanf::ScanfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) override; void HandleIncompleteScanList(const char *start, const char *end) override; }; } void CheckScanfHandler::HandleIncompleteScanList(const char *start, const char *end) { EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete), getLocationOfByte(end), /*IsStringLocation*/true, getSpecifierRange(start, end - start)); } bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier( const analyze_scanf::ScanfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) { const analyze_scanf::ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); return HandleInvalidConversionSpecifier(FS.getArgIndex(), getLocationOfByte(CS.getStart()), startSpecifier, specifierLen, CS.getStart(), CS.getLength()); } bool CheckScanfHandler::HandleScanfSpecifier( const analyze_scanf::ScanfSpecifier &FS, const char *startSpecifier, unsigned specifierLen) { using namespace analyze_scanf; using namespace analyze_format_string; const ScanfConversionSpecifier &CS = FS.getConversionSpecifier(); // Handle case where '%' and '*' don't consume an argument. These shouldn't // be used to decide if we are using positional arguments consistently. if (FS.consumesDataArgument()) { if (atFirstArg) { atFirstArg = false; usesPositionalArgs = FS.usesPositionalArg(); } else if (usesPositionalArgs != FS.usesPositionalArg()) { HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()), startSpecifier, specifierLen); return false; } } // Check that the field width is non-zero. const OptionalAmount &Amt = FS.getFieldWidth(); if (Amt.getHowSpecified() == OptionalAmount::Constant) { if (Amt.getConstantAmount() == 0) { const CharSourceRange &R = getSpecifierRange(Amt.getStart(), Amt.getConstantLength()); EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width), getLocationOfByte(Amt.getStart()), /*IsStringLocation*/true, R, FixItHint::CreateRemoval(R)); } } if (!FS.consumesDataArgument()) { // FIXME: Technically specifying a precision or field width here // makes no sense. Worth issuing a warning at some point. return true; } // Consume the argument. unsigned argIndex = FS.getArgIndex(); if (argIndex < NumDataArgs) { // The check to see if the argIndex is valid will come later.
// We set the bit here because we may exit early from this // function if we encounter some other error. CoveredArgs.set(argIndex); } // Check the length modifier is valid with the given conversion specifier. if (!FS.hasValidLengthModifier(S.getASTContext().getTargetInfo())) HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, diag::warn_format_nonsensical_length); else if (!FS.hasStandardLengthModifier()) HandleNonStandardLengthModifier(FS, startSpecifier, specifierLen); else if (!FS.hasStandardLengthConversionCombination()) HandleInvalidLengthModifier(FS, CS, startSpecifier, specifierLen, diag::warn_format_non_standard_conversion_spec); if (!FS.hasStandardConversionSpecifier(S.getLangOpts())) HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen); // The remaining checks depend on the data arguments. if (HasVAListArg) return true; if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex)) return false; // Check that the argument type matches the format specifier. const Expr *Ex = getDataArg(argIndex); if (!Ex) return true; const analyze_format_string::ArgType &AT = FS.getArgType(S.Context); if (!AT.isValid()) { return true; } analyze_format_string::ArgType::MatchKind match = AT.matchesType(S.Context, Ex->getType()); if (match == analyze_format_string::ArgType::Match) { return true; } ScanfSpecifier fixedFS = FS; bool success = fixedFS.fixType(Ex->getType(), Ex->IgnoreImpCasts()->getType(), S.getLangOpts(), S.Context); unsigned diag = diag::warn_format_conversion_argument_type_mismatch; if (match == analyze_format_string::ArgType::NoMatchPedantic) { diag = diag::warn_format_conversion_argument_type_mismatch_pedantic; } if (success) { // Get the fix string from the fixed format specifier. SmallString<128> buf; llvm::raw_svector_ostream os(buf); fixedFS.toString(os); EmitFormatDiagnostic( S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) << Ex->getType() << false << Ex->getSourceRange(), Ex->getLocStart(), /*IsStringLocation*/ false, getSpecifierRange(startSpecifier, specifierLen), FixItHint::CreateReplacement( getSpecifierRange(startSpecifier, specifierLen), os.str())); } else { EmitFormatDiagnostic(S.PDiag(diag) << AT.getRepresentativeTypeName(S.Context) << Ex->getType() << false << Ex->getSourceRange(), Ex->getLocStart(), /*IsStringLocation*/ false, getSpecifierRange(startSpecifier, specifierLen)); } return true; } void Sema::CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs) { // CHECK: is the format string a wide literal? if (!FExpr->isAscii() && !FExpr->isUTF8()) { CheckFormatHandler::EmitFormatDiagnostic( *this, inFunctionCall, Args[format_idx], PDiag(diag::warn_format_string_is_wide_literal), FExpr->getLocStart(), /*IsStringLocation*/true, OrigFormatExpr->getSourceRange()); return; } // Str - The format string. NOTE: this is NOT null-terminated! StringRef StrRef = FExpr->getString(); const char *Str = StrRef.data(); // Account for cases where the string literal is truncated in a declaration. 
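// For example, in C, 'char fmt[2] = "%d";' drops the null terminator, so at
// most TypeSize - 1 bytes are treated as the format string below.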
const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); assert(T && "String literal not of constant array type!"); size_t TypeSize = T->getSize().getZExtValue(); size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); const unsigned numDataArgs = Args.size() - firstDataArg; // Emit a warning if the string literal is truncated and does not contain an // embedded null character. if (TypeSize <= StrRef.size() && StrRef.substr(0, TypeSize).find('\0') == StringRef::npos) { CheckFormatHandler::EmitFormatDiagnostic( *this, inFunctionCall, Args[format_idx], PDiag(diag::warn_printf_format_string_not_null_terminated), FExpr->getLocStart(), /*IsStringLocation=*/true, OrigFormatExpr->getSourceRange()); return; } // CHECK: empty format string? if (StrLen == 0 && numDataArgs > 0) { CheckFormatHandler::EmitFormatDiagnostic( *this, inFunctionCall, Args[format_idx], PDiag(diag::warn_empty_format_string), FExpr->getLocStart(), /*IsStringLocation*/true, OrigFormatExpr->getSourceRange()); return; } if (Type == FST_Printf || Type == FST_NSString || Type == FST_FreeBSDKPrintf || Type == FST_OSTrace) { CheckPrintfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg, numDataArgs, (Type == FST_NSString || Type == FST_OSTrace), Str, HasVAListArg, Args, format_idx, inFunctionCall, CallType, CheckedVarArgs); if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen, getLangOpts(), Context.getTargetInfo(), Type == FST_FreeBSDKPrintf)) H.DoneProcessing(); } else if (Type == FST_Scanf) { CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg, numDataArgs, Str, HasVAListArg, Args, format_idx, inFunctionCall, CallType, CheckedVarArgs); if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen, getLangOpts(), Context.getTargetInfo())) H.DoneProcessing(); } // TODO: handle other formats } bool Sema::FormatStringHasSArg(const StringLiteral *FExpr) { // Str - The format string. NOTE: this is NOT null-terminated! StringRef StrRef = FExpr->getString(); const char *Str = StrRef.data(); // Account for cases where the string literal is truncated in a declaration. const ConstantArrayType *T = Context.getAsConstantArrayType(FExpr->getType()); assert(T && "String literal not of constant array type!"); size_t TypeSize = T->getSize().getZExtValue(); size_t StrLen = std::min(std::max(TypeSize, size_t(1)) - 1, StrRef.size()); return analyze_format_string::ParseFormatStringHasSArg(Str, Str + StrLen, getLangOpts(), Context.getTargetInfo()); } #endif // HLSL Change - no support for format strings //===--- CHECK: Warn on use of wrong absolute value function. -------------===// // Returns the related absolute value function that is larger, or 0 if one // does not exist.
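// For example, abs -> labs -> llabs and fabsf -> fabs -> fabsl; llabs,
// fabsl, and cabsl are already the largest of their kind and so map to 0.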
static unsigned getLargerAbsoluteValueFunction(unsigned AbsFunction) { switch (AbsFunction) { default: return 0; case Builtin::BI__builtin_abs: return Builtin::BI__builtin_labs; case Builtin::BI__builtin_labs: return Builtin::BI__builtin_llabs; case Builtin::BI__builtin_llabs: return 0; case Builtin::BI__builtin_fabsf: return Builtin::BI__builtin_fabs; case Builtin::BI__builtin_fabs: return Builtin::BI__builtin_fabsl; case Builtin::BI__builtin_fabsl: return 0; case Builtin::BI__builtin_cabsf: return Builtin::BI__builtin_cabs; case Builtin::BI__builtin_cabs: return Builtin::BI__builtin_cabsl; case Builtin::BI__builtin_cabsl: return 0; case Builtin::BIabs: return Builtin::BIlabs; case Builtin::BIlabs: return Builtin::BIllabs; case Builtin::BIllabs: return 0; case Builtin::BIfabsf: return Builtin::BIfabs; case Builtin::BIfabs: return Builtin::BIfabsl; case Builtin::BIfabsl: return 0; case Builtin::BIcabsf: return Builtin::BIcabs; case Builtin::BIcabs: return Builtin::BIcabsl; case Builtin::BIcabsl: return 0; } } // Returns the argument type of the absolute value function. static QualType getAbsoluteValueArgumentType(ASTContext &Context, unsigned AbsType) { if (AbsType == 0) return QualType(); ASTContext::GetBuiltinTypeError Error = ASTContext::GE_None; QualType BuiltinType = Context.GetBuiltinType(AbsType, Error); if (Error != ASTContext::GE_None) return QualType(); const FunctionProtoType *FT = BuiltinType->getAs<FunctionProtoType>(); if (!FT) return QualType(); if (FT->getNumParams() != 1) return QualType(); return FT->getParamType(0); } // Returns the best absolute value function, or zero, based on type and // current absolute value function. static unsigned getBestAbsFunction(ASTContext &Context, QualType ArgType, unsigned AbsFunctionKind) { unsigned BestKind = 0; uint64_t ArgSize = Context.getTypeSize(ArgType); for (unsigned Kind = AbsFunctionKind; Kind != 0; Kind = getLargerAbsoluteValueFunction(Kind)) { QualType ParamType = getAbsoluteValueArgumentType(Context, Kind); if (Context.getTypeSize(ParamType) >= ArgSize) { if (BestKind == 0) BestKind = Kind; else if (Context.hasSameType(ParamType, ArgType)) { BestKind = Kind; break; } } } return BestKind; } enum AbsoluteValueKind { AVK_Integer, AVK_Floating, AVK_Complex }; static AbsoluteValueKind getAbsoluteValueKind(QualType T) { if (T->isIntegralOrEnumerationType()) return AVK_Integer; if (T->isRealFloatingType()) return AVK_Floating; if (T->isAnyComplexType()) return AVK_Complex; llvm_unreachable("Type not integer, floating, or complex"); } // Changes the absolute value function to a different type. Preserves whether // the function is a builtin. 
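// For example, fabs() applied to an integer argument maps to abs(), while
// __builtin_abs applied to a floating-point argument maps to
// __builtin_fabsf (the smallest member of the floating family).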
static unsigned changeAbsFunction(unsigned AbsKind, AbsoluteValueKind ValueKind) { switch (ValueKind) { case AVK_Integer: switch (AbsKind) { default: return 0; case Builtin::BI__builtin_fabsf: case Builtin::BI__builtin_fabs: case Builtin::BI__builtin_fabsl: case Builtin::BI__builtin_cabsf: case Builtin::BI__builtin_cabs: case Builtin::BI__builtin_cabsl: return Builtin::BI__builtin_abs; case Builtin::BIfabsf: case Builtin::BIfabs: case Builtin::BIfabsl: case Builtin::BIcabsf: case Builtin::BIcabs: case Builtin::BIcabsl: return Builtin::BIabs; } case AVK_Floating: switch (AbsKind) { default: return 0; case Builtin::BI__builtin_abs: case Builtin::BI__builtin_labs: case Builtin::BI__builtin_llabs: case Builtin::BI__builtin_cabsf: case Builtin::BI__builtin_cabs: case Builtin::BI__builtin_cabsl: return Builtin::BI__builtin_fabsf; case Builtin::BIabs: case Builtin::BIlabs: case Builtin::BIllabs: case Builtin::BIcabsf: case Builtin::BIcabs: case Builtin::BIcabsl: return Builtin::BIfabsf; } case AVK_Complex: switch (AbsKind) { default: return 0; case Builtin::BI__builtin_abs: case Builtin::BI__builtin_labs: case Builtin::BI__builtin_llabs: case Builtin::BI__builtin_fabsf: case Builtin::BI__builtin_fabs: case Builtin::BI__builtin_fabsl: return Builtin::BI__builtin_cabsf; case Builtin::BIabs: case Builtin::BIlabs: case Builtin::BIllabs: case Builtin::BIfabsf: case Builtin::BIfabs: case Builtin::BIfabsl: return Builtin::BIcabsf; } } llvm_unreachable("Unable to convert function"); } static unsigned getAbsoluteValueFunctionKind(const FunctionDecl *FDecl) { const IdentifierInfo *FnInfo = FDecl->getIdentifier(); if (!FnInfo) return 0; switch (FDecl->getBuiltinID()) { default: return 0; case Builtin::BI__builtin_abs: case Builtin::BI__builtin_fabs: case Builtin::BI__builtin_fabsf: case Builtin::BI__builtin_fabsl: case Builtin::BI__builtin_labs: case Builtin::BI__builtin_llabs: case Builtin::BI__builtin_cabs: case Builtin::BI__builtin_cabsf: case Builtin::BI__builtin_cabsl: case Builtin::BIabs: case Builtin::BIlabs: case Builtin::BIllabs: case Builtin::BIfabs: case Builtin::BIfabsf: case Builtin::BIfabsl: case Builtin::BIcabs: case Builtin::BIcabsf: case Builtin::BIcabsl: return FDecl->getBuiltinID(); } llvm_unreachable("Unknown Builtin type"); } // If the replacement is valid, emit a note with replacement function. // Additionally, suggest including the proper header if not already included. static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range, unsigned AbsKind, QualType ArgType) { bool EmitHeaderHint = true; const char *HeaderName = nullptr; const char *FunctionName = nullptr; if (S.getLangOpts().CPlusPlus && !ArgType->isAnyComplexType()) { FunctionName = "std::abs"; if (ArgType->isIntegralOrEnumerationType()) { HeaderName = "cstdlib"; } else if (ArgType->isRealFloatingType()) { HeaderName = "cmath"; } else { llvm_unreachable("Invalid Type"); } // Lookup all std::abs if (NamespaceDecl *Std = S.getStdNamespace()) { LookupResult R(S, &S.Context.Idents.get("abs"), Loc, Sema::LookupAnyName); R.suppressDiagnostics(); S.LookupQualifiedName(R, Std); for (const auto *I : R) { const FunctionDecl *FDecl = nullptr; if (const UsingShadowDecl *UsingD = dyn_cast<UsingShadowDecl>(I)) { FDecl = dyn_cast<FunctionDecl>(UsingD->getTargetDecl()); } else { FDecl = dyn_cast<FunctionDecl>(I); } if (!FDecl) continue; // Found std::abs(), check that they are the right ones. if (FDecl->getNumParams() != 1) continue; // Check that the parameter type can handle the argument. 
QualType ParamType = FDecl->getParamDecl(0)->getType(); if (getAbsoluteValueKind(ArgType) == getAbsoluteValueKind(ParamType) && S.Context.getTypeSize(ArgType) <= S.Context.getTypeSize(ParamType)) { // Found a function, don't need the header hint. EmitHeaderHint = false; break; } } } } else { FunctionName = S.Context.BuiltinInfo.GetName(AbsKind); HeaderName = S.Context.BuiltinInfo.getHeaderName(AbsKind); if (HeaderName) { DeclarationName DN(&S.Context.Idents.get(FunctionName)); LookupResult R(S, DN, Loc, Sema::LookupAnyName); R.suppressDiagnostics(); S.LookupName(R, S.getCurScope()); if (R.isSingleResult()) { FunctionDecl *FD = dyn_cast<FunctionDecl>(R.getFoundDecl()); if (FD && FD->getBuiltinID() == AbsKind) { EmitHeaderHint = false; } else { return; } } else if (!R.empty()) { return; } } } S.Diag(Loc, diag::note_replace_abs_function) << FunctionName << FixItHint::CreateReplacement(Range, FunctionName); if (!HeaderName) return; if (!EmitHeaderHint) return; S.Diag(Loc, diag::note_include_header_or_declare) << HeaderName << FunctionName; } static bool IsFunctionStdAbs(const FunctionDecl *FDecl) { if (!FDecl) return false; if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr("abs")) return false; const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(FDecl->getDeclContext()); while (ND && ND->isInlineNamespace()) { ND = dyn_cast<NamespaceDecl>(ND->getDeclContext()); } if (!ND || !ND->getIdentifier() || !ND->getIdentifier()->isStr("std")) return false; if (!isa<TranslationUnitDecl>(ND->getDeclContext())) return false; return true; } // Warn when using the wrong abs() function. void Sema::CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo) { if (Call->getNumArgs() != 1) return; unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl); bool IsStdAbs = IsFunctionStdAbs(FDecl); if (AbsKind == 0 && !IsStdAbs) return; QualType ArgType = Call->getArg(0)->IgnoreParenImpCasts()->getType(); QualType ParamType = Call->getArg(0)->getType(); // Unsigned types cannot be negative. Suggest removing the absolute value // function call. if (ArgType->isUnsignedIntegerType()) { const char *FunctionName = IsStdAbs ? "std::abs" : Context.BuiltinInfo.GetName(AbsKind); Diag(Call->getExprLoc(), diag::warn_unsigned_abs) << ArgType << ParamType; Diag(Call->getExprLoc(), diag::note_remove_abs) << FunctionName << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange()); return; } // std::abs has overloads which prevent most of the absolute value problems // from occurring. if (IsStdAbs) return; AbsoluteValueKind ArgValueKind = getAbsoluteValueKind(ArgType); AbsoluteValueKind ParamValueKind = getAbsoluteValueKind(ParamType); // The argument and parameter are the same kind. Check if they are the right // size. if (ArgValueKind == ParamValueKind) { if (Context.getTypeSize(ArgType) <= Context.getTypeSize(ParamType)) return; unsigned NewAbsKind = getBestAbsFunction(Context, ArgType, AbsKind); Diag(Call->getExprLoc(), diag::warn_abs_too_small) << FDecl << ArgType << ParamType; if (NewAbsKind == 0) return; emitReplacement(*this, Call->getExprLoc(), Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); return; } // ArgValueKind != ParamValueKind // The wrong type of absolute value function was used. Attempt to find the // proper one. 
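// For example, abs() called on a 'double' maps to the floating family
// (fabsf), which getBestAbsFunction below then widens to fabs().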
unsigned NewAbsKind = changeAbsFunction(AbsKind, ArgValueKind); NewAbsKind = getBestAbsFunction(Context, ArgType, NewAbsKind); if (NewAbsKind == 0) return; Diag(Call->getExprLoc(), diag::warn_wrong_absolute_value_type) << FDecl << ParamValueKind << ArgValueKind; emitReplacement(*this, Call->getExprLoc(), Call->getCallee()->getSourceRange(), NewAbsKind, ArgType); return; } //===--- CHECK: Standard memory functions ---------------------------------===// /// \brief Takes the expression passed to the size_t parameter of functions /// such as memcmp, strncat, etc., and warns if it's a comparison. /// /// This is to catch typos like `if (memcmp(&a, &b, sizeof(a) > 0))`. static bool CheckMemorySizeofForComparison(Sema &S, const Expr *E, IdentifierInfo *FnName, SourceLocation FnLoc, SourceLocation RParenLoc) { const BinaryOperator *Size = dyn_cast<BinaryOperator>(E); if (!Size) return false; // if E is binop and op is >, <, >=, <=, ==, &&, ||: if (!Size->isComparisonOp() && !Size->isEqualityOp() && !Size->isLogicalOp()) return false; SourceRange SizeRange = Size->getSourceRange(); S.Diag(Size->getOperatorLoc(), diag::warn_memsize_comparison) << SizeRange << FnName; S.Diag(FnLoc, diag::note_memsize_comparison_paren) << FnName << FixItHint::CreateInsertion( S.getLocForEndOfToken(Size->getLHS()->getLocEnd()), ")") << FixItHint::CreateRemoval(RParenLoc); S.Diag(SizeRange.getBegin(), diag::note_memsize_comparison_cast_silence) << FixItHint::CreateInsertion(SizeRange.getBegin(), "(size_t)(") << FixItHint::CreateInsertion(S.getLocForEndOfToken(SizeRange.getEnd()), ")"); return true; } /// \brief Determine whether the given type is or contains a dynamic class type /// (e.g., whether it has a vtable). static const CXXRecordDecl *getContainedDynamicClass(QualType T, bool &IsContained) { // Look through array types while ignoring qualifiers. const Type *Ty = T->getBaseElementTypeUnsafe(); IsContained = false; const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); RD = RD ? RD->getDefinition() : nullptr; if (!RD) return nullptr; if (RD->isDynamicClass()) return RD; // Check all the fields. If any bases were dynamic, the class is dynamic. // It's impossible for a class to transitively contain itself by value, so // infinite recursion is impossible. for (auto *FD : RD->fields()) { bool SubContained; if (const CXXRecordDecl *ContainedRD = getContainedDynamicClass(FD->getType(), SubContained)) { IsContained = true; return ContainedRD; } } return nullptr; } /// \brief If E is a sizeof expression, returns its argument expression, /// otherwise returns NULL. static const Expr *getSizeOfExprArg(const Expr *E) { if (const UnaryExprOrTypeTraitExpr *SizeOf = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) if (SizeOf->getKind() == clang::UETT_SizeOf && !SizeOf->isArgumentType()) return SizeOf->getArgumentExpr()->IgnoreParenImpCasts(); return nullptr; } /// \brief If E is a sizeof expression, returns its argument type. static QualType getSizeOfArgType(const Expr *E) { if (const UnaryExprOrTypeTraitExpr *SizeOf = dyn_cast<UnaryExprOrTypeTraitExpr>(E)) if (SizeOf->getKind() == clang::UETT_SizeOf) return SizeOf->getTypeOfArgument(); return QualType(); } /// \brief Check for dangerous or invalid arguments to memset(). /// /// This issues warnings on known problematic, dangerous or unspecified /// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp' /// function calls. /// /// \param Call The call expression to diagnose.
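///
/// For example, 'memset(p, 0, sizeof(p))' on a pointer 'p' is flagged
/// because it clears only pointer-size bytes; 'sizeof(*p)' was probably
/// intended.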
void Sema::CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName) { assert(BId != 0); // It is possible to have a non-standard definition of memset. Validate // we have enough arguments, and if not, abort further checking. unsigned ExpectedNumArgs = (BId == Builtin::BIstrndup ? 2 : 3); if (Call->getNumArgs() < ExpectedNumArgs) return; unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIstrndup ? 1 : 2); unsigned LenArg = (BId == Builtin::BIstrndup ? 1 : 2); const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts(); if (CheckMemorySizeofForComparison(*this, LenExpr, FnName, Call->getLocStart(), Call->getRParenLoc())) return; // We have special checking when the length is a sizeof expression. QualType SizeOfArgTy = getSizeOfArgType(LenExpr); const Expr *SizeOfArg = getSizeOfExprArg(LenExpr); llvm::FoldingSetNodeID SizeOfArgID; for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) { const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts(); SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange(); QualType DestTy = Dest->getType(); QualType PointeeTy; if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) { PointeeTy = DestPtrTy->getPointeeType(); // Never warn about void type pointers. This can be used to suppress // false positives. if (PointeeTy->isVoidType()) continue; // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by // actually comparing the expressions for equality. Because computing the // expression IDs can be expensive, we only do this if the diagnostic is // enabled. if (SizeOfArg && !Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, SizeOfArg->getExprLoc())) { // We only compute IDs for expressions if the warning is enabled, and // cache the sizeof arg's ID. if (SizeOfArgID == llvm::FoldingSetNodeID()) SizeOfArg->Profile(SizeOfArgID, Context, true); llvm::FoldingSetNodeID DestID; Dest->Profile(DestID, Context, true); if (DestID == SizeOfArgID) { // TODO: For strncpy() and friends, this could suggest sizeof(dst) // over sizeof(src) as well. unsigned ActionIdx = 0; // Default is to suggest dereferencing. StringRef ReadableName = FnName->getName(); if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest)) if (UnaryOp->getOpcode() == UO_AddrOf) ActionIdx = 1; // If it's an address-of operator, just remove it. if (!PointeeTy->isIncompleteType() && (Context.getTypeSize(PointeeTy) == Context.getCharWidth())) ActionIdx = 2; // If the pointee's size is sizeof(char), // suggest an explicit length. // If the function is defined as a builtin macro, do not show macro // expansion. SourceLocation SL = SizeOfArg->getExprLoc(); SourceRange DSR = Dest->getSourceRange(); SourceRange SSR = SizeOfArg->getSourceRange(); SourceManager &SM = getSourceManager(); if (SM.isMacroArgExpansion(SL)) { ReadableName = Lexer::getImmediateMacroName(SL, SM, LangOpts); SL = SM.getSpellingLoc(SL); DSR = SourceRange(SM.getSpellingLoc(DSR.getBegin()), SM.getSpellingLoc(DSR.getEnd())); SSR = SourceRange(SM.getSpellingLoc(SSR.getBegin()), SM.getSpellingLoc(SSR.getEnd())); } DiagRuntimeBehavior(SL, SizeOfArg, PDiag(diag::warn_sizeof_pointer_expr_memaccess) << ReadableName << PointeeTy << DestTy << DSR << SSR); DiagRuntimeBehavior(SL, SizeOfArg, PDiag(diag::warn_sizeof_pointer_expr_memaccess_note) << ActionIdx << SSR); break; } } // Also check for cases where the sizeof argument is the exact same // type as the memory argument, and where it points to a user-defined // record type.
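// For example, given 'struct S *ps', 'memset(ps, 0, sizeof(struct S *))'
// passes the pointer type's size where 'sizeof(struct S)' was probably
// intended.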
if (SizeOfArgTy != QualType()) { if (PointeeTy->isRecordType() && Context.typesAreCompatible(SizeOfArgTy, DestTy)) { DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest, PDiag(diag::warn_sizeof_pointer_type_memaccess) << FnName << SizeOfArgTy << ArgIdx << PointeeTy << Dest->getSourceRange() << LenExpr->getSourceRange()); break; } } } else if (DestTy->isArrayType()) { PointeeTy = DestTy; } if (PointeeTy == QualType()) continue; // Always complain about dynamic classes. bool IsContained; if (const CXXRecordDecl *ContainedRD = getContainedDynamicClass(PointeeTy, IsContained)) { unsigned OperationType = 0; // "overwritten" if we're warning about the destination for any call // but memcmp; otherwise a verb appropriate to the call. if (ArgIdx != 0 || BId == Builtin::BImemcmp) { if (BId == Builtin::BImemcpy) OperationType = 1; else if(BId == Builtin::BImemmove) OperationType = 2; else if (BId == Builtin::BImemcmp) OperationType = 3; } DiagRuntimeBehavior( Dest->getExprLoc(), Dest, PDiag(diag::warn_dyn_class_memaccess) << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx) << FnName << IsContained << ContainedRD << OperationType << Call->getCallee()->getSourceRange()); } else if (PointeeTy.hasNonTrivialObjCLifetime() && BId != Builtin::BImemset) DiagRuntimeBehavior( Dest->getExprLoc(), Dest, PDiag(diag::warn_arc_object_memaccess) << ArgIdx << FnName << PointeeTy << Call->getCallee()->getSourceRange()); else continue; DiagRuntimeBehavior( Dest->getExprLoc(), Dest, PDiag(diag::note_bad_memaccess_silence) << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)")); break; } } // A little helper routine: ignore addition and subtraction of integer literals. // This intentionally does not ignore all integer constant expressions because // we don't want to remove sizeof(). static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) { Ex = Ex->IgnoreParenCasts(); for (;;) { const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex); if (!BO || !BO->isAdditiveOp()) break; const Expr *RHS = BO->getRHS()->IgnoreParenCasts(); const Expr *LHS = BO->getLHS()->IgnoreParenCasts(); if (isa<IntegerLiteral>(RHS)) Ex = LHS; else if (isa<IntegerLiteral>(LHS)) Ex = RHS; else break; } return Ex; } static bool isConstantSizeArrayWithMoreThanOneElement(QualType Ty, ASTContext &Context) { // Only handle constant-sized or VLAs, but not flexible members. if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(Ty)) { // Only issue the FIXIT for arrays of size > 1. if (CAT->getSize().getSExtValue() <= 1) return false; } else if (!Ty->isVariableArrayType()) { return false; } return true; } // Warn if the user has made the 'size' argument to strlcpy or strlcat // be the size of the source, instead of the destination. 
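// For example, both 'strlcpy(dst, src, sizeof(src))' and
// 'strlcpy(dst, src, strlen(src))' size the copy by the source and can
// overflow 'dst'; the size argument should normally be 'sizeof(dst)'.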
void Sema::CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName) { // Don't crash if the user has the wrong number of arguments. unsigned NumArgs = Call->getNumArgs(); if ((NumArgs != 3) && (NumArgs != 4)) return; const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context); const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context); const Expr *CompareWithSrc = nullptr; if (CheckMemorySizeofForComparison(*this, SizeArg, FnName, Call->getLocStart(), Call->getRParenLoc())) return; // Look for 'strlcpy(dst, x, sizeof(x))' if (const Expr *Ex = getSizeOfExprArg(SizeArg)) CompareWithSrc = Ex; else { // Look for 'strlcpy(dst, x, strlen(x))' if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) { if (SizeCall->getBuiltinCallee() == Builtin::BIstrlen && SizeCall->getNumArgs() == 1) CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context); } } if (!CompareWithSrc) return; // Determine if the argument to sizeof/strlen is equal to the source // argument. In principle there's all kinds of things you could do // here, for instance creating an == expression and evaluating it with // EvaluateAsBooleanCondition, but this uses a more direct technique: const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg); if (!SrcArgDRE) return; const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc); if (!CompareWithSrcDRE || SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl()) return; const Expr *OriginalSizeArg = Call->getArg(2); Diag(CompareWithSrcDRE->getLocStart(), diag::warn_strlcpycat_wrong_size) << OriginalSizeArg->getSourceRange() << FnName; // Output a FIXIT hint if the destination is an array (rather than a // pointer to an array). This could be enhanced to handle some // pointers if we know the actual size, like if DstArg is 'array+2' // we could say 'sizeof(array)-2'. const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts(); if (!isConstantSizeArrayWithMoreThanOneElement(DstArg->getType(), Context)) return; SmallString<128> sizeString; llvm::raw_svector_ostream OS(sizeString); OS << "sizeof("; DstArg->printPretty(OS, nullptr, getPrintingPolicy()); OS << ")"; Diag(OriginalSizeArg->getLocStart(), diag::note_strlcpycat_wrong_size) << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(), OS.str()); } /// Check if two expressions refer to the same declaration. static bool referToTheSameDecl(const Expr *E1, const Expr *E2) { if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1)) if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2)) return D1->getDecl() == D2->getDecl(); return false; } static const Expr *getStrlenExprArg(const Expr *E) { if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { const FunctionDecl *FD = CE->getDirectCallee(); if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen) return nullptr; return CE->getArg(0)->IgnoreParenCasts(); } return nullptr; } // Warn on anti-patterns as the 'size' argument to strncat. // The correct size argument should look like the following: // strncat(dst, src, sizeof(dst) - strlen(dst) - 1); void Sema::CheckStrncatArguments(const CallExpr *CE, IdentifierInfo *FnName) { // Don't crash if the user has the wrong number of arguments.
if (CE->getNumArgs() < 3) return; const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts(); const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts(); const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts(); if (CheckMemorySizeofForComparison(*this, LenArg, FnName, CE->getLocStart(), CE->getRParenLoc())) return; // Identify common expressions, which are wrongly used as the size argument // to strncat and may lead to buffer overflows. unsigned PatternType = 0; if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) { // - sizeof(dst) if (referToTheSameDecl(SizeOfArg, DstArg)) PatternType = 1; // - sizeof(src) else if (referToTheSameDecl(SizeOfArg, SrcArg)) PatternType = 2; } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) { if (BE->getOpcode() == BO_Sub) { const Expr *L = BE->getLHS()->IgnoreParenCasts(); const Expr *R = BE->getRHS()->IgnoreParenCasts(); // - sizeof(dst) - strlen(dst) if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) && referToTheSameDecl(DstArg, getStrlenExprArg(R))) PatternType = 1; // - sizeof(src) - (anything) else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L))) PatternType = 2; } } if (PatternType == 0) return; // Generate the diagnostic. SourceLocation SL = LenArg->getLocStart(); SourceRange SR = LenArg->getSourceRange(); SourceManager &SM = getSourceManager(); // If the function is defined as a builtin macro, do not show macro expansion. if (SM.isMacroArgExpansion(SL)) { SL = SM.getSpellingLoc(SL); SR = SourceRange(SM.getSpellingLoc(SR.getBegin()), SM.getSpellingLoc(SR.getEnd())); } // Check if the destination is an array (rather than a pointer to an array). QualType DstTy = DstArg->getType(); bool isKnownSizeArray = isConstantSizeArrayWithMoreThanOneElement(DstTy, Context); if (!isKnownSizeArray) { if (PatternType == 1) Diag(SL, diag::warn_strncat_wrong_size) << SR; else Diag(SL, diag::warn_strncat_src_size) << SR; return; } if (PatternType == 1) Diag(SL, diag::warn_strncat_large_size) << SR; else Diag(SL, diag::warn_strncat_src_size) << SR; SmallString<128> sizeString; llvm::raw_svector_ostream OS(sizeString); OS << "sizeof("; DstArg->printPretty(OS, nullptr, getPrintingPolicy()); OS << ") - "; OS << "strlen("; DstArg->printPretty(OS, nullptr, getPrintingPolicy()); OS << ") - 1"; Diag(SL, diag::note_strncat_wrong_size) << FixItHint::CreateReplacement(SR, OS.str()); } //===--- CHECK: Return Address of Stack Variable --------------------------===// static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars, Decl *ParentDecl); static Expr *EvalAddr(Expr* E, SmallVectorImpl<DeclRefExpr *> &refVars, Decl *ParentDecl); /// CheckReturnStackAddr - Check if a return statement returns the address /// of a stack variable. static void CheckReturnStackAddr(Sema &S, Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc) { Expr *stackE = nullptr; SmallVector<DeclRefExpr *, 8> refVars; // Perform checking for returned stack addresses, local blocks, // label addresses or references to temporaries. if (lhsType->isPointerType() || (!S.getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) { stackE = EvalAddr(RetValExp, refVars, /*ParentDecl=*/nullptr); } else if (lhsType->isReferenceType()) { stackE = EvalVal(RetValExp, refVars, /*ParentDecl=*/nullptr); } if (!stackE) return; // Nothing suspicious was found. SourceLocation diagLoc; SourceRange diagRange; if (refVars.empty()) { diagLoc = stackE->getLocStart(); diagRange = stackE->getSourceRange(); } else { // We followed through a reference variable. 
// 'stackE' contains the
    // problematic expression but we will warn at the return statement pointing
    // at the reference variable. We will later display the "trail" of
    // reference variables using notes.
    diagLoc = refVars[0]->getLocStart();
    diagRange = refVars[0]->getSourceRange();
  }

  if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(stackE)) { // address of local var.
    S.Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_stack_ref
                                               : diag::warn_ret_stack_addr)
      << DR->getDecl()->getDeclName() << diagRange;
  } else if (isa<BlockExpr>(stackE)) { // local block.
    S.Diag(diagLoc, diag::err_ret_local_block) << diagRange;
  } else if (isa<AddrLabelExpr>(stackE)) { // address of label.
    S.Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
  } else { // local temporary.
    S.Diag(diagLoc, lhsType->isReferenceType()
                        ? diag::warn_ret_local_temp_ref
                        : diag::warn_ret_local_temp_addr)
      << diagRange;
  }

  // Display the "trail" of reference variables that we followed until we
  // found the problematic expression using notes.
  for (unsigned i = 0, e = refVars.size(); i != e; ++i) {
    VarDecl *VD = cast<VarDecl>(refVars[i]->getDecl());
    // If this var binds to another reference var, show the range of the next
    // var, otherwise the var binds to the problematic expression, in which
    // case show the range of the expression.
    SourceRange range = (i < e - 1) ? refVars[i + 1]->getSourceRange()
                                    : stackE->getSourceRange();
    S.Diag(VD->getLocation(), diag::note_ref_var_local_bind)
      << VD->getDeclName() << range;
  }
}

/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
/// check if the expression in a return statement evaluates to an address
/// to a location on the stack, a local block, an address of a label, or a
/// reference to local temporary. The recursion is used to traverse the
/// AST of the return expression, with recursion backtracking when we
/// encounter a subexpression that (1) clearly does not lead to one of the
/// above problematic expressions or (2) is something we cannot determine
/// leads to a problematic expression based on such local checking.
///
/// Both EvalAddr and EvalVal follow through reference variables to evaluate
/// the expression that they point to. Such variables are added to the
/// 'refVars' vector so that we know what the reference variable "trail" was.
///
/// EvalAddr processes expressions that are pointers that are used as
/// references (and not L-values). EvalVal handles all other values.
/// At the base case of the recursion is a check for the above problematic
/// expressions.
///
/// This implementation handles:
///
///  * pointer-to-pointer casts
///  * implicit conversions from array references to pointers
///  * taking the address of fields
///  * arbitrary interplay between "&" and "*" operators
///  * pointer arithmetic from an address of a stack variable
///  * taking the address of an array element where the array is on the stack
static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
                      Decl *ParentDecl) {
  if (E->isTypeDependent())
    return nullptr;

  // We should only be called for evaluating pointer expressions.
  assert((E->getType()->isAnyPointerType() ||
          E->getType()->isBlockPointerType() ||
          E->getType()->isObjCQualifiedIdType()) &&
         "EvalAddr only works on pointers");

  E = E->IgnoreParens();

  // Our "symbolic interpreter" is just a dispatch off the currently
  // viewed AST node. We then recursively traverse the AST by calling
  // EvalAddr and EvalVal appropriately.
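  // For instance, for a hypothetical 'return &arr[i]' the dispatch below
  // goes UnaryOperator(&) -> EvalVal of the subscript -> EvalAddr of the
  // decayed array base, ending at a DeclRefExpr with local storage, which
  // CheckReturnStackAddr then reports.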
  switch (E->getStmtClass()) {
  case Stmt::DeclRefExprClass: {
    DeclRefExpr *DR = cast<DeclRefExpr>(E);

    // If we leave the immediate function, the lifetime isn't about to end.
    if (DR->refersToEnclosingVariableOrCapture())
      return nullptr;

    if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
      // If this is a reference variable, follow through to the expression
      // that it points to.
      if (V->hasLocalStorage() &&
          V->getType()->isReferenceType() && V->hasInit()) {
        // Add the reference variable to the "trail".
        refVars.push_back(DR);
        return EvalAddr(V->getInit(), refVars, ParentDecl);
      }

    return nullptr;
  }

  case Stmt::UnaryOperatorClass: {
    // The only unary operator that makes sense to handle here
    // is AddrOf. All others don't make sense as pointers.
    UnaryOperator *U = cast<UnaryOperator>(E);

    if (U->getOpcode() == UO_AddrOf)
      return EvalVal(U->getSubExpr(), refVars, ParentDecl);
    else
      return nullptr;
  }

  case Stmt::BinaryOperatorClass: {
    // Handle pointer arithmetic. All other binary operators are not valid
    // in this context.
    BinaryOperator *B = cast<BinaryOperator>(E);
    BinaryOperatorKind op = B->getOpcode();

    if (op != BO_Add && op != BO_Sub)
      return nullptr;

    Expr *Base = B->getLHS();

    // Determine which argument is the real pointer base. It could be
    // the RHS argument instead of the LHS.
    if (!Base->getType()->isPointerType())
      Base = B->getRHS();

    assert(Base->getType()->isPointerType());
    return EvalAddr(Base, refVars, ParentDecl);
  }

  // For conditional operators we need to see if either the LHS or RHS are
  // valid DeclRefExpr*s. If one of them is valid, we return it.
  case Stmt::ConditionalOperatorClass: {
    ConditionalOperator *C = cast<ConditionalOperator>(E);

    // Handle the GNU extension for missing LHS.
    // FIXME: That isn't a ConditionalOperator, so doesn't get here.
    if (Expr *LHSExpr = C->getLHS()) {
      // In C++, we can have a throw-expression, which has 'void' type.
      if (!LHSExpr->getType()->isVoidType())
        if (Expr *LHS = EvalAddr(LHSExpr, refVars, ParentDecl))
          return LHS;
    }

    // In C++, we can have a throw-expression, which has 'void' type.
    if (C->getRHS()->getType()->isVoidType())
      return nullptr;

    return EvalAddr(C->getRHS(), refVars, ParentDecl);
  }

  case Stmt::BlockExprClass:
    if (cast<BlockExpr>(E)->getBlockDecl()->hasCaptures())
      return E; // local block.
    return nullptr;

  case Stmt::AddrLabelExprClass:
    return E; // address of label.

  case Stmt::ExprWithCleanupsClass:
    return EvalAddr(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,
                    ParentDecl);

  // For casts, we need to handle conversions from arrays to
  // pointer values, and pointer-to-pointer conversions.
  case Stmt::ImplicitCastExprClass:
  case Stmt::CStyleCastExprClass:
  case Stmt::CXXFunctionalCastExprClass:
  case Stmt::ObjCBridgedCastExprClass:
  case Stmt::CXXStaticCastExprClass:
  case Stmt::CXXDynamicCastExprClass:
  case Stmt::CXXConstCastExprClass:
  case Stmt::CXXReinterpretCastExprClass: {
    Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
    switch (cast<CastExpr>(E)->getCastKind()) {
    case CK_LValueToRValue:
    case CK_NoOp:
    case CK_BaseToDerived:
    case CK_DerivedToBase:
    case CK_UncheckedDerivedToBase:
    case CK_Dynamic:
    case CK_CPointerToObjCPointerCast:
    case CK_BlockPointerToObjCPointerCast:
    case CK_AnyPointerToBlockPointerCast:
      return EvalAddr(SubExpr, refVars, ParentDecl);

    case CK_ArrayToPointerDecay:
      return EvalVal(SubExpr, refVars, ParentDecl);

    case CK_BitCast:
      if (SubExpr->getType()->isAnyPointerType() ||
          SubExpr->getType()->isBlockPointerType() ||
          SubExpr->getType()->isObjCQualifiedIdType())
        return EvalAddr(SubExpr, refVars, ParentDecl);
      else
        return nullptr;

    default:
      return nullptr;
    }
  }

  case Stmt::MaterializeTemporaryExprClass:
    if (Expr *Result =
            EvalAddr(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
                     refVars, ParentDecl))
      return Result;
    return E;

  // Everything else: we simply don't reason about them.
  default:
    return nullptr;
  }
}

/// EvalVal - This function complements EvalAddr in the mutual recursion.
/// See the comments for EvalAddr for more details.
static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars,
                     Decl *ParentDecl) {
  do {
    // We should only be called for evaluating non-pointer expressions, or
    // expressions with a pointer type that are not used as references but
    // instead are l-values (e.g., DeclRefExpr with a pointer type).

    // Our "symbolic interpreter" is just a dispatch off the currently
    // viewed AST node. We then recursively traverse the AST by calling
    // EvalAddr and EvalVal appropriately.

    E = E->IgnoreParens();
    switch (E->getStmtClass()) {
    case Stmt::ImplicitCastExprClass: {
      ImplicitCastExpr *IE = cast<ImplicitCastExpr>(E);
      if (IE->getValueKind() == VK_LValue) {
        E = IE->getSubExpr();
        continue;
      }
      return nullptr;
    }

    case Stmt::ExprWithCleanupsClass:
      return EvalVal(cast<ExprWithCleanups>(E)->getSubExpr(), refVars,
                     ParentDecl);

    case Stmt::DeclRefExprClass: {
      // When we hit a DeclRefExpr we are looking at code that refers to a
      // variable's name. If it's not a reference variable we check if it has
      // local storage within the function, and if so, return the expression.
      DeclRefExpr *DR = cast<DeclRefExpr>(E);

      // If we leave the immediate function, the lifetime isn't about to end.
      if (DR->refersToEnclosingVariableOrCapture())
        return nullptr;

      if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl())) {
        // Check if it refers to itself, e.g. "int& i = i;".
        if (V == ParentDecl)
          return DR;

        if (V->hasLocalStorage()) {
          if (!V->getType()->isReferenceType())
            return DR;

          // Reference variable, follow through to the expression that
          // it points to.
          if (V->hasInit()) {
            // Add the reference variable to the "trail".
            refVars.push_back(DR);
            return EvalVal(V->getInit(), refVars, V);
          }
        }
      }

      return nullptr;
    }

    case Stmt::UnaryOperatorClass: {
      // The only unary operator that makes sense to handle here
      // is Deref. All others don't resolve to a "name." This includes
      // handling all sorts of rvalues passed to a unary operator.
      UnaryOperator *U = cast<UnaryOperator>(E);

      if (U->getOpcode() == UO_Deref)
        return EvalAddr(U->getSubExpr(), refVars, ParentDecl);

      return nullptr;
    }

    case Stmt::ArraySubscriptExprClass: {
      // Array subscripts are potential references to data on the stack. We
      // retrieve the DeclRefExpr* for the array variable if it indeed
      // has local storage.
      return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase(), refVars,
                      ParentDecl);
    }

    case Stmt::ConditionalOperatorClass: {
      // For conditional operators we need to see if either the LHS or RHS
      // are non-NULL Expr's. If one is non-NULL, we return it.
      ConditionalOperator *C = cast<ConditionalOperator>(E);

      // Handle the GNU extension for missing LHS.
      if (Expr *LHSExpr = C->getLHS()) {
        // In C++, we can have a throw-expression, which has 'void' type.
        if (!LHSExpr->getType()->isVoidType())
          if (Expr *LHS = EvalVal(LHSExpr, refVars, ParentDecl))
            return LHS;
      }

      // In C++, we can have a throw-expression, which has 'void' type.
      if (C->getRHS()->getType()->isVoidType())
        return nullptr;

      return EvalVal(C->getRHS(), refVars, ParentDecl);
    }

    // Accesses to members are potential references to data on the stack.
    case Stmt::MemberExprClass: {
      MemberExpr *M = cast<MemberExpr>(E);

      // Check for indirect access. We only want direct field accesses.
      if (M->isArrow())
        return nullptr;

      // Check whether the member type is itself a reference, in which case
      // we're not going to refer to the member, but to what the member
      // refers to.
      if (M->getMemberDecl()->getType()->isReferenceType())
        return nullptr;

      return EvalVal(M->getBase(), refVars, ParentDecl);
    }

    case Stmt::MaterializeTemporaryExprClass:
      if (Expr *Result =
              EvalVal(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
                      refVars, ParentDecl))
        return Result;
      return E;

    default:
      // Check that we don't return or take the address of a reference to a
      // temporary. This is only useful in C++.
      if (!E->isTypeDependent() && E->isRValue())
        return E;

      // Everything else: we simply don't reason about them.
      return nullptr;
    }
  } while (true);
}

void Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                              SourceLocation ReturnLoc,
                              bool isObjCMethod,
                              const AttrVec *Attrs,
                              const FunctionDecl *FD) {
  CheckReturnStackAddr(*this, RetValExp, lhsType, ReturnLoc);

  // Check if the return value is null but should not be.
  if (((Attrs && hasSpecificAttr<ReturnsNonNullAttr>(*Attrs)) ||
       (!isObjCMethod && isNonNullType(Context, lhsType))) &&
      CheckNonNullExpr(*this, RetValExp))
    Diag(ReturnLoc, diag::warn_null_ret)
      << (isObjCMethod ? 1 : 0) << RetValExp->getSourceRange();

  // C++11 [basic.stc.dynamic.allocation]p4:
  //   If an allocation function declared with a non-throwing
  //   exception-specification fails to allocate storage, it shall return
  //   a null pointer. Any other allocation function that fails to allocate
  //   storage shall indicate failure only by throwing an exception [...]
  if (FD) {
    OverloadedOperatorKind Op = FD->getOverloadedOperator();
    if (Op == OO_New || Op == OO_Array_New) {
      const FunctionProtoType *Proto =
          FD->getType()->castAs<FunctionProtoType>();
      if (!Proto->isNothrow(Context, /*ResultIfDependent*/true) &&
          CheckNonNullExpr(*this, RetValExp))
        Diag(ReturnLoc, diag::warn_operator_new_returns_null)
          << FD << getLangOpts().CPlusPlus11;
    }
  }
}

//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//

/// Check for comparisons of floating point operands using != and ==.
/// Issue a warning if these are not self-comparisons, as they are not likely
/// to do what the programmer intended.
void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS) {
  Expr *LeftExprSansParen = LHS->IgnoreParenImpCasts();
  Expr *RightExprSansParen = RHS->IgnoreParenImpCasts();

  // Special case: check for x == x (which is OK).
  // Do not emit warnings for such cases.
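  // e.g. a deliberate self-comparison such as 'if (x == x)' (a common NaN
  // test) stays silent, while 'if (x == y)' on floats is diagnosed below.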
if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen)) if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen)) if (DRL->getDecl() == DRR->getDecl()) return; // Special case: check for comparisons against literals that can be exactly // represented by APFloat. In such cases, do not emit a warning. This // is a heuristic: often comparison against such literals are used to // detect if a value in a variable has not changed. This clearly can // lead to false negatives. if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) { if (FLL->isExact()) return; } else if (FloatingLiteral* FLR = dyn_cast<FloatingLiteral>(RightExprSansParen)) if (FLR->isExact()) return; // Check for comparisons with builtin types. if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen)) if (CL->getBuiltinCallee()) return; if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen)) if (CR->getBuiltinCallee()) return; // Emit the diagnostic. Diag(Loc, diag::warn_floatingpoint_eq) << LHS->getSourceRange() << RHS->getSourceRange(); } //===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===// //===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===// namespace { /// Structure recording the 'active' range of an integer-valued /// expression. struct IntRange { /// The number of bits active in the int. unsigned Width; /// True if the int is known not to have negative values. bool NonNegative; IntRange(unsigned Width, bool NonNegative) : Width(Width), NonNegative(NonNegative) {} /// Returns the range of the bool type. static IntRange forBoolType() { return IntRange(1, true); } /// Returns the range of an opaque value of the given integral type. static IntRange forValueOfType(ASTContext &C, QualType T) { return forValueOfCanonicalType(C, T->getCanonicalTypeInternal().getTypePtr()); } /// Returns the range of an opaque value of a canonical integral type. static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) { assert(T->isCanonicalUnqualified()); if (const VectorType *VT = dyn_cast<VectorType>(T)) T = VT->getElementType().getTypePtr(); if (const ComplexType *CT = dyn_cast<ComplexType>(T)) T = CT->getElementType().getTypePtr(); if (const AtomicType *AT = dyn_cast<AtomicType>(T)) T = AT->getValueType().getTypePtr(); // For enum types, use the known bit width of the enumerators. if (const EnumType *ET = dyn_cast<EnumType>(T)) { EnumDecl *Enum = ET->getDecl(); if (!Enum->isCompleteDefinition()) return IntRange(C.getIntWidth(QualType(T, 0)), false); unsigned NumPositive = Enum->getNumPositiveBits(); unsigned NumNegative = Enum->getNumNegativeBits(); if (NumNegative == 0) return IntRange(NumPositive, true/*NonNegative*/); else return IntRange(std::max(NumPositive + 1, NumNegative), false/*NonNegative*/); } const BuiltinType *BT = cast<BuiltinType>(T); assert(BT->isInteger()); return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger()); } /// Returns the "target" range of a canonical integral type, i.e. /// the range of values expressible in the type. /// /// This matches forValueOfCanonicalType except that enums have the /// full range of their type, not the range of their enumerators. 
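  /// For example, for a hypothetical 'enum E { A = 0, B = 3 };',
  /// forValueOfCanonicalType gives a 2-bit non-negative range (just wide
  /// enough for the enumerators), whereas this function gives the full
  /// width of the underlying integer type.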
  static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
    assert(T->isCanonicalUnqualified());

    if (const VectorType *VT = dyn_cast<VectorType>(T))
      T = VT->getElementType().getTypePtr();
    if (const ComplexType *CT = dyn_cast<ComplexType>(T))
      T = CT->getElementType().getTypePtr();
    if (const AtomicType *AT = dyn_cast<AtomicType>(T))
      T = AT->getValueType().getTypePtr();
    if (const EnumType *ET = dyn_cast<EnumType>(T))
      T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();

    const BuiltinType *BT = cast<BuiltinType>(T);
    assert(BT->isInteger());

    return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
  }

  /// Returns the supremum of two ranges: i.e. their conservative merge.
  static IntRange join(IntRange L, IntRange R) {
    return IntRange(std::max(L.Width, R.Width),
                    L.NonNegative && R.NonNegative);
  }

  /// Returns the infimum of two ranges: i.e. their aggressive merge.
  static IntRange meet(IntRange L, IntRange R) {
    return IntRange(std::min(L.Width, R.Width),
                    L.NonNegative || R.NonNegative);
  }
};

static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
                              unsigned MaxWidth) {
  if (value.isSigned() && value.isNegative())
    return IntRange(value.getMinSignedBits(), false);

  if (value.getBitWidth() > MaxWidth)
    value = value.trunc(MaxWidth);

  // isNonNegative() just checks the sign bit without considering
  // signedness.
  return IntRange(value.getActiveBits(), true);
}

static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
                              unsigned MaxWidth) {
  if (result.isInt())
    return GetValueRange(C, result.getInt(), MaxWidth);

  if (result.isVector()) {
    IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
    for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
      IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
      R = IntRange::join(R, El);
    }
    return R;
  }

  if (result.isComplexInt()) {
    IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
    IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
    return IntRange::join(R, I);
  }

  // This can happen with lossless casts to intptr_t of "based" lvalues.
  // Assume it might use arbitrary bits.
  // FIXME: The only reason we need to pass the type in here is to get
  // the sign right on this one case. It would be nice if APValue
  // preserved this.
  assert(result.isLValue() || result.isAddrLabelDiff());
  return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
}

static QualType GetExprType(Expr *E) {
  QualType Ty = E->getType();
  if (const AtomicType *AtomicRHS = Ty->getAs<AtomicType>())
    Ty = AtomicRHS->getValueType();
  return Ty;
}

/// Pseudo-evaluate the given integer expression, estimating the
/// range of values it might take.
///
/// \param MaxWidth - the width to which the value will be truncated
static IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
  E = E->IgnoreParens();

  // Try a full evaluation first.
  Expr::EvalResult result;
  if (E->EvaluateAsRValue(result, C))
    return GetValueRange(C, result.Val, GetExprType(E), MaxWidth);

  // I think we only want to look through implicit casts here; if the
  // user has an explicit widening cast, we should treat the value as
  // being of the new, wider type.
if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) { if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue) return GetExprRange(C, CE->getSubExpr(), MaxWidth); IntRange OutputTypeRange = IntRange::forValueOfType(C, GetExprType(CE)); bool isIntegerCast = (CE->getCastKind() == CK_IntegralCast); // Assume that non-integer casts can span the full range of the type. if (!isIntegerCast) return OutputTypeRange; IntRange SubRange = GetExprRange(C, CE->getSubExpr(), std::min(MaxWidth, OutputTypeRange.Width)); // Bail out if the subexpr's range is as wide as the cast type. if (SubRange.Width >= OutputTypeRange.Width) return OutputTypeRange; // Otherwise, we take the smaller width, and we're non-negative if // either the output type or the subexpr is. return IntRange(SubRange.Width, SubRange.NonNegative || OutputTypeRange.NonNegative); } if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { // If we can fold the condition, just take that operand. bool CondResult; if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C)) return GetExprRange(C, CondResult ? CO->getTrueExpr() : CO->getFalseExpr(), MaxWidth); // Otherwise, conservatively merge. IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth); IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth); return IntRange::join(L, R); } if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { switch (BO->getOpcode()) { // Boolean-valued operations are single-bit and positive. case BO_LAnd: case BO_LOr: case BO_LT: case BO_GT: case BO_LE: case BO_GE: case BO_EQ: case BO_NE: return IntRange::forBoolType(); // The type of the assignments is the type of the LHS, so the RHS // is not necessarily the same type. case BO_MulAssign: case BO_DivAssign: case BO_RemAssign: case BO_AddAssign: case BO_SubAssign: case BO_XorAssign: case BO_OrAssign: // TODO: bitfields? return IntRange::forValueOfType(C, GetExprType(E)); // Simple assignments just pass through the RHS, which will have // been coerced to the LHS type. case BO_Assign: // TODO: bitfields? return GetExprRange(C, BO->getRHS(), MaxWidth); // Operations with opaque sources are black-listed. case BO_PtrMemD: case BO_PtrMemI: return IntRange::forValueOfType(C, GetExprType(E)); // Bitwise-and uses the *infinum* of the two source ranges. case BO_And: case BO_AndAssign: return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth), GetExprRange(C, BO->getRHS(), MaxWidth)); // Left shift gets black-listed based on a judgement call. case BO_Shl: // ...except that we want to treat '1 << (blah)' as logically // positive. It's an important idiom. if (IntegerLiteral *I = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) { if (I->getValue() == 1) { IntRange R = IntRange::forValueOfType(C, GetExprType(E)); return IntRange(R.Width, /*NonNegative*/ true); } } LLVM_FALLTHROUGH; // HLSL Change case BO_ShlAssign: return IntRange::forValueOfType(C, GetExprType(E)); // Right shift by a constant can narrow its left argument. case BO_Shr: case BO_ShrAssign: { IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth); // If the shift amount is a positive constant, drop the width by // that much. llvm::APSInt shift; if (BO->getRHS()->isIntegerConstantExpr(shift, C) && shift.isNonNegative()) { unsigned zext = shift.getZExtValue(); if (zext >= L.Width) L.Width = (L.NonNegative ? 0 : 1); else L.Width -= zext; } return L; } // Comma acts as its right operand. case BO_Comma: return GetExprRange(C, BO->getRHS(), MaxWidth); // Black-list pointer subtractions. 
case BO_Sub: if (BO->getLHS()->getType()->isPointerType()) return IntRange::forValueOfType(C, GetExprType(E)); break; // The width of a division result is mostly determined by the size // of the LHS. case BO_Div: { // Don't 'pre-truncate' the operands. unsigned opWidth = C.getIntWidth(GetExprType(E)); IntRange L = GetExprRange(C, BO->getLHS(), opWidth); // If the divisor is constant, use that. llvm::APSInt divisor; if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) { unsigned log2 = divisor.logBase2(); // floor(log_2(divisor)) if (log2 >= L.Width) L.Width = (L.NonNegative ? 0 : 1); else L.Width = std::min(L.Width - log2, MaxWidth); return L; } // Otherwise, just use the LHS's width. IntRange R = GetExprRange(C, BO->getRHS(), opWidth); return IntRange(L.Width, L.NonNegative && R.NonNegative); } // The result of a remainder can't be larger than the result of // either side. case BO_Rem: { // Don't 'pre-truncate' the operands. unsigned opWidth = C.getIntWidth(GetExprType(E)); IntRange L = GetExprRange(C, BO->getLHS(), opWidth); IntRange R = GetExprRange(C, BO->getRHS(), opWidth); IntRange meet = IntRange::meet(L, R); meet.Width = std::min(meet.Width, MaxWidth); return meet; } // The default behavior is okay for these. case BO_Mul: case BO_Add: case BO_Xor: case BO_Or: break; } // The default case is to treat the operation as if it were closed // on the narrowest type that encompasses both operands. IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth); IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth); return IntRange::join(L, R); } if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { switch (UO->getOpcode()) { // Boolean-valued operations are white-listed. case UO_LNot: return IntRange::forBoolType(); // Operations with opaque sources are black-listed. case UO_Deref: case UO_AddrOf: // should be impossible return IntRange::forValueOfType(C, GetExprType(E)); default: return GetExprRange(C, UO->getSubExpr(), MaxWidth); } } if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) return GetExprRange(C, OVE->getSourceExpr(), MaxWidth); if (FieldDecl *BitField = E->getSourceBitField()) return IntRange(BitField->getBitWidthValue(C), BitField->getType()->isUnsignedIntegerOrEnumerationType()); return IntRange::forValueOfType(C, GetExprType(E)); } static IntRange GetExprRange(ASTContext &C, Expr *E) { return GetExprRange(C, E, C.getIntWidth(GetExprType(E))); } /// Checks whether the given value, which currently has the given /// source semantics, has the same value when coerced through the /// target semantics. static bool IsSameFloatAfterCast(const llvm::APFloat &value, const llvm::fltSemantics &Src, const llvm::fltSemantics &Tgt) { llvm::APFloat truncated = value; bool ignored; truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored); truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored); return truncated.bitwiseIsEqual(value); } /// Checks whether the given value, which currently has the given /// source semantics, has the same value when coerced through the /// target semantics. /// /// The value might be a vector of floats (or a complex number). 
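/// For instance, a double holding 1.0 converts to float and back unchanged,
/// while one holding 0.1 does not; only the latter counts as a lossy cast
/// for callers of this predicate.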
static bool IsSameFloatAfterCast(const APValue &value, const llvm::fltSemantics &Src, const llvm::fltSemantics &Tgt) { if (value.isFloat()) return IsSameFloatAfterCast(value.getFloat(), Src, Tgt); if (value.isVector()) { for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i) if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt)) return false; return true; } assert(value.isComplexFloat()); return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) && IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt)); } static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC); static bool IsZero(Sema &S, Expr *E) { // Suppress cases where we are comparing against an enum constant. if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) if (isa<EnumConstantDecl>(DR->getDecl())) return false; // Suppress cases where the '0' value is expanded from a macro. if (E->getLocStart().isMacroID()) return false; llvm::APSInt Value; return E->isIntegerConstantExpr(Value, S.Context) && Value == 0; } static bool HasEnumType(Expr *E) { // Strip off implicit integral promotions. while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) { if (ICE->getCastKind() != CK_IntegralCast && ICE->getCastKind() != CK_NoOp) break; E = ICE->getSubExpr(); } return E->getType()->isEnumeralType(); } static void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) { // Disable warning in template instantiations. if (!S.ActiveTemplateInstantiations.empty()) return; BinaryOperatorKind op = E->getOpcode(); if (E->isValueDependent()) return; if (op == BO_LT && IsZero(S, E->getRHS())) { S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison) << "< 0" << "false" << HasEnumType(E->getLHS()) << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); } else if (op == BO_GE && IsZero(S, E->getRHS())) { S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison) << ">= 0" << "true" << HasEnumType(E->getLHS()) << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); } else if (op == BO_GT && IsZero(S, E->getLHS())) { S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison) << "0 >" << "false" << HasEnumType(E->getRHS()) << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); } else if (op == BO_LE && IsZero(S, E->getLHS())) { S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison) << "0 <=" << "true" << HasEnumType(E->getRHS()) << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange(); } } static void DiagnoseOutOfRangeComparison(Sema &S, BinaryOperator *E, Expr *Constant, Expr *Other, llvm::APSInt Value, bool RhsConstant) { // Disable warning in template instantiations. if (!S.ActiveTemplateInstantiations.empty()) return; // TODO: Investigate using GetExprRange() to get tighter bounds // on the bit ranges. QualType OtherT = Other->getType(); if (const auto *AT = OtherT->getAs<AtomicType>()) OtherT = AT->getValueType(); IntRange OtherRange = IntRange::forValueOfType(S.Context, OtherT); unsigned OtherWidth = OtherRange.Width; bool OtherIsBooleanType = Other->isKnownToHaveBooleanValue(); // 0 values are handled later by CheckTrivialUnsignedComparison(). if ((Value == 0) && (!OtherIsBooleanType)) return; BinaryOperatorKind op = E->getOpcode(); bool IsTrue = true; // Used for diagnostic printout. 
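  // e.g. for a hypothetical 'unsigned char c', the comparison 'c == 300' is
  // always false because 300 cannot be represented in 'unsigned char'; the
  // diagnostic below says so explicitly.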
enum { LiteralConstant = 0, CXXBoolLiteralTrue, CXXBoolLiteralFalse } LiteralOrBoolConstant = LiteralConstant; if (!OtherIsBooleanType) { QualType ConstantT = Constant->getType(); QualType CommonT = E->getLHS()->getType(); if (S.Context.hasSameUnqualifiedType(OtherT, ConstantT)) return; assert((OtherT->isIntegerType() && ConstantT->isIntegerType()) && "comparison with non-integer type"); bool ConstantSigned = ConstantT->isSignedIntegerType(); bool CommonSigned = CommonT->isSignedIntegerType(); bool EqualityOnly = false; if (CommonSigned) { // The common type is signed, therefore no signed to unsigned conversion. if (!OtherRange.NonNegative) { // Check that the constant is representable in type OtherT. if (ConstantSigned) { if (OtherWidth >= Value.getMinSignedBits()) return; } else { // !ConstantSigned if (OtherWidth >= Value.getActiveBits() + 1) return; } } else { // !OtherSigned // Check that the constant is representable in type OtherT. // Negative values are out of range. if (ConstantSigned) { if (Value.isNonNegative() && OtherWidth >= Value.getActiveBits()) return; } else { // !ConstantSigned if (OtherWidth >= Value.getActiveBits()) return; } } } else { // !CommonSigned if (OtherRange.NonNegative) { if (OtherWidth >= Value.getActiveBits()) return; } else { // OtherSigned assert(!ConstantSigned && "Two signed types converted to unsigned types."); // Check to see if the constant is representable in OtherT. if (OtherWidth > Value.getActiveBits()) return; // Check to see if the constant is equivalent to a negative value // cast to CommonT. if (S.Context.getIntWidth(ConstantT) == S.Context.getIntWidth(CommonT) && Value.isNegative() && Value.getMinSignedBits() <= OtherWidth) return; // The constant value rests between values that OtherT can represent // after conversion. Relational comparison still works, but equality // comparisons will be tautological. EqualityOnly = true; } } bool PositiveConstant = !ConstantSigned || Value.isNonNegative(); if (op == BO_EQ || op == BO_NE) { IsTrue = op == BO_NE; } else if (EqualityOnly) { return; } else if (RhsConstant) { if (op == BO_GT || op == BO_GE) IsTrue = !PositiveConstant; else // op == BO_LT || op == BO_LE IsTrue = PositiveConstant; } else { if (op == BO_LT || op == BO_LE) IsTrue = !PositiveConstant; else // op == BO_GT || op == BO_GE IsTrue = PositiveConstant; } } else { // Other isKnownToHaveBooleanValue enum CompareBoolWithConstantResult { AFals, ATrue, Unkwn }; enum ConstantValue { LT_Zero, Zero, One, GT_One, SizeOfConstVal }; enum ConstantSide { Lhs, Rhs, SizeOfConstSides }; static const struct LinkedConditions { CompareBoolWithConstantResult BO_LT_OP[SizeOfConstSides][SizeOfConstVal]; CompareBoolWithConstantResult BO_GT_OP[SizeOfConstSides][SizeOfConstVal]; CompareBoolWithConstantResult BO_LE_OP[SizeOfConstSides][SizeOfConstVal]; CompareBoolWithConstantResult BO_GE_OP[SizeOfConstSides][SizeOfConstVal]; CompareBoolWithConstantResult BO_EQ_OP[SizeOfConstSides][SizeOfConstVal]; CompareBoolWithConstantResult BO_NE_OP[SizeOfConstSides][SizeOfConstVal]; } TruthTable = { // Constant on LHS. | Constant on RHS. 
| // LT_Zero| Zero | One |GT_One| LT_Zero| Zero | One |GT_One| { { ATrue, Unkwn, AFals, AFals }, { AFals, AFals, Unkwn, ATrue } }, { { AFals, AFals, Unkwn, ATrue }, { ATrue, Unkwn, AFals, AFals } }, { { ATrue, ATrue, Unkwn, AFals }, { AFals, Unkwn, ATrue, ATrue } }, { { AFals, Unkwn, ATrue, ATrue }, { ATrue, ATrue, Unkwn, AFals } }, { { AFals, Unkwn, Unkwn, AFals }, { AFals, Unkwn, Unkwn, AFals } }, { { ATrue, Unkwn, Unkwn, ATrue }, { ATrue, Unkwn, Unkwn, ATrue } } }; bool ConstantIsBoolLiteral = isa<CXXBoolLiteralExpr>(Constant); enum ConstantValue ConstVal = Zero; if (Value.isUnsigned() || Value.isNonNegative()) { if (Value == 0) { LiteralOrBoolConstant = ConstantIsBoolLiteral ? CXXBoolLiteralFalse : LiteralConstant; ConstVal = Zero; } else if (Value == 1) { LiteralOrBoolConstant = ConstantIsBoolLiteral ? CXXBoolLiteralTrue : LiteralConstant; ConstVal = One; } else { LiteralOrBoolConstant = LiteralConstant; ConstVal = GT_One; } } else { ConstVal = LT_Zero; } CompareBoolWithConstantResult CmpRes; switch (op) { case BO_LT: CmpRes = TruthTable.BO_LT_OP[RhsConstant][ConstVal]; break; case BO_GT: CmpRes = TruthTable.BO_GT_OP[RhsConstant][ConstVal]; break; case BO_LE: CmpRes = TruthTable.BO_LE_OP[RhsConstant][ConstVal]; break; case BO_GE: CmpRes = TruthTable.BO_GE_OP[RhsConstant][ConstVal]; break; case BO_EQ: CmpRes = TruthTable.BO_EQ_OP[RhsConstant][ConstVal]; break; case BO_NE: CmpRes = TruthTable.BO_NE_OP[RhsConstant][ConstVal]; break; default: CmpRes = Unkwn; break; } if (CmpRes == AFals) { IsTrue = false; } else if (CmpRes == ATrue) { IsTrue = true; } else { return; } } // If this is a comparison to an enum constant, include that // constant in the diagnostic. const EnumConstantDecl *ED = nullptr; if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Constant)) ED = dyn_cast<EnumConstantDecl>(DR->getDecl()); SmallString<64> PrettySourceValue; llvm::raw_svector_ostream OS(PrettySourceValue); if (ED) OS << '\'' << *ED << "' (" << Value << ")"; else OS << Value; S.DiagRuntimeBehavior( E->getOperatorLoc(), E, S.PDiag(diag::warn_out_of_range_compare) << OS.str() << LiteralOrBoolConstant << OtherT << (OtherIsBooleanType && !OtherT->isBooleanType()) << IsTrue << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange()); } /// Analyze the operands of the given comparison. Implements the /// fallback case from AnalyzeComparison. static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) { AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); } /// \brief Implements -Wsign-compare. /// /// \param E the binary operator to check for warnings static void AnalyzeComparison(Sema &S, BinaryOperator *E) { // The type the comparison is being performed in. QualType T = E->getLHS()->getType(); // HLSL Change Starts // HLSL has a different set of standard conversions allowed. Until // the warning analysis is updated, simply skip this step. if (S.getLangOpts().HLSL) { return; } // HLSL Change Ends // Only analyze comparison operators where both sides have been converted to // the same type. if (!S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())) return AnalyzeImpConvsInComparison(S, E); // Don't analyze value-dependent comparisons directly. 
if (E->isValueDependent()) return AnalyzeImpConvsInComparison(S, E); Expr *LHS = E->getLHS()->IgnoreParenImpCasts(); Expr *RHS = E->getRHS()->IgnoreParenImpCasts(); bool IsComparisonConstant = false; // Check whether an integer constant comparison results in a value // of 'true' or 'false'. if (T->isIntegralType(S.Context)) { llvm::APSInt RHSValue; bool IsRHSIntegralLiteral = RHS->isIntegerConstantExpr(RHSValue, S.Context); llvm::APSInt LHSValue; bool IsLHSIntegralLiteral = LHS->isIntegerConstantExpr(LHSValue, S.Context); if (IsRHSIntegralLiteral && !IsLHSIntegralLiteral) DiagnoseOutOfRangeComparison(S, E, RHS, LHS, RHSValue, true); else if (!IsRHSIntegralLiteral && IsLHSIntegralLiteral) DiagnoseOutOfRangeComparison(S, E, LHS, RHS, LHSValue, false); else IsComparisonConstant = (IsRHSIntegralLiteral && IsLHSIntegralLiteral); } else if (!T->hasUnsignedIntegerRepresentation()) IsComparisonConstant = E->isIntegerConstantExpr(S.Context); // We don't do anything special if this isn't an unsigned integral // comparison: we're only interested in integral comparisons, and // signed comparisons only happen in cases we don't care to warn about. // // We also don't care about value-dependent expressions or expressions // whose result is a constant. if (!T->hasUnsignedIntegerRepresentation() || IsComparisonConstant) return AnalyzeImpConvsInComparison(S, E); // Check to see if one of the (unmodified) operands is of different // signedness. Expr *signedOperand, *unsignedOperand; if (LHS->getType()->hasSignedIntegerRepresentation()) { assert(!RHS->getType()->hasSignedIntegerRepresentation() && "unsigned comparison between two signed integer expressions?"); signedOperand = LHS; unsignedOperand = RHS; } else if (RHS->getType()->hasSignedIntegerRepresentation()) { signedOperand = RHS; unsignedOperand = LHS; } else { CheckTrivialUnsignedComparison(S, E); return AnalyzeImpConvsInComparison(S, E); } // Otherwise, calculate the effective range of the signed operand. IntRange signedRange = GetExprRange(S.Context, signedOperand); // Go ahead and analyze implicit conversions in the operands. Note // that we skip the implicit conversions on both sides. AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc()); AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc()); // If the signed range is non-negative, -Wsign-compare won't fire, // but we should still check for comparisons which are always true // or false. if (signedRange.NonNegative) return CheckTrivialUnsignedComparison(S, E); // For (in)equality comparisons, if the unsigned operand is a // constant which cannot collide with a overflowed signed operand, // then reinterpreting the signed operand as unsigned will not // change the result of the comparison. if (E->isEqualityOp()) { unsigned comparisonWidth = S.Context.getIntWidth(T); IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand); // We should never be unable to prove that the unsigned operand is // non-negative. assert(unsignedRange.NonNegative && "unsigned range includes negative?"); if (unsignedRange.Width < comparisonWidth) return; } S.DiagRuntimeBehavior(E->getOperatorLoc(), E, S.PDiag(diag::warn_mixed_sign_comparison) << LHS->getType() << RHS->getType() << LHS->getSourceRange() << RHS->getSourceRange()); } /// Analyzes an attempt to assign the given value to a bitfield. /// /// Returns true if there was something fishy about the attempt. 
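///
/// For example (hypothetical):
///   struct S { int bits : 3; };
///   S s; s.bits = 9;   // stores 1, not 9; warned below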
static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init, SourceLocation InitLoc) { assert(Bitfield->isBitField()); if (Bitfield->isInvalidDecl()) return false; // White-list bool bitfields. if (Bitfield->getType()->isBooleanType()) return false; // Ignore value- or type-dependent expressions. if (Bitfield->getBitWidth()->isValueDependent() || Bitfield->getBitWidth()->isTypeDependent() || Init->isValueDependent() || Init->isTypeDependent()) return false; Expr *OriginalInit = Init->IgnoreParenImpCasts(); llvm::APSInt Value; if (!OriginalInit->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects)) return false; unsigned OriginalWidth = Value.getBitWidth(); unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context); if (OriginalWidth <= FieldWidth) return false; // Compute the value which the bitfield will contain. llvm::APSInt TruncatedValue = Value.trunc(FieldWidth); TruncatedValue.setIsSigned(Bitfield->getType()->isSignedIntegerType()); // Check whether the stored value is equal to the original value. TruncatedValue = TruncatedValue.extend(OriginalWidth); if (llvm::APSInt::isSameValue(Value, TruncatedValue)) return false; // Special-case bitfields of width 1: booleans are naturally 0/1, and // therefore don't strictly fit into a signed bitfield of width 1. if (FieldWidth == 1 && Value == 1) return false; std::string PrettyValue = Value.toString(10); std::string PrettyTrunc = TruncatedValue.toString(10); S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant) << PrettyValue << PrettyTrunc << OriginalInit->getType() << Init->getSourceRange(); return true; } /// Analyze the given simple or compound assignment for warning-worthy /// operations. static void AnalyzeAssignment(Sema &S, BinaryOperator *E) { // Just recurse on the LHS. AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc()); S.DiagnoseGloballyCoherentMismatch(E->getRHS(), E->getLHS()->getType(), E->getOperatorLoc()); // We want to recurse on the RHS as normal unless we're assigning to // a bitfield. if (FieldDecl *Bitfield = E->getLHS()->getSourceBitField()) { if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(), E->getOperatorLoc())) { // Recurse, ignoring any implicit conversions on the RHS. return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(), E->getOperatorLoc()); } } AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc()); } /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T, SourceLocation CContext, unsigned diag, bool pruneControlFlow = false) { if (pruneControlFlow) { S.DiagRuntimeBehavior(E->getExprLoc(), E, S.PDiag(diag) << SourceType << T << E->getSourceRange() << SourceRange(CContext)); return; } S.Diag(E->getExprLoc(), diag) << SourceType << T << E->getSourceRange() << SourceRange(CContext); } /// Diagnose an implicit cast; purely a helper for CheckImplicitConversion. static void DiagnoseImpCast(Sema &S, Expr *E, QualType T, SourceLocation CContext, unsigned diag, bool pruneControlFlow = false) { DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow); } /// Diagnose an implicit cast from a literal expression. Does not warn when the /// cast wouldn't lose information. void DiagnoseFloatingLiteralImpCast(Sema &S, FloatingLiteral *FL, QualType T, SourceLocation CContext) { // Try to convert the literal exactly to an integer. If we can, don't warn. 
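  // e.g. 'int i = 3.0;' converts exactly and stays silent, while
  // 'int i = 3.5;' warns that the literal becomes 3.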
  bool isExact = false;

  const llvm::APFloat &Value = FL->getValue();
  llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
                            T->hasUnsignedIntegerRepresentation());
  if (Value.convertToInteger(IntegerValue, llvm::APFloat::rmTowardZero,
                             &isExact) == llvm::APFloat::opOK &&
      isExact)
    return;

  // FIXME: Force the precision of the source value down so we don't print
  // digits which are usually useless (we don't really care here if we
  // truncate a digit by accident in edge cases). Ideally, APFloat::toString
  // would automatically print the shortest representation, but it's a bit
  // tricky to implement.
  SmallString<16> PrettySourceValue;
  unsigned precision = llvm::APFloat::semanticsPrecision(Value.getSemantics());
  precision = (precision * 59 + 195) / 196;
  Value.toString(PrettySourceValue, precision);

  SmallString<16> PrettyTargetValue;
  if (T->isSpecificBuiltinType(BuiltinType::Bool))
    PrettyTargetValue = IntegerValue == 0 ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

  S.Diag(FL->getExprLoc(), diag::warn_impcast_literal_float_to_integer)
    << FL->getType() << T.getUnqualifiedType() << PrettySourceValue
    << PrettyTargetValue << FL->getSourceRange() << SourceRange(CContext);
}

std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) {
  if (!Range.Width) return "0";

  llvm::APSInt ValueInRange = Value;
  ValueInRange.setIsSigned(!Range.NonNegative);
  ValueInRange = ValueInRange.trunc(Range.Width);
  return ValueInRange.toString(10);
}

static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
  if (!isa<ImplicitCastExpr>(Ex))
    return false;

  Expr *InnerE = Ex->IgnoreParenImpCasts();
  const Type *Target = S.Context.getCanonicalType(Ex->getType()).getTypePtr();
  const Type *Source =
      S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
  if (Target->isDependentType())
    return false;

  const BuiltinType *FloatCandidateBT =
      dyn_cast<BuiltinType>(ToBool ? Source : Target);
  const Type *BoolCandidateType = ToBool ? Target : Source;

  return (BoolCandidateType->isSpecificBuiltinType(BuiltinType::Bool) &&
          FloatCandidateBT && (FloatCandidateBT->isFloatingPoint()));
}

void CheckImplicitArgumentConversions(Sema &S, CallExpr *TheCall,
                                      SourceLocation CC) {
  // HLSL Change Begin
  if (FunctionDecl *FD = TheCall->getDirectCallee()) {
    CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
    unsigned ArgIdx = 0;
    unsigned ParmIdx = 0;
    if (MD && MD->isInstance())
      ++ParmIdx;
    for (; ArgIdx < TheCall->getNumArgs() && ParmIdx < FD->getNumParams();
         ++ArgIdx, ++ParmIdx) {
      ParmVarDecl *PD = FD->getParamDecl(ParmIdx);
      Expr *CurrA = TheCall->getArg(ArgIdx);
      S.DiagnoseGloballyCoherentMismatch(CurrA, PD->getType(), CC);
    }
  }
  // HLSL Change End

  unsigned NumArgs = TheCall->getNumArgs();
  for (unsigned i = 0; i < NumArgs; ++i) {
    Expr *CurrA = TheCall->getArg(i);
    if (!IsImplicitBoolFloatConversion(S, CurrA, true))
      continue;

    bool IsSwapped = ((i > 0) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i - 1), false));
    IsSwapped |= ((i < (NumArgs - 1)) &&
        IsImplicitBoolFloatConversion(S, TheCall->getArg(i + 1), false));
    if (IsSwapped) {
      // Warn on this floating-point to bool conversion.
      DiagnoseImpCast(S, CurrA->IgnoreParenImpCasts(), CurrA->getType(), CC,
                      diag::warn_impcast_floating_point_to_bool);
    }
  }
}

static void DiagnoseNullConversion(Sema &S, Expr *E, QualType T,
                                   SourceLocation CC) {
  if (S.Diags.isIgnored(diag::warn_impcast_null_pointer_to_integer,
                        E->getExprLoc()))
    return;

  // Check for NULL (GNUNull) or nullptr (CXX11_nullptr).
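  // e.g. 'int i = NULL;' converts a null pointer constant to an integer and
  // is diagnosed below, with a fix-it suggesting a literal zero.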
const Expr::NullPointerConstantKind NullKind = E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull); if (NullKind != Expr::NPCK_GNUNull && NullKind != Expr::NPCK_CXX11_nullptr) return; // Return if target type is a safe conversion. if (T->isAnyPointerType() || T->isBlockPointerType() || T->isMemberPointerType() || !T->isScalarType() || T->isNullPtrType()) return; SourceLocation Loc = E->getSourceRange().getBegin(); // __null is usually wrapped in a macro. Go up a macro if that is the case. if (NullKind == Expr::NPCK_GNUNull) { if (Loc.isMacroID()) Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first; } // Only warn if the null and context location are in the same macro expansion. if (S.SourceMgr.getFileID(Loc) != S.SourceMgr.getFileID(CC)) return; S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer) << (NullKind == Expr::NPCK_CXX11_nullptr) << T << clang::SourceRange(CC) << FixItHint::CreateReplacement(Loc, S.getFixItZeroLiteralForType(T, Loc)); } static void checkObjCArrayLiteral(Sema &S, QualType TargetType, ObjCArrayLiteral *ArrayLiteral); static void checkObjCDictionaryLiteral(Sema &S, QualType TargetType, ObjCDictionaryLiteral *DictionaryLiteral); /// Check a single element within a collection literal against the /// target element type. static void checkObjCCollectionLiteralElement(Sema &S, QualType TargetElementType, Expr *Element, unsigned ElementKind) { // Skip a bitcast to 'id' or qualified 'id'. if (auto ICE = dyn_cast<ImplicitCastExpr>(Element)) { if (ICE->getCastKind() == CK_BitCast && ICE->getSubExpr()->getType()->getAs<ObjCObjectPointerType>()) Element = ICE->getSubExpr(); } QualType ElementType = Element->getType(); ExprResult ElementResult(Element); if (ElementType->getAs<ObjCObjectPointerType>() && S.CheckSingleAssignmentConstraints(TargetElementType, ElementResult, false, false) != Sema::Compatible) { S.Diag(Element->getLocStart(), diag::warn_objc_collection_literal_element) << ElementType << ElementKind << TargetElementType << Element->getSourceRange(); } if (auto ArrayLiteral = dyn_cast<ObjCArrayLiteral>(Element)) checkObjCArrayLiteral(S, TargetElementType, ArrayLiteral); else if (auto DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(Element)) checkObjCDictionaryLiteral(S, TargetElementType, DictionaryLiteral); } /// Check an Objective-C array literal being converted to the given /// target type. static void checkObjCArrayLiteral(Sema &S, QualType TargetType, ObjCArrayLiteral *ArrayLiteral) { if (!S.NSArrayDecl) return; const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); if (!TargetObjCPtr) return; if (TargetObjCPtr->isUnspecialized() || TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() != S.NSArrayDecl->getCanonicalDecl()) return; auto TypeArgs = TargetObjCPtr->getTypeArgs(); if (TypeArgs.size() != 1) return; QualType TargetElementType = TypeArgs[0]; for (unsigned I = 0, N = ArrayLiteral->getNumElements(); I != N; ++I) { checkObjCCollectionLiteralElement(S, TargetElementType, ArrayLiteral->getElement(I), 0); } } /// Check an Objective-C dictionary literal being converted to the given /// target type. 
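/// For example, a hypothetical
///   NSDictionary<NSString *, NSNumber *> *d = @{ @"key" : @"value" };
/// is flagged because the object element is an NSString, not an NSNumber.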
static void checkObjCDictionaryLiteral( Sema &S, QualType TargetType, ObjCDictionaryLiteral *DictionaryLiteral) { if (!S.NSDictionaryDecl) return; const auto *TargetObjCPtr = TargetType->getAs<ObjCObjectPointerType>(); if (!TargetObjCPtr) return; if (TargetObjCPtr->isUnspecialized() || TargetObjCPtr->getInterfaceDecl()->getCanonicalDecl() != S.NSDictionaryDecl->getCanonicalDecl()) return; auto TypeArgs = TargetObjCPtr->getTypeArgs(); if (TypeArgs.size() != 2) return; QualType TargetKeyType = TypeArgs[0]; QualType TargetObjectType = TypeArgs[1]; for (unsigned I = 0, N = DictionaryLiteral->getNumElements(); I != N; ++I) { auto Element = DictionaryLiteral->getKeyValueElement(I); checkObjCCollectionLiteralElement(S, TargetKeyType, Element.Key, 1); checkObjCCollectionLiteralElement(S, TargetObjectType, Element.Value, 2); } } void CheckImplicitConversion(Sema &S, Expr *E, QualType T, SourceLocation CC, bool *ICContext = nullptr) { if (E->isTypeDependent() || E->isValueDependent()) return; const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr(); const Type *Target = S.Context.getCanonicalType(T).getTypePtr(); if (Source == Target) return; if (Target->isDependentType()) return; // If the conversion context location is invalid don't complain. We also // don't want to emit a warning if the issue occurs from the expansion of // a system macro. The problem is that 'getSpellingLoc()' is slow, so we // delay this check as long as possible. Once we detect we are in that // scenario, we just return. if (CC.isInvalid()) return; // Diagnose implicit casts to bool. if (Target->isSpecificBuiltinType(BuiltinType::Bool)) { if (isa<StringLiteral>(E)) // Warn on string literal to bool. Checks for string literals in logical // and expressions, for instance, assert(0 && "error here"), are // prevented by a check in AnalyzeImplicitConversions(). return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_string_literal_to_bool); if (isa<ObjCStringLiteral>(E) || isa<ObjCArrayLiteral>(E) || isa<ObjCDictionaryLiteral>(E) || isa<ObjCBoxedExpr>(E)) { // This covers the literal expressions that evaluate to Objective-C // objects. return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_objective_c_literal_to_bool); } if (Source->isPointerType() || Source->canDecayToPointerType()) { // Warn on pointer to bool conversion that is always true. S.DiagnoseAlwaysNonNullPointer(E, Expr::NPCK_NotNull, /*IsEqual*/ false, SourceRange(CC)); } } // Check implicit casts from Objective-C collection literals to specialized // collection types, e.g., NSArray<NSString *> *. if (auto *ArrayLiteral = dyn_cast<ObjCArrayLiteral>(E)) checkObjCArrayLiteral(S, QualType(Target, 0), ArrayLiteral); else if (auto *DictionaryLiteral = dyn_cast<ObjCDictionaryLiteral>(E)) checkObjCDictionaryLiteral(S, QualType(Target, 0), DictionaryLiteral); // Strip vector types. if (isa<VectorType>(Source)) { if (!isa<VectorType>(Target)) { if (S.SourceMgr.isInSystemMacro(CC)) return; return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar); } // If the vector cast is cast between two vectors of the same size, it is // a bitcast, not a conversion. 
if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target)) return; Source = cast<VectorType>(Source)->getElementType().getTypePtr(); Target = cast<VectorType>(Target)->getElementType().getTypePtr(); } if (auto VecTy = dyn_cast<VectorType>(Target)) Target = VecTy->getElementType().getTypePtr(); // HLSL Change Begins: Vector/matrix implicit conversions already diagnosed if (hlsl::IsVectorType(&S, QualType(Source, 0)) || hlsl::IsMatrixType(&S, QualType(Source, 0)) || hlsl::IsVectorType(&S, QualType(Target, 0)) || hlsl::IsMatrixType(&S, QualType(Target, 0))) { return; } // HLSL Change Ends // Strip complex types. if (isa<ComplexType>(Source)) { if (!isa<ComplexType>(Target)) { if (S.SourceMgr.isInSystemMacro(CC)) return; return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_complex_scalar); } Source = cast<ComplexType>(Source)->getElementType().getTypePtr(); Target = cast<ComplexType>(Target)->getElementType().getTypePtr(); } const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source); const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target); // If the source is floating point... if (SourceBT && SourceBT->isFloatingPoint()) { // ...and the target is floating point... if (TargetBT && TargetBT->isFloatingPoint()) { // ...then warn if we're dropping FP rank. // HLSL Change Begin - Warn on both promotions and conversions. // Don't warn on Literal float. if (SourceBT->getKind() == BuiltinType::LitFloat) return; int Order = S.getASTContext().getFloatingTypeOrder(QualType(SourceBT, 0), QualType(TargetBT, 0)); // Builtin FP kinds are ordered by increasing FP rank. if (Order > 0) { // Don't warn about float constants that are precisely // representable in the target type. Expr::EvalResult result; if (E->EvaluateAsRValue(result, S.Context)) { // Value might be a float, a float vector, or a float complex. if (IsSameFloatAfterCast(result.Val, S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)), S.Context.getFloatTypeSemantics(QualType(SourceBT, 0)))) return; } if (S.SourceMgr.isInSystemMacro(CC)) return; DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision); } else if (Order < 0) { DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_double_promotion); } // HLSL Change End return; } // If the target is integral, always warn. if (TargetBT && TargetBT->isInteger()) { if (S.SourceMgr.isInSystemMacro(CC)) return; Expr *InnerE = E->IgnoreParenImpCasts(); // We also want to warn on, e.g., "int i = -1.234" if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE)) if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus) InnerE = UOp->getSubExpr()->IgnoreParenImpCasts(); if (FloatingLiteral *FL = dyn_cast<FloatingLiteral>(InnerE)) { DiagnoseFloatingLiteralImpCast(S, FL, T, CC); } else { DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_integer); } } // If the target is bool, warn if expr is a function or method call. if (Target->isSpecificBuiltinType(BuiltinType::Bool) && isa<CallExpr>(E)) { // Check last argument of function call to see if it is an // implicit cast from a type matching the type the result // is being cast to. 
    CallExpr *CEx = cast<CallExpr>(E);
    unsigned NumArgs = CEx->getNumArgs();
    if (NumArgs > 0) {
      Expr *LastA = CEx->getArg(NumArgs - 1);
      Expr *InnerE = LastA->IgnoreParenImpCasts();
      const Type *InnerType =
        S.Context.getCanonicalType(InnerE->getType()).getTypePtr();
      if (isa<ImplicitCastExpr>(LastA) && (InnerType == Target)) {
        // Warn on this floating-point to bool conversion.
        DiagnoseImpCast(S, E, T, CC,
                        diag::warn_impcast_floating_point_to_bool);
      }
    }
  }
  return;
  }

  DiagnoseNullConversion(S, E, T, CC);

  if (!Source->isIntegerType() || !Target->isIntegerType())
    return;

  // TODO: remove this early return once the false positives for constant->bool
  // in templates, macros, etc, are reduced or removed.
  if (Target->isSpecificBuiltinType(BuiltinType::Bool))
    return;

  IntRange SourceRange = GetExprRange(S.Context, E);
  IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);

  if (SourceRange.Width > TargetRange.Width) {
    // If the source is a constant, use a default-on diagnostic.
    // TODO: this should happen for bitfield stores, too.
    llvm::APSInt Value(32);
    if (E->isIntegerConstantExpr(Value, S.Context)) {
      if (S.SourceMgr.isInSystemMacro(CC))
        return;

      std::string PrettySourceValue = Value.toString(10);
      std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);

      S.DiagRuntimeBehavior(
          E->getExprLoc(), E,
          S.PDiag(diag::warn_impcast_integer_precision_constant)
              << PrettySourceValue << PrettyTargetValue << E->getType() << T
              << E->getSourceRange() << clang::SourceRange(CC));
      return;
    }

    // People want to build with -Wshorten-64-to-32 and not -Wconversion.
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
      return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
                             /* pruneControlFlow */ true);
    return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
  }

  if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
      (!TargetRange.NonNegative && SourceRange.NonNegative &&
       SourceRange.Width == TargetRange.Width)) {
    if (S.SourceMgr.isInSystemMacro(CC))
      return;

    unsigned DiagID = diag::warn_impcast_integer_sign;

    // Traditionally, gcc has warned about this under -Wsign-compare.
    // We also want to warn about it in -Wconversion.
    // So if -Wconversion is off, use a completely identical diagnostic
    // in the sign-compare group.
    // When we are inside a conditional context, just record the fact via
    // ICContext; the conditional-checking code decides whether the warning
    // is actually emitted.
    if (ICContext) {
      DiagID = diag::warn_impcast_integer_sign_conditional;
      *ICContext = true;
    }

    return DiagnoseImpCast(S, E, T, CC, DiagID);
  }

  // Diagnose conversions between different enumeration types.
  // In C, we pretend that the type of an EnumConstantDecl is its enumeration
  // type, to give us better diagnostics.
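  // (Illustrative example, in C:
  //    enum Fruit { Apple };  enum Color { Red };
  //    enum Color c = Apple;  // warns: conversion between different
  //                           // enumeration types
  //  )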
QualType SourceType = E->getType(); if (!S.getLangOpts().CPlusPlus) { if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) { EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext()); SourceType = S.Context.getTypeDeclType(Enum); Source = S.Context.getCanonicalType(SourceType).getTypePtr(); } } if (const EnumType *SourceEnum = Source->getAs<EnumType>()) if (const EnumType *TargetEnum = Target->getAs<EnumType>()) if (SourceEnum->getDecl()->hasNameForLinkage() && TargetEnum->getDecl()->hasNameForLinkage() && SourceEnum != TargetEnum) { if (S.SourceMgr.isInSystemMacro(CC)) return; return DiagnoseImpCast(S, E, SourceType, T, CC, diag::warn_impcast_different_enum_types); } return; } void CheckConditionalOperator(Sema &S, ConditionalOperator *E, SourceLocation CC, QualType T); void CheckConditionalOperand(Sema &S, Expr *E, QualType T, SourceLocation CC, bool &ICContext) { E = E->IgnoreParenImpCasts(); if (isa<ConditionalOperator>(E)) return CheckConditionalOperator(S, cast<ConditionalOperator>(E), CC, T); AnalyzeImplicitConversions(S, E, CC); if (E->getType() != T) return CheckImplicitConversion(S, E, T, CC, &ICContext); return; } void CheckConditionalOperator(Sema &S, ConditionalOperator *E, SourceLocation CC, QualType T) { AnalyzeImplicitConversions(S, E->getCond(), E->getQuestionLoc()); bool Suspicious = false; CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious); CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious); // If -Wconversion would have warned about either of the candidates // for a signedness conversion to the context type... if (!Suspicious) return; // ...but it's currently ignored... if (!S.Diags.isIgnored(diag::warn_impcast_integer_sign_conditional, CC)) return; // ...then check whether it would have warned about either of the // candidates for a signedness conversion to the condition type. if (E->getType() == T) return; Suspicious = false; CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(), E->getType(), CC, &Suspicious); if (!Suspicious) CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(), E->getType(), CC, &Suspicious); } /// CheckBoolLikeConversion - Check conversion of given expression to boolean. /// Input argument E is a logical expression. static void CheckBoolLikeConversion(Sema &S, Expr *E, SourceLocation CC) { if (S.getLangOpts().Bool) return; CheckImplicitConversion(S, E->IgnoreParenImpCasts(), S.Context.BoolTy, CC); } /// AnalyzeImplicitConversions - Find and report any interesting /// implicit conversions in the given expression. There are a couple /// of competing diagnostics here, -Wconversion and -Wsign-compare. void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) { QualType T = OrigE->getType(); Expr *E = OrigE->IgnoreParenImpCasts(); if (E->isTypeDependent() || E->isValueDependent()) return; // For conditional operators, we analyze the arguments as if they // were being fed directly into the output. if (isa<ConditionalOperator>(E)) { ConditionalOperator *CO = cast<ConditionalOperator>(E); CheckConditionalOperator(S, CO, CC, T); return; } // Check implicit argument conversions for function calls. if (CallExpr *Call = dyn_cast<CallExpr>(E)) CheckImplicitArgumentConversions(S, Call, CC); // Go ahead and check any implicit conversions we might have skipped. // The non-canonical typecheck is just an optimization; // CheckImplicitConversion will filter out dead implicit conversions. 
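  // (For example, in 'int i = 1.5;' the only implicit cast is at the top
  //  level of the initializer, so it is handled here rather than by the
  //  recursion over children below.)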
  if (E->getType() != T)
    CheckImplicitConversion(S, E, T, CC);

  // Now continue drilling into this expression.

  if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
    if (POE->getResultExpr())
      E = POE->getResultExpr();
  }

  if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
    if (OVE->getSourceExpr())
      AnalyzeImplicitConversions(S, OVE->getSourceExpr(), CC);
    return;
  }

  // Skip past explicit casts.
  if (isa<ExplicitCastExpr>(E)) {
    E = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreParenImpCasts();
    return AnalyzeImplicitConversions(S, E, CC);
  }

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
    // Do a somewhat different check with comparison operators.
    if (BO->isComparisonOp())
      return AnalyzeComparison(S, BO);

    // And with simple assignments.
    if (BO->getOpcode() == BO_Assign)
      return AnalyzeAssignment(S, BO);
  }

  // These break the otherwise-useful invariant below.  Fortunately,
  // we don't really need to recurse into them, because any internal
  // expressions should have been analyzed already when they were
  // built into statements.
  if (isa<StmtExpr>(E)) return;

  // Don't descend into unevaluated contexts.
  if (isa<UnaryExprOrTypeTraitExpr>(E)) return;

  // Now just recurse over the expression's children.
  CC = E->getExprLoc();
  BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
  bool IsLogicalAndOperator = BO && BO->getOpcode() == BO_LAnd;
  for (Stmt *SubStmt : E->children()) {
    Expr *ChildExpr = dyn_cast_or_null<Expr>(SubStmt);
    if (!ChildExpr)
      continue;

    if (IsLogicalAndOperator &&
        isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
      // Ignore checking string literals that are in logical and operators.
      // This is a common pattern for asserts.
      continue;
    AnalyzeImplicitConversions(S, ChildExpr, CC);
  }

  if (BO && BO->isLogicalOp()) {
    Expr *SubExpr = BO->getLHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());

    SubExpr = BO->getRHS()->IgnoreParenImpCasts();
    if (!IsLogicalAndOperator || !isa<StringLiteral>(SubExpr))
      ::CheckBoolLikeConversion(S, SubExpr, BO->getExprLoc());
  }

  if (const UnaryOperator *U = dyn_cast<UnaryOperator>(E))
    if (U->getOpcode() == UO_LNot)
      ::CheckBoolLikeConversion(S, U->getSubExpr(), CC);
}

} // end anonymous namespace

enum { AddressOf, FunctionPointer, ArrayPointer };

// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
// Returns true when emitting a warning about taking the address of a
// reference.
static bool CheckForReference(Sema &SemaRef, const Expr *E,
                              PartialDiagnostic PD) {
  E = E->IgnoreParenImpCasts();

  const FunctionDecl *FD = nullptr;

  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    if (!DRE->getDecl()->getType()->isReferenceType())
      return false;
  } else if (const MemberExpr *M = dyn_cast<MemberExpr>(E)) {
    if (!M->getMemberDecl()->getType()->isReferenceType())
      return false;
  } else if (const CallExpr *Call = dyn_cast<CallExpr>(E)) {
    if (!Call->getCallReturnType(SemaRef.Context)->isReferenceType())
      return false;
    FD = Call->getDirectCallee();
  } else {
    return false;
  }

  SemaRef.Diag(E->getExprLoc(), PD);

  // If possible, point to location of function.
  if (FD) {
    SemaRef.Diag(FD->getLocation(), diag::note_reference_is_return_value) << FD;
  }

  return true;
}

// Returns true if the SourceLocation is expanded from any macro body.
// Returns false if the SourceLocation is invalid, is not in a macro expansion,
// or is expanded from a top-level macro argument.
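// (Illustrative example: given '#define CHECK(x) ((x) ? 1 : 0)', a location
//  inside the macro body returns true, while the location of an argument
//  written at the call site, e.g. the 'p' in 'CHECK(p)', returns false.)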
static bool IsInAnyMacroBody(const SourceManager &SM, SourceLocation Loc) { if (Loc.isInvalid()) return false; while (Loc.isMacroID()) { if (SM.isMacroBodyExpansion(Loc)) return true; Loc = SM.getImmediateMacroCallerLoc(Loc); } return false; } /// \brief Diagnose pointers that are always non-null. /// \param E the expression containing the pointer /// \param NullKind NPCK_NotNull if E is a cast to bool, otherwise, E is /// compared to a null pointer /// \param IsEqual True when the comparison is equal to a null pointer /// \param Range Extra SourceRange to highlight in the diagnostic void Sema::DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullKind, bool IsEqual, SourceRange Range) { if (!E) return; // Don't warn inside macros. if (E->getExprLoc().isMacroID()) { const SourceManager &SM = getSourceManager(); if (IsInAnyMacroBody(SM, E->getExprLoc()) || IsInAnyMacroBody(SM, Range.getBegin())) return; } E = E->IgnoreImpCasts(); const bool IsCompare = NullKind != Expr::NPCK_NotNull; if (isa<CXXThisExpr>(E)) { unsigned DiagID = IsCompare ? diag::warn_this_null_compare : diag::warn_this_bool_conversion; Diag(E->getExprLoc(), DiagID) << E->getSourceRange() << Range << IsEqual; return; } bool IsAddressOf = false; if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { if (UO->getOpcode() != UO_AddrOf) return; IsAddressOf = true; E = UO->getSubExpr(); } if (IsAddressOf) { unsigned DiagID = IsCompare ? diag::warn_address_of_reference_null_compare : diag::warn_address_of_reference_bool_conversion; PartialDiagnostic PD = PDiag(DiagID) << E->getSourceRange() << Range << IsEqual; if (CheckForReference(*this, E, PD)) { return; } } // Expect to find a single Decl. Skip anything more complicated. ValueDecl *D = nullptr; if (DeclRefExpr *R = dyn_cast<DeclRefExpr>(E)) { D = R->getDecl(); } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) { D = M->getMemberDecl(); } // Weak Decls can be null. if (!D || D->isWeak()) return; // Check for parameter decl with nonnull attribute if (const ParmVarDecl* PV = dyn_cast<ParmVarDecl>(D)) { if (getCurFunction() && !getCurFunction()->ModifiedNonNullParams.count(PV)) if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(PV->getDeclContext())) { unsigned NumArgs = FD->getNumParams(); llvm::SmallBitVector AttrNonNull(NumArgs); for (const auto *NonNull : FD->specific_attrs<NonNullAttr>()) { if (!NonNull->args_size()) { AttrNonNull.set(0, NumArgs); break; } for (unsigned Val : NonNull->args()) { if (Val >= NumArgs) continue; AttrNonNull.set(Val); } } if (!AttrNonNull.empty()) for (unsigned i = 0; i < NumArgs; ++i) if (FD->getParamDecl(i) == PV && (AttrNonNull[i] || PV->hasAttr<NonNullAttr>())) { std::string Str; llvm::raw_string_ostream S(Str); E->printPretty(S, nullptr, getPrintingPolicy()); unsigned DiagID = IsCompare ? diag::warn_nonnull_parameter_compare : diag::warn_cast_nonnull_to_bool; Diag(E->getExprLoc(), DiagID) << S.str() << E->getSourceRange() << Range << IsEqual; return; } } } QualType T = D->getType(); const bool IsArray = T->isArrayType(); const bool IsFunction = T->isFunctionType(); // Address of function is used to silence the function warning. if (IsAddressOf && IsFunction) { return; } // Found nothing. if (!IsAddressOf && !IsFunction && !IsArray) return; // Pretty print the expression for the diagnostic. std::string Str; llvm::raw_string_ostream S(Str); E->printPretty(S, nullptr, getPrintingPolicy()); unsigned DiagID = IsCompare ? 
                                  diag::warn_null_pointer_compare
                                : diag::warn_impcast_pointer_to_bool;
  unsigned DiagType;
  if (IsAddressOf)
    DiagType = AddressOf;
  else if (IsFunction)
    DiagType = FunctionPointer;
  else if (IsArray)
    DiagType = ArrayPointer;
  else
    llvm_unreachable("Could not determine diagnostic.");
  Diag(E->getExprLoc(), DiagID) << DiagType << S.str() << E->getSourceRange()
                                << Range << IsEqual;

  if (!IsFunction)
    return;

  // Suggest '&' to silence the function warning.
  Diag(E->getExprLoc(), diag::note_function_warning_silence)
      << FixItHint::CreateInsertion(E->getLocStart(), "&");

  // Check to see if '()' fixit should be emitted.
  QualType ReturnType;
  UnresolvedSet<4> NonTemplateOverloads;
  tryExprAsCall(*E, ReturnType, NonTemplateOverloads);
  if (ReturnType.isNull())
    return;

  if (IsCompare) {
    // There are two cases here.  If there is a null constant, only suggest
    // the fixit for a pointer return type.  If the null is 0, suggest it if
    // the return type is a pointer or an integer type.
    if (!ReturnType->isPointerType()) {
      if (NullKind == Expr::NPCK_ZeroExpression ||
          NullKind == Expr::NPCK_ZeroLiteral) {
        if (!ReturnType->isIntegerType())
          return;
      } else {
        return;
      }
    }
  } else { // !IsCompare
    // For function to bool, only suggest if the function pointer has bool
    // return type.
    if (!ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
      return;
  }
  Diag(E->getExprLoc(), diag::note_function_to_function_call)
      << FixItHint::CreateInsertion(getLocForEndOfToken(E->getLocEnd()), "()");
}

/// Diagnoses "dangerous" implicit conversions within the given
/// expression (which is a full expression).  Implements -Wconversion
/// and -Wsign-compare.
///
/// \param CC the "context" location of the implicit conversion, i.e. the
///   location of the syntactic entity requiring the implicit conversion
void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
  // Don't diagnose in unevaluated contexts.
  if (isUnevaluatedContext())
    return;

  // Don't diagnose for value- or type-dependent expressions.
  if (E->isTypeDependent() || E->isValueDependent())
    return;

  // Check for array bounds violations in cases where the check isn't triggered
  // elsewhere for other Expr types (like BinaryOperators), e.g. when an
  // ArraySubscriptExpr is on the RHS of a variable initialization.
  CheckArrayAccess(E);

  // This is not the right CC for (e.g.) a variable initialization.
  AnalyzeImplicitConversions(*this, E, CC);
}

/// CheckBoolLikeConversion - Check conversion of given expression to boolean.
/// Input argument E is a logical expression.
void Sema::CheckBoolLikeConversion(Expr *E, SourceLocation CC) {
  ::CheckBoolLikeConversion(*this, E, CC);
}

/// Diagnose when an expression is an integer constant expression and its
/// evaluation results in integer overflow.
void Sema::CheckForIntOverflow(Expr *E) {
  if (isa<BinaryOperator>(E->IgnoreParenCasts()))
    E->IgnoreParenCasts()->EvaluateForOverflow(Context);
}

namespace {
/// \brief Visitor for expressions which looks for unsequenced operations on
/// the same object.
class SequenceChecker : public EvaluatedExprVisitor<SequenceChecker> {
  typedef EvaluatedExprVisitor<SequenceChecker> Base;

  /// \brief A tree of sequenced regions within an expression. Two regions are
  /// unsequenced if one is an ancestor or a descendant of the other. When we
  /// finish processing an expression with sequencing, such as a comma
  /// expression, we fold its tree nodes into its parent, since they are
  /// unsequenced with respect to nodes we will visit later.
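  ///
  /// For example, 'i = i++ + 1' contains two unsequenced modifications of
  /// 'i' and is diagnosed, while '(i++, i = 2)' is not: the comma operator
  /// sequences the increment before the assignment.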
class SequenceTree { struct Value { explicit Value(unsigned Parent) : Parent(Parent), Merged(false) {} unsigned Parent : 31; bool Merged : 1; }; SmallVector<Value, 8> Values; public: /// \brief A region within an expression which may be sequenced with respect /// to some other region. class Seq { explicit Seq(unsigned N) : Index(N) {} unsigned Index; friend class SequenceTree; public: Seq() : Index(0) {} }; SequenceTree() { Values.push_back(Value(0)); } Seq root() const { return Seq(0); } /// \brief Create a new sequence of operations, which is an unsequenced /// subset of \p Parent. This sequence of operations is sequenced with /// respect to other children of \p Parent. Seq allocate(Seq Parent) { Values.push_back(Value(Parent.Index)); return Seq(Values.size() - 1); } /// \brief Merge a sequence of operations into its parent. void merge(Seq S) { Values[S.Index].Merged = true; } /// \brief Determine whether two operations are unsequenced. This operation /// is asymmetric: \p Cur should be the more recent sequence, and \p Old /// should have been merged into its parent as appropriate. bool isUnsequenced(Seq Cur, Seq Old) { unsigned C = representative(Cur.Index); unsigned Target = representative(Old.Index); while (C >= Target) { if (C == Target) return true; C = Values[C].Parent; } return false; } private: /// \brief Pick a representative for a sequence. unsigned representative(unsigned K) { if (Values[K].Merged) // Perform path compression as we go. return Values[K].Parent = representative(Values[K].Parent); return K; } }; /// An object for which we can track unsequenced uses. typedef NamedDecl *Object; /// Different flavors of object usage which we track. We only track the /// least-sequenced usage of each kind. enum UsageKind { /// A read of an object. Multiple unsequenced reads are OK. UK_Use, /// A modification of an object which is sequenced before the value /// computation of the expression, such as ++n in C++. UK_ModAsValue, /// A modification of an object which is not sequenced before the value /// computation of the expression, such as n++. UK_ModAsSideEffect, UK_Count = UK_ModAsSideEffect + 1 }; struct Usage { Usage() : Use(nullptr), Seq() {} Expr *Use; SequenceTree::Seq Seq; }; struct UsageInfo { UsageInfo() : Diagnosed(false) {} Usage Uses[UK_Count]; /// Have we issued a diagnostic for this variable already? bool Diagnosed; }; typedef llvm::SmallDenseMap<Object, UsageInfo, 16> UsageInfoMap; Sema &SemaRef; /// Sequenced regions within the expression. SequenceTree Tree; /// Declaration modifications and references which we have seen. UsageInfoMap UsageMap; /// The region we are currently within. SequenceTree::Seq Region; /// Filled in with declarations which were modified as a side-effect /// (that is, post-increment operations). SmallVectorImpl<std::pair<Object, Usage> > *ModAsSideEffect; /// Expressions to check later. We defer checking these to reduce /// stack usage. SmallVectorImpl<Expr *> &WorkList; /// RAII object wrapping the visitation of a sequenced subexpression of an /// expression. At the end of this process, the side-effects of the evaluation /// become sequenced with respect to the value computation of the result, so /// we downgrade any UK_ModAsSideEffect within the evaluation to /// UK_ModAsValue. 
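  ///
  /// For example, while the LHS of a comma operator is being visited, 'n++'
  /// is recorded as UK_ModAsSideEffect; when that subexpression is finished,
  /// the destructor below re-records it as UK_ModAsValue, since the comma
  /// has sequenced the side effect for everything visited afterwards.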
struct SequencedSubexpression { SequencedSubexpression(SequenceChecker &Self) : Self(Self), OldModAsSideEffect(Self.ModAsSideEffect) { Self.ModAsSideEffect = &ModAsSideEffect; } ~SequencedSubexpression() { for (auto MI = ModAsSideEffect.rbegin(), ME = ModAsSideEffect.rend(); MI != ME; ++MI) { UsageInfo &U = Self.UsageMap[MI->first]; auto &SideEffectUsage = U.Uses[UK_ModAsSideEffect]; Self.addUsage(U, MI->first, SideEffectUsage.Use, UK_ModAsValue); SideEffectUsage = MI->second; } Self.ModAsSideEffect = OldModAsSideEffect; } SequenceChecker &Self; SmallVector<std::pair<Object, Usage>, 4> ModAsSideEffect; SmallVectorImpl<std::pair<Object, Usage> > *OldModAsSideEffect; }; /// RAII object wrapping the visitation of a subexpression which we might /// choose to evaluate as a constant. If any subexpression is evaluated and /// found to be non-constant, this allows us to suppress the evaluation of /// the outer expression. class EvaluationTracker { public: EvaluationTracker(SequenceChecker &Self) : Self(Self), Prev(Self.EvalTracker), EvalOK(true) { Self.EvalTracker = this; } ~EvaluationTracker() { Self.EvalTracker = Prev; if (Prev) Prev->EvalOK &= EvalOK; } bool evaluate(const Expr *E, bool &Result) { if (!EvalOK || E->isValueDependent()) return false; EvalOK = E->EvaluateAsBooleanCondition(Result, Self.SemaRef.Context); return EvalOK; } private: SequenceChecker &Self; EvaluationTracker *Prev; bool EvalOK; } *EvalTracker; /// \brief Find the object which is produced by the specified expression, /// if any. Object getObject(Expr *E, bool Mod) const { E = E->IgnoreParenCasts(); if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { if (Mod && (UO->getOpcode() == UO_PreInc || UO->getOpcode() == UO_PreDec)) return getObject(UO->getSubExpr(), Mod); } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { if (BO->getOpcode() == BO_Comma) return getObject(BO->getRHS(), Mod); if (Mod && BO->isAssignmentOp()) return getObject(BO->getLHS(), Mod); } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) { // FIXME: Check for more interesting cases, like "x.n = ++x.n". if (isa<CXXThisExpr>(ME->getBase()->IgnoreParenCasts())) return ME->getMemberDecl(); } else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) // FIXME: If this is a reference, map through to its value. return DRE->getDecl(); return nullptr; } /// \brief Note that an object was modified or used by an expression. void addUsage(UsageInfo &UI, Object O, Expr *Ref, UsageKind UK) { Usage &U = UI.Uses[UK]; if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) { if (UK == UK_ModAsSideEffect && ModAsSideEffect) ModAsSideEffect->push_back(std::make_pair(O, U)); U.Use = Ref; U.Seq = Region; } } /// \brief Check whether a modification or use conflicts with a prior usage. void checkUsage(Object O, UsageInfo &UI, Expr *Ref, UsageKind OtherKind, bool IsModMod) { if (UI.Diagnosed) return; const Usage &U = UI.Uses[OtherKind]; if (!U.Use || !Tree.isUnsequenced(Region, U.Seq)) return; Expr *Mod = U.Use; Expr *ModOrUse = Ref; if (OtherKind == UK_Use) std::swap(Mod, ModOrUse); SemaRef.Diag(Mod->getExprLoc(), IsModMod ? diag::warn_unsequenced_mod_mod : diag::warn_unsequenced_mod_use) << O << SourceRange(ModOrUse->getExprLoc()); UI.Diagnosed = true; } void notePreUse(Object O, Expr *Use) { UsageInfo &U = UsageMap[O]; // Uses conflict with other modifications. 
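    // (e.g. the read of 'i' in 'i + ++i' is unsequenced with the increment)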
checkUsage(O, U, Use, UK_ModAsValue, false); } void notePostUse(Object O, Expr *Use) { UsageInfo &U = UsageMap[O]; checkUsage(O, U, Use, UK_ModAsSideEffect, false); addUsage(U, O, Use, UK_Use); } void notePreMod(Object O, Expr *Mod) { UsageInfo &U = UsageMap[O]; // Modifications conflict with other modifications and with uses. checkUsage(O, U, Mod, UK_ModAsValue, true); checkUsage(O, U, Mod, UK_Use, false); } void notePostMod(Object O, Expr *Use, UsageKind UK) { UsageInfo &U = UsageMap[O]; checkUsage(O, U, Use, UK_ModAsSideEffect, true); addUsage(U, O, Use, UK); } public: SequenceChecker(Sema &S, Expr *E, SmallVectorImpl<Expr *> &WorkList) : Base(S.Context), SemaRef(S), Region(Tree.root()), ModAsSideEffect(nullptr), WorkList(WorkList), EvalTracker(nullptr) { Visit(E); } void VisitStmt(Stmt *S) { // Skip all statements which aren't expressions for now. } void VisitExpr(Expr *E) { // By default, just recurse to evaluated subexpressions. Base::VisitStmt(E); } void VisitCastExpr(CastExpr *E) { Object O = Object(); if (E->getCastKind() == CK_LValueToRValue) O = getObject(E->getSubExpr(), false); if (O) notePreUse(O, E); VisitExpr(E); if (O) notePostUse(O, E); } void VisitBinComma(BinaryOperator *BO) { // C++11 [expr.comma]p1: // Every value computation and side effect associated with the left // expression is sequenced before every value computation and side // effect associated with the right expression. SequenceTree::Seq LHS = Tree.allocate(Region); SequenceTree::Seq RHS = Tree.allocate(Region); SequenceTree::Seq OldRegion = Region; { SequencedSubexpression SeqLHS(*this); Region = LHS; Visit(BO->getLHS()); } Region = RHS; Visit(BO->getRHS()); Region = OldRegion; // Forget that LHS and RHS are sequenced. They are both unsequenced // with respect to other stuff. Tree.merge(LHS); Tree.merge(RHS); } void VisitBinAssign(BinaryOperator *BO) { // The modification is sequenced after the value computation of the LHS // and RHS, so check it before inspecting the operands and update the // map afterwards. Object O = getObject(BO->getLHS(), true); if (!O) return VisitExpr(BO); notePreMod(O, BO); // C++11 [expr.ass]p7: // E1 op= E2 is equivalent to E1 = E1 op E2, except that E1 is evaluated // only once. // // Therefore, for a compound assignment operator, O is considered used // everywhere except within the evaluation of E1 itself. if (isa<CompoundAssignOperator>(BO)) notePreUse(O, BO); Visit(BO->getLHS()); if (isa<CompoundAssignOperator>(BO)) notePostUse(O, BO); Visit(BO->getRHS()); // C++11 [expr.ass]p1: // the assignment is sequenced [...] before the value computation of the // assignment expression. // C11 6.5.16/3 has no such rule. notePostMod(O, BO, SemaRef.getLangOpts().CPlusPlus ? UK_ModAsValue : UK_ModAsSideEffect); } void VisitCompoundAssignOperator(CompoundAssignOperator *CAO) { VisitBinAssign(CAO); } void VisitUnaryPreInc(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } void VisitUnaryPreDec(UnaryOperator *UO) { VisitUnaryPreIncDec(UO); } void VisitUnaryPreIncDec(UnaryOperator *UO) { Object O = getObject(UO->getSubExpr(), true); if (!O) return VisitExpr(UO); notePreMod(O, UO); Visit(UO->getSubExpr()); // C++11 [expr.pre.incr]p1: // the expression ++x is equivalent to x+=1 notePostMod(O, UO, SemaRef.getLangOpts().CPlusPlus ? 
                      UK_ModAsValue : UK_ModAsSideEffect);
  }

  void VisitUnaryPostInc(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostDec(UnaryOperator *UO) { VisitUnaryPostIncDec(UO); }
  void VisitUnaryPostIncDec(UnaryOperator *UO) {
    Object O = getObject(UO->getSubExpr(), true);
    if (!O)
      return VisitExpr(UO);

    notePreMod(O, UO);
    Visit(UO->getSubExpr());
    notePostMod(O, UO, UK_ModAsSideEffect);
  }

  /// Don't visit the RHS of '&&' or '||' if it might not be evaluated.
  void VisitBinLOr(BinaryOperator *BO) {
    // The side-effects of the LHS of an '||' are sequenced before the
    // value computation of the RHS, and hence before the value computation
    // of the '||' itself, unless the LHS evaluates to a nonzero value (in
    // which case the RHS is not evaluated at all). We treat them as if they
    // were unconditionally sequenced.
    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Visit(BO->getLHS());
    }

    bool Result;
    if (Eval.evaluate(BO->getLHS(), Result)) {
      if (!Result)
        Visit(BO->getRHS());
    } else {
      // Check for unsequenced operations in the RHS, treating it as an
      // entirely separate evaluation.
      //
      // FIXME: If there are operations in the RHS which are unsequenced
      // with respect to operations outside the RHS, and those operations
      // are unconditionally evaluated, diagnose them.
      WorkList.push_back(BO->getRHS());
    }
  }
  void VisitBinLAnd(BinaryOperator *BO) {
    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Visit(BO->getLHS());
    }

    bool Result;
    if (Eval.evaluate(BO->getLHS(), Result)) {
      if (Result)
        Visit(BO->getRHS());
    } else {
      WorkList.push_back(BO->getRHS());
    }
  }

  // Only visit the condition, unless we can be sure which subexpression will
  // be chosen.
  void VisitAbstractConditionalOperator(AbstractConditionalOperator *CO) {
    EvaluationTracker Eval(*this);
    {
      SequencedSubexpression Sequenced(*this);
      Visit(CO->getCond());
    }

    bool Result;
    if (Eval.evaluate(CO->getCond(), Result))
      Visit(Result ? CO->getTrueExpr() : CO->getFalseExpr());
    else {
      WorkList.push_back(CO->getTrueExpr());
      WorkList.push_back(CO->getFalseExpr());
    }
  }

  void VisitCallExpr(CallExpr *CE) {
    // C++11 [intro.execution]p15:
    //   When calling a function [...], every value computation and side effect
    //   associated with any argument expression, or with the postfix expression
    //   designating the called function, is sequenced before execution of every
    //   expression or statement in the body of the function [and thus before
    //   the value computation of its result].
    SequencedSubexpression Sequenced(*this);
    Base::VisitCallExpr(CE);

    // FIXME: CXXNewExpr and CXXDeleteExpr implicitly call functions.
  }

  void VisitCXXConstructExpr(CXXConstructExpr *CCE) {
    // This is a call, so all subexpressions are sequenced before the result.
    SequencedSubexpression Sequenced(*this);

    if (!CCE->isListInitialization())
      return VisitExpr(CCE);

    // In C++11, list initializations are sequenced.
    SmallVector<SequenceTree::Seq, 32> Elts;
    SequenceTree::Seq Parent = Region;
    for (CXXConstructExpr::arg_iterator I = CCE->arg_begin(),
                                        E = CCE->arg_end();
         I != E; ++I) {
      Region = Tree.allocate(Parent);
      Elts.push_back(Region);
      Visit(*I);
    }

    // Forget that the initializers are sequenced.
    Region = Parent;
    for (unsigned I = 0; I < Elts.size(); ++I)
      Tree.merge(Elts[I]);
  }

  void VisitInitListExpr(InitListExpr *ILE) {
    if (!SemaRef.getLangOpts().CPlusPlus11)
      return VisitExpr(ILE);

    // In C++11, list initializations are sequenced.
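    // (e.g. 'int a[2] = { i++, i++ };' is well-defined in C++11: each
    //  initializer is sequenced before the one that follows it)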
SmallVector<SequenceTree::Seq, 32> Elts; SequenceTree::Seq Parent = Region; for (unsigned I = 0; I < ILE->getNumInits(); ++I) { Expr *E = ILE->getInit(I); if (!E) continue; Region = Tree.allocate(Parent); Elts.push_back(Region); Visit(E); } // Forget that the initializers are sequenced. Region = Parent; for (unsigned I = 0; I < Elts.size(); ++I) Tree.merge(Elts[I]); } }; } void Sema::CheckUnsequencedOperations(Expr *E) { SmallVector<Expr *, 8> WorkList; WorkList.push_back(E); while (!WorkList.empty()) { Expr *Item = WorkList.pop_back_val(); SequenceChecker(*this, Item, WorkList); } } void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc, bool IsConstexpr) { CheckImplicitConversions(E, CheckLoc); CheckUnsequencedOperations(E); if (!IsConstexpr && !E->isValueDependent()) CheckForIntOverflow(E); } void Sema::CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *BitField, Expr *Init) { (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc); } static void diagnoseArrayStarInParamType(Sema &S, QualType PType, SourceLocation Loc) { if (!PType->isVariablyModifiedType()) return; if (const auto *PointerTy = dyn_cast<PointerType>(PType)) { diagnoseArrayStarInParamType(S, PointerTy->getPointeeType(), Loc); return; } if (const auto *ReferenceTy = dyn_cast<ReferenceType>(PType)) { diagnoseArrayStarInParamType(S, ReferenceTy->getPointeeType(), Loc); return; } if (const auto *ParenTy = dyn_cast<ParenType>(PType)) { diagnoseArrayStarInParamType(S, ParenTy->getInnerType(), Loc); return; } const ArrayType *AT = S.Context.getAsArrayType(PType); if (!AT) return; if (AT->getSizeModifier() != ArrayType::Star) { diagnoseArrayStarInParamType(S, AT->getElementType(), Loc); return; } S.Diag(Loc, diag::err_array_star_in_function_definition); } /// CheckParmsForFunctionDef - Check that the parameters of the given /// function are appropriate for the definition of a function. This /// takes care of any checks that cannot be performed on the /// declaration itself, e.g., that the types of each of the function /// parameters are complete. bool Sema::CheckParmsForFunctionDef(ParmVarDecl *const *P, ParmVarDecl *const *PEnd, bool CheckParameterNames) { bool HasInvalidParm = false; for (; P != PEnd; ++P) { ParmVarDecl *Param = *P; // C99 6.7.5.3p4: the parameters in a parameter type list in a // function declarator that is part of a function definition of // that function shall not have incomplete type. // // This is also C++ [dcl.fct]p6. if (!Param->isInvalidDecl() && !(getLangOpts().HLSL && Param->getType()->isIncompleteArrayType()) && // HLSL Change: allow incomplete array type RequireCompleteType(Param->getLocation(), Param->getType(), diag::err_typecheck_decl_incomplete_type)) { Param->setInvalidDecl(); HasInvalidParm = true; } // C99 6.9.1p5: If the declarator includes a parameter type list, the // declaration of each parameter shall include an identifier. if (CheckParameterNames && Param->getIdentifier() == nullptr && !Param->isImplicit() && !getLangOpts().CPlusPlus) Diag(Param->getLocation(), diag::err_parameter_name_omitted); // C99 6.7.5.3p12: // If the function declarator is not part of a definition of that // function, parameters may have incomplete type and may use the [*] // notation in their sequences of declarator specifiers to specify // variable length array types. QualType PType = Param->getOriginalType(); // FIXME: This diagnostic should point the '[*]' if source-location // information is added for it. 
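    // (e.g. 'void f(int a[*]);' is fine as a declaration, but the [*] form
    //  is an error in a function definition, which is what is checked here)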
diagnoseArrayStarInParamType(*this, PType, Param->getLocation()); // MSVC destroys objects passed by value in the callee. Therefore a // function definition which takes such a parameter must be able to call the // object's destructor. However, we don't perform any direct access check // on the dtor. if (getLangOpts().CPlusPlus && Context.getTargetInfo() .getCXXABI() .areArgsDestroyedLeftToRightInCallee()) { if (!Param->isInvalidDecl()) { if (const RecordType *RT = Param->getType()->getAs<RecordType>()) { CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl()); if (!ClassDecl->isInvalidDecl() && !ClassDecl->hasIrrelevantDestructor() && !ClassDecl->isDependentContext()) { CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl); MarkFunctionReferenced(Param->getLocation(), Destructor); DiagnoseUseOfDecl(Destructor, Param->getLocation()); } } } } } return HasInvalidParm; } /// CheckCastAlign - Implements -Wcast-align, which warns when a /// pointer cast increases the alignment requirements. void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) { // This is actually a lot of work to potentially be doing on every // cast; don't do it if we're ignoring -Wcast_align (as is the default). if (getDiagnostics().isIgnored(diag::warn_cast_align, TRange.getBegin())) return; // Ignore dependent types. if (T->isDependentType() || Op->getType()->isDependentType()) return; // Require that the destination be a pointer type. const PointerType *DestPtr = T->getAs<PointerType>(); if (!DestPtr) return; // If the destination has alignment 1, we're done. QualType DestPointee = DestPtr->getPointeeType(); if (DestPointee->isIncompleteType()) return; CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee); if (DestAlign.isOne()) return; // Require that the source be a pointer type. const PointerType *SrcPtr = Op->getType()->getAs<PointerType>(); if (!SrcPtr) return; QualType SrcPointee = SrcPtr->getPointeeType(); // Whitelist casts from cv void*. We already implicitly // whitelisted casts to cv void*, since they have alignment 1. // Also whitelist casts involving incomplete types, which implicitly // includes 'void'. if (SrcPointee->isIncompleteType()) return; CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee); if (SrcAlign >= DestAlign) return; Diag(TRange.getBegin(), diag::warn_cast_align) << Op->getType() << T << static_cast<unsigned>(SrcAlign.getQuantity()) << static_cast<unsigned>(DestAlign.getQuantity()) << TRange << Op->getSourceRange(); } static const Type* getElementType(const Expr *BaseExpr) { const Type* EltType = BaseExpr->getType().getTypePtr(); if (EltType->isAnyPointerType()) return EltType->getPointeeType().getTypePtr(); else if (EltType->isArrayType()) return EltType->getBaseElementTypeUnsafe(); return EltType; } /// \brief Check whether this array fits the idiom of a size-one tail padded /// array member of a struct. /// /// We avoid emitting out-of-bounds access warnings for such arrays as they are /// commonly used to emulate flexible arrays in C89 code. static bool IsTailPaddedMemberArray(Sema &S, llvm::APInt Size, const NamedDecl *ND) { if (Size != 1 || !ND) return false; const FieldDecl *FD = dyn_cast<FieldDecl>(ND); if (!FD) return false; // Don't consider sizes resulting from macro expansions or template argument // substitution to form C89 tail-padded arrays. TypeSourceInfo *TInfo = FD->getTypeSourceInfo(); while (TInfo) { TypeLoc TL = TInfo->getTypeLoc(); // Look through typedefs. 
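    // (a typedef'd array type, e.g. 'typedef char Tail[1];', still matches
    //  the idiom, as long as the array bound was written as a plain literal
    //  and not produced by a macro)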
if (TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>()) { const TypedefNameDecl *TDL = TTL.getTypedefNameDecl(); TInfo = TDL->getTypeSourceInfo(); continue; } if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) { const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr()); if (!SizeExpr || SizeExpr->getExprLoc().isMacroID()) return false; } break; } const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext()); if (!RD) return false; if (RD->isUnion()) return false; if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { if (!CRD->isStandardLayout()) return false; } // See if this is the last field decl in the record. const Decl *D = FD; while ((D = D->getNextDeclInContext())) if (isa<FieldDecl>(D)) return false; return true; } void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE, bool AllowOnePastEnd, bool IndexNegated) { IndexExpr = IndexExpr->IgnoreParenImpCasts(); if (IndexExpr->isValueDependent()) return; const Type *EffectiveType = getElementType(BaseExpr); BaseExpr = BaseExpr->IgnoreParenCasts(); const ConstantArrayType *ArrayTy = Context.getAsConstantArrayType(BaseExpr->getType()); if (!ArrayTy) return; llvm::APSInt index; if (!IndexExpr->EvaluateAsInt(index, Context)) return; if (IndexNegated) index = -index; const NamedDecl *ND = nullptr; if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) ND = dyn_cast<NamedDecl>(DRE->getDecl()); if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) ND = dyn_cast<NamedDecl>(ME->getMemberDecl()); if (index.isUnsigned() || !index.isNegative()) { llvm::APInt size = ArrayTy->getSize(); if (!size.isStrictlyPositive()) return; const Type* BaseType = getElementType(BaseExpr); if (BaseType != EffectiveType) { // Make sure we're comparing apples to apples when comparing index to size uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType); uint64_t array_typesize = Context.getTypeSize(BaseType); // Handle ptrarith_typesize being zero, such as when casting to void* if (!ptrarith_typesize) ptrarith_typesize = 1; if (ptrarith_typesize != array_typesize) { // There's a cast to a different size type involved uint64_t ratio = array_typesize / ptrarith_typesize; // TODO: Be smarter about handling cases where array_typesize is not a // multiple of ptrarith_typesize if (ptrarith_typesize * ratio == array_typesize) size *= llvm::APInt(size.getBitWidth(), ratio); } } if (size.getBitWidth() > index.getBitWidth()) index = index.zext(size.getBitWidth()); else if (size.getBitWidth() < index.getBitWidth()) size = size.zext(index.getBitWidth()); // For array subscripting the index must be less than size, but for pointer // arithmetic also allow the index (offset) to be equal to size since // computing the next address after the end of the array is legal and // commonly done e.g. in C++ iterators and range-based for loops. if (AllowOnePastEnd ? index.ule(size) : index.ult(size)) return; // Also don't warn for arrays of size 1 which are members of some // structure. These are often used to approximate flexible arrays in C89 // code. if (IsTailPaddedMemberArray(*this, size, ND)) return; // Suppress the warning if the subscript expression (as identified by the // ']' location) and the index expression are both from macro expansions // within a system header. 
if (ASE) { SourceLocation RBracketLoc = SourceMgr.getSpellingLoc( ASE->getRBracketLoc()); if (SourceMgr.isInSystemHeader(RBracketLoc)) { SourceLocation IndexLoc = SourceMgr.getSpellingLoc( IndexExpr->getLocStart()); if (SourceMgr.isWrittenInSameFile(RBracketLoc, IndexLoc)) return; } } // HLSL Change Starts if (getLangOpts().HLSL && getLangOpts().HLSLVersion > hlsl::LangStd::v2016) { DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr, PDiag(diag::err_hlsl_array_element_index_out_of_bounds) << index.toString(10, true)); } else { // HLSL Change Ends unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds; if (ASE) DiagID = diag::warn_array_index_exceeds_bounds; DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr, PDiag(DiagID) << index.toString(10, true) << size.toString(10, true) << (unsigned)size.getLimitedValue(~0U) << IndexExpr->getSourceRange()); } // HLSL Change } else { // HLSL Change Starts if (getLangOpts().HLSL && getLangOpts().HLSLVersion > hlsl::LangStd::v2016) { DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr, PDiag(diag::err_hlsl_array_element_index_out_of_bounds) << index.toString(10, true)); } else { // HLSL Change Ends unsigned DiagID = diag::warn_array_index_precedes_bounds; if (!ASE) { DiagID = diag::warn_ptr_arith_precedes_bounds; if (index.isNegative()) index = -index; } DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr, PDiag(DiagID) << index.toString(10, true) << IndexExpr->getSourceRange()); } // HLSL Change } if (!ND) { // Try harder to find a NamedDecl to point at in the note. while (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr)) BaseExpr = ASE->getBase()->IgnoreParenCasts(); if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr)) ND = dyn_cast<NamedDecl>(DRE->getDecl()); if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr)) ND = dyn_cast<NamedDecl>(ME->getMemberDecl()); } if (ND) DiagRuntimeBehavior(ND->getLocStart(), BaseExpr, PDiag(diag::note_array_index_out_of_bounds) << ND->getDeclName()); } void Sema::CheckArrayAccess(const Expr *expr) { int AllowOnePastEnd = 0; while (expr) { expr = expr->IgnoreParenImpCasts(); switch (expr->getStmtClass()) { case Stmt::ArraySubscriptExprClass: { const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr); CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE, AllowOnePastEnd > 0); return; } case Stmt::UnaryOperatorClass: { // Only unwrap the * and & unary operators const UnaryOperator *UO = cast<UnaryOperator>(expr); expr = UO->getSubExpr(); switch (UO->getOpcode()) { case UO_AddrOf: AllowOnePastEnd++; break; case UO_Deref: AllowOnePastEnd--; break; default: return; } break; } case Stmt::ConditionalOperatorClass: { const ConditionalOperator *cond = cast<ConditionalOperator>(expr); if (const Expr *lhs = cond->getLHS()) CheckArrayAccess(lhs); if (const Expr *rhs = cond->getRHS()) CheckArrayAccess(rhs); return; } // HLSL Change Starts : Access checking for HLSL vector and matrix array subscript case Stmt::CXXOperatorCallExprClass : { if (getLangOpts().HLSL) { const CXXOperatorCallExpr *OperatorCallExpr = cast<CXXOperatorCallExpr>(expr); if (OperatorCallExpr->getOperator() == OverloadedOperatorKind::OO_Subscript) { CheckHLSLArrayAccess(expr); } } return; } // HLSL Change Ends default: return; } } } //===--- CHECK: Objective-C retain cycles ----------------------------------// namespace { struct RetainCycleOwner { RetainCycleOwner() : Variable(nullptr), Indirect(false) {} VarDecl *Variable; SourceRange Range; SourceLocation Loc; bool Indirect; void setLocsFrom(Expr *e) { Loc = 
e->getExprLoc(); Range = e->getSourceRange(); } }; } #if 0 // HLSL Change Starts /// Consider whether capturing the given variable can possibly lead to /// a retain cycle. static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) { // In ARC, it's captured strongly iff the variable has __strong // lifetime. In MRR, it's captured strongly if the variable is // __block and has an appropriate type. if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong) return false; owner.Variable = var; if (ref) owner.setLocsFrom(ref); return true; } static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) { while (true) { e = e->IgnoreParens(); if (CastExpr *cast = dyn_cast<CastExpr>(e)) { switch (cast->getCastKind()) { case CK_BitCast: case CK_LValueBitCast: case CK_LValueToRValue: case CK_ARCReclaimReturnedObject: e = cast->getSubExpr(); continue; default: return false; } } if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) { ObjCIvarDecl *ivar = ref->getDecl(); if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong) return false; // Try to find a retain cycle in the base. if (!findRetainCycleOwner(S, ref->getBase(), owner)) return false; if (ref->isFreeIvar()) owner.setLocsFrom(ref); owner.Indirect = true; return true; } if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) { VarDecl *var = dyn_cast<VarDecl>(ref->getDecl()); if (!var) return false; return considerVariable(var, ref, owner); } if (MemberExpr *member = dyn_cast<MemberExpr>(e)) { if (member->isArrow()) return false; // Don't count this as an indirect ownership. e = member->getBase(); continue; } if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { // Only pay attention to pseudo-objects on property references. ObjCPropertyRefExpr *pre = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm() ->IgnoreParens()); if (!pre) return false; if (pre->isImplicitProperty()) return false; ObjCPropertyDecl *property = pre->getExplicitProperty(); if (!property->isRetaining() && !(property->getPropertyIvarDecl() && property->getPropertyIvarDecl()->getType() .getObjCLifetime() == Qualifiers::OCL_Strong)) return false; owner.Indirect = true; if (pre->isSuperReceiver()) { owner.Variable = S.getCurMethodDecl()->getSelfDecl(); if (!owner.Variable) return false; owner.Loc = pre->getLocation(); owner.Range = pre->getSourceRange(); return true; } e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase()) ->getSourceExpr()); continue; } // Array ivars? 
return false; } } #endif // HLSL Change Ends namespace { struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> { FindCaptureVisitor(ASTContext &Context, VarDecl *variable) : EvaluatedExprVisitor<FindCaptureVisitor>(Context), Context(Context), Variable(variable), Capturer(nullptr), VarWillBeReased(false) {} ASTContext &Context; VarDecl *Variable; Expr *Capturer; bool VarWillBeReased; void VisitDeclRefExpr(DeclRefExpr *ref) { if (ref->getDecl() == Variable && !Capturer) Capturer = ref; } void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) { if (Capturer) return; Visit(ref->getBase()); if (Capturer && ref->isFreeIvar()) Capturer = ref; } void VisitBlockExpr(BlockExpr *block) { // Look inside nested blocks if (block->getBlockDecl()->capturesVariable(Variable)) Visit(block->getBlockDecl()->getBody()); } void VisitOpaqueValueExpr(OpaqueValueExpr *OVE) { if (Capturer) return; if (OVE->getSourceExpr()) Visit(OVE->getSourceExpr()); } void VisitBinaryOperator(BinaryOperator *BinOp) { if (!Variable || VarWillBeReased || BinOp->getOpcode() != BO_Assign) return; Expr *LHS = BinOp->getLHS(); if (const DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(LHS)) { if (DRE->getDecl() != Variable) return; if (Expr *RHS = BinOp->getRHS()) { RHS = RHS->IgnoreParenCasts(); llvm::APSInt Value; VarWillBeReased = (RHS && RHS->isIntegerConstantExpr(Value, Context) && Value == 0); } } } }; } #if 0 // HLSL Change Starts /// Check whether the given argument is a block which captures a /// variable. static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) { assert(owner.Variable && owner.Loc.isValid()); e = e->IgnoreParenCasts(); // Look through [^{...} copy] and Block_copy(^{...}). if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(e)) { Selector Cmd = ME->getSelector(); if (Cmd.isUnarySelector() && Cmd.getNameForSlot(0) == "copy") { e = ME->getInstanceReceiver(); if (!e) return nullptr; e = e->IgnoreParenCasts(); } } else if (CallExpr *CE = dyn_cast<CallExpr>(e)) { if (CE->getNumArgs() == 1) { FunctionDecl *Fn = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl()); if (Fn) { const IdentifierInfo *FnI = Fn->getIdentifier(); if (FnI && FnI->isStr("_Block_copy")) { e = CE->getArg(0)->IgnoreParenCasts(); } } } } BlockExpr *block = dyn_cast<BlockExpr>(e); if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable)) return nullptr; FindCaptureVisitor visitor(S.Context, owner.Variable); visitor.Visit(block->getBlockDecl()->getBody()); return visitor.VarWillBeReased ? nullptr : visitor.Capturer; } static void diagnoseRetainCycle(Sema &S, Expr *capturer, RetainCycleOwner &owner) { assert(capturer); assert(owner.Variable && owner.Loc.isValid()); S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle) << owner.Variable << capturer->getSourceRange(); S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner) << owner.Indirect << owner.Range; } /// Check for a keyword selector that starts with the word 'add' or /// 'set'. static bool isSetterLikeSelector(Selector sel) { if (sel.isUnarySelector()) return false; StringRef str = sel.getNameForSlot(0); while (!str.empty() && str.front() == '_') str = str.substr(1); if (str.startswith("set")) str = str.substr(3); else if (str.startswith("add")) { // Specially whitelist 'addOperationWithBlock:'. 
if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock")) return false; str = str.substr(3); } else return false; if (str.empty()) return true; return !isLowercase(str.front()); } static Optional<int> GetNSMutableArrayArgumentIndex(Sema &S, ObjCMessageExpr *Message) { bool IsMutableArray = S.NSAPIObj->isSubclassOfNSClass( Message->getReceiverInterface(), NSAPI::ClassId_NSMutableArray); if (!IsMutableArray) { return None; } Selector Sel = Message->getSelector(); Optional<NSAPI::NSArrayMethodKind> MKOpt = S.NSAPIObj->getNSArrayMethodKind(Sel); if (!MKOpt) { return None; } NSAPI::NSArrayMethodKind MK = *MKOpt; switch (MK) { case NSAPI::NSMutableArr_addObject: case NSAPI::NSMutableArr_insertObjectAtIndex: case NSAPI::NSMutableArr_setObjectAtIndexedSubscript: return 0; case NSAPI::NSMutableArr_replaceObjectAtIndex: return 1; default: return None; } return None; } static Optional<int> GetNSMutableDictionaryArgumentIndex(Sema &S, ObjCMessageExpr *Message) { bool IsMutableDictionary = S.NSAPIObj->isSubclassOfNSClass( Message->getReceiverInterface(), NSAPI::ClassId_NSMutableDictionary); if (!IsMutableDictionary) { return None; } Selector Sel = Message->getSelector(); Optional<NSAPI::NSDictionaryMethodKind> MKOpt = S.NSAPIObj->getNSDictionaryMethodKind(Sel); if (!MKOpt) { return None; } NSAPI::NSDictionaryMethodKind MK = *MKOpt; switch (MK) { case NSAPI::NSMutableDict_setObjectForKey: case NSAPI::NSMutableDict_setValueForKey: case NSAPI::NSMutableDict_setObjectForKeyedSubscript: return 0; default: return None; } return None; } static Optional<int> GetNSSetArgumentIndex(Sema &S, ObjCMessageExpr *Message) { bool IsMutableSet = S.NSAPIObj->isSubclassOfNSClass( Message->getReceiverInterface(), NSAPI::ClassId_NSMutableSet); bool IsMutableOrderedSet = S.NSAPIObj->isSubclassOfNSClass( Message->getReceiverInterface(), NSAPI::ClassId_NSMutableOrderedSet); if (!IsMutableSet && !IsMutableOrderedSet) { return None; } Selector Sel = Message->getSelector(); Optional<NSAPI::NSSetMethodKind> MKOpt = S.NSAPIObj->getNSSetMethodKind(Sel); if (!MKOpt) { return None; } NSAPI::NSSetMethodKind MK = *MKOpt; switch (MK) { case NSAPI::NSMutableSet_addObject: case NSAPI::NSOrderedSet_setObjectAtIndex: case NSAPI::NSOrderedSet_setObjectAtIndexedSubscript: case NSAPI::NSOrderedSet_insertObjectAtIndex: return 0; case NSAPI::NSOrderedSet_replaceObjectAtIndexWithObject: return 1; } return None; } void Sema::CheckObjCCircularContainer(ObjCMessageExpr *Message) { if (!Message->isInstanceMessage()) { return; } Optional<int> ArgOpt; if (!(ArgOpt = GetNSMutableArrayArgumentIndex(*this, Message)) && !(ArgOpt = GetNSMutableDictionaryArgumentIndex(*this, Message)) && !(ArgOpt = GetNSSetArgumentIndex(*this, Message))) { return; } int ArgIndex = *ArgOpt; Expr *Arg = Message->getArg(ArgIndex)->IgnoreImpCasts(); if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Arg)) { Arg = OE->getSourceExpr()->IgnoreImpCasts(); } if (Message->getReceiverKind() == ObjCMessageExpr::SuperInstance) { if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { if (ArgRE->isObjCSelfExpr()) { Diag(Message->getSourceRange().getBegin(), diag::warn_objc_circular_container) << ArgRE->getDecl()->getName() << StringRef("super"); } } } else { Expr *Receiver = Message->getInstanceReceiver()->IgnoreImpCasts(); if (OpaqueValueExpr *OE = dyn_cast<OpaqueValueExpr>(Receiver)) { Receiver = OE->getSourceExpr()->IgnoreImpCasts(); } if (DeclRefExpr *ReceiverRE = dyn_cast<DeclRefExpr>(Receiver)) { if (DeclRefExpr *ArgRE = dyn_cast<DeclRefExpr>(Arg)) { if 
(ReceiverRE->getDecl() == ArgRE->getDecl()) { ValueDecl *Decl = ReceiverRE->getDecl(); Diag(Message->getSourceRange().getBegin(), diag::warn_objc_circular_container) << Decl->getName() << Decl->getName(); if (!ArgRE->isObjCSelfExpr()) { Diag(Decl->getLocation(), diag::note_objc_circular_container_declared_here) << Decl->getName(); } } } } else if (ObjCIvarRefExpr *IvarRE = dyn_cast<ObjCIvarRefExpr>(Receiver)) { if (ObjCIvarRefExpr *IvarArgRE = dyn_cast<ObjCIvarRefExpr>(Arg)) { if (IvarRE->getDecl() == IvarArgRE->getDecl()) { ObjCIvarDecl *Decl = IvarRE->getDecl(); Diag(Message->getSourceRange().getBegin(), diag::warn_objc_circular_container) << Decl->getName() << Decl->getName(); Diag(Decl->getLocation(), diag::note_objc_circular_container_declared_here) << Decl->getName(); } } } } } /// Check a message send to see if it's likely to cause a retain cycle. void Sema::checkRetainCycles(ObjCMessageExpr *msg) { // Only check instance methods whose selector looks like a setter. if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector())) return; // Try to find a variable that the receiver is strongly owned by. RetainCycleOwner owner; if (msg->getReceiverKind() == ObjCMessageExpr::Instance) { if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner)) return; } else { assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); owner.Variable = getCurMethodDecl()->getSelfDecl(); owner.Loc = msg->getSuperLoc(); owner.Range = msg->getSuperLoc(); } // Check whether the receiver is captured by any of the arguments. for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i) if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner)) return diagnoseRetainCycle(*this, capturer, owner); } /// Check a property assign to see if it's likely to cause a retain cycle. void Sema::checkRetainCycles(Expr *receiver, Expr *argument) { RetainCycleOwner owner; if (!findRetainCycleOwner(*this, receiver, owner)) return; if (Expr *capturer = findCapturingExpr(*this, argument, owner)) diagnoseRetainCycle(*this, capturer, owner); } void Sema::checkRetainCycles(VarDecl *Var, Expr *Init) { RetainCycleOwner Owner; if (!considerVariable(Var, /*DeclRefExpr=*/nullptr, Owner)) return; // Because we don't have an expression for the variable, we have to set the // location explicitly here. Owner.Loc = Var->getLocation(); Owner.Range = Var->getSourceRange(); if (Expr *Capturer = findCapturingExpr(*this, Init, Owner)) diagnoseRetainCycle(*this, Capturer, Owner); } static bool checkUnsafeAssignLiteral(Sema &S, SourceLocation Loc, Expr *RHS, bool isProperty) { // Check if RHS is an Objective-C object literal, which also can get // immediately zapped in a weak reference. Note that we explicitly // allow ObjCStringLiterals, since those are designed to never really die. RHS = RHS->IgnoreParenImpCasts(); // This enum needs to match with the 'select' in // warn_objc_arc_literal_assign (off-by-1). Sema::ObjCLiteralKind Kind = S.CheckLiteralKind(RHS); if (Kind == Sema::LK_String || Kind == Sema::LK_None) return false; S.Diag(Loc, diag::warn_arc_literal_assign) << (unsigned) Kind << (isProperty ? 0 : 1) << RHS->getSourceRange(); return true; } static bool checkUnsafeAssignObject(Sema &S, SourceLocation Loc, Qualifiers::ObjCLifetime LT, Expr *RHS, bool isProperty) { // Strip off any implicit cast added to get to the one ARC-specific. 
while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { if (cast->getCastKind() == CK_ARCConsumeObject) { S.Diag(Loc, diag::warn_arc_retained_assign) << (LT == Qualifiers::OCL_ExplicitNone) << (isProperty ? 0 : 1) << RHS->getSourceRange(); return true; } RHS = cast->getSubExpr(); } if (LT == Qualifiers::OCL_Weak && checkUnsafeAssignLiteral(S, Loc, RHS, isProperty)) return true; return false; } bool Sema::checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS) { Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime(); if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone) return false; if (checkUnsafeAssignObject(*this, Loc, LT, RHS, false)) return true; return false; } void Sema::checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS) { QualType LHSType; // PropertyRef on LHS type need be directly obtained from // its declaration as it has a PseudoType. ObjCPropertyRefExpr *PRE = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens()); if (PRE && !PRE->isImplicitProperty()) { const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); if (PD) LHSType = PD->getType(); } if (LHSType.isNull()) LHSType = LHS->getType(); Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime(); if (LT == Qualifiers::OCL_Weak) { if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) getCurFunction()->markSafeWeakUse(LHS); } if (checkUnsafeAssigns(Loc, LHSType, RHS)) return; // FIXME. Check for other life times. if (LT != Qualifiers::OCL_None) return; if (PRE) { if (PRE->isImplicitProperty()) return; const ObjCPropertyDecl *PD = PRE->getExplicitProperty(); if (!PD) return; unsigned Attributes = PD->getPropertyAttributes(); if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) { // when 'assign' attribute was not explicitly specified // by user, ignore it and rely on property type itself // for lifetime info. unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten(); if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) && LHSType->isObjCRetainableType()) return; while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) { if (cast->getCastKind() == CK_ARCConsumeObject) { Diag(Loc, diag::warn_arc_retained_property_assign) << RHS->getSourceRange(); return; } RHS = cast->getSubExpr(); } } else if (Attributes & ObjCPropertyDecl::OBJC_PR_weak) { if (checkUnsafeAssignObject(*this, Loc, Qualifiers::OCL_Weak, RHS, true)) return; } } } #endif // HLSL Change //===--- CHECK: Empty statement body (-Wempty-body) ---------------------===// namespace { bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr, SourceLocation StmtLoc, const NullStmt *Body) { // Do not warn if the body is a macro that expands to nothing, e.g: // // #define CALL(x) // if (condition) // CALL(0); // if (Body->hasLeadingEmptyMacro()) return false; // Get line numbers of statement and body. bool StmtLineInvalid; unsigned StmtLine = SourceMgr.getPresumedLineNumber(StmtLoc, &StmtLineInvalid); if (StmtLineInvalid) return false; bool BodyLineInvalid; unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(), &BodyLineInvalid); if (BodyLineInvalid) return false; // Warn if null statement and body are on the same line. if (StmtLine != BodyLine) return false; return true; } } // Unnamed namespace void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID) { // Since this is a syntactic check, don't emit diagnostic for template // instantiations, this just adds noise. if (CurrentInstantiationScope) return; // The body should be a null statement. 
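  // (e.g. the stray ';' in 'if (cond); { do_work(); }' -- the warning fires
  //  when that semicolon is on the same line as the 'if')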
const NullStmt *NBody = dyn_cast<NullStmt>(Body); if (!NBody) return; // Do the usual checks. if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) return; Diag(NBody->getSemiLoc(), DiagID); Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); } void Sema::DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody) { assert(!CurrentInstantiationScope); // Ensured by caller SourceLocation StmtLoc; const Stmt *Body; unsigned DiagID; if (const ForStmt *FS = dyn_cast<ForStmt>(S)) { StmtLoc = FS->getRParenLoc(); Body = FS->getBody(); DiagID = diag::warn_empty_for_body; } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) { StmtLoc = WS->getCond()->getSourceRange().getEnd(); Body = WS->getBody(); DiagID = diag::warn_empty_while_body; } else return; // Neither `for' nor `while'. // The body should be a null statement. const NullStmt *NBody = dyn_cast<NullStmt>(Body); if (!NBody) return; // Skip expensive checks if diagnostic is disabled. if (Diags.isIgnored(DiagID, NBody->getSemiLoc())) return; // Do the usual checks. if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody)) return; // `for(...);' and `while(...);' are popular idioms, so in order to keep // noise level low, emit diagnostics only if for/while is followed by a // CompoundStmt, e.g.: // for (int i = 0; i < n; i++); // { // a(i); // } // or if for/while is followed by a statement with more indentation // than for/while itself: // for (int i = 0; i < n; i++); // a(i); bool ProbableTypo = isa<CompoundStmt>(PossibleBody); if (!ProbableTypo) { bool BodyColInvalid; unsigned BodyCol = SourceMgr.getPresumedColumnNumber( PossibleBody->getLocStart(), &BodyColInvalid); if (BodyColInvalid) return; bool StmtColInvalid; unsigned StmtCol = SourceMgr.getPresumedColumnNumber( S->getLocStart(), &StmtColInvalid); if (StmtColInvalid) return; if (BodyCol > StmtCol) ProbableTypo = true; } if (ProbableTypo) { Diag(NBody->getSemiLoc(), DiagID); Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line); } } //===--- CHECK: Warn on self move with std::move. -------------------------===// /// DiagnoseSelfMove - Emits a warning if a value is moved to itself. void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc) { if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc)) return; if (!ActiveTemplateInstantiations.empty()) return; // Strip parens and casts away. LHSExpr = LHSExpr->IgnoreParenImpCasts(); RHSExpr = RHSExpr->IgnoreParenImpCasts(); // Check for a call expression const CallExpr *CE = dyn_cast<CallExpr>(RHSExpr); if (!CE || CE->getNumArgs() != 1) return; // Check for a call to std::move const FunctionDecl *FD = CE->getDirectCallee(); if (!FD || !FD->isInStdNamespace() || !FD->getIdentifier() || !FD->getIdentifier()->isStr("move")) return; // Get argument from std::move RHSExpr = CE->getArg(0); const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); // Two DeclRefExpr's, check that the decls are the same. if (LHSDeclRef && RHSDeclRef) { if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) return; if (LHSDeclRef->getDecl()->getCanonicalDecl() != RHSDeclRef->getDecl()->getCanonicalDecl()) return; Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); return; } // Member variables require a different approach to check for self moves. 
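// (Illustrative: 'a.s = std::move(a.s);' should warn just like
// 'v = std::move(v);', but here the operands are MemberExprs whose member
// chains and bases must be matched decl-by-decl, as done below.)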
// MemberExpr's are the same if every nested MemberExpr refers to the same // Decl and that the base Expr's are DeclRefExpr's with the same Decl or // the base Expr's are CXXThisExpr's. const Expr *LHSBase = LHSExpr; const Expr *RHSBase = RHSExpr; const MemberExpr *LHSME = dyn_cast<MemberExpr>(LHSExpr); const MemberExpr *RHSME = dyn_cast<MemberExpr>(RHSExpr); if (!LHSME || !RHSME) return; while (LHSME && RHSME) { if (LHSME->getMemberDecl()->getCanonicalDecl() != RHSME->getMemberDecl()->getCanonicalDecl()) return; LHSBase = LHSME->getBase(); RHSBase = RHSME->getBase(); LHSME = dyn_cast<MemberExpr>(LHSBase); RHSME = dyn_cast<MemberExpr>(RHSBase); } LHSDeclRef = dyn_cast<DeclRefExpr>(LHSBase); RHSDeclRef = dyn_cast<DeclRefExpr>(RHSBase); if (LHSDeclRef && RHSDeclRef) { if (!LHSDeclRef->getDecl() || !RHSDeclRef->getDecl()) return; if (LHSDeclRef->getDecl()->getCanonicalDecl() != RHSDeclRef->getDecl()->getCanonicalDecl()) return; Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); return; } if (isa<CXXThisExpr>(LHSBase) && isa<CXXThisExpr>(RHSBase)) Diag(OpLoc, diag::warn_self_move) << LHSExpr->getType() << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); } //===--- Layout compatibility ----------------------------------------------// namespace { bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2); /// \brief Check if two enumeration types are layout-compatible. bool isLayoutCompatible(ASTContext &C, EnumDecl *ED1, EnumDecl *ED2) { // C++11 [dcl.enum] p8: // Two enumeration types are layout-compatible if they have the same // underlying type. return ED1->isComplete() && ED2->isComplete() && C.hasSameType(ED1->getIntegerType(), ED2->getIntegerType()); } /// \brief Check if two fields are layout-compatible. bool isLayoutCompatible(ASTContext &C, FieldDecl *Field1, FieldDecl *Field2) { if (!isLayoutCompatible(C, Field1->getType(), Field2->getType())) return false; if (Field1->isBitField() != Field2->isBitField()) return false; if (Field1->isBitField()) { // Make sure that the bit-fields are the same length. unsigned Bits1 = Field1->getBitWidthValue(C); unsigned Bits2 = Field2->getBitWidthValue(C); if (Bits1 != Bits2) return false; } return true; } /// \brief Check if two standard-layout structs are layout-compatible. /// (C++11 [class.mem] p17) bool isLayoutCompatibleStruct(ASTContext &C, RecordDecl *RD1, RecordDecl *RD2) { // If both records are C++ classes, check that base classes match. if (const CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(RD1)) { // If one of records is a CXXRecordDecl we are in C++ mode, // thus the other one is a CXXRecordDecl, too. const CXXRecordDecl *D2CXX = cast<CXXRecordDecl>(RD2); // Check number of base classes. if (D1CXX->getNumBases() != D2CXX->getNumBases()) return false; // Check the base classes. for (CXXRecordDecl::base_class_const_iterator Base1 = D1CXX->bases_begin(), BaseEnd1 = D1CXX->bases_end(), Base2 = D2CXX->bases_begin(); Base1 != BaseEnd1; ++Base1, ++Base2) { if (!isLayoutCompatible(C, Base1->getType(), Base2->getType())) return false; } } else if (const CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(RD2)) { // If only RD2 is a C++ class, it should have zero base classes. if (D2CXX->getNumBases() > 0) return false; } // Check the fields. 
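// Illustrative example (not from the original source): given
//   struct A { int n; unsigned bits : 4; };
//   struct B { int m; unsigned flags : 4; };  // layout-compatible with A
//   struct C { int m; unsigned flags : 5; };  // not: bit-field width differs
// the loop below must match the fields pairwise, in declaration order.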
RecordDecl::field_iterator Field2 = RD2->field_begin(), Field2End = RD2->field_end(), Field1 = RD1->field_begin(), Field1End = RD1->field_end(); for ( ; Field1 != Field1End && Field2 != Field2End; ++Field1, ++Field2) { if (!isLayoutCompatible(C, *Field1, *Field2)) return false; } if (Field1 != Field1End || Field2 != Field2End) return false; return true; } /// \brief Check if two standard-layout unions are layout-compatible. /// (C++11 [class.mem] p18) bool isLayoutCompatibleUnion(ASTContext &C, RecordDecl *RD1, RecordDecl *RD2) { llvm::SmallPtrSet<FieldDecl *, 8> UnmatchedFields; for (auto *Field2 : RD2->fields()) UnmatchedFields.insert(Field2); for (auto *Field1 : RD1->fields()) { llvm::SmallPtrSet<FieldDecl *, 8>::iterator I = UnmatchedFields.begin(), E = UnmatchedFields.end(); for ( ; I != E; ++I) { if (isLayoutCompatible(C, Field1, *I)) { bool Result = UnmatchedFields.erase(*I); (void) Result; assert(Result); break; } } if (I == E) return false; } return UnmatchedFields.empty(); } bool isLayoutCompatible(ASTContext &C, RecordDecl *RD1, RecordDecl *RD2) { if (RD1->isUnion() != RD2->isUnion()) return false; if (RD1->isUnion()) return isLayoutCompatibleUnion(C, RD1, RD2); else return isLayoutCompatibleStruct(C, RD1, RD2); } /// \brief Check if two types are layout-compatible in C++11 sense. bool isLayoutCompatible(ASTContext &C, QualType T1, QualType T2) { if (T1.isNull() || T2.isNull()) return false; // C++11 [basic.types] p11: // If two types T1 and T2 are the same type, then T1 and T2 are // layout-compatible types. if (C.hasSameType(T1, T2)) return true; T1 = T1.getCanonicalType().getUnqualifiedType(); T2 = T2.getCanonicalType().getUnqualifiedType(); const Type::TypeClass TC1 = T1->getTypeClass(); const Type::TypeClass TC2 = T2->getTypeClass(); if (TC1 != TC2) return false; if (TC1 == Type::Enum) { return isLayoutCompatible(C, cast<EnumType>(T1)->getDecl(), cast<EnumType>(T2)->getDecl()); } else if (TC1 == Type::Record) { if (!T1->isStandardLayoutType() || !T2->isStandardLayoutType()) return false; return isLayoutCompatible(C, cast<RecordType>(T1)->getDecl(), cast<RecordType>(T2)->getDecl()); } return false; } } //===--- CHECK: pointer_with_type_tag attribute: datatypes should match ----// namespace { /// \brief Given a type tag expression find the type tag itself. /// /// \param TypeExpr Type tag expression, as it appears in user's code. /// /// \param VD Declaration of an identifier that appears in a type tag. /// /// \param MagicValue Type tag magic value. 
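///
/// For illustration (hypothetical declarations, not from this file), the
/// kind of code this supports looks like:
/// \code
///   static const int mpi_int_tag
///       __attribute__((type_tag_for_datatype(mpi, int))) = 1;
/// \endcode
/// where either 'mpi_int_tag' (a DeclRefExpr) or the bare magic value '1'
/// (an IntegerLiteral) may appear as the type tag argument.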
bool FindTypeTagExpr(const Expr *TypeExpr, const ASTContext &Ctx, const ValueDecl **VD, uint64_t *MagicValue) { while(true) { if (!TypeExpr) return false; TypeExpr = TypeExpr->IgnoreParenImpCasts()->IgnoreParenCasts(); switch (TypeExpr->getStmtClass()) { case Stmt::UnaryOperatorClass: { const UnaryOperator *UO = cast<UnaryOperator>(TypeExpr); if (UO->getOpcode() == UO_AddrOf || UO->getOpcode() == UO_Deref) { TypeExpr = UO->getSubExpr(); continue; } return false; } case Stmt::DeclRefExprClass: { const DeclRefExpr *DRE = cast<DeclRefExpr>(TypeExpr); *VD = DRE->getDecl(); return true; } case Stmt::IntegerLiteralClass: { const IntegerLiteral *IL = cast<IntegerLiteral>(TypeExpr); llvm::APInt MagicValueAPInt = IL->getValue(); if (MagicValueAPInt.getActiveBits() <= 64) { *MagicValue = MagicValueAPInt.getZExtValue(); return true; } else return false; } case Stmt::BinaryConditionalOperatorClass: case Stmt::ConditionalOperatorClass: { const AbstractConditionalOperator *ACO = cast<AbstractConditionalOperator>(TypeExpr); bool Result; if (ACO->getCond()->EvaluateAsBooleanCondition(Result, Ctx)) { if (Result) TypeExpr = ACO->getTrueExpr(); else TypeExpr = ACO->getFalseExpr(); continue; } return false; } case Stmt::BinaryOperatorClass: { const BinaryOperator *BO = cast<BinaryOperator>(TypeExpr); if (BO->getOpcode() == BO_Comma) { TypeExpr = BO->getRHS(); continue; } return false; } default: return false; } } } /// \brief Retrieve the C type corresponding to type tag TypeExpr. /// /// \param TypeExpr Expression that specifies a type tag. /// /// \param MagicValues Registered magic values. /// /// \param FoundWrongKind Set to true if a type tag was found, but of a wrong /// kind. /// /// \param TypeInfo Information about the corresponding C type. /// /// \returns true if the corresponding C type was found. bool GetMatchingCType( const IdentifierInfo *ArgumentKind, const Expr *TypeExpr, const ASTContext &Ctx, const llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData> *MagicValues, bool &FoundWrongKind, Sema::TypeTagData &TypeInfo) { FoundWrongKind = false; // Variable declaration that has type_tag_for_datatype attribute. 
const ValueDecl *VD = nullptr; uint64_t MagicValue; if (!FindTypeTagExpr(TypeExpr, Ctx, &VD, &MagicValue)) return false; if (VD) { if (TypeTagForDatatypeAttr *I = VD->getAttr<TypeTagForDatatypeAttr>()) { if (I->getArgumentKind() != ArgumentKind) { FoundWrongKind = true; return false; } TypeInfo.Type = I->getMatchingCType(); TypeInfo.LayoutCompatible = I->getLayoutCompatible(); TypeInfo.MustBeNull = I->getMustBeNull(); return true; } return false; } if (!MagicValues) return false; llvm::DenseMap<Sema::TypeTagMagicValue, Sema::TypeTagData>::const_iterator I = MagicValues->find(std::make_pair(ArgumentKind, MagicValue)); if (I == MagicValues->end()) return false; TypeInfo = I->second; return true; } } // unnamed namespace void Sema::RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull) { if (!TypeTagForDatatypeMagicValues) TypeTagForDatatypeMagicValues.reset( new llvm::DenseMap<TypeTagMagicValue, TypeTagData>); TypeTagMagicValue Magic(ArgumentKind, MagicValue); (*TypeTagForDatatypeMagicValues)[Magic] = TypeTagData(Type, LayoutCompatible, MustBeNull); } namespace { bool IsSameCharType(QualType T1, QualType T2) { const BuiltinType *BT1 = T1->getAs<BuiltinType>(); if (!BT1) return false; const BuiltinType *BT2 = T2->getAs<BuiltinType>(); if (!BT2) return false; BuiltinType::Kind T1Kind = BT1->getKind(); BuiltinType::Kind T2Kind = BT2->getKind(); return (T1Kind == BuiltinType::SChar && T2Kind == BuiltinType::Char_S) || (T1Kind == BuiltinType::UChar && T2Kind == BuiltinType::Char_U) || (T1Kind == BuiltinType::Char_U && T2Kind == BuiltinType::UChar) || (T1Kind == BuiltinType::Char_S && T2Kind == BuiltinType::SChar); } } // unnamed namespace void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs) { const IdentifierInfo *ArgumentKind = Attr->getArgumentKind(); bool IsPointerAttr = Attr->getIsPointer(); const Expr *TypeTagExpr = ExprArgs[Attr->getTypeTagIdx()]; bool FoundWrongKind; TypeTagData TypeInfo; if (!GetMatchingCType(ArgumentKind, TypeTagExpr, Context, TypeTagForDatatypeMagicValues.get(), FoundWrongKind, TypeInfo)) { if (FoundWrongKind) Diag(TypeTagExpr->getExprLoc(), diag::warn_type_tag_for_datatype_wrong_kind) << TypeTagExpr->getSourceRange(); return; } const Expr *ArgumentExpr = ExprArgs[Attr->getArgumentIdx()]; if (IsPointerAttr) { // Skip implicit cast of pointer to `void *' (as a function argument). if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgumentExpr)) if (ICE->getType()->isVoidPointerType() && ICE->getCastKind() == CK_BitCast) ArgumentExpr = ICE->getSubExpr(); } QualType ArgumentType = ArgumentExpr->getType(); // Passing a `void*' pointer shouldn't trigger a warning. if (IsPointerAttr && ArgumentType->isVoidPointerType()) return; if (TypeInfo.MustBeNull) { // Type tag with matching void type requires a null pointer. if (!ArgumentExpr->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull)) { Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_null_pointer_required) << ArgumentKind->getName() << ArgumentExpr->getSourceRange() << TypeTagExpr->getSourceRange(); } return; } QualType RequiredType = TypeInfo.Type; if (IsPointerAttr) RequiredType = Context.getPointerType(RequiredType); bool mismatch = false; if (!TypeInfo.LayoutCompatible) { mismatch = !Context.hasSameType(ArgumentType, RequiredType); // C++11 [basic.fundamental] p1: // Plain char, signed char, and unsigned char are three distinct types. 
// // But we treat plain `char' as equivalent to `signed char' or `unsigned // char' depending on the current char signedness mode. if (mismatch) if ((IsPointerAttr && IsSameCharType(ArgumentType->getPointeeType(), RequiredType->getPointeeType())) || (!IsPointerAttr && IsSameCharType(ArgumentType, RequiredType))) mismatch = false; } else if (IsPointerAttr) mismatch = !isLayoutCompatible(Context, ArgumentType->getPointeeType(), RequiredType->getPointeeType()); else mismatch = !isLayoutCompatible(Context, ArgumentType, RequiredType); if (mismatch) Diag(ArgumentExpr->getExprLoc(), diag::warn_type_safety_type_mismatch) << ArgumentType << ArgumentKind << TypeInfo.LayoutCompatible << RequiredType << ArgumentExpr->getSourceRange() << TypeTagExpr->getSourceRange(); }
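// Illustrative use of the attribute checked above (hypothetical
// declarations, not from the original source):
//   extern void mpi_send(void *buf, int count, int datatype)
//       __attribute__((pointer_with_type_tag(mpi, 1, 3)));
//   double d;
//   mpi_send(&d, 1, mpi_int_tag);  // flagged: 'double *' passed where the
//                                  // 'mpi_int_tag' type tag requires 'int *'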
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaPseudoObject.cpp
//===--- SemaPseudoObject.cpp - Semantic Analysis for Pseudo-Objects ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for expressions involving // pseudo-object references. Pseudo-objects are conceptual objects // whose storage is entirely abstract and all accesses to which are // translated through some sort of abstraction barrier. // // For example, Objective-C objects can have "properties", either // declared or undeclared. A property may be accessed by writing // expr.prop // where 'expr' is an r-value of Objective-C pointer type and 'prop' // is the name of the property. If this expression is used in a context // needing an r-value, it is treated as if it were a message-send // of the associated 'getter' selector, typically: // [expr prop] // If it is used as the LHS of a simple assignment, it is treated // as a message-send of the associated 'setter' selector, typically: // [expr setProp: RHS] // If it is used as the LHS of a compound assignment, or the operand // of a unary increment or decrement, both are required; for example, // 'expr.prop *= 100' would be translated to: // [expr setProp: [expr prop] * 100] // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/Basic/CharInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/ScopeInfo.h" #include "llvm/ADT/SmallString.h" using namespace clang; using namespace sema; // HLSL Change Starts // No ObjC parse/sema support, so simply skip all of this compilation. // Here are enough stubs to link the current targets. #if 1 Sema::ObjCSubscriptKind Sema::CheckSubscriptingKind(Expr *FromE) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::checkPseudoObjectRValue(Expr *E) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc, UnaryOperatorKind opcode, Expr *op) { llvm_unreachable("HLSL does not support ObjC constructs"); } ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS) { llvm_unreachable("HLSL does not support ObjC constructs"); } Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) { llvm_unreachable("HLSL does not support ObjC constructs"); } #else // HLSL Change Ends namespace { // Basically just a very focused copy of TreeTransform. template <class T> struct Rebuilder { Sema &S; Rebuilder(Sema &S) : S(S) {} T &getDerived() { return static_cast<T&>(*this); } Expr *rebuild(Expr *e) { // Fast path: nothing to look through. if (typename T::specific_type *specific = dyn_cast<typename T::specific_type>(e)) return getDerived().rebuildSpecific(specific); // Otherwise, we should look through and rebuild anything that // IgnoreParens would. 
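// (Concretely, the cases handled below: ParenExpr, '__extension__'
// unary operators, '_Generic' selections, and '__builtin_choose_expr'.)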
if (ParenExpr *parens = dyn_cast<ParenExpr>(e)) { e = rebuild(parens->getSubExpr()); return new (S.Context) ParenExpr(parens->getLParen(), parens->getRParen(), e); } if (UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) { assert(uop->getOpcode() == UO_Extension); e = rebuild(uop->getSubExpr()); return new (S.Context) UnaryOperator(e, uop->getOpcode(), uop->getType(), uop->getValueKind(), uop->getObjectKind(), uop->getOperatorLoc()); } if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) { assert(!gse->isResultDependent()); unsigned resultIndex = gse->getResultIndex(); unsigned numAssocs = gse->getNumAssocs(); SmallVector<Expr*, 8> assocs(numAssocs); SmallVector<TypeSourceInfo*, 8> assocTypes(numAssocs); for (unsigned i = 0; i != numAssocs; ++i) { Expr *assoc = gse->getAssocExpr(i); if (i == resultIndex) assoc = rebuild(assoc); assocs[i] = assoc; assocTypes[i] = gse->getAssocTypeSourceInfo(i); } return new (S.Context) GenericSelectionExpr(S.Context, gse->getGenericLoc(), gse->getControllingExpr(), assocTypes, assocs, gse->getDefaultLoc(), gse->getRParenLoc(), gse->containsUnexpandedParameterPack(), resultIndex); } if (ChooseExpr *ce = dyn_cast<ChooseExpr>(e)) { assert(!ce->isConditionDependent()); Expr *LHS = ce->getLHS(), *RHS = ce->getRHS(); Expr *&rebuiltExpr = ce->isConditionTrue() ? LHS : RHS; rebuiltExpr = rebuild(rebuiltExpr); return new (S.Context) ChooseExpr(ce->getBuiltinLoc(), ce->getCond(), LHS, RHS, rebuiltExpr->getType(), rebuiltExpr->getValueKind(), rebuiltExpr->getObjectKind(), ce->getRParenLoc(), ce->isConditionTrue(), rebuiltExpr->isTypeDependent(), rebuiltExpr->isValueDependent()); } llvm_unreachable("bad expression to rebuild!"); } }; struct ObjCPropertyRefRebuilder : Rebuilder<ObjCPropertyRefRebuilder> { Expr *NewBase; ObjCPropertyRefRebuilder(Sema &S, Expr *newBase) : Rebuilder<ObjCPropertyRefRebuilder>(S), NewBase(newBase) {} typedef ObjCPropertyRefExpr specific_type; Expr *rebuildSpecific(ObjCPropertyRefExpr *refExpr) { // Fortunately, the constraint that we're rebuilding something // with a base limits the number of cases here. 
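// (Super and class receivers carry no base subexpression, so only object
// receivers are ever rebuilt through here; see
// ObjCPropertyOpBuilder::rebuildAndCaptureObject below.)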
assert(refExpr->isObjectReceiver()); if (refExpr->isExplicitProperty()) { return new (S.Context) ObjCPropertyRefExpr(refExpr->getExplicitProperty(), refExpr->getType(), refExpr->getValueKind(), refExpr->getObjectKind(), refExpr->getLocation(), NewBase); } return new (S.Context) ObjCPropertyRefExpr(refExpr->getImplicitPropertyGetter(), refExpr->getImplicitPropertySetter(), refExpr->getType(), refExpr->getValueKind(), refExpr->getObjectKind(),refExpr->getLocation(), NewBase); } }; struct ObjCSubscriptRefRebuilder : Rebuilder<ObjCSubscriptRefRebuilder> { Expr *NewBase; Expr *NewKeyExpr; ObjCSubscriptRefRebuilder(Sema &S, Expr *newBase, Expr *newKeyExpr) : Rebuilder<ObjCSubscriptRefRebuilder>(S), NewBase(newBase), NewKeyExpr(newKeyExpr) {} typedef ObjCSubscriptRefExpr specific_type; Expr *rebuildSpecific(ObjCSubscriptRefExpr *refExpr) { assert(refExpr->getBaseExpr()); assert(refExpr->getKeyExpr()); return new (S.Context) ObjCSubscriptRefExpr(NewBase, NewKeyExpr, refExpr->getType(), refExpr->getValueKind(), refExpr->getObjectKind(),refExpr->getAtIndexMethodDecl(), refExpr->setAtIndexMethodDecl(), refExpr->getRBracket()); } }; struct MSPropertyRefRebuilder : Rebuilder<MSPropertyRefRebuilder> { Expr *NewBase; MSPropertyRefRebuilder(Sema &S, Expr *newBase) : Rebuilder<MSPropertyRefRebuilder>(S), NewBase(newBase) {} typedef MSPropertyRefExpr specific_type; Expr *rebuildSpecific(MSPropertyRefExpr *refExpr) { assert(refExpr->getBaseExpr()); return new (S.Context) MSPropertyRefExpr(NewBase, refExpr->getPropertyDecl(), refExpr->isArrow(), refExpr->getType(), refExpr->getValueKind(), refExpr->getQualifierLoc(), refExpr->getMemberLoc()); } }; class PseudoOpBuilder { public: Sema &S; unsigned ResultIndex; SourceLocation GenericLoc; SmallVector<Expr *, 4> Semantics; PseudoOpBuilder(Sema &S, SourceLocation genericLoc) : S(S), ResultIndex(PseudoObjectExpr::NoResult), GenericLoc(genericLoc) {} virtual ~PseudoOpBuilder() {} /// Add a normal semantic expression. void addSemanticExpr(Expr *semantic) { Semantics.push_back(semantic); } /// Add the 'result' semantic expression. void addResultSemanticExpr(Expr *resultExpr) { assert(ResultIndex == PseudoObjectExpr::NoResult); ResultIndex = Semantics.size(); Semantics.push_back(resultExpr); } ExprResult buildRValueOperation(Expr *op); ExprResult buildAssignmentOperation(Scope *Sc, SourceLocation opLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS); ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc, UnaryOperatorKind opcode, Expr *op); virtual ExprResult complete(Expr *syntacticForm); OpaqueValueExpr *capture(Expr *op); OpaqueValueExpr *captureValueAsResult(Expr *op); void setResultToLastSemantic() { assert(ResultIndex == PseudoObjectExpr::NoResult); ResultIndex = Semantics.size() - 1; } /// Return true if assignments have a non-void result. bool CanCaptureValue(Expr *exp) { if (exp->isGLValue()) return true; QualType ty = exp->getType(); assert(!ty->isIncompleteType()); assert(!ty->isDependentType()); if (const CXXRecordDecl *ClassDecl = ty->getAsCXXRecordDecl()) return ClassDecl->isTriviallyCopyable(); return true; } virtual Expr *rebuildAndCaptureObject(Expr *) = 0; virtual ExprResult buildGet() = 0; virtual ExprResult buildSet(Expr *, SourceLocation, bool captureSetValueAsResult) = 0; }; /// A PseudoOpBuilder for Objective-C \@properties. 
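///
/// For a compound use such as 'obj.count += 2' (names illustrative), the
/// semantic form built here is roughly equivalent to
/// \code
///   [obj setCount: [obj count] + 2]
/// \endcode
/// with 'obj' captured in an OpaqueValueExpr so it is evaluated only once.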
class ObjCPropertyOpBuilder : public PseudoOpBuilder { ObjCPropertyRefExpr *RefExpr; ObjCPropertyRefExpr *SyntacticRefExpr; OpaqueValueExpr *InstanceReceiver; ObjCMethodDecl *Getter; ObjCMethodDecl *Setter; Selector SetterSelector; Selector GetterSelector; public: ObjCPropertyOpBuilder(Sema &S, ObjCPropertyRefExpr *refExpr) : PseudoOpBuilder(S, refExpr->getLocation()), RefExpr(refExpr), SyntacticRefExpr(nullptr), InstanceReceiver(nullptr), Getter(nullptr), Setter(nullptr) { } ExprResult buildRValueOperation(Expr *op); ExprResult buildAssignmentOperation(Scope *Sc, SourceLocation opLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS); ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc, UnaryOperatorKind opcode, Expr *op); bool tryBuildGetOfReference(Expr *op, ExprResult &result); bool findSetter(bool warn=true); bool findGetter(); void DiagnoseUnsupportedPropertyUse(); Expr *rebuildAndCaptureObject(Expr *syntacticBase) override; ExprResult buildGet() override; ExprResult buildSet(Expr *op, SourceLocation, bool) override; ExprResult complete(Expr *SyntacticForm) override; bool isWeakProperty() const; }; /// A PseudoOpBuilder for Objective-C array/dictionary indexing. class ObjCSubscriptOpBuilder : public PseudoOpBuilder { ObjCSubscriptRefExpr *RefExpr; OpaqueValueExpr *InstanceBase; OpaqueValueExpr *InstanceKey; ObjCMethodDecl *AtIndexGetter; Selector AtIndexGetterSelector; ObjCMethodDecl *AtIndexSetter; Selector AtIndexSetterSelector; public: ObjCSubscriptOpBuilder(Sema &S, ObjCSubscriptRefExpr *refExpr) : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()), RefExpr(refExpr), InstanceBase(nullptr), InstanceKey(nullptr), AtIndexGetter(nullptr), AtIndexSetter(nullptr) {} ExprResult buildRValueOperation(Expr *op); ExprResult buildAssignmentOperation(Scope *Sc, SourceLocation opLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS); Expr *rebuildAndCaptureObject(Expr *syntacticBase) override; bool findAtIndexGetter(); bool findAtIndexSetter(); ExprResult buildGet() override; ExprResult buildSet(Expr *op, SourceLocation, bool) override; }; class MSPropertyOpBuilder : public PseudoOpBuilder { MSPropertyRefExpr *RefExpr; public: MSPropertyOpBuilder(Sema &S, MSPropertyRefExpr *refExpr) : PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()), RefExpr(refExpr) {} Expr *rebuildAndCaptureObject(Expr *) override; ExprResult buildGet() override; ExprResult buildSet(Expr *op, SourceLocation, bool) override; }; } /// Capture the given expression in an OpaqueValueExpr. OpaqueValueExpr *PseudoOpBuilder::capture(Expr *e) { // Make a new OVE whose source is the given expression. OpaqueValueExpr *captured = new (S.Context) OpaqueValueExpr(GenericLoc, e->getType(), e->getValueKind(), e->getObjectKind(), e); // Make sure we bind that in the semantics. addSemanticExpr(captured); return captured; } /// Capture the given expression as the result of this pseudo-object /// operation. This routine is safe against expressions which may /// already be captured. /// /// \returns the captured expression, which will be the /// same as the input if the input was already captured OpaqueValueExpr *PseudoOpBuilder::captureValueAsResult(Expr *e) { assert(ResultIndex == PseudoObjectExpr::NoResult); // If the expression hasn't already been captured, just capture it // and set the new semantic if (!isa<OpaqueValueExpr>(e)) { OpaqueValueExpr *cap = capture(e); setResultToLastSemantic(); return cap; } // Otherwise, it must already be one of our semantic expressions; // set ResultIndex to its index. 
unsigned index = 0; for (;; ++index) { assert(index < Semantics.size() && "captured expression not found in semantics!"); if (e == Semantics[index]) break; } ResultIndex = index; return cast<OpaqueValueExpr>(e); } /// The routine which creates the final PseudoObjectExpr. ExprResult PseudoOpBuilder::complete(Expr *syntactic) { return PseudoObjectExpr::Create(S.Context, syntactic, Semantics, ResultIndex); } /// The main skeleton for building an r-value operation. ExprResult PseudoOpBuilder::buildRValueOperation(Expr *op) { Expr *syntacticBase = rebuildAndCaptureObject(op); ExprResult getExpr = buildGet(); if (getExpr.isInvalid()) return ExprError(); addResultSemanticExpr(getExpr.get()); return complete(syntacticBase); } /// The basic skeleton for building a simple or compound /// assignment operation. ExprResult PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS) { assert(BinaryOperator::isAssignmentOp(opcode)); // Recover from user error if (isa<UnresolvedLookupExpr>(RHS)) return ExprError(); Expr *syntacticLHS = rebuildAndCaptureObject(LHS); OpaqueValueExpr *capturedRHS = capture(RHS); Expr *syntactic; ExprResult result; if (opcode == BO_Assign) { result = capturedRHS; syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS, opcode, capturedRHS->getType(), capturedRHS->getValueKind(), OK_Ordinary, opcLoc, false); } else { ExprResult opLHS = buildGet(); if (opLHS.isInvalid()) return ExprError(); // Build an ordinary, non-compound operation. BinaryOperatorKind nonCompound = BinaryOperator::getOpForCompoundAssignment(opcode); result = S.BuildBinOp(Sc, opcLoc, nonCompound, opLHS.get(), capturedRHS); if (result.isInvalid()) return ExprError(); syntactic = new (S.Context) CompoundAssignOperator(syntacticLHS, capturedRHS, opcode, result.get()->getType(), result.get()->getValueKind(), OK_Ordinary, opLHS.get()->getType(), result.get()->getType(), opcLoc, false); } // The result of the assignment, if not void, is the value set into // the l-value. result = buildSet(result.get(), opcLoc, /*captureSetValueAsResult*/ true); if (result.isInvalid()) return ExprError(); addSemanticExpr(result.get()); return complete(syntactic); } /// The basic skeleton for building an increment or decrement /// operation. ExprResult PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc, UnaryOperatorKind opcode, Expr *op) { assert(UnaryOperator::isIncrementDecrementOp(opcode)); Expr *syntacticOp = rebuildAndCaptureObject(op); // Load the value. ExprResult result = buildGet(); if (result.isInvalid()) return ExprError(); QualType resultType = result.get()->getType(); // That's the postfix result. if (UnaryOperator::isPostfix(opcode) && (result.get()->isTypeDependent() || CanCaptureValue(result.get()))) { result = capture(result.get()); setResultToLastSemantic(); } // Add or subtract a literal 1. llvm::APInt oneV(S.Context.getTypeSize(S.Context.IntTy), 1); Expr *one = IntegerLiteral::Create(S.Context, oneV, S.Context.IntTy, GenericLoc); if (UnaryOperator::isIncrementOp(opcode)) { result = S.BuildBinOp(Sc, opcLoc, BO_Add, result.get(), one); } else { result = S.BuildBinOp(Sc, opcLoc, BO_Sub, result.get(), one); } if (result.isInvalid()) return ExprError(); // Store that back into the result. The value stored is the result // of a prefix operation. 
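// (Thus for postfix 'obj.prop++' the pseudo-object's result is the value
// captured above, before the add; for prefix '++obj.prop' it is the value
// being stored here.)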
result = buildSet(result.get(), opcLoc, UnaryOperator::isPrefix(opcode)); if (result.isInvalid()) return ExprError(); addSemanticExpr(result.get()); UnaryOperator *syntactic = new (S.Context) UnaryOperator(syntacticOp, opcode, resultType, VK_LValue, OK_Ordinary, opcLoc); return complete(syntactic); } //===----------------------------------------------------------------------===// // Objective-C @property and implicit property references //===----------------------------------------------------------------------===// /// Look up a method in the receiver type of an Objective-C property /// reference. static ObjCMethodDecl *LookupMethodInReceiverType(Sema &S, Selector sel, const ObjCPropertyRefExpr *PRE) { if (PRE->isObjectReceiver()) { const ObjCObjectPointerType *PT = PRE->getBase()->getType()->castAs<ObjCObjectPointerType>(); // Special case for 'self' in class method implementations. if (PT->isObjCClassType() && S.isSelfExpr(const_cast<Expr*>(PRE->getBase()))) { // This cast is safe because isSelfExpr is only true within // methods. ObjCMethodDecl *method = cast<ObjCMethodDecl>(S.CurContext->getNonClosureAncestor()); return S.LookupMethodInObjectType(sel, S.Context.getObjCInterfaceType(method->getClassInterface()), /*instance*/ false); } return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true); } if (PRE->isSuperReceiver()) { if (const ObjCObjectPointerType *PT = PRE->getSuperReceiverType()->getAs<ObjCObjectPointerType>()) return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true); return S.LookupMethodInObjectType(sel, PRE->getSuperReceiverType(), false); } assert(PRE->isClassReceiver() && "Invalid expression"); QualType IT = S.Context.getObjCInterfaceType(PRE->getClassReceiver()); return S.LookupMethodInObjectType(sel, IT, false); } bool ObjCPropertyOpBuilder::isWeakProperty() const { QualType T; if (RefExpr->isExplicitProperty()) { const ObjCPropertyDecl *Prop = RefExpr->getExplicitProperty(); if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_weak) return !Prop->hasAttr<IBOutletAttr>(); T = Prop->getType(); } else if (Getter) { T = Getter->getReturnType(); } else { return false; } return T.getObjCLifetime() == Qualifiers::OCL_Weak; } bool ObjCPropertyOpBuilder::findGetter() { if (Getter) return true; // For implicit properties, just trust the lookup we already did. if (RefExpr->isImplicitProperty()) { if ((Getter = RefExpr->getImplicitPropertyGetter())) { GetterSelector = Getter->getSelector(); return true; } else { // Must build the getter selector the hard way. ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter(); assert(setter && "both setter and getter are null - cannot happen"); IdentifierInfo *setterName = setter->getSelector().getIdentifierInfoForSlot(0); IdentifierInfo *getterName = &S.Context.Idents.get(setterName->getName().substr(3)); GetterSelector = S.PP.getSelectorTable().getNullarySelector(getterName); return false; } } ObjCPropertyDecl *prop = RefExpr->getExplicitProperty(); Getter = LookupMethodInReceiverType(S, prop->getGetterName(), RefExpr); return (Getter != nullptr); } /// Try to find the most accurate setter declaration for the property /// reference. /// /// \return true if a setter was found, in which case Setter is set to it. bool ObjCPropertyOpBuilder::findSetter(bool warn) { // For implicit properties, just trust the lookup we already did.
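// (Illustrative: for an implicit property reference 'obj.name' where only
// a getter method was found, the setter selector 'setName:' is derived
// from the getter name below so diagnostics can still name it.)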
if (RefExpr->isImplicitProperty()) { if (ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter()) { Setter = setter; SetterSelector = setter->getSelector(); return true; } else { IdentifierInfo *getterName = RefExpr->getImplicitPropertyGetter()->getSelector() .getIdentifierInfoForSlot(0); SetterSelector = SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(), S.PP.getSelectorTable(), getterName); return false; } } // For explicit properties, this is more involved. ObjCPropertyDecl *prop = RefExpr->getExplicitProperty(); SetterSelector = prop->getSetterName(); // Do a normal method lookup first. if (ObjCMethodDecl *setter = LookupMethodInReceiverType(S, SetterSelector, RefExpr)) { if (setter->isPropertyAccessor() && warn) if (const ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(setter->getDeclContext())) { StringRef thisPropertyName = prop->getName(); // Try flipping the case of the first character. char front = thisPropertyName.front(); front = isLowercase(front) ? toUppercase(front) : toLowercase(front); SmallString<100> PropertyName = thisPropertyName; PropertyName[0] = front; IdentifierInfo *AltMember = &S.PP.getIdentifierTable().get(PropertyName); if (ObjCPropertyDecl *prop1 = IFace->FindPropertyDeclaration(AltMember)) if (prop != prop1 && (prop1->getSetterMethodDecl() == setter)) { S.Diag(RefExpr->getExprLoc(), diag::error_property_setter_ambiguous_use) << prop << prop1 << setter->getSelector(); S.Diag(prop->getLocation(), diag::note_property_declare); S.Diag(prop1->getLocation(), diag::note_property_declare); } } Setter = setter; return true; } // That can fail in the somewhat crazy situation that we're // type-checking a message send within the @interface declaration // that declared the @property. But it's not clear that that's // valuable to support. return false; } void ObjCPropertyOpBuilder::DiagnoseUnsupportedPropertyUse() { if (S.getCurLexicalContext()->isObjCContainer() && S.getCurLexicalContext()->getDeclKind() != Decl::ObjCCategoryImpl && S.getCurLexicalContext()->getDeclKind() != Decl::ObjCImplementation) { if (ObjCPropertyDecl *prop = RefExpr->getExplicitProperty()) { S.Diag(RefExpr->getLocation(), diag::err_property_function_in_objc_container); S.Diag(prop->getLocation(), diag::note_property_declare); } } } /// Capture the base object of an Objective-C property expression. Expr *ObjCPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) { assert(InstanceReceiver == nullptr); // If we have a base, capture it in an OVE and rebuild the syntactic // form to use the OVE as its base. if (RefExpr->isObjectReceiver()) { InstanceReceiver = capture(RefExpr->getBase()); syntacticBase = ObjCPropertyRefRebuilder(S, InstanceReceiver).rebuild(syntacticBase); } if (ObjCPropertyRefExpr * refE = dyn_cast<ObjCPropertyRefExpr>(syntacticBase->IgnoreParens())) SyntacticRefExpr = refE; return syntacticBase; } /// Load from an Objective-C property reference. ExprResult ObjCPropertyOpBuilder::buildGet() { findGetter(); if (!Getter) { DiagnoseUnsupportedPropertyUse(); return ExprError(); } if (SyntacticRefExpr) SyntacticRefExpr->setIsMessagingGetter(); QualType receiverType = RefExpr->getReceiverType(S.Context); if (!Getter->isImplicit()) S.DiagnoseUseOfDecl(Getter, GenericLoc, nullptr, true); // Build a message-send. 
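// (Illustrative: for a load from 'obj.count' this builds the implicit
// message send '[obj count]'; class properties get a class message
// instead.)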
ExprResult msg; if ((Getter->isInstanceMethod() && !RefExpr->isClassReceiver()) || RefExpr->isObjectReceiver()) { assert(InstanceReceiver || RefExpr->isSuperReceiver()); msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType, GenericLoc, Getter->getSelector(), Getter, None); } else { msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(), GenericLoc, Getter->getSelector(), Getter, None); } return msg; } /// Store to an Objective-C property reference. /// /// \param captureSetValueAsResult If true, capture the actual /// value being set as the value of the property operation. ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc, bool captureSetValueAsResult) { if (!findSetter(false)) { DiagnoseUnsupportedPropertyUse(); return ExprError(); } if (SyntacticRefExpr) SyntacticRefExpr->setIsMessagingSetter(); QualType receiverType = RefExpr->getReceiverType(S.Context); // Use assignment constraints when possible; they give us better // diagnostics. "When possible" basically means anything except a // C++ class type. if (!S.getLangOpts().CPlusPlus || !op->getType()->isRecordType()) { QualType paramType = (*Setter->param_begin())->getType() .substObjCMemberType( receiverType, Setter->getDeclContext(), ObjCSubstitutionContext::Parameter); if (!S.getLangOpts().CPlusPlus || !paramType->isRecordType()) { ExprResult opResult = op; Sema::AssignConvertType assignResult = S.CheckSingleAssignmentConstraints(paramType, opResult); if (S.DiagnoseAssignmentResult(assignResult, opcLoc, paramType, op->getType(), opResult.get(), Sema::AA_Assigning)) return ExprError(); op = opResult.get(); assert(op && "successful assignment left argument invalid?"); } else if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(op)) { Expr *Initializer = OVE->getSourceExpr(); // passing C++11 style initialized temporaries to objc++ properties // requires special treatment by removing OpaqueValueExpr so type // conversion takes place and adding the OpaqueValueExpr later on. if (isa<InitListExpr>(Initializer) && Initializer->getType()->isVoidType()) { op = Initializer; } } } // Arguments. Expr *args[] = { op }; // Build a message-send. ExprResult msg; if (!Setter->isImplicit()) S.DiagnoseUseOfDecl(Setter, GenericLoc, nullptr, true); if ((Setter->isInstanceMethod() && !RefExpr->isClassReceiver()) || RefExpr->isObjectReceiver()) { msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType, GenericLoc, SetterSelector, Setter, MultiExprArg(args, 1)); } else { msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(), GenericLoc, SetterSelector, Setter, MultiExprArg(args, 1)); } if (!msg.isInvalid() && captureSetValueAsResult) { ObjCMessageExpr *msgExpr = cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit()); Expr *arg = msgExpr->getArg(0); if (CanCaptureValue(arg)) msgExpr->setArg(0, captureValueAsResult(arg)); } return msg; } /// @property-specific behavior for doing lvalue-to-rvalue conversion. ExprResult ObjCPropertyOpBuilder::buildRValueOperation(Expr *op) { // Explicit properties always have getters, but implicit ones don't. // Check that before proceeding. 
if (RefExpr->isImplicitProperty() && !RefExpr->getImplicitPropertyGetter()) { S.Diag(RefExpr->getLocation(), diag::err_getter_not_found) << RefExpr->getSourceRange(); return ExprError(); } ExprResult result = PseudoOpBuilder::buildRValueOperation(op); if (result.isInvalid()) return ExprError(); if (RefExpr->isExplicitProperty() && !Getter->hasRelatedResultType()) S.DiagnosePropertyAccessorMismatch(RefExpr->getExplicitProperty(), Getter, RefExpr->getLocation()); // As a special case, if the method returns 'id', try to get // a better type from the property. if (RefExpr->isExplicitProperty() && result.get()->isRValue()) { QualType receiverType = RefExpr->getReceiverType(S.Context); QualType propType = RefExpr->getExplicitProperty() ->getUsageType(receiverType); if (result.get()->getType()->isObjCIdType()) { if (const ObjCObjectPointerType *ptr = propType->getAs<ObjCObjectPointerType>()) { if (!ptr->isObjCIdType()) result = S.ImpCastExprToType(result.get(), propType, CK_BitCast); } } if (S.getLangOpts().ObjCAutoRefCount) { Qualifiers::ObjCLifetime LT = propType.getObjCLifetime(); if (LT == Qualifiers::OCL_Weak) if (!S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, RefExpr->getLocation())) S.getCurFunction()->markSafeWeakUse(RefExpr); } } return result; } /// Try to build this as a call to a getter that returns a reference. /// /// \return true if it was possible, whether or not it actually /// succeeded. bool ObjCPropertyOpBuilder::tryBuildGetOfReference(Expr *op, ExprResult &result) { if (!S.getLangOpts().CPlusPlus) return false; findGetter(); if (!Getter) { // The property has no setter and no getter! This can happen if the type is // invalid. Errors have already been reported. result = ExprError(); return true; } // Only do this if the getter returns an l-value reference type. QualType resultType = Getter->getReturnType(); if (!resultType->isLValueReferenceType()) return false; result = buildRValueOperation(op); return true; } /// @property-specific behavior for doing assignments. ExprResult ObjCPropertyOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS) { assert(BinaryOperator::isAssignmentOp(opcode)); // If there's no setter, we have no choice but to try to assign to // the result of the getter. if (!findSetter()) { ExprResult result; if (tryBuildGetOfReference(LHS, result)) { if (result.isInvalid()) return ExprError(); return S.BuildBinOp(Sc, opcLoc, opcode, result.get(), RHS); } // Otherwise, it's an error. S.Diag(opcLoc, diag::err_nosetter_property_assignment) << unsigned(RefExpr->isImplicitProperty()) << SetterSelector << LHS->getSourceRange() << RHS->getSourceRange(); return ExprError(); } // If there is a setter, we definitely want to use it. // Verify that we can do a compound assignment. if (opcode != BO_Assign && !findGetter()) { S.Diag(opcLoc, diag::err_nogetter_property_compound_assignment) << LHS->getSourceRange() << RHS->getSourceRange(); return ExprError(); } ExprResult result = PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS); if (result.isInvalid()) return ExprError(); // Various warnings about property assignments in ARC. if (S.getLangOpts().ObjCAutoRefCount && InstanceReceiver) { S.checkRetainCycles(InstanceReceiver->getSourceExpr(), RHS); S.checkUnsafeExprAssigns(opcLoc, LHS, RHS); } return result; } /// @property-specific behavior for doing increments and decrements.
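/// For instance (names illustrative), 'obj.count++' needs both accessors,
/// since it is rewritten along the lines of
/// \code
///   [obj setCount: [obj count] + 1]
/// \endcode
/// which is why a missing setter or getter is diagnosed below.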
ExprResult ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc, UnaryOperatorKind opcode, Expr *op) { // If there's no setter, we have no choice but to try to assign to // the result of the getter. if (!findSetter()) { ExprResult result; if (tryBuildGetOfReference(op, result)) { if (result.isInvalid()) return ExprError(); return S.BuildUnaryOp(Sc, opcLoc, opcode, result.get()); } // Otherwise, it's an error. S.Diag(opcLoc, diag::err_nosetter_property_incdec) << unsigned(RefExpr->isImplicitProperty()) << unsigned(UnaryOperator::isDecrementOp(opcode)) << SetterSelector << op->getSourceRange(); return ExprError(); } // If there is a setter, we definitely want to use it. // We also need a getter. if (!findGetter()) { assert(RefExpr->isImplicitProperty()); S.Diag(opcLoc, diag::err_nogetter_property_incdec) << unsigned(UnaryOperator::isDecrementOp(opcode)) << GetterSelector << op->getSourceRange(); return ExprError(); } return PseudoOpBuilder::buildIncDecOperation(Sc, opcLoc, opcode, op); } ExprResult ObjCPropertyOpBuilder::complete(Expr *SyntacticForm) { if (S.getLangOpts().ObjCAutoRefCount && isWeakProperty() && !S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, SyntacticForm->getLocStart())) S.recordUseOfEvaluatedWeak(SyntacticRefExpr, SyntacticRefExpr->isMessagingGetter()); return PseudoOpBuilder::complete(SyntacticForm); } // ObjCSubscript build stuff. // /// Objective-C subscripting-specific behavior for doing lvalue-to-rvalue /// conversion. /// FIXME. Remove this routine if it is proven that no additional /// specificity is needed. ExprResult ObjCSubscriptOpBuilder::buildRValueOperation(Expr *op) { ExprResult result = PseudoOpBuilder::buildRValueOperation(op); if (result.isInvalid()) return ExprError(); return result; } /// Objective-C subscripting-specific behavior for doing assignments. ExprResult ObjCSubscriptOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS) { assert(BinaryOperator::isAssignmentOp(opcode)); // There must be a method to do the Index'ed assignment. if (!findAtIndexSetter()) return ExprError(); // Verify that we can do a compound assignment. if (opcode != BO_Assign && !findAtIndexGetter()) return ExprError(); ExprResult result = PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS); if (result.isInvalid()) return ExprError(); // Various warnings about objc Index'ed assignments in ARC. if (S.getLangOpts().ObjCAutoRefCount && InstanceBase) { S.checkRetainCycles(InstanceBase->getSourceExpr(), RHS); S.checkUnsafeExprAssigns(opcLoc, LHS, RHS); } return result; } /// Capture the base object of an Objective-C Index'ed expression. Expr *ObjCSubscriptOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) { assert(InstanceBase == nullptr); // Capture base expression in an OVE and rebuild the syntactic // form to use the OVE as its base expression. InstanceBase = capture(RefExpr->getBaseExpr()); InstanceKey = capture(RefExpr->getKeyExpr()); syntacticBase = ObjCSubscriptRefRebuilder(S, InstanceBase, InstanceKey).rebuild(syntacticBase); return syntacticBase; } /// CheckSubscriptingKind - This routine decides what type /// of indexing is represented by "FromE". Sema::ObjCSubscriptKind Sema::CheckSubscriptingKind(Expr *FromE) { // If the expression already has integral or enumeration type, we're golden.
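// (e.g. 'array[0]' subscripts with an integer and is array-style, while
// 'dict[@"key"]' subscripts with an object and is dictionary-style; a C++
// class key must convert unambiguously to exactly one of the two.)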
QualType T = FromE->getType(); if (T->isIntegralOrEnumerationType()) return OS_Array; // If we don't have a class type in C++, there's no way we can get an // expression of integral or enumeration type. const RecordType *RecordTy = T->getAs<RecordType>(); if (!RecordTy && (T->isObjCObjectPointerType() || T->isVoidPointerType())) // All other scalar cases are assumed to be dictionary indexing which // caller handles, with diagnostics if needed. return OS_Dictionary; if (!getLangOpts().CPlusPlus || !RecordTy || RecordTy->isIncompleteType()) { // No indexing can be done. Issue diagnostics and quit. const Expr *IndexExpr = FromE->IgnoreParenImpCasts(); if (isa<StringLiteral>(IndexExpr)) Diag(FromE->getExprLoc(), diag::err_objc_subscript_pointer) << T << FixItHint::CreateInsertion(FromE->getExprLoc(), "@"); else Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion) << T; return OS_Error; } // We must have a complete class type. if (RequireCompleteType(FromE->getExprLoc(), T, diag::err_objc_index_incomplete_class_type, FromE)) return OS_Error; // Look for a conversion to an integral, enumeration type, or // objective-C pointer type. int NoIntegrals=0, NoObjCIdPointers=0; SmallVector<CXXConversionDecl *, 4> ConversionDecls; for (NamedDecl *D : cast<CXXRecordDecl>(RecordTy->getDecl()) ->getVisibleConversionFunctions()) { if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D->getUnderlyingDecl())) { QualType CT = Conversion->getConversionType().getNonReferenceType(); if (CT->isIntegralOrEnumerationType()) { ++NoIntegrals; ConversionDecls.push_back(Conversion); } else if (CT->isObjCIdType() ||CT->isBlockPointerType()) { ++NoObjCIdPointers; ConversionDecls.push_back(Conversion); } } } if (NoIntegrals ==1 && NoObjCIdPointers == 0) return OS_Array; if (NoIntegrals == 0 && NoObjCIdPointers == 1) return OS_Dictionary; if (NoIntegrals == 0 && NoObjCIdPointers == 0) { // No conversion function was found. Issue diagnostic and return. Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion) << FromE->getType(); return OS_Error; } Diag(FromE->getExprLoc(), diag::err_objc_multiple_subscript_type_conversion) << FromE->getType(); for (unsigned int i = 0; i < ConversionDecls.size(); i++) Diag(ConversionDecls[i]->getLocation(), diag::not_conv_function_declared_at); return OS_Error; } /// CheckKeyForObjCARCConversion - This routine suggests bridge casting of CF /// objects used as dictionary subscript key objects. static void CheckKeyForObjCARCConversion(Sema &S, QualType ContainerT, Expr *Key) { if (ContainerT.isNull()) return; // dictionary subscripting. 
// - (id)objectForKeyedSubscript:(id)key; IdentifierInfo *KeyIdents[] = { &S.Context.Idents.get("objectForKeyedSubscript") }; Selector GetterSelector = S.Context.Selectors.getSelector(1, KeyIdents); ObjCMethodDecl *Getter = S.LookupMethodInObjectType(GetterSelector, ContainerT, true /*instance*/); if (!Getter) return; QualType T = Getter->parameters()[0]->getType(); S.CheckObjCARCConversion(Key->getSourceRange(), T, Key, Sema::CCK_ImplicitConversion); } bool ObjCSubscriptOpBuilder::findAtIndexGetter() { if (AtIndexGetter) return true; Expr *BaseExpr = RefExpr->getBaseExpr(); QualType BaseT = BaseExpr->getType(); QualType ResultType; if (const ObjCObjectPointerType *PTy = BaseT->getAs<ObjCObjectPointerType>()) { ResultType = PTy->getPointeeType(); } Sema::ObjCSubscriptKind Res = S.CheckSubscriptingKind(RefExpr->getKeyExpr()); if (Res == Sema::OS_Error) { if (S.getLangOpts().ObjCAutoRefCount) CheckKeyForObjCARCConversion(S, ResultType, RefExpr->getKeyExpr()); return false; } bool arrayRef = (Res == Sema::OS_Array); if (ResultType.isNull()) { S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type) << BaseExpr->getType() << arrayRef; return false; } if (!arrayRef) { // dictionary subscripting. // - (id)objectForKeyedSubscript:(id)key; IdentifierInfo *KeyIdents[] = { &S.Context.Idents.get("objectForKeyedSubscript") }; AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents); } else { // - (id)objectAtIndexedSubscript:(size_t)index; IdentifierInfo *KeyIdents[] = { &S.Context.Idents.get("objectAtIndexedSubscript") }; AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents); } AtIndexGetter = S.LookupMethodInObjectType(AtIndexGetterSelector, ResultType, true /*instance*/); bool receiverIdType = (BaseT->isObjCIdType() || BaseT->isObjCQualifiedIdType()); if (!AtIndexGetter && S.getLangOpts().DebuggerObjCLiteral) { AtIndexGetter = ObjCMethodDecl::Create(S.Context, SourceLocation(), SourceLocation(), AtIndexGetterSelector, S.Context.getObjCIdType() /*ReturnType*/, nullptr /*TypeSourceInfo */, S.Context.getTranslationUnitDecl(), true /*Instance*/, false/*isVariadic*/, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); ParmVarDecl *Argument = ParmVarDecl::Create(S.Context, AtIndexGetter, SourceLocation(), SourceLocation(), arrayRef ? &S.Context.Idents.get("index") : &S.Context.Idents.get("key"), arrayRef ? S.Context.UnsignedLongTy : S.Context.getObjCIdType(), /*TInfo=*/nullptr, SC_None, nullptr); AtIndexGetter->setMethodParams(S.Context, Argument, None); } if (!AtIndexGetter) { if (!receiverIdType) { S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_method_not_found) << BaseExpr->getType() << 0 << arrayRef; return false; } AtIndexGetter = S.LookupInstanceMethodInGlobalPool(AtIndexGetterSelector, RefExpr->getSourceRange(), true); } if (AtIndexGetter) { QualType T = AtIndexGetter->parameters()[0]->getType(); if ((arrayRef && !T->isIntegralOrEnumerationType()) || (!arrayRef && !T->isObjCObjectPointerType())) { S.Diag(RefExpr->getKeyExpr()->getExprLoc(), arrayRef ? 
diag::err_objc_subscript_index_type : diag::err_objc_subscript_key_type) << T; S.Diag(AtIndexGetter->parameters()[0]->getLocation(), diag::note_parameter_type) << T; return false; } QualType R = AtIndexGetter->getReturnType(); if (!R->isObjCObjectPointerType()) { S.Diag(RefExpr->getKeyExpr()->getExprLoc(), diag::err_objc_indexing_method_result_type) << R << arrayRef; S.Diag(AtIndexGetter->getLocation(), diag::note_method_declared_at) << AtIndexGetter->getDeclName(); } } return true; } bool ObjCSubscriptOpBuilder::findAtIndexSetter() { if (AtIndexSetter) return true; Expr *BaseExpr = RefExpr->getBaseExpr(); QualType BaseT = BaseExpr->getType(); QualType ResultType; if (const ObjCObjectPointerType *PTy = BaseT->getAs<ObjCObjectPointerType>()) { ResultType = PTy->getPointeeType(); } Sema::ObjCSubscriptKind Res = S.CheckSubscriptingKind(RefExpr->getKeyExpr()); if (Res == Sema::OS_Error) { if (S.getLangOpts().ObjCAutoRefCount) CheckKeyForObjCARCConversion(S, ResultType, RefExpr->getKeyExpr()); return false; } bool arrayRef = (Res == Sema::OS_Array); if (ResultType.isNull()) { S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type) << BaseExpr->getType() << arrayRef; return false; } if (!arrayRef) { // dictionary subscripting. // - (void)setObject:(id)object forKeyedSubscript:(id)key; IdentifierInfo *KeyIdents[] = { &S.Context.Idents.get("setObject"), &S.Context.Idents.get("forKeyedSubscript") }; AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents); } else { // - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index; IdentifierInfo *KeyIdents[] = { &S.Context.Idents.get("setObject"), &S.Context.Idents.get("atIndexedSubscript") }; AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents); } AtIndexSetter = S.LookupMethodInObjectType(AtIndexSetterSelector, ResultType, true /*instance*/); bool receiverIdType = (BaseT->isObjCIdType() || BaseT->isObjCQualifiedIdType()); if (!AtIndexSetter && S.getLangOpts().DebuggerObjCLiteral) { TypeSourceInfo *ReturnTInfo = nullptr; QualType ReturnType = S.Context.VoidTy; AtIndexSetter = ObjCMethodDecl::Create( S.Context, SourceLocation(), SourceLocation(), AtIndexSetterSelector, ReturnType, ReturnTInfo, S.Context.getTranslationUnitDecl(), true /*Instance*/, false /*isVariadic*/, /*isPropertyAccessor=*/false, /*isImplicitlyDeclared=*/true, /*isDefined=*/false, ObjCMethodDecl::Required, false); SmallVector<ParmVarDecl *, 2> Params; ParmVarDecl *object = ParmVarDecl::Create(S.Context, AtIndexSetter, SourceLocation(), SourceLocation(), &S.Context.Idents.get("object"), S.Context.getObjCIdType(), /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(object); ParmVarDecl *key = ParmVarDecl::Create(S.Context, AtIndexSetter, SourceLocation(), SourceLocation(), arrayRef ? &S.Context.Idents.get("index") : &S.Context.Idents.get("key"), arrayRef ? 
S.Context.UnsignedLongTy : S.Context.getObjCIdType(), /*TInfo=*/nullptr, SC_None, nullptr); Params.push_back(key); AtIndexSetter->setMethodParams(S.Context, Params, None); } if (!AtIndexSetter) { if (!receiverIdType) { S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_method_not_found) << BaseExpr->getType() << 1 << arrayRef; return false; } AtIndexSetter = S.LookupInstanceMethodInGlobalPool(AtIndexSetterSelector, RefExpr->getSourceRange(), true); } bool err = false; if (AtIndexSetter && arrayRef) { QualType T = AtIndexSetter->parameters()[1]->getType(); if (!T->isIntegralOrEnumerationType()) { S.Diag(RefExpr->getKeyExpr()->getExprLoc(), diag::err_objc_subscript_index_type) << T; S.Diag(AtIndexSetter->parameters()[1]->getLocation(), diag::note_parameter_type) << T; err = true; } T = AtIndexSetter->parameters()[0]->getType(); if (!T->isObjCObjectPointerType()) { S.Diag(RefExpr->getBaseExpr()->getExprLoc(), diag::err_objc_subscript_object_type) << T << arrayRef; S.Diag(AtIndexSetter->parameters()[0]->getLocation(), diag::note_parameter_type) << T; err = true; } } else if (AtIndexSetter && !arrayRef) for (unsigned i=0; i <2; i++) { QualType T = AtIndexSetter->parameters()[i]->getType(); if (!T->isObjCObjectPointerType()) { if (i == 1) S.Diag(RefExpr->getKeyExpr()->getExprLoc(), diag::err_objc_subscript_key_type) << T; else S.Diag(RefExpr->getBaseExpr()->getExprLoc(), diag::err_objc_subscript_dic_object_type) << T; S.Diag(AtIndexSetter->parameters()[i]->getLocation(), diag::note_parameter_type) << T; err = true; } } return !err; } // Get the object at "Index" position in the container. // [BaseExpr objectAtIndexedSubscript : IndexExpr]; ExprResult ObjCSubscriptOpBuilder::buildGet() { if (!findAtIndexGetter()) return ExprError(); QualType receiverType = InstanceBase->getType(); // Build a message-send. ExprResult msg; Expr *Index = InstanceKey; // Arguments. Expr *args[] = { Index }; assert(InstanceBase); if (AtIndexGetter) S.DiagnoseUseOfDecl(AtIndexGetter, GenericLoc); msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType, GenericLoc, AtIndexGetterSelector, AtIndexGetter, MultiExprArg(args, 1)); return msg; } /// Store into the container the "op" object at "Index"'ed location /// by building this messaging expression: /// - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index; /// \param captureSetValueAsResult If true, capture the actual /// value being set as the value of the property operation. ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc, bool captureSetValueAsResult) { if (!findAtIndexSetter()) return ExprError(); if (AtIndexSetter) S.DiagnoseUseOfDecl(AtIndexSetter, GenericLoc); QualType receiverType = InstanceBase->getType(); Expr *Index = InstanceKey; // Arguments. Expr *args[] = { op, Index }; // Build a message-send. 
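// (Illustrative: 'dict[key] = newObj' is built here as
// '[dict setObject:newObj forKeyedSubscript:key]'; the array form uses
// 'setObject:atIndexedSubscript:'.)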
ExprResult msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType, GenericLoc, AtIndexSetterSelector, AtIndexSetter, MultiExprArg(args, 2)); if (!msg.isInvalid() && captureSetValueAsResult) { ObjCMessageExpr *msgExpr = cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit()); Expr *arg = msgExpr->getArg(0); if (CanCaptureValue(arg)) msgExpr->setArg(0, captureValueAsResult(arg)); } return msg; } //===----------------------------------------------------------------------===// // MSVC __declspec(property) references //===----------------------------------------------------------------------===// Expr *MSPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) { Expr *NewBase = capture(RefExpr->getBaseExpr()); syntacticBase = MSPropertyRefRebuilder(S, NewBase).rebuild(syntacticBase); return syntacticBase; } ExprResult MSPropertyOpBuilder::buildGet() { if (!RefExpr->getPropertyDecl()->hasGetter()) { S.Diag(RefExpr->getMemberLoc(), diag::err_no_accessor_for_property) << 0 /* getter */ << RefExpr->getPropertyDecl(); return ExprError(); } UnqualifiedId GetterName; IdentifierInfo *II = RefExpr->getPropertyDecl()->getGetterId(); GetterName.setIdentifier(II, RefExpr->getMemberLoc()); CXXScopeSpec SS; SS.Adopt(RefExpr->getQualifierLoc()); ExprResult GetterExpr = S.ActOnMemberAccessExpr( S.getCurScope(), RefExpr->getBaseExpr(), SourceLocation(), RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(), GetterName, nullptr); if (GetterExpr.isInvalid()) { S.Diag(RefExpr->getMemberLoc(), diag::error_cannot_find_suitable_accessor) << 0 /* getter */ << RefExpr->getPropertyDecl(); return ExprError(); } MultiExprArg ArgExprs; return S.ActOnCallExpr(S.getCurScope(), GetterExpr.get(), RefExpr->getSourceRange().getBegin(), ArgExprs, RefExpr->getSourceRange().getEnd()); } ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl, bool captureSetValueAsResult) { if (!RefExpr->getPropertyDecl()->hasSetter()) { S.Diag(RefExpr->getMemberLoc(), diag::err_no_accessor_for_property) << 1 /* setter */ << RefExpr->getPropertyDecl(); return ExprError(); } UnqualifiedId SetterName; IdentifierInfo *II = RefExpr->getPropertyDecl()->getSetterId(); SetterName.setIdentifier(II, RefExpr->getMemberLoc()); CXXScopeSpec SS; SS.Adopt(RefExpr->getQualifierLoc()); ExprResult SetterExpr = S.ActOnMemberAccessExpr( S.getCurScope(), RefExpr->getBaseExpr(), SourceLocation(), RefExpr->isArrow() ? tok::arrow : tok::period, SS, SourceLocation(), SetterName, nullptr); if (SetterExpr.isInvalid()) { S.Diag(RefExpr->getMemberLoc(), diag::error_cannot_find_suitable_accessor) << 1 /* setter */ << RefExpr->getPropertyDecl(); return ExprError(); } SmallVector<Expr*, 1> ArgExprs; ArgExprs.push_back(op); return S.ActOnCallExpr(S.getCurScope(), SetterExpr.get(), RefExpr->getSourceRange().getBegin(), ArgExprs, op->getSourceRange().getEnd()); } //===----------------------------------------------------------------------===// // General Sema routines. 
//===----------------------------------------------------------------------===// ExprResult Sema::checkPseudoObjectRValue(Expr *E) { Expr *opaqueRef = E->IgnoreParens(); if (ObjCPropertyRefExpr *refExpr = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) { ObjCPropertyOpBuilder builder(*this, refExpr); return builder.buildRValueOperation(E); } else if (ObjCSubscriptRefExpr *refExpr = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) { ObjCSubscriptOpBuilder builder(*this, refExpr); return builder.buildRValueOperation(E); } else if (MSPropertyRefExpr *refExpr = dyn_cast<MSPropertyRefExpr>(opaqueRef)) { MSPropertyOpBuilder builder(*this, refExpr); return builder.buildRValueOperation(E); } else { llvm_unreachable("unknown pseudo-object kind!"); } } /// Check an increment or decrement of a pseudo-object expression. ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc, UnaryOperatorKind opcode, Expr *op) { // Do nothing if the operand is dependent. if (op->isTypeDependent()) return new (Context) UnaryOperator(op, opcode, Context.DependentTy, VK_RValue, OK_Ordinary, opcLoc); assert(UnaryOperator::isIncrementDecrementOp(opcode)); Expr *opaqueRef = op->IgnoreParens(); if (ObjCPropertyRefExpr *refExpr = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) { ObjCPropertyOpBuilder builder(*this, refExpr); return builder.buildIncDecOperation(Sc, opcLoc, opcode, op); } else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) { Diag(opcLoc, diag::err_illegal_container_subscripting_op); return ExprError(); } else if (MSPropertyRefExpr *refExpr = dyn_cast<MSPropertyRefExpr>(opaqueRef)) { MSPropertyOpBuilder builder(*this, refExpr); return builder.buildIncDecOperation(Sc, opcLoc, opcode, op); } else { llvm_unreachable("unknown pseudo-object kind!"); } } ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc, BinaryOperatorKind opcode, Expr *LHS, Expr *RHS) { // Do nothing if either argument is dependent. if (LHS->isTypeDependent() || RHS->isTypeDependent()) return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy, VK_RValue, OK_Ordinary, opcLoc, false); // Filter out non-overload placeholder types in the RHS. if (RHS->getType()->isNonOverloadPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(RHS); if (result.isInvalid()) return ExprError(); RHS = result.get(); } Expr *opaqueRef = LHS->IgnoreParens(); if (ObjCPropertyRefExpr *refExpr = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) { ObjCPropertyOpBuilder builder(*this, refExpr); return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS); } else if (ObjCSubscriptRefExpr *refExpr = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) { ObjCSubscriptOpBuilder builder(*this, refExpr); return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS); } else if (MSPropertyRefExpr *refExpr = dyn_cast<MSPropertyRefExpr>(opaqueRef)) { MSPropertyOpBuilder builder(*this, refExpr); return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS); } else { llvm_unreachable("unknown pseudo-object kind!"); } } /// Given a pseudo-object reference, rebuild it without the opaque /// values. Basically, undo the behavior of rebuildAndCaptureObject. /// This should never operate in-place. static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) { Expr *opaqueRef = E->IgnoreParens(); if (ObjCPropertyRefExpr *refExpr = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) { // Class and super property references don't have opaque values in them. 
if (refExpr->isClassReceiver() || refExpr->isSuperReceiver()) return E; assert(refExpr->isObjectReceiver() && "Unknown receiver kind?"); OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBase()); return ObjCPropertyRefRebuilder(S, baseOVE->getSourceExpr()).rebuild(E); } else if (ObjCSubscriptRefExpr *refExpr = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) { OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBaseExpr()); OpaqueValueExpr *keyOVE = cast<OpaqueValueExpr>(refExpr->getKeyExpr()); return ObjCSubscriptRefRebuilder(S, baseOVE->getSourceExpr(), keyOVE->getSourceExpr()).rebuild(E); } else if (MSPropertyRefExpr *refExpr = dyn_cast<MSPropertyRefExpr>(opaqueRef)) { OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBaseExpr()); return MSPropertyRefRebuilder(S, baseOVE->getSourceExpr()).rebuild(E); } else { llvm_unreachable("unknown pseudo-object kind!"); } } /// Given a pseudo-object expression, recreate what it looks like /// syntactically without the attendant OpaqueValueExprs. /// /// This is a hack which should be removed when TreeTransform is /// capable of rebuilding a tree without stripping implicit /// operations. Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) { Expr *syntax = E->getSyntacticForm(); if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) { Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr()); return new (Context) UnaryOperator(op, uop->getOpcode(), uop->getType(), uop->getValueKind(), uop->getObjectKind(), uop->getOperatorLoc()); } else if (CompoundAssignOperator *cop = dyn_cast<CompoundAssignOperator>(syntax)) { Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS()); Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr(); return new (Context) CompoundAssignOperator(lhs, rhs, cop->getOpcode(), cop->getType(), cop->getValueKind(), cop->getObjectKind(), cop->getComputationLHSType(), cop->getComputationResultType(), cop->getOperatorLoc(), false); } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) { Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS()); Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr(); return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(), bop->getType(), bop->getValueKind(), bop->getObjectKind(), bop->getOperatorLoc(), false); } else { assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject)); return stripOpaqueValuesFromPseudoObjectRef(*this, syntax); } } #endif // HLSL Change
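// Editor's note: a minimal sketch of the source-level rewrite the
// MSPropertyOpBuilder routines above implement for __declspec(property)
// members. The type and member names (Widget, GetCount, PutCount, Count)
// are hypothetical, chosen only for illustration; the rewrite itself is
// what buildGet/buildSet produce via ActOnMemberAccessExpr + ActOnCallExpr.
//
//   struct Widget {
//     int GetCount() { return value; }
//     void PutCount(int v) { value = v; }
//     __declspec(property(get = GetCount, put = PutCount)) int Count;
//     int value = 0;
//   };
//
//   void demo(Widget &w) {
//     w.Count = 5;      // buildSet: rewritten to w.PutCount(5)
//     int n = w.Count;  // buildGet: rewritten to w.GetCount()
//     (void)n;
//   }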
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/VkConstantsTables.h
//===--- VkConstantsTables.h --- Implicit Vulkan Constants Tables --C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains information about implicitly-defined Vulkan constants.
// These constants will be added to the AST under the "vk" namespace.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_SEMA_VKCONSTANTSTABLES_H
#define LLVM_CLANG_LIB_SEMA_VKCONSTANTSTABLES_H

#include <cstdint> // needed for uint32_t below
#include <string>
#include <utility>
#include <vector>

std::vector<std::pair<std::string, uint32_t>> GetVkIntegerConstants() {
  return {
      {"CrossDeviceScope", 0u}, {"DeviceScope", 1u},
      {"WorkgroupScope", 2u},   {"SubgroupScope", 3u},
      {"InvocationScope", 4u},  {"QueueFamilyScope", 5u},
  };
}

#endif // LLVM_CLANG_LIB_SEMA_VKCONSTANTSTABLES_H
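// Editor's note: a minimal usage sketch, not part of the original header.
// GetVkIntegerConstants() is the real entry point defined above; the
// dumpVkConstants helper and the printf output format are hypothetical,
// shown only to illustrate how a caller might walk the name/value table
// when declaring the implicit "vk" scope constants.
//
//   #include <cstdio>
//
//   void dumpVkConstants() {
//     for (const auto &kv : GetVkIntegerConstants())
//       std::printf("vk::%s = %u\n", kv.first.c_str(), kv.second);
//   }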
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaDecl.cpp
//===--- SemaDecl.cpp - Semantic Analysis for Declarations ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for declarations. // //===----------------------------------------------------------------------===// #include "TypeLocBuilder.h" #include "dxc/DXIL/DxilSemantic.h" // HLSL Change #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/CharUnits.h" #include "clang/AST/CommentDiagnostic.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/HeaderSearch.h" // TODO: Sema shouldn't depend on Lex #include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering. #include "clang/Lex/ModuleLoader.h" // TODO: Sema shouldn't depend on Lex #include "clang/Lex/Preprocessor.h" // Included for isCodeCompletionEnabled() #include "clang/Parse/ParseDiagnostic.h" #include "clang/Sema/CXXFieldCollector.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Triple.h" #include <algorithm> #include <cstring> #include <functional> using namespace clang; using namespace sema; Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) { if (OwnedType) { Decl *Group[2] = { OwnedType, Ptr }; return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, 2)); } return DeclGroupPtrTy::make(DeclGroupRef(Ptr)); } namespace { class TypeNameValidatorCCC : public CorrectionCandidateCallback { public: TypeNameValidatorCCC(bool AllowInvalid, bool WantClass=false, bool AllowTemplates=false) : AllowInvalidDecl(AllowInvalid), WantClassName(WantClass), AllowClassTemplates(AllowTemplates) { WantExpressionKeywords = false; WantCXXNamedCasts = false; WantRemainingKeywords = false; } bool ValidateCandidate(const TypoCorrection &candidate) override { if (NamedDecl *ND = candidate.getCorrectionDecl()) { bool IsType = isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND); bool AllowedTemplate = AllowClassTemplates && isa<ClassTemplateDecl>(ND); return (IsType || AllowedTemplate) && (AllowInvalidDecl || !ND->isInvalidDecl()); } return !WantClassName && candidate.isKeyword(); } private: bool AllowInvalidDecl; bool WantClassName; bool AllowClassTemplates; }; } /// \brief Determine whether the token kind starts a simple-type-specifier. 
bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const { switch (Kind) { // FIXME: Take into account the current language when deciding whether a // token kind is a valid type specifier case tok::kw_short: case tok::kw_long: case tok::kw___int64: case tok::kw___int128: case tok::kw_signed: case tok::kw_unsigned: case tok::kw_void: case tok::kw_char: case tok::kw_int: case tok::kw_half: case tok::kw_float: case tok::kw_double: case tok::kw_wchar_t: case tok::kw_bool: case tok::kw___underlying_type: return true; case tok::annot_typename: case tok::kw_char16_t: case tok::kw_char32_t: case tok::kw_typeof: case tok::annot_decltype: case tok::kw_decltype: return getLangOpts().CPlusPlus; default: break; } return false; } namespace { enum class UnqualifiedTypeNameLookupResult { NotFound, FoundNonType, FoundType }; } // namespace /// \brief Tries to perform unqualified lookup of the type decls in bases for /// dependent class. /// \return \a NotFound if no any decls is found, \a FoundNotType if found not a /// type decl, \a FoundType if only type decls are found. static UnqualifiedTypeNameLookupResult lookupUnqualifiedTypeNameInBase(Sema &S, const IdentifierInfo &II, SourceLocation NameLoc, const CXXRecordDecl *RD) { if (!RD->hasDefinition()) return UnqualifiedTypeNameLookupResult::NotFound; // Look for type decls in base classes. UnqualifiedTypeNameLookupResult FoundTypeDecl = UnqualifiedTypeNameLookupResult::NotFound; for (const auto &Base : RD->bases()) { const CXXRecordDecl *BaseRD = nullptr; if (auto *BaseTT = Base.getType()->getAs<TagType>()) BaseRD = BaseTT->getAsCXXRecordDecl(); else if (auto *TST = Base.getType()->getAs<TemplateSpecializationType>()) { // Look for type decls in dependent base classes that have known primary // templates. if (!TST || !TST->isDependentType()) continue; auto *TD = TST->getTemplateName().getAsTemplateDecl(); if (!TD) continue; auto *BasePrimaryTemplate = dyn_cast_or_null<CXXRecordDecl>(TD->getTemplatedDecl()); if (!BasePrimaryTemplate) continue; BaseRD = BasePrimaryTemplate; } if (BaseRD) { for (NamedDecl *ND : BaseRD->lookup(&II)) { if (!isa<TypeDecl>(ND)) return UnqualifiedTypeNameLookupResult::FoundNonType; FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType; } if (FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound) { switch (lookupUnqualifiedTypeNameInBase(S, II, NameLoc, BaseRD)) { case UnqualifiedTypeNameLookupResult::FoundNonType: return UnqualifiedTypeNameLookupResult::FoundNonType; case UnqualifiedTypeNameLookupResult::FoundType: FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType; break; case UnqualifiedTypeNameLookupResult::NotFound: break; } } } } return FoundTypeDecl; } static ParsedType recoverFromTypeInKnownDependentBase(Sema &S, const IdentifierInfo &II, SourceLocation NameLoc) { // Lookup in the parent class template context, if any. const CXXRecordDecl *RD = nullptr; UnqualifiedTypeNameLookupResult FoundTypeDecl = UnqualifiedTypeNameLookupResult::NotFound; for (DeclContext *DC = S.CurContext; DC && FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound; DC = DC->getParent()) { // Look for type decls in dependent base classes that have known primary // templates. RD = dyn_cast<CXXRecordDecl>(DC); if (RD && RD->getDescribedClassTemplate()) FoundTypeDecl = lookupUnqualifiedTypeNameInBase(S, II, NameLoc, RD); } if (FoundTypeDecl != UnqualifiedTypeNameLookupResult::FoundType) return ParsedType(); // We found some types in dependent base classes. Recover as if the user // wrote 'typename MyClass::II' instead of 'II'. 
We'll fully resolve the // lookup during template instantiation. S.Diag(NameLoc, diag::ext_found_via_dependent_bases_lookup) << &II; ASTContext &Context = S.Context; auto *NNS = NestedNameSpecifier::Create(Context, nullptr, false, cast<Type>(Context.getRecordType(RD))); QualType T = Context.getDependentNameType(ETK_Typename, NNS, &II); CXXScopeSpec SS; SS.MakeTrivial(Context, NNS, SourceRange(NameLoc)); TypeLocBuilder Builder; DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T); DepTL.setNameLoc(NameLoc); DepTL.setElaboratedKeywordLoc(SourceLocation()); DepTL.setQualifierLoc(SS.getWithLocInContext(Context)); return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T)); } /// \brief If the identifier refers to a type name within this scope, /// return the declaration of that type. /// /// This routine performs ordinary name lookup of the identifier II /// within the given scope, with optional C++ scope specifier SS, to /// determine whether the name refers to a type. If so, returns an /// opaque pointer (actually a QualType) corresponding to that /// type. Otherwise, returns NULL. ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS, bool isClassName, bool HasTrailingDot, ParsedType ObjectTypePtr, bool IsCtorOrDtorName, bool WantNontrivialTypeSourceInfo, IdentifierInfo **CorrectedII) { // Determine where we will perform name lookup. DeclContext *LookupCtx = nullptr; if (ObjectTypePtr) { QualType ObjectType = ObjectTypePtr.get(); if (ObjectType->isRecordType()) LookupCtx = computeDeclContext(ObjectType); } else if (SS && SS->isNotEmpty()) { LookupCtx = computeDeclContext(*SS, false); if (!LookupCtx) { if (isDependentScopeSpecifier(*SS)) { // C++ [temp.res]p3: // A qualified-id that refers to a type and in which the // nested-name-specifier depends on a template-parameter (14.6.2) // shall be prefixed by the keyword typename to indicate that the // qualified-id denotes a type, forming an // elaborated-type-specifier (7.1.5.3). // // We therefore do not perform any name lookup if the result would // refer to a member of an unknown specialization. if (!isClassName && !IsCtorOrDtorName) return ParsedType(); // We know from the grammar that this name refers to a type, // so build a dependent node to describe the type. if (WantNontrivialTypeSourceInfo) return ActOnTypenameType(S, SourceLocation(), *SS, II, NameLoc).get(); NestedNameSpecifierLoc QualifierLoc = SS->getWithLocInContext(Context); QualType T = CheckTypenameType(ETK_None, SourceLocation(), QualifierLoc, II, NameLoc); return ParsedType::make(T); } return ParsedType(); } if (!LookupCtx->isDependentContext() && RequireCompleteDeclContext(*SS, LookupCtx)) return ParsedType(); } // FIXME: LookupNestedNameSpecifierName isn't the right kind of // lookup for class-names. LookupNameKind Kind = isClassName ? LookupNestedNameSpecifierName : LookupOrdinaryName; LookupResult Result(*this, &II, NameLoc, Kind); if (LookupCtx) { // Perform "qualified" name lookup into the declaration context we // computed, which is either the type of the base of a member access // expression or the declaration context associated with a prior // nested-name-specifier. LookupQualifiedName(Result, LookupCtx); if (ObjectTypePtr && Result.empty()) { // C++ [basic.lookup.classref]p3: // If the unqualified-id is ~type-name, the type-name is looked up // in the context of the entire postfix-expression. 
If the type T of // the object expression is of a class type C, the type-name is also // looked up in the scope of class C. At least one of the lookups shall // find a name that refers to (possibly cv-qualified) T. LookupName(Result, S); } } else { // Perform unqualified name lookup. LookupName(Result, S); // For unqualified lookup in a class template in MSVC mode, look into // dependent base classes where the primary class template is known. if (Result.empty() && getLangOpts().MSVCCompat && (!SS || SS->isEmpty())) { if (ParsedType TypeInBase = recoverFromTypeInKnownDependentBase(*this, II, NameLoc)) return TypeInBase; } } NamedDecl *IIDecl = nullptr; switch (Result.getResultKind()) { case LookupResult::NotFound: case LookupResult::NotFoundInCurrentInstantiation: if (CorrectedII) { TypoCorrection Correction = CorrectTypo( Result.getLookupNameInfo(), Kind, S, SS, llvm::make_unique<TypeNameValidatorCCC>(true, isClassName), CTK_ErrorRecovery); IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo(); TemplateTy Template; bool MemberOfUnknownSpecialization; UnqualifiedId TemplateName; TemplateName.setIdentifier(NewII, NameLoc); NestedNameSpecifier *NNS = Correction.getCorrectionSpecifier(); CXXScopeSpec NewSS, *NewSSPtr = SS; if (SS && NNS) { NewSS.MakeTrivial(Context, NNS, SourceRange(NameLoc)); NewSSPtr = &NewSS; } if (Correction && (NNS || NewII != &II) && // Ignore a correction to a template type as the to-be-corrected // identifier is not a template (typo correction for template names // is handled elsewhere). !(getLangOpts().CPlusPlus && NewSSPtr && isTemplateName(S, *NewSSPtr, false, TemplateName, ParsedType(), false, Template, MemberOfUnknownSpecialization))) { ParsedType Ty = getTypeName(*NewII, NameLoc, S, NewSSPtr, isClassName, HasTrailingDot, ObjectTypePtr, IsCtorOrDtorName, WantNontrivialTypeSourceInfo); if (Ty) { diagnoseTypo(Correction, PDiag(diag::err_unknown_type_or_class_name_suggest) << Result.getLookupName() << isClassName); if (SS && NNS) SS->MakeTrivial(Context, NNS, SourceRange(NameLoc)); *CorrectedII = NewII; return Ty; } } } // If typo correction failed or was not performed, fall through LLVM_FALLTHROUGH; // HLSL Change case LookupResult::FoundOverloaded: case LookupResult::FoundUnresolvedValue: Result.suppressDiagnostics(); return ParsedType(); case LookupResult::Ambiguous: // Recover from type-hiding ambiguities by hiding the type. We'll // do the lookup again when looking for an object, and we can // diagnose the error then. If we don't do this, then the error // about hiding the type will be immediately followed by an error // that only makes sense if the identifier was treated like a type. if (Result.getAmbiguityKind() == LookupResult::AmbiguousTagHiding) { Result.suppressDiagnostics(); return ParsedType(); } // Look to see if we have a type anywhere in the list of results. for (LookupResult::iterator Res = Result.begin(), ResEnd = Result.end(); Res != ResEnd; ++Res) { if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res)) { if (!IIDecl || (*Res)->getLocation().getRawEncoding() < IIDecl->getLocation().getRawEncoding()) IIDecl = *Res; } } if (!IIDecl) { // None of the entities we found is a type, so there is no way // to even assume that the result is a type. In this case, don't // complain about the ambiguity. The parser will either try to // perform this lookup again (e.g., as an object name), which // will produce the ambiguity, or will complain that it expected // a type name. 
Result.suppressDiagnostics(); return ParsedType(); } // We found a type within the ambiguous lookup; diagnose the // ambiguity and then return that type. This might be the right // answer, or it might not be, but it suppresses any attempt to // perform the name lookup again. break; case LookupResult::Found: IIDecl = Result.getFoundDecl(); break; } assert(IIDecl && "Didn't find decl"); QualType T; if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) { DiagnoseUseOfDecl(IIDecl, NameLoc); T = Context.getTypeDeclType(TD); MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false); // NOTE: avoid constructing an ElaboratedType(Loc) if this is a // constructor or destructor name (in such a case, the scope specifier // will be attached to the enclosing Expr or Decl node). if (SS && SS->isNotEmpty() && !IsCtorOrDtorName) { if (WantNontrivialTypeSourceInfo) { // Construct a type with type-source information. TypeLocBuilder Builder; Builder.pushTypeSpec(T).setNameLoc(NameLoc); T = getElaboratedType(ETK_None, *SS, T); ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T); ElabTL.setElaboratedKeywordLoc(SourceLocation()); ElabTL.setQualifierLoc(SS->getWithLocInContext(Context)); return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T)); } else { T = getElaboratedType(ETK_None, *SS, T); } } } else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) { (void)DiagnoseUseOfDecl(IDecl, NameLoc); if (!HasTrailingDot) T = Context.getObjCInterfaceType(IDecl); } else if (getLangOpts().HLSL) { // HLSL - omit empty template argument lists if (TemplateDecl *TD = dyn_cast<TemplateDecl>(IIDecl)) { QualType DefaultTy = getHLSLDefaultSpecialization(TD); if (!DefaultTy.isNull()) T = DefaultTy; } // HLSL Change end } if (T.isNull()) { // If it's not plausibly a type, suppress diagnostics. Result.suppressDiagnostics(); return ParsedType(); } return ParsedType::make(T); } // Builds a fake NNS for the given decl context. static NestedNameSpecifier * synthesizeCurrentNestedNameSpecifier(ASTContext &Context, DeclContext *DC) { for (;; DC = DC->getLookupParent()) { DC = DC->getPrimaryContext(); auto *ND = dyn_cast<NamespaceDecl>(DC); if (ND && !ND->isInline() && !ND->isAnonymousNamespace()) return NestedNameSpecifier::Create(Context, nullptr, ND); else if (auto *RD = dyn_cast<CXXRecordDecl>(DC)) return NestedNameSpecifier::Create(Context, nullptr, RD->isTemplateDecl(), RD->getTypeForDecl()); else if (isa<TranslationUnitDecl>(DC)) return NestedNameSpecifier::GlobalSpecifier(Context); } llvm_unreachable("something isn't in TU scope?"); } ParsedType Sema::ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc) { // Accepting an undeclared identifier as a default argument for a template // type parameter is a Microsoft extension. Diag(NameLoc, diag::ext_ms_delayed_template_argument) << &II; // Build a fake DependentNameType that will perform lookup into CurContext at // instantiation time. The name specifier isn't dependent, so template // instantiation won't transform it. It will retry the lookup, however. NestedNameSpecifier *NNS = synthesizeCurrentNestedNameSpecifier(Context, CurContext); QualType T = Context.getDependentNameType(ETK_None, NNS, &II); // Build type location information. We synthesized the qualifier, so we have // to build a fake NestedNameSpecifierLoc. 
NestedNameSpecifierLocBuilder NNSLocBuilder; NNSLocBuilder.MakeTrivial(Context, NNS, SourceRange(NameLoc)); NestedNameSpecifierLoc QualifierLoc = NNSLocBuilder.getWithLocInContext(Context); TypeLocBuilder Builder; DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T); DepTL.setNameLoc(NameLoc); DepTL.setElaboratedKeywordLoc(SourceLocation()); DepTL.setQualifierLoc(QualifierLoc); return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T)); } /// isTagName() - This method is called *for error recovery purposes only* /// to determine if the specified name is a valid tag name ("struct foo"). If /// so, this returns the TST for the tag corresponding to it (TST_enum, /// TST_union, TST_struct, TST_interface, TST_class). This is used to diagnose /// cases in C where the user forgot to specify the tag. DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) { // Do a tag name lookup in this scope. LookupResult R(*this, &II, SourceLocation(), LookupTagName); LookupName(R, S, false); R.suppressDiagnostics(); if (R.getResultKind() == LookupResult::Found) if (const TagDecl *TD = R.getAsSingle<TagDecl>()) { switch (TD->getTagKind()) { case TTK_Struct: return DeclSpec::TST_struct; case TTK_Interface: return DeclSpec::TST_interface; case TTK_Union: return DeclSpec::TST_union; case TTK_Class: return DeclSpec::TST_class; case TTK_Enum: return DeclSpec::TST_enum; } } return DeclSpec::TST_unspecified; } /// isMicrosoftMissingTypename - In Microsoft mode, within class scope, /// if a CXXScopeSpec's type is equal to the type of one of the base classes /// then downgrade the missing typename error to a warning. /// This is needed for MSVC compatibility; Example: /// @code /// template<class T> class A { /// public: /// typedef int TYPE; /// }; /// template<class T> class B : public A<T> { /// public: /// A<T>::TYPE a; // no typename required because A<T> is a base class. /// }; /// @endcode bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) { if (CurContext->isRecord()) { if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super) return true; const Type *Ty = SS->getScopeRep()->getAsType(); CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext); for (const auto &Base : RD->bases()) if (Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType())) return true; return S->isFunctionPrototypeScope(); } return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope(); } void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates) { // We don't have anything to suggest (yet). SuggestedType = ParsedType(); // There may have been a typo in the name of the type. Look up typo // results, in case we have something that we can suggest. if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(II, IILoc), LookupOrdinaryName, S, SS, llvm::make_unique<TypeNameValidatorCCC>( false, false, AllowClassTemplates), CTK_ErrorRecovery)) { if (Corrected.isKeyword()) { // We corrected to a keyword. diagnoseTypo(Corrected, PDiag(diag::err_unknown_typename_suggest) << II); II = Corrected.getCorrectionAsIdentifierInfo(); } else { // We found a similarly-named type or interface; suggest that. 
if (!SS || !SS->isSet()) { diagnoseTypo(Corrected, PDiag(diag::err_unknown_typename_suggest) << II); } else if (DeclContext *DC = computeDeclContext(*SS, false)) { std::string CorrectedStr(Corrected.getAsString(getLangOpts())); bool DroppedSpecifier = Corrected.WillReplaceSpecifier() && II->getName().equals(CorrectedStr); diagnoseTypo(Corrected, PDiag(diag::err_unknown_nested_typename_suggest) << II << DC << DroppedSpecifier << SS->getRange()); } else { llvm_unreachable("could not have corrected a typo here"); } CXXScopeSpec tmpSS; if (Corrected.getCorrectionSpecifier()) tmpSS.MakeTrivial(Context, Corrected.getCorrectionSpecifier(), SourceRange(IILoc)); SuggestedType = getTypeName(*Corrected.getCorrectionAsIdentifierInfo(), IILoc, S, tmpSS.isSet() ? &tmpSS : SS, false, false, ParsedType(), /*IsCtorOrDtorName=*/false, /*NonTrivialTypeSourceInfo=*/true); } return; } if (getLangOpts().CPlusPlus) { // See if II is a class template that the user forgot to pass arguments to. UnqualifiedId Name; Name.setIdentifier(II, IILoc); CXXScopeSpec EmptySS; TemplateTy TemplateResult; bool MemberOfUnknownSpecialization; if (isTemplateName(S, SS ? *SS : EmptySS, /*hasTemplateKeyword=*/false, Name, ParsedType(), true, TemplateResult, MemberOfUnknownSpecialization) == TNK_Type_template) { TemplateName TplName = TemplateResult.get(); Diag(IILoc, diag::err_template_missing_args) << TplName; if (TemplateDecl *TplDecl = TplName.getAsTemplateDecl()) { if (TplDecl->getLocation().isValid()) { // HLSL Change - ellide location notes for built-ins Diag(TplDecl->getLocation(), diag::note_template_decl_here) << TplDecl->getTemplateParameters()->getSourceRange(); } } return; } } // FIXME: Should we move the logic that tries to recover from a missing tag // (struct, union, enum) from Parser::ParseImplicitInt here, instead? 
if (!SS || (!SS->isSet() && !SS->isInvalid())) Diag(IILoc, diag::err_unknown_typename) << II; else if (DeclContext *DC = computeDeclContext(*SS, false)) Diag(IILoc, diag::err_typename_nested_not_found) << II << DC << SS->getRange(); else if (isDependentScopeSpecifier(*SS)) { unsigned DiagID = diag::err_typename_missing; if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S)) DiagID = diag::ext_typename_missing; Diag(SS->getRange().getBegin(), DiagID) << SS->getScopeRep() << II->getName() << SourceRange(SS->getRange().getBegin(), IILoc) << FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename "); SuggestedType = ActOnTypenameType(S, SourceLocation(), *SS, *II, IILoc).get(); } else { assert(SS && SS->isInvalid() && "Invalid scope specifier has already been diagnosed"); } } /// \brief Determine whether the given result set contains either a type name /// or static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) { bool CheckTemplate = R.getSema().getLangOpts().CPlusPlus && NextToken.is(tok::less); for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) { if (isa<TypeDecl>(*I) || isa<ObjCInterfaceDecl>(*I)) return true; if (CheckTemplate && isa<TemplateDecl>(*I)) return true; } return false; } static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result, Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc) { LookupResult R(SemaRef, Name, NameLoc, Sema::LookupTagName); SemaRef.LookupParsedName(R, S, &SS); if (TagDecl *Tag = R.getAsSingle<TagDecl>()) { StringRef FixItTagName; switch (Tag->getTagKind()) { case TTK_Class: FixItTagName = "class "; break; case TTK_Enum: FixItTagName = "enum "; break; case TTK_Struct: FixItTagName = "struct "; break; case TTK_Interface: FixItTagName = "__interface "; break; case TTK_Union: FixItTagName = "union "; break; } StringRef TagName = FixItTagName.drop_back(); SemaRef.Diag(NameLoc, diag::err_use_of_tag_name_without_tag) << Name << TagName << SemaRef.getLangOpts().CPlusPlus << FixItHint::CreateInsertion(NameLoc, FixItTagName); for (LookupResult::iterator I = Result.begin(), IEnd = Result.end(); I != IEnd; ++I) SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type) << Name << TagName; // Replace lookup results with just the tag decl. Result.clear(Sema::LookupTagName); SemaRef.LookupParsedName(Result, S, &SS); return true; } return false; } /// Build a ParsedType for a simple-type-specifier with a nested-name-specifier. 
static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS, QualType T, SourceLocation NameLoc) { ASTContext &Context = S.Context; TypeLocBuilder Builder; Builder.pushTypeSpec(T).setNameLoc(NameLoc); T = S.getElaboratedType(ETK_None, SS, T); ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T); ElabTL.setElaboratedKeywordLoc(SourceLocation()); ElabTL.setQualifierLoc(SS.getWithLocInContext(Context)); return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T)); } Sema::NameClassification Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC) { DeclarationNameInfo NameInfo(Name, NameLoc); ObjCMethodDecl *CurMethod = getCurMethodDecl(); if (NextToken.is(tok::coloncolon)) { BuildCXXNestedNameSpecifier(S, *Name, NameLoc, NextToken.getLocation(), QualType(), false, SS, nullptr, false); } LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName); LookupParsedName(Result, S, &SS, !CurMethod); // For unqualified lookup in a class template in MSVC mode, look into // dependent base classes where the primary class template is known. if (Result.empty() && SS.isEmpty() && getLangOpts().MSVCCompat) { if (ParsedType TypeInBase = recoverFromTypeInKnownDependentBase(*this, *Name, NameLoc)) return TypeInBase; } // Perform lookup for Objective-C instance variables (including automatically // synthesized instance variables), if we're in an Objective-C method. // FIXME: This lookup really, really needs to be folded in to the normal // unqualified lookup mechanism. if (!SS.isSet() && CurMethod && !isResultTypeOrTemplate(Result, NextToken)) { ExprResult E = LookupInObjCMethod(Result, S, Name, true); if (E.get() || E.isInvalid()) return E; } bool SecondTry = false; bool IsFilteredTemplateName = false; Corrected: switch (Result.getResultKind()) { case LookupResult::NotFound: // If an unqualified-id is followed by a '(', then we have a function // call. if (!SS.isSet() && NextToken.is(tok::l_paren)) { // In C++, this is an ADL-only call. // FIXME: Reference? if (getLangOpts().CPlusPlus) return BuildDeclarationNameExpr(SS, Result, /*ADL=*/true); // C90 6.3.2.2: // If the expression that precedes the parenthesized argument list in a // function call consists solely of an identifier, and if no // declaration is visible for this identifier, the identifier is // implicitly declared exactly as if, in the innermost block containing // the function call, the declaration // // extern int identifier (); // // appeared. // // We also allow this in C99 as an extension. if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S)) { Result.addDecl(D); Result.resolveKind(); return BuildDeclarationNameExpr(SS, Result, /*ADL=*/false); } } // In C, we first see whether there is a tag type by the same name, in // which case it's likely that the user just forget to write "enum", // "struct", or "union". if (!getLangOpts().CPlusPlus && !SecondTry && isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) { break; } // Perform typo correction to determine if there is another name that is // close to this name. 
if (!SecondTry && CCC) { SecondTry = true; if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(), Result.getLookupKind(), S, &SS, std::move(CCC), CTK_ErrorRecovery)) { unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest; unsigned QualifiedDiag = diag::err_no_member_suggest; NamedDecl *FirstDecl = Corrected.getCorrectionDecl(); NamedDecl *UnderlyingFirstDecl = FirstDecl? FirstDecl->getUnderlyingDecl() : nullptr; if (getLangOpts().CPlusPlus && NextToken.is(tok::less) && UnderlyingFirstDecl && isa<TemplateDecl>(UnderlyingFirstDecl)) { UnqualifiedDiag = diag::err_no_template_suggest; QualifiedDiag = diag::err_no_member_template_suggest; } else if (UnderlyingFirstDecl && (isa<TypeDecl>(UnderlyingFirstDecl) || isa<ObjCInterfaceDecl>(UnderlyingFirstDecl) || isa<ObjCCompatibleAliasDecl>(UnderlyingFirstDecl))) { UnqualifiedDiag = diag::err_unknown_typename_suggest; QualifiedDiag = diag::err_unknown_nested_typename_suggest; } if (SS.isEmpty()) { diagnoseTypo(Corrected, PDiag(UnqualifiedDiag) << Name); } else {// FIXME: is this even reachable? Test it. std::string CorrectedStr(Corrected.getAsString(getLangOpts())); bool DroppedSpecifier = Corrected.WillReplaceSpecifier() && Name->getName().equals(CorrectedStr); diagnoseTypo(Corrected, PDiag(QualifiedDiag) << Name << computeDeclContext(SS, false) << DroppedSpecifier << SS.getRange()); } // Update the name, so that the caller has the new name. Name = Corrected.getCorrectionAsIdentifierInfo(); // Typo correction corrected to a keyword. if (Corrected.isKeyword()) return Name; // Also update the LookupResult... // FIXME: This should probably go away at some point Result.clear(); Result.setLookupName(Corrected.getCorrection()); if (FirstDecl) Result.addDecl(FirstDecl); // If we found an Objective-C instance variable, let // LookupInObjCMethod build the appropriate expression to // reference the ivar. // FIXME: This is a gross hack. if (ObjCIvarDecl *Ivar = Result.getAsSingle<ObjCIvarDecl>()) { Result.clear(); ExprResult E(LookupInObjCMethod(Result, S, Ivar->getIdentifier())); return E; } goto Corrected; } } // We failed to correct; just fall through and let the parser deal with it. Result.suppressDiagnostics(); return NameClassification::Unknown(); case LookupResult::NotFoundInCurrentInstantiation: { // We performed name lookup into the current instantiation, and there were // dependent bases, so we treat this result the same way as any other // dependent nested-name-specifier. // C++ [temp.res]p2: // A name used in a template declaration or definition and that is // dependent on a template-parameter is assumed not to name a type // unless the applicable name lookup finds a type name or the name is // qualified by the keyword typename. // // FIXME: If the next token is '<', we might want to ask the parser to // perform some heroics to see if we actually have a // template-argument-list, which would indicate a missing 'template' // keyword here. return ActOnDependentIdExpression(SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo, IsAddressOfOperand, /*TemplateArgs=*/nullptr); } case LookupResult::Found: case LookupResult::FoundOverloaded: case LookupResult::FoundUnresolvedValue: break; case LookupResult::Ambiguous: if (getLangOpts().CPlusPlus && NextToken.is(tok::less) && hasAnyAcceptableTemplateNames(Result)) { // C++ [temp.local]p3: // A lookup that finds an injected-class-name (10.2) can result in an // ambiguity in certain cases (for example, if it is found in more than // one base class). 
If all of the injected-class-names that are found // refer to specializations of the same class template, and if the name // is followed by a template-argument-list, the reference refers to the // class template itself and not a specialization thereof, and is not // ambiguous. // // This filtering can make an ambiguous result into an unambiguous one, // so try again after filtering out template names. FilterAcceptableTemplateNames(Result); if (!Result.isAmbiguous()) { IsFilteredTemplateName = true; break; } } // Diagnose the ambiguity and return an error. return NameClassification::Error(); } if (getLangOpts().CPlusPlus && NextToken.is(tok::less) && (IsFilteredTemplateName || hasAnyAcceptableTemplateNames(Result))) { // C++ [temp.names]p3: // After name lookup (3.4) finds that a name is a template-name or that // an operator-function-id or a literal- operator-id refers to a set of // overloaded functions any member of which is a function template if // this is followed by a <, the < is always taken as the delimiter of a // template-argument-list and never as the less-than operator. if (!IsFilteredTemplateName) FilterAcceptableTemplateNames(Result); if (!Result.empty()) { bool IsFunctionTemplate; bool IsVarTemplate; TemplateName Template; if (Result.end() - Result.begin() > 1) { IsFunctionTemplate = true; Template = Context.getOverloadedTemplateName(Result.begin(), Result.end()); } else { TemplateDecl *TD = cast<TemplateDecl>((*Result.begin())->getUnderlyingDecl()); IsFunctionTemplate = isa<FunctionTemplateDecl>(TD); IsVarTemplate = isa<VarTemplateDecl>(TD); if (SS.isSet() && !SS.isInvalid()) Template = Context.getQualifiedTemplateName(SS.getScopeRep(), /*TemplateKeyword=*/false, TD); else Template = TemplateName(TD); } if (IsFunctionTemplate) { // Function templates always go through overload resolution, at which // point we'll perform the various checks (e.g., accessibility) we need // to based on which function we selected. Result.suppressDiagnostics(); return NameClassification::FunctionTemplate(Template); } return IsVarTemplate ? NameClassification::VarTemplate(Template) : NameClassification::TypeTemplate(Template); } } NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl(); if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) { DiagnoseUseOfDecl(Type, NameLoc); MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false); QualType T = Context.getTypeDeclType(Type); if (SS.isNotEmpty()) return buildNestedType(*this, SS, T, NameLoc); return ParsedType::make(T); } ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl); if (!Class) { // FIXME: It's unfortunate that we don't have a Type node for handling this. if (ObjCCompatibleAliasDecl *Alias = dyn_cast<ObjCCompatibleAliasDecl>(FirstDecl)) Class = Alias->getClassInterface(); } if (Class) { DiagnoseUseOfDecl(Class, NameLoc); if (NextToken.is(tok::period)) { // Interface. <something> is parsed as a property reference expression. // Just return "unknown" as a fall-through for now. Result.suppressDiagnostics(); return NameClassification::Unknown(); } QualType T = Context.getObjCInterfaceType(Class); return ParsedType::make(T); } // We can have a type template here if we're classifying a template argument. if (isa<TemplateDecl>(FirstDecl) && !isa<FunctionTemplateDecl>(FirstDecl)) return NameClassification::TypeTemplate( TemplateName(cast<TemplateDecl>(FirstDecl))); // Check for a tag type hidden by a non-type decl in a few cases where it // seems likely a type is wanted instead of the non-type that was found. 
bool NextIsOp = NextToken.isOneOf(tok::amp, tok::star); if ((NextToken.is(tok::identifier) || (NextIsOp && FirstDecl->getUnderlyingDecl()->isFunctionOrFunctionTemplate())) && isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) { TypeDecl *Type = Result.getAsSingle<TypeDecl>(); DiagnoseUseOfDecl(Type, NameLoc); QualType T = Context.getTypeDeclType(Type); if (SS.isNotEmpty()) return buildNestedType(*this, SS, T, NameLoc); return ParsedType::make(T); } if (FirstDecl->isCXXClassMember()) return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result, nullptr); bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren)); return BuildDeclarationNameExpr(SS, Result, ADL); } // Determines the context to return to after temporarily entering a // context. This depends in an unnecessarily complicated way on the // exact ordering of callbacks from the parser. DeclContext *Sema::getContainingDC(DeclContext *DC) { // Functions defined inline within classes aren't parsed until we've // finished parsing the top-level class, so the top-level class is // the context we'll need to return to. // A Lambda call operator whose parent is a class must not be treated // as an inline member function. A Lambda can be used legally // either as an in-class member initializer or a default argument. These // are parsed once the class has been marked complete and so the containing // context would be the nested class (when the lambda is defined in one); // If the class is not complete, then the lambda is being used in an // ill-formed fashion (such as to specify the width of a bit-field, or // in an array-bound) - in which case we still want to return the // lexically containing DC (which could be a nested class). if (isa<FunctionDecl>(DC) && !isLambdaCallOperator(DC)) { DC = DC->getLexicalParent(); // A function not defined within a class will always return to its // lexical context. if (!isa<CXXRecordDecl>(DC)) return DC; // A C++ inline method/friend is parsed *after* the topmost class // it was declared in is fully parsed ("complete"); the topmost // class is the context we need to return to. while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent())) DC = RD; // Return the declaration context of the topmost class the inline method is // declared in. return DC; } return DC->getLexicalParent(); } void Sema::PushDeclContext(Scope *S, DeclContext *DC) { assert(getContainingDC(DC) == CurContext && "The next DeclContext should be lexically contained in the current one."); CurContext = DC; S->setEntity(DC); } void Sema::PopDeclContext() { assert(CurContext && "DeclContext imbalance!"); CurContext = getContainingDC(CurContext); assert(CurContext && "Popped translation unit!"); } Sema::SkippedDefinitionContext Sema::ActOnTagStartSkippedDefinition(Scope *S, Decl *D) { // Unlike PushDeclContext, the context to which we return is not necessarily // the containing DC of TD, because the new context will be some pre-existing // TagDecl definition instead of a fresh one. auto Result = static_cast<SkippedDefinitionContext>(CurContext); CurContext = cast<TagDecl>(D)->getDefinition(); assert(CurContext && "skipping definition of undefined tag"); S->setEntity(CurContext); return Result; } void Sema::ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context) { CurContext = static_cast<decltype(CurContext)>(Context); } /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. 
/// void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) { // C++0x [basic.lookup.unqual]p13: // A name used in the definition of a static data member of class // X (after the qualified-id of the static member) is looked up as // if the name was used in a member function of X. // C++0x [basic.lookup.unqual]p14: // If a variable member of a namespace is defined outside of the // scope of its namespace then any name used in the definition of // the variable member (after the declarator-id) is looked up as // if the definition of the variable member occurred in its // namespace. // Both of these imply that we should push a scope whose context // is the semantic context of the declaration. We can't use // PushDeclContext here because that context is not necessarily // lexically contained in the current context. Fortunately, // the containing scope should have the appropriate information. assert(!S->getEntity() && "scope already has entity"); #ifndef NDEBUG Scope *Ancestor = S->getParent(); while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent(); assert(Ancestor->getEntity() == CurContext && "ancestor context mismatch"); #endif CurContext = DC; S->setEntity(DC); } void Sema::ExitDeclaratorContext(Scope *S) { assert(S->getEntity() == CurContext && "Context imbalance!"); // Switch back to the lexical context. The safety of this is // enforced by an assert in EnterDeclaratorContext. Scope *Ancestor = S->getParent(); while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent(); CurContext = Ancestor->getEntity(); // We don't need to do anything with the scope, which is going to // disappear. } void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) { // We assume that the caller has already called // ActOnReenterTemplateScope so getTemplatedDecl() works. FunctionDecl *FD = D->getAsFunction(); if (!FD) return; // Same implementation as PushDeclContext, but enters the context // from the lexical parent, rather than the top-level class. assert(CurContext == FD->getLexicalParent() && "The next DeclContext should be lexically contained in the current one."); CurContext = FD; S->setEntity(CurContext); for (unsigned P = 0, NumParams = FD->getNumParams(); P < NumParams; ++P) { ParmVarDecl *Param = FD->getParamDecl(P); // If the parameter has an identifier, then add it to the scope if (Param->getIdentifier()) { S->AddDecl(Param); IdResolver.AddDecl(Param); } } } void Sema::ActOnExitFunctionContext() { // Same implementation as PopDeclContext, but returns to the lexical parent, // rather than the top-level class. assert(CurContext && "DeclContext imbalance!"); CurContext = CurContext->getLexicalParent(); assert(CurContext && "Popped translation unit!"); } /// \brief Determine whether we allow overloading of the function /// PrevDecl with another declaration. /// /// This routine determines whether overloading is possible, not /// whether some new function is actually an overload. It will return /// true in C++ (where we can always provide overloads) or, as an /// extension, in C when the previous function is already an /// overloaded function declaration or has the "overloadable" /// attribute. static bool AllowOverloadingOfFunction(LookupResult &Previous, ASTContext &Context) { if (Context.getLangOpts().CPlusPlus) return true; if (Previous.getResultKind() == LookupResult::FoundOverloaded) return true; return (Previous.getResultKind() == LookupResult::Found && Previous.getFoundDecl()->hasAttr<OverloadableAttr>()); } /// Add this decl to the scope shadowed decl chains. 
void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) { // Move up the scope chain until we find the nearest enclosing // non-transparent context. The declaration will be introduced into this // scope. while (S->getEntity() && S->getEntity()->isTransparentContext()) S = S->getParent(); // Add scoped declarations into their context, so that they can be // found later. Declarations without a context won't be inserted // into any context. if (AddToContext) CurContext->addDecl(D); // Out-of-line definitions shouldn't be pushed into scope in C++, unless they // are function-local declarations. if (getLangOpts().CPlusPlus && D->isOutOfLine() && !D->getDeclContext()->getRedeclContext()->Equals( D->getLexicalDeclContext()->getRedeclContext()) && !D->getLexicalDeclContext()->isFunctionOrMethod()) return; // Template instantiations should also not be pushed into scope. if (isa<FunctionDecl>(D) && cast<FunctionDecl>(D)->isFunctionTemplateSpecialization()) return; // If this replaces anything in the current scope, IdentifierResolver::iterator I = IdResolver.begin(D->getDeclName()), IEnd = IdResolver.end(); for (; I != IEnd; ++I) { if (S->isDeclScope(*I) && D->declarationReplaces(*I)) { S->RemoveDecl(*I); IdResolver.RemoveDecl(*I); // Should only need to replace one decl. break; } } S->AddDecl(D); if (isa<LabelDecl>(D) && !cast<LabelDecl>(D)->isGnuLocal()) { // Implicitly-generated labels may end up getting generated in an order that // isn't strictly lexical, which breaks name lookup. Be careful to insert // the label at the appropriate place in the identifier chain. for (I = IdResolver.begin(D->getDeclName()); I != IEnd; ++I) { DeclContext *IDC = (*I)->getLexicalDeclContext()->getRedeclContext(); if (IDC == CurContext) { if (!S->isDeclScope(*I)) continue; } else if (IDC->Encloses(CurContext)) break; } IdResolver.InsertDeclAfter(I, D); } else { IdResolver.AddDecl(D); } } void Sema::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) { if (IdResolver.tryAddTopLevelDecl(D, Name) && TUScope) TUScope->AddDecl(D); } bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S, bool AllowInlineNamespace) { return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace); } Scope *Sema::getScopeForDeclContext(Scope *S, DeclContext *DC) { DeclContext *TargetDC = DC->getPrimaryContext(); do { if (DeclContext *ScopeDC = S->getEntity()) if (ScopeDC->getPrimaryContext() == TargetDC) return S; } while ((S = S->getParent())); return nullptr; } static bool isOutOfScopePreviousDeclaration(NamedDecl *, DeclContext*, ASTContext&); /// Filters out lookup results that don't fall within the given scope /// as determined by isDeclInScope. void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace) { LookupResult::Filter F = R.makeFilter(); while (F.hasNext()) { NamedDecl *D = F.next(); if (isDeclInScope(D, Ctx, S, AllowInlineNamespace)) continue; if (ConsiderLinkage && isOutOfScopePreviousDeclaration(D, Ctx, Context)) continue; F.erase(); } F.done(); } static bool isUsingDecl(NamedDecl *D) { return isa<UsingShadowDecl>(D) || isa<UnresolvedUsingTypenameDecl>(D) || isa<UnresolvedUsingValueDecl>(D); } /// Removes using shadow declarations from the lookup results. 
static void RemoveUsingDecls(LookupResult &R) { LookupResult::Filter F = R.makeFilter(); while (F.hasNext()) if (isUsingDecl(F.next())) F.erase(); F.done(); } /// \brief Check for this common pattern: /// @code /// class S { /// S(const S&); // DO NOT IMPLEMENT /// void operator=(const S&); // DO NOT IMPLEMENT /// }; /// @endcode static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) { // FIXME: Should check for private access too but access is set after we get // the decl here. if (D->doesThisDeclarationHaveABody()) return false; if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D)) return CD->isCopyConstructor(); if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) return Method->isCopyAssignmentOperator(); return false; } // We need this to handle // // typedef struct { // void *foo() { return 0; } // } A; // // When we see foo we don't know if after the typedef we will get 'A' or '*A' // for example. If 'A', foo will have external linkage. If we have '*A', // foo will have no linkage. Since we can't know until we get to the end // of the typedef, this function finds out if D might have non-external linkage. // Callers should verify at the end of the TU if it D has external linkage or // not. bool Sema::mightHaveNonExternalLinkage(const DeclaratorDecl *D) { const DeclContext *DC = D->getDeclContext(); while (!DC->isTranslationUnit()) { if (const RecordDecl *RD = dyn_cast<RecordDecl>(DC)){ if (!RD->hasNameForLinkage()) return true; } DC = DC->getParent(); } return !D->isExternallyVisible(); } // FIXME: This needs to be refactored; some other isInMainFile users want // these semantics. static bool isMainFileLoc(const Sema &S, SourceLocation Loc) { if (S.TUKind != TU_Complete) return false; return S.SourceMgr.isInMainFile(Loc); } bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const { assert(D); if (D->isInvalidDecl() || D->isUsed() || D->hasAttr<UnusedAttr>()) return false; // Ignore all entities declared within templates, and out-of-line definitions // of members of class templates. if (D->getDeclContext()->isDependentContext() || D->getLexicalDeclContext()->isDependentContext()) return false; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) return false; if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { if (MD->isVirtual() || IsDisallowedCopyOrAssign(MD)) return false; } else { // 'static inline' functions are defined in headers; don't warn. if (FD->isInlined() && !isMainFileLoc(*this, FD->getLocation())) return false; } if (FD->doesThisDeclarationHaveABody() && Context.DeclMustBeEmitted(FD)) return false; } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { // Constants and utility variables are defined in headers with internal // linkage; don't warn. (Unlike functions, there isn't a convenient marker // like "inline".) if (!isMainFileLoc(*this, VD->getLocation())) return false; if (Context.DeclMustBeEmitted(VD)) return false; if (VD->isStaticDataMember() && VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) return false; } else { return false; } // Only warn for unused decls internal to the translation unit. // FIXME: This seems like a bogus check; it suppresses -Wunused-function // for inline functions defined in the main source file, for instance. 
return mightHaveNonExternalLinkage(D); } void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) { if (!D) return; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { const FunctionDecl *First = FD->getFirstDecl(); if (FD != First && ShouldWarnIfUnusedFileScopedDecl(First)) return; // First should already be in the vector. } if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { const VarDecl *First = VD->getFirstDecl(); if (VD != First && ShouldWarnIfUnusedFileScopedDecl(First)) return; // First should already be in the vector. } if (ShouldWarnIfUnusedFileScopedDecl(D)) UnusedFileScopedDecls.push_back(D); } static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) { if (D->isInvalidDecl()) return false; if (D->isReferenced() || D->isUsed() || D->hasAttr<UnusedAttr>() || D->hasAttr<ObjCPreciseLifetimeAttr>()) return false; if (isa<LabelDecl>(D)) return true; // Except for labels, we only care about unused decls that are local to // functions. bool WithinFunction = D->getDeclContext()->isFunctionOrMethod(); if (const auto *R = dyn_cast<CXXRecordDecl>(D->getDeclContext())) // For dependent types, the diagnostic is deferred. WithinFunction = WithinFunction || (R->isLocalClass() && !R->isDependentType()); if (!WithinFunction) return false; if (isa<TypedefNameDecl>(D)) return true; // White-list anything that isn't a local variable. if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) return false; // Types of valid local variables should be complete, so this should succeed. if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { // White-list anything with an __attribute__((unused)) type. QualType Ty = VD->getType(); // Only look at the outermost level of typedef. if (const TypedefType *TT = Ty->getAs<TypedefType>()) { if (TT->getDecl()->hasAttr<UnusedAttr>()) return false; } // If we failed to complete the type for some reason, or if the type is // dependent, don't diagnose the variable. if (Ty->isIncompleteType() || Ty->isDependentType()) return false; if (const TagType *TT = Ty->getAs<TagType>()) { const TagDecl *Tag = TT->getDecl(); if (Tag->hasAttr<UnusedAttr>()) return false; if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) { if (!RD->hasTrivialDestructor() && !RD->hasAttr<WarnUnusedAttr>()) return false; if (const Expr *Init = VD->getInit()) { if (const ExprWithCleanups *Cleanups = dyn_cast<ExprWithCleanups>(Init)) Init = Cleanups->getSubExpr(); const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init); if (Construct && !Construct->isElidable()) { CXXConstructorDecl *CD = Construct->getConstructor(); if (!CD->isTrivial() && !RD->hasAttr<WarnUnusedAttr>()) return false; } } } } // TODO: __attribute__((unused)) templates? } return true; } static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx, FixItHint &Hint) { if (isa<LabelDecl>(D)) { SourceLocation AfterColon = Lexer::findLocationAfterToken(D->getLocEnd(), tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(), true); if (AfterColon.isInvalid()) return; Hint = FixItHint::CreateRemoval(CharSourceRange:: getCharRange(D->getLocStart(), AfterColon)); } return; } void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) { if (D->getTypeForDecl()->isDependentType()) return; for (auto *TmpD : D->decls()) { if (const auto *T = dyn_cast<TypedefNameDecl>(TmpD)) DiagnoseUnusedDecl(T); else if(const auto *R = dyn_cast<RecordDecl>(TmpD)) DiagnoseUnusedNestedTypedefs(R); } } /// DiagnoseUnusedDecl - Emit warnings about declarations that are not used /// unless they are marked attr(unused). 
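/// For example:
/// @code
/// void f() {
///   int n = 0;                      // warns: unused variable 'n'
///   int m __attribute__((unused));  // no warning
/// }
/// @endcode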
void Sema::DiagnoseUnusedDecl(const NamedDecl *D) { if (!ShouldDiagnoseUnusedDecl(D)) return; if (auto *TD = dyn_cast<TypedefNameDecl>(D)) { // typedefs can be referenced later on, so the diagnostics are emitted // at end-of-translation-unit. UnusedLocalTypedefNameCandidates.insert(TD); return; } FixItHint Hint; GenerateFixForUnusedDecl(D, Context, Hint); unsigned DiagID; if (isa<VarDecl>(D) && cast<VarDecl>(D)->isExceptionVariable()) DiagID = diag::warn_unused_exception_param; else if (isa<LabelDecl>(D)) DiagID = diag::warn_unused_label; else DiagID = diag::warn_unused_variable; Diag(D->getLocation(), DiagID) << D->getDeclName() << Hint; } static void CheckPoppedLabel(LabelDecl *L, Sema &S) { // Verify that we have no forward references left. If so, there was a goto // or address of a label taken, but no definition of it. Label fwd // definitions are indicated with a null substmt which is also not a resolved // MS inline assembly label name. bool Diagnose = false; if (L->isMSAsmLabel()) Diagnose = !L->isResolvedMSAsmLabel(); else Diagnose = L->getStmt() == nullptr; if (Diagnose) S.Diag(L->getLocation(), diag::err_undeclared_label_use) <<L->getDeclName(); } void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) { S->mergeNRVOIntoParent(); if (S->decl_empty()) return; assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) && "Scope shouldn't contain decls!"); for (auto *TmpD : S->decls()) { assert(TmpD && "This decl didn't get pushed??"); assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?"); NamedDecl *D = cast<NamedDecl>(TmpD); if (!D->getDeclName()) continue; // Diagnose unused variables in this scope. if (!S->hasUnrecoverableErrorOccurred()) { DiagnoseUnusedDecl(D); if (const auto *RD = dyn_cast<RecordDecl>(D)) DiagnoseUnusedNestedTypedefs(RD); } // If this was a forward reference to a label, verify it was defined. if (LabelDecl *LD = dyn_cast<LabelDecl>(D)) CheckPoppedLabel(LD, *this); // Remove this name from our lexical scope. IdResolver.RemoveDecl(D); } } /// \brief Look for an Objective-C class in the translation unit. /// /// \param Id The name of the Objective-C class we're looking for. If /// typo-correction fixes this name, the Id will be updated /// to the fixed name. /// /// \param IdLoc The location of the name in the translation unit. /// /// \param DoTypoCorrection If true, this routine will attempt typo correction /// if there is no class with the given name. /// /// \returns The declaration of the named Objective-C class, or NULL if the /// class could not be found. ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool DoTypoCorrection) { // The third "scope" argument is 0 since we aren't enabling lazy built-in // creation from this context. NamedDecl *IDecl = LookupSingleName(TUScope, Id, IdLoc, LookupOrdinaryName); if (!IDecl && DoTypoCorrection) { // Perform typo correction at the given location, but only if we // find an Objective-C class name. if (TypoCorrection C = CorrectTypo( DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName, TUScope, nullptr, llvm::make_unique<DeclFilterCCC<ObjCInterfaceDecl>>(), CTK_ErrorRecovery)) { diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id); IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>(); Id = IDecl->getIdentifier(); } } ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl); // This routine must always return a class definition, if any. 
  if (Def && Def->getDefinition())
    Def = Def->getDefinition();
  return Def;
}

/// getNonFieldDeclScope - Retrieves the innermost scope, starting
/// from S, where a non-field would be declared. This routine copes
/// with the difference between C and C++ scoping rules in structs and
/// unions. For example, the following code is well-formed in C but
/// ill-formed in C++:
/// @code
/// struct S6 {
///   enum { BAR } e;
/// };
///
/// void test_S6() {
///   struct S6 a;
///   a.e = BAR;
/// }
/// @endcode
/// For the declaration of BAR, this routine will return a different
/// scope. The scope S will be the scope of the unnamed enumeration
/// within S6. In C++, this routine will return the scope associated
/// with S6, because the enumeration's scope is a transparent
/// context but structures can contain non-field names. In C, this
/// routine will return the translation unit scope, since the
/// enumeration's scope is a transparent context and structures cannot
/// contain non-field names.
Scope *Sema::getNonFieldDeclScope(Scope *S) {
  while (((S->getFlags() & Scope::DeclScope) == 0) ||
         (S->getEntity() && S->getEntity()->isTransparentContext()) ||
         (S->isClassScope() && !getLangOpts().CPlusPlus))
    S = S->getParent();
  return S;
}

/// \brief Looks up the declaration of "struct objc_super" and
/// saves it for later use in building builtin declaration of
/// objc_msgSendSuper and objc_msgSendSuper_stret. If no such
/// pre-existing declaration exists, no action takes place.
static void LookupPredefedObjCSuperType(Sema &ThisSema, Scope *S,
                                        IdentifierInfo *II) {
  if (!II->isStr("objc_msgSendSuper"))
    return;
  ASTContext &Context = ThisSema.Context;

  LookupResult Result(ThisSema, &Context.Idents.get("objc_super"),
                      SourceLocation(), Sema::LookupTagName);
  ThisSema.LookupName(Result, S);
  if (Result.getResultKind() == LookupResult::Found)
    if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
      Context.setObjCSuperType(Context.getTagDeclType(TD));
}

static StringRef getHeaderName(ASTContext::GetBuiltinTypeError Error) {
  switch (Error) {
  case ASTContext::GE_None:
    return "";
  case ASTContext::GE_Missing_stdio:
    return "stdio.h";
  case ASTContext::GE_Missing_setjmp:
    return "setjmp.h";
  case ASTContext::GE_Missing_ucontext:
    return "ucontext.h";
  }
  llvm_unreachable("unhandled error kind");
}

/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
/// file scope. Lazily create a decl for it. ForRedeclaration is true
/// if we're creating this built-in in anticipation of redeclaring the
/// built-in.
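/// For example, in C89 a bare call to 'printf' with no declaration in
/// scope lands here: the decl is created from the builtin tables, a
/// warning such as ext_implicit_lib_function_decl may fire, and the new
/// declaration is pushed at translation-unit scope.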
NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc) { LookupPredefedObjCSuperType(*this, S, II); ASTContext::GetBuiltinTypeError Error; QualType R = Context.GetBuiltinType(ID, Error); if (Error) { if (ForRedeclaration) Diag(Loc, diag::warn_implicit_decl_requires_sysheader) << getHeaderName(Error) << Context.BuiltinInfo.GetName(ID); return nullptr; } if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(ID)) { Diag(Loc, diag::ext_implicit_lib_function_decl) << Context.BuiltinInfo.GetName(ID) << R; if (Context.BuiltinInfo.getHeaderName(ID) && !Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc)) Diag(Loc, diag::note_include_header_or_declare) << Context.BuiltinInfo.getHeaderName(ID) << Context.BuiltinInfo.GetName(ID); } DeclContext *Parent = Context.getTranslationUnitDecl(); if (getLangOpts().CPlusPlus) { LinkageSpecDecl *CLinkageDecl = LinkageSpecDecl::Create(Context, Parent, Loc, Loc, LinkageSpecDecl::lang_c, false); CLinkageDecl->setImplicit(); Parent->addDecl(CLinkageDecl); Parent = CLinkageDecl; } FunctionDecl *New = FunctionDecl::Create(Context, Parent, Loc, Loc, II, R, /*TInfo=*/nullptr, SC_Extern, false, R->isFunctionProtoType()); New->setImplicit(); // Create Decl objects for each parameter, adding them to the // FunctionDecl. if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) { SmallVector<ParmVarDecl*, 16> Params; for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) { ParmVarDecl *parm = ParmVarDecl::Create(Context, New, SourceLocation(), SourceLocation(), nullptr, FT->getParamType(i), /*TInfo=*/nullptr, SC_None, nullptr); parm->setScopeInfo(0, i); Params.push_back(parm); } New->setParams(Params); } AddKnownFunctionAttributes(New); RegisterLocallyScopedExternCDecl(New, S); // TUScope is the translation-unit scope to insert this function into. // FIXME: This is hideous. We need to teach PushOnScopeChains to // relate Scopes to DeclContexts, and probably eliminate CurContext // entirely, but we're not there yet. DeclContext *SavedContext = CurContext; CurContext = Parent; PushOnScopeChains(New, TUScope); CurContext = SavedContext; return New; } /// \brief Filter out any previous declarations that the given declaration /// should not consider because they are not permitted to conflict, e.g., /// because they come from hidden sub-modules and do not refer to the same /// entity. static void filterNonConflictingPreviousDecls(Sema &S, NamedDecl *decl, LookupResult &previous){ // This is only interesting when modules are enabled. if ((!S.getLangOpts().Modules && !S.getLangOpts().ModulesLocalVisibility) || !S.getLangOpts().ModulesHideInternalLinkage) return; // Empty sets are uninteresting. if (previous.empty()) return; LookupResult::Filter filter = previous.makeFilter(); while (filter.hasNext()) { NamedDecl *old = filter.next(); // Non-hidden declarations are never ignored. if (S.isVisible(old)) continue; if (!old->isExternallyVisible()) filter.erase(); } filter.done(); } /// Typedef declarations don't have linkage, but they still denote the same /// entity if their types are the same. /// FIXME: This is notionally doing the same thing as ASTReaderDecl's /// isSameEntity. static void filterNonConflictingPreviousTypedefDecls(Sema &S, TypedefNameDecl *Decl, LookupResult &Previous) { // This is only interesting when modules are enabled. if (!S.getLangOpts().Modules && !S.getLangOpts().ModulesLocalVisibility) return; // Empty sets are uninteresting. 
if (Previous.empty()) return; LookupResult::Filter Filter = Previous.makeFilter(); while (Filter.hasNext()) { NamedDecl *Old = Filter.next(); // Non-hidden declarations are never ignored. if (S.isVisible(Old)) continue; // Declarations of the same entity are not ignored, even if they have // different linkages. if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) { if (S.Context.hasSameType(OldTD->getUnderlyingType(), Decl->getUnderlyingType())) continue; // If both declarations give a tag declaration a typedef name for linkage // purposes, then they declare the same entity. if (OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true) && Decl->getAnonDeclWithTypedefName()) continue; } if (!Old->isExternallyVisible()) Filter.erase(); } Filter.done(); } bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) { QualType OldType; if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old)) OldType = OldTypedef->getUnderlyingType(); else OldType = Context.getTypeDeclType(Old); QualType NewType = New->getUnderlyingType(); if (NewType->isVariablyModifiedType()) { // Must not redefine a typedef with a variably-modified type. int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0; Diag(New->getLocation(), diag::err_redefinition_variably_modified_typedef) << Kind << NewType; if (Old->getLocation().isValid()) Diag(Old->getLocation(), diag::note_previous_definition); New->setInvalidDecl(); return true; } if (OldType != NewType && !OldType->isDependentType() && !NewType->isDependentType() && !Context.hasSameType(OldType, NewType)) { int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0; Diag(New->getLocation(), diag::err_redefinition_different_typedef) << Kind << NewType << OldType; if (Old->getLocation().isValid()) Diag(Old->getLocation(), diag::note_previous_definition); New->setInvalidDecl(); return true; } return false; } /// MergeTypedefNameDecl - We just parsed a typedef 'New' which has the /// same name and scope as a previous declaration 'Old'. Figure out /// how to resolve this situation, merging decls or emitting /// diagnostics as appropriate. If there was an error, set New to be invalid. /// void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) { // If the new decl is known invalid already, don't bother doing any // merging checks. if (New->isInvalidDecl()) return; // Allow multiple definitions for ObjC built-in typedefs. // FIXME: Verify the underlying types are equivalent! if (getLangOpts().ObjC1) { const IdentifierInfo *TypeID = New->getIdentifier(); switch (TypeID->getLength()) { default: break; case 2: { if (!TypeID->isStr("id")) break; QualType T = New->getUnderlyingType(); if (!T->isPointerType()) break; if (!T->isVoidPointerType()) { QualType PT = T->getAs<PointerType>()->getPointeeType(); if (!PT->isStructureType()) break; } Context.setObjCIdRedefinitionType(T); // Install the built-in type for 'id', ignoring the current definition. New->setTypeForDecl(Context.getObjCIdType().getTypePtr()); return; } case 5: if (!TypeID->isStr("Class")) break; Context.setObjCClassRedefinitionType(New->getUnderlyingType()); // Install the built-in type for 'Class', ignoring the current definition. New->setTypeForDecl(Context.getObjCClassType().getTypePtr()); return; case 3: if (!TypeID->isStr("SEL")) break; Context.setObjCSelRedefinitionType(New->getUnderlyingType()); // Install the built-in type for 'SEL', ignoring the current definition. New->setTypeForDecl(Context.getObjCSelType().getTypePtr()); return; } // Fall through - the typedef name was not a builtin type. 
} // Verify the old decl was also a type. TypeDecl *Old = OldDecls.getAsSingle<TypeDecl>(); if (!Old) { Diag(New->getLocation(), diag::err_redefinition_different_kind) << New->getDeclName(); NamedDecl *OldD = OldDecls.getRepresentativeDecl(); if (OldD->getLocation().isValid()) Diag(OldD->getLocation(), diag::note_previous_definition); return New->setInvalidDecl(); } // If the old declaration is invalid, just give up here. if (Old->isInvalidDecl()) return New->setInvalidDecl(); if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) { auto *OldTag = OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true); auto *NewTag = New->getAnonDeclWithTypedefName(); NamedDecl *Hidden = nullptr; if (getLangOpts().CPlusPlus && OldTag && NewTag && OldTag->getCanonicalDecl() != NewTag->getCanonicalDecl() && !hasVisibleDefinition(OldTag, &Hidden)) { // There is a definition of this tag, but it is not visible. Use it // instead of our tag. New->setTypeForDecl(OldTD->getTypeForDecl()); if (OldTD->isModed()) New->setModedTypeSourceInfo(OldTD->getTypeSourceInfo(), OldTD->getUnderlyingType()); else New->setTypeSourceInfo(OldTD->getTypeSourceInfo()); // Make the old tag definition visible. makeMergedDefinitionVisible(Hidden, NewTag->getLocation()); } } // If the typedef types are not identical, reject them in all languages and // with any extensions enabled. if (isIncompatibleTypedef(Old, New)) return; // The types match. Link up the redeclaration chain and merge attributes if // the old declaration was a typedef. if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Old)) { New->setPreviousDecl(Typedef); mergeDeclAttributes(New, Old); } if (getLangOpts().MicrosoftExt) return; if (getLangOpts().CPlusPlus) { // C++ [dcl.typedef]p2: // In a given non-class scope, a typedef specifier can be used to // redefine the name of any type declared in that scope to refer // to the type to which it already refers. if (!isa<CXXRecordDecl>(CurContext)) return; // C++0x [dcl.typedef]p4: // In a given class scope, a typedef specifier can be used to redefine // any class-name declared in that scope that is not also a typedef-name // to refer to the type to which it already refers. // // This wording came in via DR424, which was a correction to the // wording in DR56, which accidentally banned code like: // // struct S { // typedef struct A { } A; // }; // // in the C++03 standard. We implement the C++0x semantics, which // allow the above but disallow // // struct S { // typedef int I; // typedef int I; // }; // // since that was the intent of DR56. if (!isa<TypedefNameDecl>(Old)) return; Diag(New->getLocation(), diag::err_redefinition) << New->getDeclName(); Diag(Old->getLocation(), diag::note_previous_definition); return New->setInvalidDecl(); } // Modules always permit redefinition of typedefs, as does C11. if (getLangOpts().Modules || getLangOpts().C11) return; // If we have a redefinition of a typedef in C, emit a warning. This warning // is normally mapped to an error, but can be controlled with // -Wtypedef-redefinition. If either the original or the redefinition is // in a system header, don't emit this for compatibility with GCC. 
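  // For illustration: in C89, 'typedef int T; typedef int T;' in one scope
  // reaches the warning below; C11 and modules builds returned early above
  // and accept the redefinition silently.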
  if (getDiagnostics().getSuppressSystemWarnings() &&
      (Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
       Context.getSourceManager().isInSystemHeader(New->getLocation())))
    return;

  Diag(New->getLocation(), diag::ext_redefinition_of_typedef)
    << New->getDeclName();
  Diag(Old->getLocation(), diag::note_previous_definition);
}

/// DeclHasAttr - returns true if the declaration \p D already has the target
/// attribute \p A.
static bool DeclHasAttr(const Decl *D, const Attr *A) {
  const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
  const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
  for (const auto *i : D->attrs())
    if (i->getKind() == A->getKind()) {
      if (Ann) {
        if (Ann->getAnnotation() == cast<AnnotateAttr>(i)->getAnnotation())
          return true;
        continue;
      }
      // FIXME: Don't hardcode this check
      if (OA && isa<OwnershipAttr>(i))
        return OA->getOwnKind() == cast<OwnershipAttr>(i)->getOwnKind();
      return true;
    }

  return false;
}

static bool isAttributeTargetADefinition(Decl *D) {
  if (VarDecl *VD = dyn_cast<VarDecl>(D))
    return VD->isThisDeclarationADefinition();
  if (TagDecl *TD = dyn_cast<TagDecl>(D))
    return TD->isCompleteDefinition() || TD->isBeingDefined();
  return true;
}

/// Merge alignment attributes from \p Old to \p New, taking into account the
/// special semantics of C11's _Alignas specifier and C++11's alignas
/// attribute.
///
/// \return \c true if any attributes were added to \p New.
static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
  // Look for alignas attributes on Old, and pick out whichever attribute
  // specifies the strictest alignment requirement.
  AlignedAttr *OldAlignasAttr = nullptr;
  AlignedAttr *OldStrictestAlignAttr = nullptr;
  unsigned OldAlign = 0;
  for (auto *I : Old->specific_attrs<AlignedAttr>()) {
    // FIXME: We have no way of representing inherited dependent alignments
    // in a case like:
    //   template<int A, int B> struct alignas(A) X;
    //   template<int A, int B> struct alignas(B) X {};
    // For now, we just ignore any alignas attributes which are not on the
    // definition in such a case.
    if (I->isAlignmentDependent())
      return false;

    if (I->isAlignas())
      OldAlignasAttr = I;

    unsigned Align = I->getAlignment(S.Context);
    if (Align > OldAlign) {
      OldAlign = Align;
      OldStrictestAlignAttr = I;
    }
  }

  // Look for alignas attributes on New.
  AlignedAttr *NewAlignasAttr = nullptr;
  unsigned NewAlign = 0;
  for (auto *I : New->specific_attrs<AlignedAttr>()) {
    if (I->isAlignmentDependent())
      return false;

    if (I->isAlignas())
      NewAlignasAttr = I;

    unsigned Align = I->getAlignment(S.Context);
    if (Align > NewAlign)
      NewAlign = Align;
  }

  if (OldAlignasAttr && NewAlignasAttr && OldAlign != NewAlign) {
    // Both declarations have 'alignas' attributes. We require them to match.
    // C++11 [dcl.align]p6 and C11 6.7.5/7 both come close to saying this, but
    // fall short. (If two declarations both have alignas, they must both match
    // every definition, and so must match each other if there is a
    // definition.)

    // If either declaration only contains 'alignas(0)' specifiers, then it
    // specifies the natural alignment for the type.
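    // For example, '_Alignas(0) int x;' carries an alignment value of zero,
    // which is taken here to mean the natural alignment, alignof(int).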
if (OldAlign == 0 || NewAlign == 0) { QualType Ty; if (ValueDecl *VD = dyn_cast<ValueDecl>(New)) Ty = VD->getType(); else Ty = S.Context.getTagDeclType(cast<TagDecl>(New)); if (OldAlign == 0) OldAlign = S.Context.getTypeAlign(Ty); if (NewAlign == 0) NewAlign = S.Context.getTypeAlign(Ty); } if (OldAlign != NewAlign) { S.Diag(NewAlignasAttr->getLocation(), diag::err_alignas_mismatch) << (unsigned)S.Context.toCharUnitsFromBits(OldAlign).getQuantity() << (unsigned)S.Context.toCharUnitsFromBits(NewAlign).getQuantity(); S.Diag(OldAlignasAttr->getLocation(), diag::note_previous_declaration); } } if (OldAlignasAttr && !NewAlignasAttr && isAttributeTargetADefinition(New)) { // C++11 [dcl.align]p6: // if any declaration of an entity has an alignment-specifier, // every defining declaration of that entity shall specify an // equivalent alignment. // C11 6.7.5/7: // If the definition of an object does not have an alignment // specifier, any other declaration of that object shall also // have no alignment specifier. S.Diag(New->getLocation(), diag::err_alignas_missing_on_definition) << OldAlignasAttr; S.Diag(OldAlignasAttr->getLocation(), diag::note_alignas_on_declaration) << OldAlignasAttr; } bool AnyAdded = false; // Ensure we have an attribute representing the strictest alignment. if (OldAlign > NewAlign) { AlignedAttr *Clone = OldStrictestAlignAttr->clone(S.Context); Clone->setInherited(true); New->addAttr(Clone); AnyAdded = true; } // Ensure we have an alignas attribute if the old declaration had one. if (OldAlignasAttr && !NewAlignasAttr && !(AnyAdded && OldStrictestAlignAttr->isAlignas())) { AlignedAttr *Clone = OldAlignasAttr->clone(S.Context); Clone->setInherited(true); New->addAttr(Clone); AnyAdded = true; } return AnyAdded; } static bool mergeDeclAttribute(Sema &S, NamedDecl *D, const InheritableAttr *Attr, bool Override) { InheritableAttr *NewAttr = nullptr; unsigned AttrSpellingListIndex = Attr->getSpellingListIndex(); if (const auto *AA = dyn_cast<AvailabilityAttr>(Attr)) NewAttr = S.mergeAvailabilityAttr(D, AA->getRange(), AA->getPlatform(), AA->getIntroduced(), AA->getDeprecated(), AA->getObsoleted(), AA->getUnavailable(), AA->getMessage(), Override, AttrSpellingListIndex); else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr)) NewAttr = S.mergeVisibilityAttr(D, VA->getRange(), VA->getVisibility(), AttrSpellingListIndex); else if (const auto *VA = dyn_cast<TypeVisibilityAttr>(Attr)) NewAttr = S.mergeTypeVisibilityAttr(D, VA->getRange(), VA->getVisibility(), AttrSpellingListIndex); else if (const auto *ImportA = dyn_cast<DLLImportAttr>(Attr)) NewAttr = S.mergeDLLImportAttr(D, ImportA->getRange(), AttrSpellingListIndex); else if (const auto *ExportA = dyn_cast<DLLExportAttr>(Attr)) NewAttr = S.mergeDLLExportAttr(D, ExportA->getRange(), AttrSpellingListIndex); else if (const auto *FA = dyn_cast<FormatAttr>(Attr)) NewAttr = S.mergeFormatAttr(D, FA->getRange(), FA->getType(), FA->getFormatIdx(), FA->getFirstArg(), AttrSpellingListIndex); else if (const auto *SA = dyn_cast<SectionAttr>(Attr)) NewAttr = S.mergeSectionAttr(D, SA->getRange(), SA->getName(), AttrSpellingListIndex); else if (const auto *IA = dyn_cast<MSInheritanceAttr>(Attr)) NewAttr = S.mergeMSInheritanceAttr(D, IA->getRange(), IA->getBestCase(), AttrSpellingListIndex, IA->getSemanticSpelling()); else if (const auto *AA = dyn_cast<AlwaysInlineAttr>(Attr)) NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(), &S.Context.Idents.get(AA->getSpelling()), AttrSpellingListIndex); else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr)) 
NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex); else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr)) NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex); else if (isa<AlignedAttr>(Attr)) // AlignedAttrs are handled separately, because we need to handle all // such attributes on a declaration at the same time. NewAttr = nullptr; else if (isa<DeprecatedAttr>(Attr) && Override) NewAttr = nullptr; else if (Attr->duplicatesAllowed() || !DeclHasAttr(D, Attr)) NewAttr = cast<InheritableAttr>(Attr->clone(S.Context)); if (NewAttr) { NewAttr->setInherited(true); D->addAttr(NewAttr); return true; } return false; } static const Decl *getDefinition(const Decl *D) { if (const TagDecl *TD = dyn_cast<TagDecl>(D)) return TD->getDefinition(); if (const VarDecl *VD = dyn_cast<VarDecl>(D)) { const VarDecl *Def = VD->getDefinition(); if (Def) return Def; return VD->getActingDefinition(); } if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { const FunctionDecl* Def; if (FD->isDefined(Def)) return Def; } return nullptr; } static bool hasAttribute(const Decl *D, attr::Kind Kind) { for (const auto *Attribute : D->attrs()) if (Attribute->getKind() == Kind) return true; return false; } /// checkNewAttributesAfterDef - If we already have a definition, check that /// there are no new attributes in this declaration. static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) { if (!New->hasAttrs()) return; const Decl *Def = getDefinition(Old); if (!Def || Def == New) return; AttrVec &NewAttributes = New->getAttrs(); for (unsigned I = 0, E = NewAttributes.size(); I != E;) { const Attr *NewAttribute = NewAttributes[I]; if (isa<AliasAttr>(NewAttribute)) { if (FunctionDecl *FD = dyn_cast<FunctionDecl>(New)) S.CheckForFunctionRedefinition(FD, cast<FunctionDecl>(Def)); else { VarDecl *VD = cast<VarDecl>(New); unsigned Diag = cast<VarDecl>(Def)->isThisDeclarationADefinition() == VarDecl::TentativeDefinition ? diag::err_alias_after_tentative : diag::err_redefinition; S.Diag(VD->getLocation(), Diag) << VD->getDeclName(); S.Diag(Def->getLocation(), diag::note_previous_definition); VD->setInvalidDecl(); } ++I; continue; } if (const VarDecl *VD = dyn_cast<VarDecl>(Def)) { // Tentative definitions are only interesting for the alias check above. if (VD->isThisDeclarationADefinition() != VarDecl::Definition) { ++I; continue; } } if (hasAttribute(Def, NewAttribute->getKind())) { ++I; continue; // regular attr merging will take care of validating this. } if (isa<C11NoReturnAttr>(NewAttribute)) { // C's _Noreturn is allowed to be added to a function after it is defined. ++I; continue; } else if (const AlignedAttr *AA = dyn_cast<AlignedAttr>(NewAttribute)) { if (AA->isAlignas()) { // C++11 [dcl.align]p6: // if any declaration of an entity has an alignment-specifier, // every defining declaration of that entity shall specify an // equivalent alignment. // C11 6.7.5/7: // If the definition of an object does not have an alignment // specifier, any other declaration of that object shall also // have no alignment specifier. 
S.Diag(Def->getLocation(), diag::err_alignas_missing_on_definition) << AA; S.Diag(NewAttribute->getLocation(), diag::note_alignas_on_declaration) << AA; NewAttributes.erase(NewAttributes.begin() + I); --E; continue; } } S.Diag(NewAttribute->getLocation(), diag::warn_attribute_precede_definition); S.Diag(Def->getLocation(), diag::note_previous_definition); NewAttributes.erase(NewAttributes.begin() + I); --E; } } /// mergeDeclAttributes - Copy attributes from the Old decl to the New one. void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK) { if (UsedAttr *OldAttr = Old->getMostRecentDecl()->getAttr<UsedAttr>()) { UsedAttr *NewAttr = OldAttr->clone(Context); NewAttr->setInherited(true); New->addAttr(NewAttr); } if (!Old->hasAttrs() && !New->hasAttrs()) return; // attributes declared post-definition are currently ignored checkNewAttributesAfterDef(*this, New, Old); if (!Old->hasAttrs()) return; bool foundAny = New->hasAttrs(); // Ensure that any moving of objects within the allocated map is done before // we process them. if (!foundAny) New->setAttrs(AttrVec()); for (auto *I : Old->specific_attrs<InheritableAttr>()) { bool Override = false; // Ignore deprecated/unavailable/availability attributes if requested. if (isa<DeprecatedAttr>(I) || isa<UnavailableAttr>(I) || isa<AvailabilityAttr>(I)) { switch (AMK) { case AMK_None: continue; case AMK_Redeclaration: break; case AMK_Override: Override = true; break; } } // Already handled. if (isa<UsedAttr>(I)) continue; if (mergeDeclAttribute(*this, New, I, Override)) foundAny = true; } if (mergeAlignedAttrs(*this, New, Old)) foundAny = true; if (!foundAny) New->dropAttrs(); } /// mergeParamDeclAttributes - Copy attributes from the old parameter /// to the new one. static void mergeParamDeclAttributes(ParmVarDecl *newDecl, const ParmVarDecl *oldDecl, Sema &S) { // C++11 [dcl.attr.depend]p2: // The first declaration of a function shall specify the // carries_dependency attribute for its declarator-id if any declaration // of the function specifies the carries_dependency attribute. const CarriesDependencyAttr *CDA = newDecl->getAttr<CarriesDependencyAttr>(); if (CDA && !oldDecl->hasAttr<CarriesDependencyAttr>()) { S.Diag(CDA->getLocation(), diag::err_carries_dependency_missing_on_first_decl) << 1/*Param*/; // Find the first declaration of the parameter. // FIXME: Should we build redeclaration chains for function parameters? const FunctionDecl *FirstFD = cast<FunctionDecl>(oldDecl->getDeclContext())->getFirstDecl(); const ParmVarDecl *FirstVD = FirstFD->getParamDecl(oldDecl->getFunctionScopeIndex()); S.Diag(FirstVD->getLocation(), diag::note_carries_dependency_missing_first_decl) << 1/*Param*/; } if (!oldDecl->hasAttrs()) return; bool foundAny = newDecl->hasAttrs(); // Ensure that any moving of objects within the allocated map is // done before we process them. 
if (!foundAny) newDecl->setAttrs(AttrVec()); for (const auto *I : oldDecl->specific_attrs<InheritableParamAttr>()) { if (!DeclHasAttr(newDecl, I)) { InheritableAttr *newAttr = cast<InheritableParamAttr>(I->clone(S.Context)); newAttr->setInherited(true); newDecl->addAttr(newAttr); foundAny = true; } } if (!foundAny) newDecl->dropAttrs(); } static void mergeParamDeclTypes(ParmVarDecl *NewParam, const ParmVarDecl *OldParam, Sema &S) { if (auto Oldnullability = OldParam->getType()->getNullability(S.Context)) { if (auto Newnullability = NewParam->getType()->getNullability(S.Context)) { if (*Oldnullability != *Newnullability) { S.Diag(NewParam->getLocation(), diag::warn_mismatched_nullability_attr) << DiagNullabilityKind( *Newnullability, ((NewParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability) != 0)) << DiagNullabilityKind( *Oldnullability, ((OldParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability) != 0)); S.Diag(OldParam->getLocation(), diag::note_previous_declaration); } } else { QualType NewT = NewParam->getType(); NewT = S.Context.getAttributedType( AttributedType::getNullabilityAttrKind(*Oldnullability), NewT, NewT); NewParam->setType(NewT); } } } namespace { /// Used in MergeFunctionDecl to keep track of function parameters in /// C. struct GNUCompatibleParamWarning { ParmVarDecl *OldParm; ParmVarDecl *NewParm; QualType PromotedType; }; } /// getSpecialMember - get the special member enum for a method. Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) { if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) { if (Ctor->isDefaultConstructor()) return Sema::CXXDefaultConstructor; if (Ctor->isCopyConstructor()) return Sema::CXXCopyConstructor; if (Ctor->isMoveConstructor()) return Sema::CXXMoveConstructor; } else if (isa<CXXDestructorDecl>(MD)) { return Sema::CXXDestructor; } else if (MD->isCopyAssignmentOperator()) { return Sema::CXXCopyAssignment; } else if (MD->isMoveAssignmentOperator()) { return Sema::CXXMoveAssignment; } return Sema::CXXInvalid; } // Determine whether the previous declaration was a definition, implicit // declaration, or a declaration. template <typename T> static std::pair<diag::kind, SourceLocation> getNoteDiagForInvalidRedeclaration(const T *Old, const T *New) { diag::kind PrevDiag; SourceLocation OldLocation = Old->getLocation(); if (Old->isThisDeclarationADefinition()) PrevDiag = diag::note_previous_definition; else if (Old->isImplicit()) { PrevDiag = diag::note_previous_implicit_declaration; if (OldLocation.isInvalid()) OldLocation = New->getLocation(); } else PrevDiag = diag::note_previous_declaration; return std::make_pair(PrevDiag, OldLocation); } /// canRedefineFunction - checks if a function can be redefined. Currently, /// only extern inline functions can be redefined, and even then only in /// GNU89 mode. 
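/// For example, gnu89 accepts:
/// @code
/// extern inline int f(void) { return 0; } // inline definition
/// int f(void) { return 1; }               // OK: redefinition is permitted
/// @endcode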
static bool canRedefineFunction(const FunctionDecl *FD, const LangOptions& LangOpts) { return ((FD->hasAttr<GNUInlineAttr>() || LangOpts.GNUInline) && !LangOpts.CPlusPlus && FD->isInlineSpecified() && FD->getStorageClass() == SC_Extern); } const AttributedType *Sema::getCallingConvAttributedType(QualType T) const { const AttributedType *AT = T->getAs<AttributedType>(); while (AT && !AT->isCallingConv()) AT = AT->getModifiedType()->getAs<AttributedType>(); return AT; } template <typename T> static bool haveIncompatibleLanguageLinkages(const T *Old, const T *New) { const DeclContext *DC = Old->getDeclContext(); if (DC->isRecord()) return false; LanguageLinkage OldLinkage = Old->getLanguageLinkage(); if (OldLinkage == CXXLanguageLinkage && New->isInExternCContext()) return true; if (OldLinkage == CLanguageLinkage && New->isInExternCXXContext()) return true; return false; } template<typename T> static bool isExternC(T *D) { return D->isExternC(); } static bool isExternC(VarTemplateDecl *) { return false; } /// \brief Check whether a redeclaration of an entity introduced by a /// using-declaration is valid, given that we know it's not an overload /// (nor a hidden tag declaration). template<typename ExpectedDecl> static bool checkUsingShadowRedecl(Sema &S, UsingShadowDecl *OldS, ExpectedDecl *New) { // C++11 [basic.scope.declarative]p4: // Given a set of declarations in a single declarative region, each of // which specifies the same unqualified name, // -- they shall all refer to the same entity, or all refer to functions // and function templates; or // -- exactly one declaration shall declare a class name or enumeration // name that is not a typedef name and the other declarations shall all // refer to the same variable or enumerator, or all refer to functions // and function templates; in this case the class name or enumeration // name is hidden (3.3.10). // C++11 [namespace.udecl]p14: // If a function declaration in namespace scope or block scope has the // same name and the same parameter-type-list as a function introduced // by a using-declaration, and the declarations do not declare the same // function, the program is ill-formed. auto *Old = dyn_cast<ExpectedDecl>(OldS->getTargetDecl()); if (Old && !Old->getDeclContext()->getRedeclContext()->Equals( New->getDeclContext()->getRedeclContext()) && !(isExternC(Old) && isExternC(New))) Old = nullptr; if (!Old) { S.Diag(New->getLocation(), diag::err_using_decl_conflict_reverse); S.Diag(OldS->getTargetDecl()->getLocation(), diag::note_using_decl_target); S.Diag(OldS->getUsingDecl()->getLocation(), diag::note_using_decl) << 0; return true; } return false; } /// MergeFunctionDecl - We just parsed a function 'New' from /// declarator D which has the same name and scope as a previous /// declaration 'Old'. Figure out how to resolve this situation, /// merging decls or emitting diagnostics as appropriate. /// /// In C++, New and Old must be declarations that are not /// overloaded. Use IsOverload to determine whether New and Old are /// overloaded, and to select the Old declaration that New should be /// merged with. /// /// Returns true if there was an error, false otherwise. bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S, bool MergeTypeWithOld) { // Verify the old decl was also a function. 
FunctionDecl *Old = OldD->getAsFunction(); if (!Old) { if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(OldD)) { if (New->getFriendObjectKind()) { Diag(New->getLocation(), diag::err_using_decl_friend); Diag(Shadow->getTargetDecl()->getLocation(), diag::note_using_decl_target); Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl) << 0; return true; } // Check whether the two declarations might declare the same function. if (checkUsingShadowRedecl<FunctionDecl>(*this, Shadow, New)) return true; OldD = Old = cast<FunctionDecl>(Shadow->getTargetDecl()); } else { Diag(New->getLocation(), diag::err_redefinition_different_kind) << New->getDeclName(); Diag(OldD->getLocation(), diag::note_previous_definition); return true; } } // If the old declaration is invalid, just give up here. if (Old->isInvalidDecl()) return true; diag::kind PrevDiag; SourceLocation OldLocation; std::tie(PrevDiag, OldLocation) = getNoteDiagForInvalidRedeclaration(Old, New); // Don't complain about this if we're in GNU89 mode and the old function // is an extern inline function. // Don't complain about specializations. They are not supposed to have // storage classes. if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) && New->getStorageClass() == SC_Static && Old->hasExternalFormalLinkage() && !New->getTemplateSpecializationInfo() && !canRedefineFunction(Old, getLangOpts())) { if (getLangOpts().MicrosoftExt) { Diag(New->getLocation(), diag::ext_static_non_static) << New; Diag(OldLocation, PrevDiag); } else { Diag(New->getLocation(), diag::err_static_non_static) << New; Diag(OldLocation, PrevDiag); return true; } } // If a function is first declared with a calling convention, but is later // declared or defined without one, all following decls assume the calling // convention of the first. // // It's OK if a function is first declared without a calling convention, // but is later declared or defined with the default calling convention. // // To test if either decl has an explicit calling convention, we look for // AttributedType sugar nodes on the type as written. If they are missing or // were canonicalized away, we assume the calling convention was implicit. // // Note also that we DO NOT return at this point, because we still have // other tests to run. QualType OldQType = Context.getCanonicalType(Old->getType()); QualType NewQType = Context.getCanonicalType(New->getType()); const FunctionType *OldType = cast<FunctionType>(OldQType); const FunctionType *NewType = cast<FunctionType>(NewQType); FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo(); FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo(); bool RequiresAdjustment = false; if (OldTypeInfo.getCC() != NewTypeInfo.getCC()) { FunctionDecl *First = Old->getFirstDecl(); const FunctionType *FT = First->getType().getCanonicalType()->castAs<FunctionType>(); FunctionType::ExtInfo FI = FT->getExtInfo(); bool NewCCExplicit = getCallingConvAttributedType(New->getType()); if (!NewCCExplicit) { // Inherit the CC from the previous declaration if it was specified // there but not here. NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC()); RequiresAdjustment = true; } else { // Calling conventions aren't compatible, so complain. bool FirstCCExplicit = getCallingConvAttributedType(First->getType()); Diag(New->getLocation(), diag::err_cconv_change) << FunctionType::getNameForCallConv(NewTypeInfo.getCC()) << !FirstCCExplicit << (!FirstCCExplicit ? 
"" : FunctionType::getNameForCallConv(FI.getCC())); // Put the note on the first decl, since it is the one that matters. Diag(First->getLocation(), diag::note_previous_declaration); return true; } } // FIXME: diagnose the other way around? if (OldTypeInfo.getNoReturn() && !NewTypeInfo.getNoReturn()) { NewTypeInfo = NewTypeInfo.withNoReturn(true); RequiresAdjustment = true; } // Merge regparm attribute. if (OldTypeInfo.getHasRegParm() != NewTypeInfo.getHasRegParm() || OldTypeInfo.getRegParm() != NewTypeInfo.getRegParm()) { if (NewTypeInfo.getHasRegParm()) { Diag(New->getLocation(), diag::err_regparm_mismatch) << NewType->getRegParmType() << OldType->getRegParmType(); Diag(OldLocation, diag::note_previous_declaration); return true; } NewTypeInfo = NewTypeInfo.withRegParm(OldTypeInfo.getRegParm()); RequiresAdjustment = true; } // Merge ns_returns_retained attribute. if (OldTypeInfo.getProducesResult() != NewTypeInfo.getProducesResult()) { if (NewTypeInfo.getProducesResult()) { Diag(New->getLocation(), diag::err_returns_retained_mismatch); Diag(OldLocation, diag::note_previous_declaration); return true; } NewTypeInfo = NewTypeInfo.withProducesResult(true); RequiresAdjustment = true; } if (RequiresAdjustment) { const FunctionType *AdjustedType = New->getType()->getAs<FunctionType>(); AdjustedType = Context.adjustFunctionType(AdjustedType, NewTypeInfo); New->setType(QualType(AdjustedType, 0)); NewQType = Context.getCanonicalType(New->getType()); NewType = cast<FunctionType>(NewQType); } // If this redeclaration makes the function inline, we may need to add it to // UndefinedButUsed. if (!Old->isInlined() && New->isInlined() && !New->hasAttr<GNUInlineAttr>() && !getLangOpts().GNUInline && Old->isUsed(false) && !Old->isDefined() && !New->isThisDeclarationADefinition()) UndefinedButUsed.insert(std::make_pair(Old->getCanonicalDecl(), SourceLocation())); // If this redeclaration makes it newly gnu_inline, we don't want to warn // about it. if (New->hasAttr<GNUInlineAttr>() && Old->isInlined() && !Old->hasAttr<GNUInlineAttr>()) { UndefinedButUsed.erase(Old->getCanonicalDecl()); } if (getLangOpts().CPlusPlus) { // (C++98 13.1p2): // Certain function declarations cannot be overloaded: // -- Function declarations that differ only in the return type // cannot be overloaded. // Go back to the type source info to compare the declared return types, // per C++1y [dcl.type.auto]p13: // Redeclarations or specializations of a function or function template // with a declared return type that uses a placeholder type shall also // use that placeholder, not a deduced type. QualType OldDeclaredReturnType = (Old->getTypeSourceInfo() ? Old->getTypeSourceInfo()->getType()->castAs<FunctionType>() : OldType)->getReturnType(); QualType NewDeclaredReturnType = (New->getTypeSourceInfo() ? 
New->getTypeSourceInfo()->getType()->castAs<FunctionType>() : NewType)->getReturnType(); QualType ResQT; if (!Context.hasSameType(OldDeclaredReturnType, NewDeclaredReturnType) && !((NewQType->isDependentType() || OldQType->isDependentType()) && New->isLocalExternDecl())) { if (NewDeclaredReturnType->isObjCObjectPointerType() && OldDeclaredReturnType->isObjCObjectPointerType()) ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType); if (ResQT.isNull()) { if (New->isCXXClassMember() && New->isOutOfLine()) Diag(New->getLocation(), diag::err_member_def_does_not_match_ret_type) << New << New->getReturnTypeSourceRange(); else Diag(New->getLocation(), diag::err_ovl_diff_return_type) << New->getReturnTypeSourceRange(); Diag(OldLocation, PrevDiag) << Old << Old->getType() << Old->getReturnTypeSourceRange(); return true; } else NewQType = ResQT; } QualType OldReturnType = OldType->getReturnType(); QualType NewReturnType = cast<FunctionType>(NewQType)->getReturnType(); if (OldReturnType != NewReturnType) { // If this function has a deduced return type and has already been // defined, copy the deduced value from the old declaration. AutoType *OldAT = Old->getReturnType()->getContainedAutoType(); if (OldAT && OldAT->isDeduced()) { New->setType( SubstAutoType(New->getType(), OldAT->isDependentType() ? Context.DependentTy : OldAT->getDeducedType())); NewQType = Context.getCanonicalType( SubstAutoType(NewQType, OldAT->isDependentType() ? Context.DependentTy : OldAT->getDeducedType())); } } const CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old); CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New); if (OldMethod && NewMethod) { // Preserve triviality. NewMethod->setTrivial(OldMethod->isTrivial()); // MSVC allows explicit template specialization at class scope: // 2 CXXMethodDecls referring to the same function will be injected. // We don't want a redeclaration error. bool IsClassScopeExplicitSpecialization = OldMethod->isFunctionTemplateSpecialization() && NewMethod->isFunctionTemplateSpecialization(); bool isFriend = NewMethod->getFriendObjectKind(); if (!isFriend && NewMethod->getLexicalDeclContext()->isRecord() && !IsClassScopeExplicitSpecialization) { // -- Member function declarations with the same name and the // same parameter types cannot be overloaded if any of them // is a static member function declaration. if (OldMethod->isStatic() != NewMethod->isStatic()) { Diag(New->getLocation(), diag::err_ovl_static_nonstatic_member); Diag(OldLocation, PrevDiag) << Old << Old->getType(); return true; } // C++ [class.mem]p1: // [...] A member shall not be declared twice in the // member-specification, except that a nested class or member // class template can be declared and then later defined. if (ActiveTemplateInstantiations.empty()) { unsigned NewDiag; if (isa<CXXConstructorDecl>(OldMethod)) NewDiag = diag::err_constructor_redeclared; else if (isa<CXXDestructorDecl>(NewMethod)) NewDiag = diag::err_destructor_redeclared; else if (isa<CXXConversionDecl>(NewMethod)) NewDiag = diag::err_conv_function_redeclared; else NewDiag = diag::err_member_redeclared; Diag(New->getLocation(), NewDiag); } else { Diag(New->getLocation(), diag::err_member_redeclared_in_instantiation) << New << New->getType(); } Diag(OldLocation, PrevDiag) << Old << Old->getType(); return true; // Complain if this is an explicit declaration of a special // member that was initially declared implicitly. 
// // As an exception, it's okay to befriend such methods in order // to permit the implicit constructor/destructor/operator calls. } else if (OldMethod->isImplicit()) { if (isFriend) { NewMethod->setImplicit(); } else { Diag(NewMethod->getLocation(), diag::err_definition_of_implicitly_declared_member) << New << getSpecialMember(OldMethod); return true; } } else if (OldMethod->isExplicitlyDefaulted() && !isFriend) { Diag(NewMethod->getLocation(), diag::err_definition_of_explicitly_defaulted_member) << getSpecialMember(OldMethod); return true; } } // C++11 [dcl.attr.noreturn]p1: // The first declaration of a function shall specify the noreturn // attribute if any declaration of that function specifies the noreturn // attribute. const CXX11NoReturnAttr *NRA = New->getAttr<CXX11NoReturnAttr>(); if (NRA && !Old->hasAttr<CXX11NoReturnAttr>()) { Diag(NRA->getLocation(), diag::err_noreturn_missing_on_first_decl); Diag(Old->getFirstDecl()->getLocation(), diag::note_noreturn_missing_first_decl); } // C++11 [dcl.attr.depend]p2: // The first declaration of a function shall specify the // carries_dependency attribute for its declarator-id if any declaration // of the function specifies the carries_dependency attribute. const CarriesDependencyAttr *CDA = New->getAttr<CarriesDependencyAttr>(); if (CDA && !Old->hasAttr<CarriesDependencyAttr>()) { Diag(CDA->getLocation(), diag::err_carries_dependency_missing_on_first_decl) << 0/*Function*/; Diag(Old->getFirstDecl()->getLocation(), diag::note_carries_dependency_missing_first_decl) << 0/*Function*/; } // (C++98 8.3.5p3): // All declarations for a function shall agree exactly in both the // return type and the parameter-type-list. // We also want to respect all the extended bits except noreturn. // noreturn should now match unless the old type info didn't have it. QualType OldQTypeForComparison = OldQType; if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) { assert(OldQType == QualType(OldType, 0)); const FunctionType *OldTypeForComparison = Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true)); OldQTypeForComparison = QualType(OldTypeForComparison, 0); assert(OldQTypeForComparison.isCanonical()); } if (haveIncompatibleLanguageLinkages(Old, New)) { // As a special case, retain the language linkage from previous // declarations of a friend function as an extension. // // This liberal interpretation of C++ [class.friend]p3 matches GCC/MSVC // and is useful because there's otherwise no way to specify language // linkage within class scope. // // Check cautiously as the friend object kind isn't yet complete. if (New->getFriendObjectKind() != Decl::FOK_None) { Diag(New->getLocation(), diag::ext_retained_language_linkage) << New; Diag(OldLocation, PrevDiag); } else { Diag(New->getLocation(), diag::err_different_language_linkage) << New; Diag(OldLocation, PrevDiag); return true; } } if (OldQTypeForComparison == NewQType) return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld); if ((NewQType->isDependentType() || OldQType->isDependentType()) && New->isLocalExternDecl()) { // It's OK if we couldn't merge types for a local function declaraton // if either the old or new type is dependent. We'll merge the types // when we instantiate the function. return false; } // Fall through for conflicting redeclarations and redefinitions. } // C: Function types need to be compatible, not identical. This handles // duplicate function decls like "void f(int); void f(enum X);" properly. 
if (!getLangOpts().CPlusPlus && Context.typesAreCompatible(OldQType, NewQType)) { const FunctionType *OldFuncType = OldQType->getAs<FunctionType>(); const FunctionType *NewFuncType = NewQType->getAs<FunctionType>(); const FunctionProtoType *OldProto = nullptr; #if 1 // HLSL Change Starts - commenting this out rather than fixing for inout semantics - N/A for HLSL assert(!(MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) && (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) && "else fn with no prototype found"); (void)OldFuncType; (void)NewFuncType; (void)OldProto; #else if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) && (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) { // The old declaration provided a function prototype, but the // new declaration does not. Merge in the prototype. assert(!OldProto->hasExceptionSpec() && "Exception spec in C"); SmallVector<QualType, 16> ParamTypes(OldProto->param_types()); NewQType = Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes, OldProto->getExtProtoInfo()); New->setType(NewQType); New->setHasInheritedPrototype(); // Synthesize parameters with the same types. SmallVector<ParmVarDecl*, 16> Params; for (const auto &ParamType : OldProto->param_types()) { ParmVarDecl *Param = ParmVarDecl::Create(Context, New, SourceLocation(), SourceLocation(), nullptr, ParamType, /*TInfo=*/nullptr, SC_None, nullptr); Param->setScopeInfo(0, Params.size()); Param->setImplicit(); Params.push_back(Param); } New->setParams(Params); } #endif // HLSL Change Ends return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld); } #if 0 // HLSL Change Starts - commenting this out rather than fixing for inout semantics - N/A for HLSL // GNU C permits a K&R definition to follow a prototype declaration // if the declared types of the parameters in the K&R definition // match the types in the prototype declaration, even when the // promoted types of the parameters from the K&R definition differ // from the types in the prototype. GCC then keeps the types from // the prototype. // // If a variadic prototype is followed by a non-variadic K&R definition, // the K&R definition becomes variadic. This is sort of an edge case, but // it's legal per the standard depending on how you read C99 6.7.5.3p15 and // C99 6.9.1p8. if (!getLangOpts().CPlusPlus && Old->hasPrototype() && !New->hasPrototype() && New->getType()->getAs<FunctionProtoType>() && Old->getNumParams() == New->getNumParams()) { SmallVector<QualType, 16> ArgTypes; SmallVector<GNUCompatibleParamWarning, 16> Warnings; const FunctionProtoType *OldProto = Old->getType()->getAs<FunctionProtoType>(); const FunctionProtoType *NewProto = New->getType()->getAs<FunctionProtoType>(); // Determine whether this is the GNU C extension. 
QualType MergedReturn = Context.mergeTypes(OldProto->getReturnType(), NewProto->getReturnType()); bool LooseCompatible = !MergedReturn.isNull(); for (unsigned Idx = 0, End = Old->getNumParams(); LooseCompatible && Idx != End; ++Idx) { ParmVarDecl *OldParm = Old->getParamDecl(Idx); ParmVarDecl *NewParm = New->getParamDecl(Idx); if (Context.typesAreCompatible(OldParm->getType(), NewProto->getParamType(Idx))) { ArgTypes.push_back(NewParm->getType()); } else if (Context.typesAreCompatible(OldParm->getType(), NewParm->getType(), /*CompareUnqualified=*/true)) { GNUCompatibleParamWarning Warn = { OldParm, NewParm, NewProto->getParamType(Idx) }; Warnings.push_back(Warn); ArgTypes.push_back(NewParm->getType()); } else LooseCompatible = false; } if (LooseCompatible) { for (unsigned Warn = 0; Warn < Warnings.size(); ++Warn) { Diag(Warnings[Warn].NewParm->getLocation(), diag::ext_param_promoted_not_compatible_with_prototype) << Warnings[Warn].PromotedType << Warnings[Warn].OldParm->getType(); if (Warnings[Warn].OldParm->getLocation().isValid()) Diag(Warnings[Warn].OldParm->getLocation(), diag::note_previous_declaration); } if (MergeTypeWithOld) New->setType(Context.getFunctionType(MergedReturn, ArgTypes, OldProto->getExtProtoInfo())); return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld); } // Fall through to diagnose conflicting types. } #endif // HLSL Change Ends // A function that has already been declared has been redeclared or // defined with a different type; show an appropriate diagnostic. // If the previous declaration was an implicitly-generated builtin // declaration, then at the very least we should use a specialized note. unsigned BuiltinID; if (Old->isImplicit() && (BuiltinID = Old->getBuiltinID())) { // If it's actually a library-defined builtin function like 'malloc' // or 'printf', just warn about the incompatible redeclaration. if (Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) { Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New; Diag(OldLocation, diag::note_previous_builtin_declaration) << Old << Old->getType(); // If this is a global redeclaration, just forget hereafter // about the "builtin-ness" of the function. // // Doing this for local extern declarations is problematic. If // the builtin declaration remains visible, a second invalid // local declaration will produce a hard error; if it doesn't // remain visible, a single bogus local redeclaration (which is // actually only a warning) could break all the downstream code. if (!New->getLexicalDeclContext()->isFunctionOrMethod()) New->getIdentifier()->setBuiltinID(Builtin::NotBuiltin); return false; } PrevDiag = diag::note_previous_builtin_declaration; } Diag(New->getLocation(), diag::err_conflicting_types) << New->getDeclName(); Diag(OldLocation, PrevDiag) << Old << Old->getType(); return true; } /// \brief Completes the merge of two function declarations that are /// known to be compatible. /// /// This routine handles the merging of attributes and other /// properties of function declarations from the old declaration to /// the new declaration, once we know that New is in fact a /// redeclaration of Old. /// /// \returns false bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld) { // Merge the attributes mergeDeclAttributes(New, Old); // Merge "pure" flag. if (Old->isPure()) New->setPure(); // Merge "used" flag. if (Old->getMostRecentDecl()->isUsed(false)) New->setIsUsed(); // Merge attributes from the parameters. 
  // These can mismatch with K&R declarations.
  if (New->getNumParams() == Old->getNumParams())
    for (unsigned i = 0, e = New->getNumParams(); i != e; ++i) {
      ParmVarDecl *NewParam = New->getParamDecl(i);
      ParmVarDecl *OldParam = Old->getParamDecl(i);
      mergeParamDeclAttributes(NewParam, OldParam, *this);
      mergeParamDeclTypes(NewParam, OldParam, *this);
    }

  if (getLangOpts().CPlusPlus)
    return MergeCXXFunctionDecl(New, Old, S);

  // Merge the function types so that we get the composite types for the
  // return and argument types. Per C11 6.2.7/4, only update the type if the
  // old decl was visible.
  QualType Merged = Context.mergeTypes(Old->getType(), New->getType());
  if (!Merged.isNull() && MergeTypeWithOld)
    New->setType(Merged);

  return false;
}

void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
                                ObjCMethodDecl *oldMethod) {
  // Merge the attributes, including deprecated/unavailable
  AvailabilityMergeKind MergeKind =
    isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
                                                   : AMK_Override;
  mergeDeclAttributes(newMethod, oldMethod, MergeKind);

  // Merge attributes from the parameters.
  ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin(),
                                       oe = oldMethod->param_end();
  for (ObjCMethodDecl::param_iterator
         ni = newMethod->param_begin(), ne = newMethod->param_end();
       ni != ne && oi != oe; ++ni, ++oi)
    mergeParamDeclAttributes(*ni, *oi, *this);

  CheckObjCMethodOverride(newMethod, oldMethod);
}

/// MergeVarDeclTypes - We parsed a variable 'New' which has the same name and
/// scope as a previous declaration 'Old'. Figure out how to merge their types,
/// emitting diagnostics as appropriate.
///
/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
/// to here in AddInitializerToDecl. We can't check them before the initializer
/// is attached.
void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld,
                             ShadowMergeState& MergeState) { // HLSL Change - add merge state
  if (New->isInvalidDecl() || Old->isInvalidDecl())
    return;

  QualType MergedT;
  if (getLangOpts().CPlusPlus) {
    if (New->getType()->isUndeducedType()) {
      // We don't know what the new type is until the initializer is attached.
      return;
    } else if (Context.hasSameType(New->getType(), Old->getType())) {
      // These could still be something that needs exception specs checked.
      return MergeVarDeclExceptionSpecs(New, Old);
    }
    // C++ [basic.link]p10:
    //   [...] the types specified by all declarations referring to a given
    //   object or function shall be identical, except that declarations for an
    //   array object can specify array types that differ by the presence or
    //   absence of a major array bound (8.3.4).
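    //
    // An illustrative example (not from the original source):
    //   extern int arr[];    // incomplete array type
    //   extern int arr[10];  // OK; the merged type is int[10]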
    else if (Old->getType()->isIncompleteArrayType() &&
             New->getType()->isArrayType()) {
      const ArrayType *OldArray = Context.getAsArrayType(Old->getType());
      const ArrayType *NewArray = Context.getAsArrayType(New->getType());
      if (Context.hasSameType(OldArray->getElementType(),
                              NewArray->getElementType()))
        MergedT = New->getType();
    } else if (Old->getType()->isArrayType() &&
               New->getType()->isIncompleteArrayType()) {
      const ArrayType *OldArray = Context.getAsArrayType(Old->getType());
      const ArrayType *NewArray = Context.getAsArrayType(New->getType());
      if (Context.hasSameType(OldArray->getElementType(),
                              NewArray->getElementType()))
        MergedT = Old->getType();
    } else if (New->getType()->isObjCObjectPointerType() &&
               Old->getType()->isObjCObjectPointerType()) {
      MergedT = Context.mergeObjCGCQualifiers(New->getType(), Old->getType());
    }
  } else {
    // C 6.2.7p2:
    //   All declarations that refer to the same object or function shall have
    //   compatible type.
    MergedT = Context.mergeTypes(New->getType(), Old->getType());
  }
  if (MergedT.isNull()) {
    // It's OK if we couldn't merge types if either type is dependent, for a
    // block-scope variable. In other cases (static data members of class
    // templates, variable templates, ...), we require the types to be
    // equivalent.
    // FIXME: The C++ standard doesn't say anything about this.
    if ((New->getType()->isDependentType() ||
         Old->getType()->isDependentType()) && New->isLocalVarDecl()) {
      // If the old type was dependent, we can't merge with it, so the new type
      // becomes dependent for now. We'll reproduce the original type when we
      // instantiate the TypeSourceInfo for the variable.
      if (!New->getType()->isDependentType() && MergeTypeWithOld)
        New->setType(Context.DependentTy);
      return;
    }

    // FIXME: Even if this merging succeeds, some other non-visible declaration
    // of this variable might have an incompatible type. For instance:
    //
    //   extern int arr[];
    //   void f() { extern int arr[2]; }
    //   void g() { extern int arr[3]; }
    //
    // Neither C nor C++ requires a diagnostic for this, but we should still
    // try to diagnose it.
    // HLSL Change Starts
    if (MergeState == ShadowMergeState_Disallowed) {
      Diag(New->getLocation(), New->isThisDeclarationADefinition()
                                   ? diag::err_redefinition_different_type
                                   : diag::err_redeclaration_different_type)
          << New->getDeclName() << New->getType() << Old->getType();
      diag::kind PrevDiag;
      SourceLocation OldLocation;
      std::tie(PrevDiag, OldLocation) =
          getNoteDiagForInvalidRedeclaration(Old, New);
      Diag(OldLocation, PrevDiag);
      return New->setInvalidDecl();
    } else if (MergeState == ShadowMergeState_Possible) {
      Diag(New->getLocation(), diag::warn_hlsl_for_redefinition_different_type)
          << New->getDeclName() << New->getType() << Old->getType();
      Diag(Old->getLocation(), diag::note_previous_definition);
      MergeState = ShadowMergeState_Effective;
      MergeTypeWithOld = false;
    }
    // HLSL Change Ends
  }

  // Don't actually update the type on the new declaration if the old
  // declaration was an extern declaration in a different scope.
  if (MergeTypeWithOld)
    New->setType(MergedT);
}

static bool mergeTypeWithPrevious(Sema &S, VarDecl *NewVD, VarDecl *OldVD,
                                  LookupResult &Previous) {
  // C11 6.2.7p4:
  //   For an identifier with internal or external linkage declared
  //   in a scope in which a prior declaration of that identifier is
  //   visible, if the prior declaration specifies internal or
  //   external linkage, the type of the identifier at the later
  //   declaration becomes the composite type.
  //
  // If the variable isn't visible, we do not merge with its type.
  if (Previous.isShadowed())
    return false;

  if (S.getLangOpts().CPlusPlus) {
    // C++11 [dcl.array]p3:
    //   If there is a preceding declaration of the entity in the same
    //   scope in which the bound was specified, an omitted array bound
    //   is taken to be the same as in that earlier declaration.
    return NewVD->isPreviousDeclInSameBlockScope() ||
           (!OldVD->getLexicalDeclContext()->isFunctionOrMethod() &&
            !NewVD->getLexicalDeclContext()->isFunctionOrMethod());
  } else {
    // If the old declaration was function-local, don't merge with its
    // type unless we're in the same function.
    return !OldVD->getLexicalDeclContext()->isFunctionOrMethod() ||
           OldVD->getLexicalDeclContext() == NewVD->getLexicalDeclContext();
  }
}

/// MergeVarDecl - We just parsed a variable 'New' which has the same name
/// and scope as a previous declaration 'Old'. Figure out how to resolve this
/// situation, merging decls or emitting diagnostics as appropriate.
///
/// Tentative definition rules (C99 6.9.2p2) are checked by
/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
/// definitions here, since the initializer hasn't been attached.
///
void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous,
                        ShadowMergeState& MergeState) { // HLSL Change - add merge state
  // If the new decl is already invalid, don't do any other checking.
  if (New->isInvalidDecl())
    return;

  VarTemplateDecl *NewTemplate = New->getDescribedVarTemplate();

  // Verify the old decl was also a variable or variable template.
  VarDecl *Old = nullptr;
  VarTemplateDecl *OldTemplate = nullptr;
  if (Previous.isSingleResult()) {
    if (NewTemplate) {
      OldTemplate = dyn_cast<VarTemplateDecl>(Previous.getFoundDecl());
      Old = OldTemplate ? OldTemplate->getTemplatedDecl() : nullptr;

      if (auto *Shadow =
              dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
        if (checkUsingShadowRedecl<VarTemplateDecl>(*this, Shadow, NewTemplate))
          return New->setInvalidDecl();
    } else {
      Old = dyn_cast<VarDecl>(Previous.getFoundDecl());

      if (auto *Shadow =
              dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
        if (checkUsingShadowRedecl<VarDecl>(*this, Shadow, New))
          return New->setInvalidDecl();
    }
  }
  if (!Old) {
    Diag(New->getLocation(), diag::err_redefinition_different_kind)
        << New->getDeclName();
    Diag(Previous.getRepresentativeDecl()->getLocation(),
         diag::note_previous_definition);
    return New->setInvalidDecl();
  }

  if (!shouldLinkPossiblyHiddenDecl(Old, New))
    return;

  // Ensure the template parameters are compatible.
  if (NewTemplate &&
      !TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
                                      OldTemplate->getTemplateParameters(),
                                      /*Complain=*/true, TPL_TemplateMatch))
    return;

  // C++ [class.mem]p1:
  //   A member shall not be declared twice in the member-specification [...]
  //
  // Here, we need only consider static data members.
  if (Old->isStaticDataMember() && !New->isOutOfLine()) {
    Diag(New->getLocation(), diag::err_duplicate_member)
        << New->getIdentifier();
    Diag(Old->getLocation(), diag::note_previous_declaration);
    New->setInvalidDecl();
  }

  mergeDeclAttributes(New, Old);
  // Warn if an already-declared variable is made a weak_import in a subsequent
  // declaration
  if (New->hasAttr<WeakImportAttr>() &&
      Old->getStorageClass() == SC_None &&
      !Old->hasAttr<WeakImportAttr>()) {
    Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
    Diag(Old->getLocation(), diag::note_previous_definition);
    // Remove weak_import attribute on new declaration.
    New->dropAttr<WeakImportAttr>();
  }

  // Merge the types.
  VarDecl *MostRecent = Old->getMostRecentDecl();
  if (MostRecent != Old) {
    MergeVarDeclTypes(New, MostRecent,
                      mergeTypeWithPrevious(*this, New, MostRecent, Previous),
                      MergeState); // HLSL Change - add MergeState
    if (New->isInvalidDecl())
      return;
  }

  MergeVarDeclTypes(New, Old, mergeTypeWithPrevious(*this, New, Old, Previous),
                    MergeState); // HLSL Change - add MergeState
  if (New->isInvalidDecl())
    return;

  diag::kind PrevDiag;
  SourceLocation OldLocation;
  std::tie(PrevDiag, OldLocation) =
      getNoteDiagForInvalidRedeclaration(Old, New);

  // [dcl.stc]p8: Check if we have a non-static decl followed by a static.
  if (New->getStorageClass() == SC_Static &&
      !New->isStaticDataMember() &&
      Old->hasExternalFormalLinkage()) {
    if (getLangOpts().MicrosoftExt) {
      Diag(New->getLocation(), diag::ext_static_non_static)
          << New->getDeclName();
      Diag(OldLocation, PrevDiag);
    } else {
      Diag(New->getLocation(), diag::err_static_non_static)
          << New->getDeclName();
      Diag(OldLocation, PrevDiag);
      return New->setInvalidDecl();
    }
  }
  // C99 6.2.2p4:
  //   For an identifier declared with the storage-class specifier
  //   extern in a scope in which a prior declaration of that
  //   identifier is visible,23) if the prior declaration specifies
  //   internal or external linkage, the linkage of the identifier at
  //   the later declaration is the same as the linkage specified at
  //   the prior declaration. If no prior declaration is visible, or
  //   if the prior declaration specifies no linkage, then the
  //   identifier has external linkage.
  if (New->hasExternalStorage() && Old->hasLinkage())
    /* Okay */;
  else if (New->getCanonicalDecl()->getStorageClass() != SC_Static &&
           !New->isStaticDataMember() &&
           Old->getCanonicalDecl()->getStorageClass() == SC_Static) {
    Diag(New->getLocation(), diag::err_non_static_static)
        << New->getDeclName();
    Diag(OldLocation, PrevDiag);
    return New->setInvalidDecl();
  }

  // Check if extern is followed by non-extern and vice-versa.
  if (New->hasExternalStorage() &&
      !Old->hasLinkage() && Old->isLocalVarDeclOrParm()) {
    Diag(New->getLocation(), diag::err_extern_non_extern)
        << New->getDeclName();
    Diag(OldLocation, PrevDiag);
    return New->setInvalidDecl();
  }
  if (Old->hasLinkage() && New->isLocalVarDeclOrParm() &&
      !New->hasExternalStorage()) {
    Diag(New->getLocation(), diag::err_non_extern_extern)
        << New->getDeclName();
    Diag(OldLocation, PrevDiag);
    return New->setInvalidDecl();
  }

  // Variables with external linkage are analyzed in FinalizeDeclaratorGroup.

  // FIXME: The test for external storage here seems wrong? We still
  // need to check for mismatches.
  if (!New->hasExternalStorage() && !New->isFileVarDecl() &&
      // Don't complain about out-of-line definitions of static members.
      !(Old->getLexicalDeclContext()->isRecord() &&
        !New->getLexicalDeclContext()->isRecord())) {
    // HLSL Change Starts
    if (MergeState == ShadowMergeState_Disallowed) {
      Diag(New->getLocation(), diag::err_redefinition) << New->getDeclName();
      Diag(OldLocation, PrevDiag);
      return New->setInvalidDecl();
    } else if (MergeState == ShadowMergeState_Possible) {
      Diag(New->getLocation(), diag::warn_hlsl_for_redefinition)
          << New->getDeclName();
      Diag(Old->getLocation(), diag::note_previous_definition);
      MergeState = ShadowMergeState_Effective;
    }
    // HLSL Change Ends
  }

  if (New->getTLSKind() != Old->getTLSKind()) {
    if (!Old->getTLSKind()) {
      Diag(New->getLocation(), diag::err_thread_non_thread)
          << New->getDeclName();
      Diag(OldLocation, PrevDiag);
    } else if (!New->getTLSKind()) {
      Diag(New->getLocation(), diag::err_non_thread_thread)
          << New->getDeclName();
      Diag(OldLocation, PrevDiag);
    } else {
      // Do not allow redeclaration to change the variable between requiring
      // static and dynamic initialization.
      // FIXME: GCC allows this, but uses the TLS keyword on the first
      // declaration to determine the kind. Do we need to be compatible here?
      Diag(New->getLocation(), diag::err_thread_thread_different_kind)
          << New->getDeclName()
          << (New->getTLSKind() == VarDecl::TLS_Dynamic);
      Diag(OldLocation, PrevDiag);
    }
  }

  // C++ doesn't have tentative definitions, so go right ahead and check here.
  VarDecl *Def;
  if (getLangOpts().CPlusPlus &&
      New->isThisDeclarationADefinition() == VarDecl::Definition &&
      (Def = Old->getDefinition())) {
    NamedDecl *Hidden = nullptr;
    if (!hasVisibleDefinition(Def, &Hidden) &&
        (New->getFormalLinkage() == InternalLinkage ||
         New->getDescribedVarTemplate() ||
         New->getNumTemplateParameterLists() ||
         New->getDeclContext()->isDependentContext())) {
      // The previous definition is hidden, and multiple definitions are
      // permitted (in separate TUs). Form another definition of it.
    } else {
      // HLSL Change Starts
      if (MergeState == ShadowMergeState_Disallowed) {
        Diag(New->getLocation(), diag::err_redefinition) << New;
        Diag(Def->getLocation(), diag::note_previous_definition);
        New->setInvalidDecl();
        return;
      } else if (MergeState == ShadowMergeState_Possible) {
        Diag(New->getLocation(), diag::warn_hlsl_for_redefinition)
            << New->getDeclName();
        Diag(Old->getLocation(), diag::note_previous_definition);
        MergeState = ShadowMergeState_Effective;
      }
      // HLSL Change Ends
    }
  }

  if (haveIncompatibleLanguageLinkages(Old, New)) {
    Diag(New->getLocation(), diag::err_different_language_linkage) << New;
    Diag(OldLocation, PrevDiag);
    New->setInvalidDecl();
    return;
  }

  if (MergeState != ShadowMergeState_Effective) { // HLSL Change - avoid merging when shadowing
    // Merge "used" flag.
    if (Old->getMostRecentDecl()->isUsed(false))
      New->setIsUsed();

    // Keep a chain of previous declarations.
    New->setPreviousDecl(Old);
    if (NewTemplate)
      NewTemplate->setPreviousDecl(OldTemplate);

    // Inherit access appropriately.
    New->setAccess(Old->getAccess());
    if (NewTemplate)
      NewTemplate->setAccess(New->getAccess());
  } // HLSL Change - close block conditional on shadow merge state
}

/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed.
Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                       DeclSpec &DS) {
  return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg());
}

// The MS ABI changed between VS2013 and VS2015 with regard to numbers used to
// disambiguate entities defined in different scopes.
// While the VS2015 ABI fixes potential miscompiles, it also breaks
// compatibility. We will pick our mangling number depending on which version
// of MSVC is being targeted.
static unsigned getMSManglingNumber(const LangOptions &LO, Scope *S) {
  return LO.isCompatibleWithMSVC(LangOptions::MSVC2015)
             ? S->getMSCurManglingNumber()
             : S->getMSLastManglingNumber();
}

void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) {
  if (!Context.getLangOpts().CPlusPlus)
    return;

  if (isa<CXXRecordDecl>(Tag->getParent())) {
    // If this tag is the direct child of a class, number it if
    // it is anonymous.
    if (!Tag->getName().empty() || Tag->getTypedefNameForAnonDecl())
      return;
    MangleNumberingContext &MCtx =
        Context.getManglingNumberContext(Tag->getParent());
    Context.setManglingNumber(
        Tag, MCtx.getManglingNumber(
                 Tag, getMSManglingNumber(getLangOpts(), TagScope)));
    return;
  }

  // If this tag isn't a direct child of a class, number it if it is local.
  Decl *ManglingContextDecl;
  if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
          Tag->getDeclContext(), ManglingContextDecl)) {
    Context.setManglingNumber(
        Tag, MCtx->getManglingNumber(
                 Tag, getMSManglingNumber(getLangOpts(), TagScope)));
  }
}

void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                        TypedefNameDecl *NewTD) {
  // Do nothing if the tag is not anonymous or already has an
  // associated typedef (from an earlier typedef in this decl group).
  if (TagFromDeclSpec->getIdentifier())
    return;
  if (TagFromDeclSpec->getTypedefNameForAnonDecl())
    return;

  // A well-formed anonymous tag must always be a TUK_Definition.
  assert(TagFromDeclSpec->isThisDeclarationADefinition());

  // The type must match the tag exactly; no qualifiers allowed.
  if (!Context.hasSameType(NewTD->getUnderlyingType(),
                           Context.getTagDeclType(TagFromDeclSpec)))
    return;

  // If we've already computed linkage for the anonymous tag, then
  // adding a typedef name for the anonymous decl can change that
  // linkage, which might be a serious problem. Diagnose this as
  // unsupported and ignore the typedef name. TODO: we should
  // pursue this as a language defect and establish a formal rule
  // for how to handle it.
  if (TagFromDeclSpec->hasLinkageBeenComputed()) {
    Diag(NewTD->getLocation(), diag::err_typedef_changes_linkage);

    SourceLocation tagLoc = TagFromDeclSpec->getInnerLocStart();
    tagLoc = getLocForEndOfToken(tagLoc);

    llvm::SmallString<40> textToInsert;
    textToInsert += ' ';
    textToInsert += NewTD->getIdentifier()->getName();
    Diag(tagLoc, diag::note_typedef_changes_linkage)
        << FixItHint::CreateInsertion(tagLoc, textToInsert);
    return;
  }

  // Otherwise, set this as the anon-decl typedef for the tag.
  TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
}

static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
  switch (T) {
  case DeclSpec::TST_class:
    return 0;
  case DeclSpec::TST_struct:
    return 1;
  case DeclSpec::TST_interface:
    return 2;
  case DeclSpec::TST_union:
    return 3;
  case DeclSpec::TST_enum:
    return 4;
  default:
    llvm_unreachable("unexpected type specifier");
  }
}

/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
/// parameters to cope with template friend declarations.
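///
/// An illustrative example (not from the original source):
/// \code
///   struct S;   // declares the tag 'S'; no declarator needed
///   int;        // declares nothing; diagnosed as an extension in C
/// \endcode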
Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                       DeclSpec &DS,
                                       MultiTemplateParamsArg TemplateParams,
                                       bool IsExplicitInstantiation) {
  Decl *TagD = nullptr;
  TagDecl *Tag = nullptr;
  if (DS.getTypeSpecType() == DeclSpec::TST_class ||
      DS.getTypeSpecType() == DeclSpec::TST_struct ||
      DS.getTypeSpecType() == DeclSpec::TST_interface ||
      DS.getTypeSpecType() == DeclSpec::TST_union ||
      DS.getTypeSpecType() == DeclSpec::TST_enum) {
    TagD = DS.getRepAsDecl();

    if (!TagD) // We probably had an error
      return nullptr;

    // Note that the above type specs guarantee that the
    // type rep is a Decl, whereas in many of the others
    // it's a Type.
    if (isa<TagDecl>(TagD))
      Tag = cast<TagDecl>(TagD);
    else if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(TagD))
      Tag = CTD->getTemplatedDecl();
  }

  if (Tag) {
    handleTagNumbering(Tag, S);
    Tag->setFreeStanding();
    if (Tag->isInvalidDecl())
      return Tag;
  }

  if (unsigned TypeQuals = DS.getTypeQualifiers()) {
    // Enforce C99 6.7.3p2: "Types other than pointer types derived from object
    // or incomplete types shall not be restrict-qualified."
    if (TypeQuals & DeclSpec::TQ_restrict)
      Diag(DS.getRestrictSpecLoc(),
           diag::err_typecheck_invalid_restrict_not_pointer_noarg)
          << DS.getSourceRange();
  }

  if (DS.isConstexprSpecified()) {
    // C++0x [dcl.constexpr]p1: constexpr can only be applied to declarations
    // and definitions of functions and variables.
    if (Tag)
      Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
          << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType());
    else
      Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_no_declarators);
    // Don't emit warnings after this error.
    return TagD;
  }

  DiagnoseFunctionSpecifiers(DS);

  if (DS.isFriendSpecified()) {
    // If we're dealing with a decl but not a TagDecl, assume that
    // whatever routines created it handled the friendship aspect.
    if (TagD && !Tag)
      return nullptr;
    return ActOnFriendTypeDecl(S, DS, TemplateParams);
  }

  const CXXScopeSpec &SS = DS.getTypeSpecScope();
  bool IsExplicitSpecialization =
      !TemplateParams.empty() && TemplateParams.back()->size() == 0;
  if (Tag && SS.isNotEmpty() && !Tag->isCompleteDefinition() &&
      !IsExplicitInstantiation && !IsExplicitSpecialization) {
    // Per C++ [dcl.type.elab]p1, a class declaration cannot have a
    // nested-name-specifier unless it is an explicit instantiation
    // or an explicit specialization.
    // Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
    Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
        << GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << SS.getRange();
    return nullptr;
  }

  // Track whether this decl-specifier declares anything.
  bool DeclaresAnything = true;

  // Handle anonymous struct definitions.
  if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
    if (!Record->getDeclName() && Record->isCompleteDefinition() &&
        DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
      if (getLangOpts().CPlusPlus ||
          Record->getDeclContext()->isRecord())
        return BuildAnonymousStructOrUnion(S, DS, AS, Record,
                                           Context.getPrintingPolicy());

      DeclaresAnything = false;
    }
  }

  // C11 6.7.2.1p2:
  //   A struct-declaration that does not declare an anonymous structure or
  //   anonymous union shall contain a struct-declarator-list.
  //
  // This rule also existed in C89 and C99; the grammar for struct-declaration
  // did not permit a struct-declaration without a struct-declarator-list.
  if (!getLangOpts().CPlusPlus && CurContext->isRecord() &&
      DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
    // Check for Microsoft C extension: anonymous struct/union member.
    // Handle 2 kinds of anonymous struct/union:
    //   struct STRUCT;
    //   union UNION;
    // and
    //   STRUCT_TYPE;  <- where STRUCT_TYPE is a typedef struct.
    //   UNION_TYPE;   <- where UNION_TYPE is a typedef union.
    if ((Tag && Tag->getDeclName()) ||
        DS.getTypeSpecType() == DeclSpec::TST_typename) {
      RecordDecl *Record = nullptr;
      if (Tag)
        Record = dyn_cast<RecordDecl>(Tag);
      else if (const RecordType *RT =
                   DS.getRepAsType().get()->getAsStructureType())
        Record = RT->getDecl();
      else if (const RecordType *UT =
                   DS.getRepAsType().get()->getAsUnionType())
        Record = UT->getDecl();

      if (Record && getLangOpts().MicrosoftExt) {
        Diag(DS.getLocStart(), diag::ext_ms_anonymous_record)
            << Record->isUnion() << DS.getSourceRange();
        return BuildMicrosoftCAnonymousStruct(S, DS, Record);
      }

      DeclaresAnything = false;
    }
  }

  // Skip all the checks below if we have a type error.
  if (DS.getTypeSpecType() == DeclSpec::TST_error ||
      (TagD && TagD->isInvalidDecl()))
    return TagD;

  if (getLangOpts().CPlusPlus &&
      DS.getStorageClassSpec() != DeclSpec::SCS_typedef)
    if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag))
      if (Enum->enumerator_begin() == Enum->enumerator_end() &&
          !Enum->getIdentifier() && !Enum->isInvalidDecl())
        DeclaresAnything = false;

  if (!DS.isMissingDeclaratorOk()) {
    // Customize diagnostic for a typedef missing a name.
    if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef)
      Diag(DS.getLocStart(), diag::ext_typedef_without_a_name)
          << DS.getSourceRange();
    else
      DeclaresAnything = false;
  }

  if (DS.isModulePrivateSpecified() &&
      Tag && Tag->getDeclContext()->isFunctionOrMethod())
    Diag(DS.getModulePrivateSpecLoc(), diag::err_module_private_local_class)
        << Tag->getTagKind()
        << FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());

  ActOnDocumentableDecl(TagD);

  // C 6.7/2:
  //   A declaration [...] shall declare at least a declarator [...], a tag,
  //   or the members of an enumeration.
  // C++ [dcl.dcl]p3:
  //   [If there are no declarators], and except for the declaration of an
  //   unnamed bit-field, the decl-specifier-seq shall introduce one or more
  //   names into the program, or shall redeclare a name introduced by a
  //   previous declaration.
  if (!DeclaresAnything) {
    // In C, we allow this as a (popular) extension / bug. Don't bother
    // producing further diagnostics for redundant qualifiers after this.
    Diag(DS.getLocStart(), diag::ext_no_declarators) << DS.getSourceRange();
    return TagD;
  }

  // C++ [dcl.stc]p1:
  //   If a storage-class-specifier appears in a decl-specifier-seq, [...] the
  //   init-declarator-list of the declaration shall not be empty.
  // C++ [dcl.fct.spec]p1:
  //   If a cv-qualifier appears in a decl-specifier-seq, the
  //   init-declarator-list of the declaration shall not be empty.
  //
  // Spurious qualifiers here appear to be valid in C.
  unsigned DiagID = diag::warn_standalone_specifier;
  if (getLangOpts().CPlusPlus)
    DiagID = diag::ext_standalone_specifier;

  // Note that a linkage-specification sets a storage class, but
  // 'extern "C" struct foo;' is actually valid and not theoretically
  // useless.
  if (DeclSpec::SCS SCS = DS.getStorageClassSpec()) {
    if (SCS == DeclSpec::SCS_mutable)
      // Since mutable is not a viable storage class specifier in C, there is
      // no reason to treat it as an extension. Instead, diagnose as an error.
      Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_nonmember);
    else if (!DS.isExternInLinkageSpec() && SCS != DeclSpec::SCS_typedef)
      Diag(DS.getStorageClassSpecLoc(), DiagID)
          << DeclSpec::getSpecifierName(SCS);
  }

  if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
    Diag(DS.getThreadStorageClassSpecLoc(), DiagID)
        << DeclSpec::getSpecifierName(TSCS);
  if (DS.getTypeQualifiers()) {
    if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
      Diag(DS.getConstSpecLoc(), DiagID) << "const";
    if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
      Diag(DS.getConstSpecLoc(), DiagID) << "volatile";
    // Restrict is covered above.
    if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
      Diag(DS.getAtomicSpecLoc(), DiagID) << "_Atomic";
  }

  // Warn about ignored type attributes, for example:
  //   __attribute__((aligned)) struct A;
  // Attributes should be placed after tag to apply to type declaration.
  if (!DS.getAttributes().empty()) {
    DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
    if (TypeSpecType == DeclSpec::TST_class ||
        TypeSpecType == DeclSpec::TST_struct ||
        TypeSpecType == DeclSpec::TST_interface ||
        TypeSpecType == DeclSpec::TST_union ||
        TypeSpecType == DeclSpec::TST_enum) {
      for (AttributeList *attrs = DS.getAttributes().getList(); attrs;
           attrs = attrs->getNext())
        Diag(attrs->getLoc(), diag::warn_declspec_attribute_ignored)
            << attrs->getName() << GetDiagnosticTypeSpecifierID(TypeSpecType);
    }
  }

  return TagD;
}

/// We are trying to inject an anonymous member into the given scope;
/// check if there's an existing declaration that can't be overloaded.
///
/// \return true if this is a forbidden redeclaration
static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
                                         Scope *S,
                                         DeclContext *Owner,
                                         DeclarationName Name,
                                         SourceLocation NameLoc,
                                         unsigned diagnostic) {
  LookupResult R(SemaRef, Name, NameLoc, Sema::LookupMemberName,
                 Sema::ForRedeclaration);
  if (!SemaRef.LookupName(R, S)) return false;

  if (R.getAsSingle<TagDecl>())
    return false;

  // Pick a representative declaration.
  NamedDecl *PrevDecl = R.getRepresentativeDecl()->getUnderlyingDecl();
  assert(PrevDecl && "Expected a non-null Decl");

  if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
    return false;

  SemaRef.Diag(NameLoc, diagnostic) << Name;
  SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);

  return true;
}

/// InjectAnonymousStructOrUnionMembers - Inject the members of the
/// anonymous struct or union AnonRecord into the owning context Owner
/// and scope S. This routine will be invoked just after we realize
/// that an unnamed union or struct is actually an anonymous union
/// or struct, e.g.,
///
/// @code
/// union {
///   int i;
///   float f;
/// }; // InjectAnonymousStructOrUnionMembers called here to inject i and
///    // f into the surrounding scope.
/// @endcode
///
/// This routine is recursive, injecting the names of nested anonymous
/// structs/unions into the owning context and scope as well.
static bool
InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
                                    DeclContext *Owner,
                                    RecordDecl *AnonRecord, AccessSpecifier AS,
                                    SmallVectorImpl<NamedDecl *> &Chaining,
                                    bool MSAnonStruct) {
  unsigned diagKind
    = AnonRecord->isUnion() ? diag::err_anonymous_union_member_redecl
                            : diag::err_anonymous_struct_member_redecl;

  bool Invalid = false;

  // Look at every FieldDecl and IndirectFieldDecl with a name.
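  // An illustrative example (not from the original source): given
  //   union { struct { int x; }; float f; };
  // both 'x' and 'f' are injected into the surrounding scope; 'x' is
  // reached through a chain of IndirectFieldDecls built below.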
  for (auto *D : AnonRecord->decls()) {
    if ((isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) &&
        cast<NamedDecl>(D)->getDeclName()) {
      ValueDecl *VD = cast<ValueDecl>(D);
      if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
                                       VD->getLocation(), diagKind)) {
        // C++ [class.union]p2:
        //   The names of the members of an anonymous union shall be
        //   distinct from the names of any other entity in the
        //   scope in which the anonymous union is declared.
        Invalid = true;
      } else {
        // C++ [class.union]p2:
        //   For the purpose of name lookup, after the anonymous union
        //   definition, the members of the anonymous union are
        //   considered to have been defined in the scope in which the
        //   anonymous union is declared.
        unsigned OldChainingSize = Chaining.size();
        if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
          Chaining.append(IF->chain_begin(), IF->chain_end());
        else
          Chaining.push_back(VD);

        assert(Chaining.size() >= 2);
        NamedDecl **NamedChain =
          new (SemaRef.Context)NamedDecl*[Chaining.size()];
        for (unsigned i = 0; i < Chaining.size(); i++)
          NamedChain[i] = Chaining[i];

        IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
            SemaRef.Context, Owner, VD->getLocation(), VD->getIdentifier(),
            VD->getType(), NamedChain, Chaining.size());

        for (const auto *Attr : VD->attrs())
          IndirectField->addAttr(Attr->clone(SemaRef.Context));

        IndirectField->setAccess(AS);
        IndirectField->setImplicit();
        SemaRef.PushOnScopeChains(IndirectField, S);

        // That includes picking up the appropriate access specifier.
        if (AS != AS_none) IndirectField->setAccess(AS);

        Chaining.resize(OldChainingSize);
      }
    }
  }

  return Invalid;
}

/// StorageClassSpecToVarDeclStorageClass - Maps a DeclSpec::SCS to
/// a VarDecl::StorageClass. Any error reporting is up to the caller:
/// illegal input values are mapped to SC_None.
static StorageClass
StorageClassSpecToVarDeclStorageClass(const DeclSpec &DS) {
  DeclSpec::SCS StorageClassSpec = DS.getStorageClassSpec();
  assert(StorageClassSpec != DeclSpec::SCS_typedef &&
         "Parser allowed 'typedef' as storage class VarDecl.");
  switch (StorageClassSpec) {
  case DeclSpec::SCS_unspecified:    return SC_None;
  case DeclSpec::SCS_extern:
    if (DS.isExternInLinkageSpec())
      return SC_None;
    return SC_Extern;
  case DeclSpec::SCS_static:         return SC_Static;
  case DeclSpec::SCS_auto:           return SC_Auto;
  case DeclSpec::SCS_register:       return SC_Register;
  case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
  // Illegal SCSs map to None: error reporting is up to the caller.
  case DeclSpec::SCS_mutable:        // Fall through.
  case DeclSpec::SCS_typedef:        return SC_None;
  }
  llvm_unreachable("unknown storage class specifier");
}

static SourceLocation findDefaultInitializer(const CXXRecordDecl *Record) {
  assert(Record->hasInClassInitializer());

  for (const auto *I : Record->decls()) {
    const auto *FD = dyn_cast<FieldDecl>(I);
    if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
      FD = IFD->getAnonField();
    if (FD && FD->hasInClassInitializer())
      return FD->getLocation();
  }

  llvm_unreachable("couldn't find in-class initializer");
}

static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
                                      SourceLocation DefaultInitLoc) {
  if (!Parent->isUnion() || !Parent->hasInClassInitializer())
    return;

  S.Diag(DefaultInitLoc, diag::err_multiple_mem_union_initialization);
  S.Diag(findDefaultInitializer(Parent), diag::note_previous_initializer) << 0;
}

static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
                                      CXXRecordDecl *AnonUnion) {
  if (!Parent->isUnion() || !Parent->hasInClassInitializer())
    return;

  checkDuplicateDefaultInit(S, Parent, findDefaultInitializer(AnonUnion));
}

/// BuildAnonymousStructOrUnion - Handle the declaration of an
/// anonymous structure or union. Anonymous unions are a C++ feature
/// (C++ [class.union]) and a C11 feature; anonymous structures
/// are a C11 feature and GNU C++ extension.
Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                        AccessSpecifier AS,
                                        RecordDecl *Record,
                                        const PrintingPolicy &Policy) {
  DeclContext *Owner = Record->getDeclContext();

  // Diagnose whether this anonymous struct/union is an extension.
  if (Record->isUnion() && !getLangOpts().CPlusPlus && !getLangOpts().C11)
    Diag(Record->getLocation(), diag::ext_anonymous_union);
  else if (!Record->isUnion() && getLangOpts().CPlusPlus)
    Diag(Record->getLocation(), diag::ext_gnu_anonymous_struct);
  else if (!Record->isUnion() && !getLangOpts().C11)
    Diag(Record->getLocation(), diag::ext_c11_anonymous_struct);

  // C and C++ require different kinds of checks for anonymous
  // structs/unions.
  bool Invalid = false;
  if (getLangOpts().CPlusPlus) {
    const char *PrevSpec = nullptr;
    unsigned DiagID;
    if (Record->isUnion()) {
      // C++ [class.union]p6:
      //   Anonymous unions declared in a named namespace or in the
      //   global namespace shall be declared static.
      if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
          (isa<TranslationUnitDecl>(Owner) ||
           (isa<NamespaceDecl>(Owner) &&
            cast<NamespaceDecl>(Owner)->getDeclName()))) {
        Diag(Record->getLocation(), diag::err_anonymous_union_not_static)
          << FixItHint::CreateInsertion(Record->getLocation(), "static ");

        // Recover by adding 'static'.
        DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
                               PrevSpec, DiagID, Policy);
      }
      // C++ [class.union]p6:
      //   A storage class is not allowed in a declaration of an
      //   anonymous union in a class scope.
      else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
               isa<RecordDecl>(Owner)) {
        Diag(DS.getStorageClassSpecLoc(),
             diag::err_anonymous_union_with_storage_spec)
          << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());

        // Recover by removing the storage specifier.
        DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified,
                               SourceLocation(),
                               PrevSpec, DiagID, Context.getPrintingPolicy());
      }
    }

    // Ignore const/volatile/restrict qualifiers.
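    // An illustrative example (not from the original source):
    //   const union { int i; };  // the 'const' is diagnosed and dropped below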
    if (DS.getTypeQualifiers()) {
      if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
        Diag(DS.getConstSpecLoc(), diag::ext_anonymous_struct_union_qualified)
          << Record->isUnion() << "const"
          << FixItHint::CreateRemoval(DS.getConstSpecLoc());
      if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
        Diag(DS.getVolatileSpecLoc(),
             diag::ext_anonymous_struct_union_qualified)
          << Record->isUnion() << "volatile"
          << FixItHint::CreateRemoval(DS.getVolatileSpecLoc());
      if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
        Diag(DS.getRestrictSpecLoc(),
             diag::ext_anonymous_struct_union_qualified)
          << Record->isUnion() << "restrict"
          << FixItHint::CreateRemoval(DS.getRestrictSpecLoc());
      if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
        Diag(DS.getAtomicSpecLoc(),
             diag::ext_anonymous_struct_union_qualified)
          << Record->isUnion() << "_Atomic"
          << FixItHint::CreateRemoval(DS.getAtomicSpecLoc());

      DS.ClearTypeQualifiers();
    }

    // C++ [class.union]p2:
    //   The member-specification of an anonymous union shall only
    //   define non-static data members. [Note: nested types and
    //   functions cannot be declared within an anonymous union. ]
    for (auto *Mem : Record->decls()) {
      if (auto *FD = dyn_cast<FieldDecl>(Mem)) {
        // C++ [class.union]p3:
        //   An anonymous union shall not have private or protected
        //   members (clause 11).
        assert(FD->getAccess() != AS_none);
        if (FD->getAccess() != AS_public) {
          Diag(FD->getLocation(), diag::err_anonymous_record_nonpublic_member)
            << (int)Record->isUnion()
            << (int)(FD->getAccess() == AS_protected);
          Invalid = true;
        }

        // C++ [class.union]p1
        //   An object of a class with a non-trivial constructor, a non-trivial
        //   copy constructor, a non-trivial destructor, or a non-trivial copy
        //   assignment operator cannot be a member of a union, nor can an
        //   array of such objects.
        if (CheckNontrivialField(FD))
          Invalid = true;
      } else if (Mem->isImplicit()) {
        // Any implicit members are fine.
      } else if (isa<TagDecl>(Mem) && Mem->getDeclContext() != Record) {
        // This is a type that showed up in an
        // elaborated-type-specifier inside the anonymous struct or
        // union, but which actually declares a type outside of the
        // anonymous struct or union. It's okay.
      } else if (auto *MemRecord = dyn_cast<RecordDecl>(Mem)) {
        if (!MemRecord->isAnonymousStructOrUnion() &&
            MemRecord->getDeclName()) {
          // Visual C++ allows type definition in anonymous struct or union.
          if (getLangOpts().MicrosoftExt)
            Diag(MemRecord->getLocation(),
                 diag::ext_anonymous_record_with_type)
              << (int)Record->isUnion();
          else {
            // This is a nested type declaration.
            Diag(MemRecord->getLocation(),
                 diag::err_anonymous_record_with_type)
              << (int)Record->isUnion();
            Invalid = true;
          }
        } else {
          // This is an anonymous type definition within another anonymous
          // type. This is a popular extension, provided by Plan9, MSVC and
          // GCC, but not part of standard C++.
          Diag(MemRecord->getLocation(),
               diag::ext_anonymous_record_with_anonymous_type)
            << (int)Record->isUnion();
        }
      } else if (isa<AccessSpecDecl>(Mem)) {
        // Any access specifier is fine.
      } else if (isa<StaticAssertDecl>(Mem)) {
        // In C++1z, static_assert declarations are also fine.
      } else {
        // We have something that isn't a non-static data
        // member. Complain about it.
        unsigned DK = diag::err_anonymous_record_bad_member;
        if (isa<TypeDecl>(Mem))
          DK = diag::err_anonymous_record_with_type;
        else if (isa<FunctionDecl>(Mem))
          DK = diag::err_anonymous_record_with_function;
        else if (isa<VarDecl>(Mem))
          DK = diag::err_anonymous_record_with_static;

        // Visual C++ allows type definition in anonymous struct or union.
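        // An illustrative example (not from the original source):
        //   union { struct T { int x; }; int i; };  // declares 'T' inside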
        if (getLangOpts().MicrosoftExt &&
            DK == diag::err_anonymous_record_with_type)
          Diag(Mem->getLocation(), diag::ext_anonymous_record_with_type)
            << (int)Record->isUnion();
        else {
          Diag(Mem->getLocation(), DK) << (int)Record->isUnion();
          Invalid = true;
        }
      }
    }

    // C++11 [class.union]p8 (DR1460):
    //   At most one variant member of a union may have a
    //   brace-or-equal-initializer.
    if (cast<CXXRecordDecl>(Record)->hasInClassInitializer() &&
        Owner->isRecord())
      checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Owner),
                                cast<CXXRecordDecl>(Record));
  }

  if (!Record->isUnion() && !Owner->isRecord()) {
    Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
      << (int)getLangOpts().CPlusPlus;
    Invalid = true;
  }

  // Mock up a declarator.
  Declarator Dc(DS, Declarator::MemberContext);
  TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
  assert(TInfo && "couldn't build declarator info for anonymous struct/union");

  // Create a declaration for this anonymous struct/union.
  NamedDecl *Anon = nullptr;
  if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
    Anon = FieldDecl::Create(Context, OwningClass,
                             DS.getLocStart(),
                             Record->getLocation(),
                             /*IdentifierInfo=*/nullptr,
                             Context.getTypeDeclType(Record),
                             TInfo,
                             /*BitWidth=*/nullptr, /*Mutable=*/false,
                             /*InitStyle=*/ICIS_NoInit);
    Anon->setAccess(AS);
    if (getLangOpts().CPlusPlus)
      FieldCollector->Add(cast<FieldDecl>(Anon));
  } else {
    DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
    StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
    if (SCSpec == DeclSpec::SCS_mutable) {
      // mutable can only appear on non-static class members, so it's always
      // an error here
      Diag(Record->getLocation(), diag::err_mutable_nonmember);
      Invalid = true;
      SC = SC_None;
    }

    Anon = VarDecl::Create(Context, Owner,
                           DS.getLocStart(),
                           Record->getLocation(), /*IdentifierInfo=*/nullptr,
                           Context.getTypeDeclType(Record),
                           TInfo, SC);

    // Default-initialize the implicit variable. This initialization will be
    // trivial in almost all cases, except if a union member has an in-class
    // initializer:
    //   union { int n = 0; };
    ActOnUninitializedDecl(Anon, /*TypeMayContainAuto=*/false);
  }
  Anon->setImplicit();

  // Mark this as an anonymous struct/union type.
  Record->setAnonymousStructOrUnion(true);

  // Add the anonymous struct/union object to the current
  // context. We'll be referencing this object when we refer to one of
  // its members.
  Owner->addDecl(Anon);

  // Inject the members of the anonymous struct/union into the owning
  // context and into the identifier resolver chain for name lookup
  // purposes.
  SmallVector<NamedDecl*, 2> Chain;
  Chain.push_back(Anon);

  if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS, Chain,
                                          false))
    Invalid = true;

  if (VarDecl *NewVD = dyn_cast<VarDecl>(Anon)) {
    if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
      Decl *ManglingContextDecl;
      if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
              NewVD->getDeclContext(), ManglingContextDecl)) {
        Context.setManglingNumber(
            NewVD, MCtx->getManglingNumber(
                       NewVD, getMSManglingNumber(getLangOpts(), S)));
        Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
      }
    }
  }

  if (Invalid)
    Anon->setInvalidDecl();

  return Anon;
}

/// BuildMicrosoftCAnonymousStruct - Handle the declaration of a
/// Microsoft C anonymous structure.
/// Ref: http://msdn.microsoft.com/en-us/library/z2cx9y4f.aspx
/// Example:
///
/// struct A { int a; };
/// struct B { struct A; int b; };
///
/// void foo() {
///   B var;
///   var.a = 3;
/// }
///
Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                           RecordDecl *Record) {
  assert(Record && "expected a record!");

  // Mock up a declarator.
  Declarator Dc(DS, Declarator::TypeNameContext);
  TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
  assert(TInfo && "couldn't build declarator info for anonymous struct");

  auto *ParentDecl = cast<RecordDecl>(CurContext);
  QualType RecTy = Context.getTypeDeclType(Record);

  // Create a declaration for this anonymous struct.
  NamedDecl *Anon = FieldDecl::Create(Context,
                                      ParentDecl,
                                      DS.getLocStart(),
                                      DS.getLocStart(),
                                      /*IdentifierInfo=*/nullptr,
                                      RecTy,
                                      TInfo,
                                      /*BitWidth=*/nullptr, /*Mutable=*/false,
                                      /*InitStyle=*/ICIS_NoInit);
  Anon->setImplicit();

  // Add the anonymous struct object to the current context.
  CurContext->addDecl(Anon);

  // Inject the members of the anonymous struct into the current
  // context and into the identifier resolver chain for name lookup
  // purposes.
  SmallVector<NamedDecl*, 2> Chain;
  Chain.push_back(Anon);

  RecordDecl *RecordDef = Record->getDefinition();
  if (RequireCompleteType(Anon->getLocation(), RecTy,
                          diag::err_field_incomplete) ||
      InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
                                          AS_none, Chain, true)) {
    Anon->setInvalidDecl();
    ParentDecl->setInvalidDecl();
  }

  return Anon;
}

/// GetNameForDeclarator - Determine the full declaration name for the
/// given Declarator.
DeclarationNameInfo Sema::GetNameForDeclarator(Declarator &D) {
  return GetNameFromUnqualifiedId(D.getName());
}

/// \brief Retrieves the declaration name from a parsed unqualified-id.
DeclarationNameInfo
Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
  DeclarationNameInfo NameInfo;
  NameInfo.setLoc(Name.StartLocation);

  switch (Name.getKind()) {

  case UnqualifiedId::IK_ImplicitSelfParam:
  case UnqualifiedId::IK_Identifier:
    NameInfo.setName(Name.Identifier);
    NameInfo.setLoc(Name.StartLocation);
    return NameInfo;

  case UnqualifiedId::IK_OperatorFunctionId:
    NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
                                          Name.OperatorFunctionId.Operator));
    NameInfo.setLoc(Name.StartLocation);
    NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc
      = Name.OperatorFunctionId.SymbolLocations[0];
    NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
      = Name.EndLocation.getRawEncoding();
    return NameInfo;

  case UnqualifiedId::IK_LiteralOperatorId:
    NameInfo.setName(Context.DeclarationNames.getCXXLiteralOperatorName(
                                                          Name.Identifier));
    NameInfo.setLoc(Name.StartLocation);
    NameInfo.setCXXLiteralOperatorNameLoc(Name.EndLocation);
    return NameInfo;

  case UnqualifiedId::IK_ConversionFunctionId: {
    TypeSourceInfo *TInfo;
    QualType Ty = GetTypeFromParser(Name.ConversionFunctionId, &TInfo);
    if (Ty.isNull())
      return DeclarationNameInfo();
    NameInfo.setName(Context.DeclarationNames.getCXXConversionFunctionName(
                                              Context.getCanonicalType(Ty)));
    NameInfo.setLoc(Name.StartLocation);
    NameInfo.setNamedTypeInfo(TInfo);
    return NameInfo;
  }

  case UnqualifiedId::IK_ConstructorName: {
    TypeSourceInfo *TInfo;
    QualType Ty = GetTypeFromParser(Name.ConstructorName, &TInfo);
    if (Ty.isNull())
      return DeclarationNameInfo();
    NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
                                              Context.getCanonicalType(Ty)));
    NameInfo.setLoc(Name.StartLocation);
    NameInfo.setNamedTypeInfo(TInfo);
    return NameInfo;
  }

  case UnqualifiedId::IK_ConstructorTemplateId: {
    // In well-formed code, we can only have a constructor
    // template-id that refers to the current context, so go there
    // to find the actual type being constructed.
    CXXRecordDecl *CurClass = dyn_cast<CXXRecordDecl>(CurContext);
    if (!CurClass || CurClass->getIdentifier() != Name.TemplateId->Name)
      return DeclarationNameInfo();

    // Determine the type of the class being constructed.
    QualType CurClassType = Context.getTypeDeclType(CurClass);

    // FIXME: Check two things: that the template-id names the same type as
    // CurClassType, and that the template-id does not occur when the name
    // was qualified.

    NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
                                    Context.getCanonicalType(CurClassType)));
    NameInfo.setLoc(Name.StartLocation);
    // FIXME: should we retrieve TypeSourceInfo?
    NameInfo.setNamedTypeInfo(nullptr);
    return NameInfo;
  }

  case UnqualifiedId::IK_DestructorName: {
    TypeSourceInfo *TInfo;
    QualType Ty = GetTypeFromParser(Name.DestructorName, &TInfo);
    if (Ty.isNull())
      return DeclarationNameInfo();
    NameInfo.setName(Context.DeclarationNames.getCXXDestructorName(
                                              Context.getCanonicalType(Ty)));
    NameInfo.setLoc(Name.StartLocation);
    NameInfo.setNamedTypeInfo(TInfo);
    return NameInfo;
  }

  case UnqualifiedId::IK_TemplateId: {
    TemplateName TName = Name.TemplateId->Template.get();
    SourceLocation TNameLoc = Name.TemplateId->TemplateNameLoc;
    return Context.getNameForTemplate(TName, TNameLoc);
  }

  } // switch (Name.getKind())

  llvm_unreachable("Unknown name kind");
}

static QualType getCoreType(QualType Ty) {
  do {
    if (Ty->isPointerType() || Ty->isReferenceType())
      Ty = Ty->getPointeeType();
    else if (Ty->isArrayType())
      Ty = Ty->castAsArrayTypeUnsafe()->getElementType();
    else
      return Ty.withoutLocalFastQualifiers();
  } while (true);
}

/// hasSimilarParameters - Determine whether the C++ functions Declaration
/// and Definition have "nearly" matching parameters. This heuristic is
/// used to improve diagnostics in the case where an out-of-line function
/// definition doesn't match any declaration within the class or namespace.
/// Also sets Params to the list of indices to the parameters that differ
/// between the declaration and the definition. If hasSimilarParameters
/// returns true and Params is empty, then all of the parameters match.
static bool hasSimilarParameters(ASTContext &Context,
                                 FunctionDecl *Declaration,
                                 FunctionDecl *Definition,
                                 SmallVectorImpl<unsigned> &Params) {
  Params.clear();
  if (Declaration->param_size() != Definition->param_size())
    return false;
  for (unsigned Idx = 0; Idx < Declaration->param_size(); ++Idx) {
    QualType DeclParamTy = Declaration->getParamDecl(Idx)->getType();
    QualType DefParamTy = Definition->getParamDecl(Idx)->getType();

    // The parameter types are identical
    if (Context.hasSameType(DefParamTy, DeclParamTy))
      continue;

    QualType DeclParamBaseTy = getCoreType(DeclParamTy);
    QualType DefParamBaseTy = getCoreType(DefParamTy);
    const IdentifierInfo *DeclTyName = DeclParamBaseTy.getBaseTypeIdentifier();
    const IdentifierInfo *DefTyName = DefParamBaseTy.getBaseTypeIdentifier();

    if (Context.hasSameUnqualifiedType(DeclParamBaseTy, DefParamBaseTy) ||
        (DeclTyName && DeclTyName == DefTyName))
      Params.push_back(Idx);
    else  // The two parameters aren't even close
      return false;
  }

  return true;
}

/// NeedsRebuildingInCurrentInstantiation - Checks whether the given
/// declarator needs to be rebuilt in the current instantiation.
/// Any bits of declarator which appear before the name are valid for
/// consideration here. That's specifically the type in the decl spec
/// and the base type in any member-pointer chunks.
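///
/// An illustrative example (not from the original source): for the
/// out-of-line definition
/// \code
///   template <typename T> typename A<T>::type A<T>::f() { /* ... */ }
/// \endcode
/// the decl-spec type 'typename A<T>::type' is rebuilt once we know we
/// are inside A<T>.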
static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
                                                    DeclarationName Name) {
  // The types we specifically need to rebuild are:
  //   - typenames, typeofs, and decltypes
  //   - types which will become injected class names
  // Of course, we also need to rebuild any type referencing such a
  // type. It's safest to just say "dependent", but we call out a
  // few cases here.

  DeclSpec &DS = D.getMutableDeclSpec();
  switch (DS.getTypeSpecType()) {
  case DeclSpec::TST_typename:
  case DeclSpec::TST_typeofType:
  case DeclSpec::TST_underlyingType:
  case DeclSpec::TST_atomic: {
    // Grab the type from the parser.
    TypeSourceInfo *TSI = nullptr;
    QualType T = S.GetTypeFromParser(DS.getRepAsType(), &TSI);
    if (T.isNull() || !T->isDependentType()) break;

    // Make sure there's a type source info. This isn't really much
    // of a waste; most dependent types should have type source info
    // attached already.
    if (!TSI)
      TSI = S.Context.getTrivialTypeSourceInfo(T, DS.getTypeSpecTypeLoc());

    // Rebuild the type in the current instantiation.
    TSI = S.RebuildTypeInCurrentInstantiation(TSI, D.getIdentifierLoc(), Name);
    if (!TSI) return true;

    // Store the new type back in the decl spec.
    ParsedType LocType = S.CreateParsedType(TSI->getType(), TSI);
    DS.UpdateTypeRep(LocType);
    break;
  }

  case DeclSpec::TST_decltype:
  case DeclSpec::TST_typeofExpr: {
    Expr *E = DS.getRepAsExpr();
    ExprResult Result = S.RebuildExprInCurrentInstantiation(E);
    if (Result.isInvalid()) return true;
    DS.UpdateExprRep(Result.get());
    break;
  }

  default:
    // Nothing to do for these decl specs.
    break;
  }

  // It doesn't matter what order we do this in.
  for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
    DeclaratorChunk &Chunk = D.getTypeObject(I);

    // The only type information in the declarator which can come
    // before the declaration name is the base type of a member
    // pointer.
    if (Chunk.Kind != DeclaratorChunk::MemberPointer)
      continue;

    // Rebuild the scope specifier in-place.
    CXXScopeSpec &SS = Chunk.Mem.Scope();
    if (S.RebuildNestedNameSpecifierInCurrentInstantiation(SS))
      return true;
  }

  return false;
}

Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
  D.setFunctionDefinitionKind(FDK_Declaration);
  Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());

  if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
      Dcl && Dcl->getDeclContext()->isFileContext())
    Dcl->setTopLevelDeclInObjCContainer();

  return Dcl;
}

/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
///   If T is the name of a class, then each of the following shall have a
///   name different from T:
///     - every static data member of class T;
///     - every member function of class T
///     - every member of class T that is itself a type;
/// \returns true if the declaration name violates these rules.
bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
                                   DeclarationNameInfo NameInfo) {
  DeclarationName Name = NameInfo.getName();

  if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
    if (Record->getIdentifier() && Record->getDeclName() == Name) {
      Diag(NameInfo.getLoc(), diag::err_member_name_of_class) << Name;
      return true;
    }

  return false;
}

/// \brief Diagnose a declaration whose declarator-id has the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier of the declarator-id.
///
/// \param DC The declaration context to which the nested-name-specifier
/// resolves.
///
/// \param Name The name of the entity being declared.
///
/// \param Loc The location of the name of the entity being declared.
///
/// \returns true if we cannot safely recover from this error, false
/// otherwise.
bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                        DeclarationName Name,
                                        SourceLocation Loc) {
  DeclContext *Cur = CurContext;
  while (isa<LinkageSpecDecl>(Cur) || isa<CapturedDecl>(Cur))
    Cur = Cur->getParent();

  // If the user provided a superfluous scope specifier that refers back to the
  // class in which the entity is already declared, diagnose and ignore it.
  //
  //   class X {
  //     void X::f();
  //   };
  //
  // Note, it was once ill-formed to give redundant qualification in all
  // contexts, but that rule was removed by DR482.
  if (Cur->Equals(DC)) {
    if (Cur->isRecord()) {
      Diag(Loc, LangOpts.MicrosoftExt ? diag::warn_member_extra_qualification
                                      : diag::err_member_extra_qualification)
        << Name << FixItHint::CreateRemoval(SS.getRange());
      SS.clear();
    } else {
      Diag(Loc, diag::warn_namespace_member_extra_qualification) << Name;
    }
    return false;
  }

  // Check whether the qualifying scope encloses the scope of the original
  // declaration.
  if (!Cur->Encloses(DC)) {
    if (Cur->isRecord())
      Diag(Loc, diag::err_member_qualification)
        << Name << SS.getRange();
    else if (isa<TranslationUnitDecl>(DC))
      Diag(Loc, diag::err_invalid_declarator_global_scope)
        << Name << SS.getRange();
    else if (isa<FunctionDecl>(Cur))
      Diag(Loc, diag::err_invalid_declarator_in_function)
        << Name << SS.getRange();
    else if (isa<BlockDecl>(Cur))
      Diag(Loc, diag::err_invalid_declarator_in_block)
        << Name << SS.getRange();
    else
      Diag(Loc, diag::err_invalid_declarator_scope)
        << Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC)
        << SS.getRange();

    return true;
  }

  if (Cur->isRecord()) {
    // Cannot qualify members within a class.
    Diag(Loc, diag::err_member_qualification)
      << Name << SS.getRange();
    SS.clear();

    // C++ constructors and destructors with incorrect scopes can break
    // our AST invariants by having the wrong underlying types. If
    // that's the case, then drop this declaration entirely.
    if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
         Name.getNameKind() == DeclarationName::CXXDestructorName) &&
        !Context.hasSameType(
            Name.getCXXNameType(),
            Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
      return true;

    return false;
  }

  // C++11 [dcl.meaning]p1:
  //   [...] "The nested-name-specifier of the qualified declarator-id shall
  //   not begin with a decltype-specifier"
  NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
  while (SpecLoc.getPrefix())
    SpecLoc = SpecLoc.getPrefix();
  if (dyn_cast_or_null<DecltypeType>(
          SpecLoc.getNestedNameSpecifier()->getAsType()))
    Diag(Loc, diag::err_decltype_in_declarator)
      << SpecLoc.getTypeLoc().getSourceRange();

  return false;
}

NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
                                  MultiTemplateParamsArg TemplateParamLists) {
  // TODO: consider using NameInfo for diagnostic.
  DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
  DeclarationName Name = NameInfo.getName();

  // All of these full declarators require an identifier. If it doesn't have
  // one, the ParsedFreeStandingDeclSpec action should be used.
  if (!Name) {
    if (!D.isInvalidType()) // Reject this if we think it is valid.
      Diag(D.getDeclSpec().getLocStart(), diag::err_declarator_need_ident)
          << D.getDeclSpec().getSourceRange() << D.getSourceRange();
    return nullptr;
  } else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
    return nullptr;

  // The scope passed in may not be a decl scope. Zip up the scope tree until
  // we find one that is.
  ShadowMergeState MergeState = S->isForDeclScope()
                                    ? ShadowMergeState_Possible
                                    : ShadowMergeState_Disallowed; // HLSL Change - check on original scope
  while ((S->getFlags() & Scope::DeclScope) == 0 ||
         (S->getFlags() & Scope::TemplateParamScope) != 0)
    S = S->getParent();

  DeclContext *DC = CurContext;
  if (D.getCXXScopeSpec().isInvalid())
    D.setInvalidType();
  else if (D.getCXXScopeSpec().isSet()) {
    if (DiagnoseUnexpandedParameterPack(D.getCXXScopeSpec(),
                                        UPPC_DeclarationQualifier))
      return nullptr;

    bool EnteringContext = !D.getDeclSpec().isFriendSpecified();
    DC = computeDeclContext(D.getCXXScopeSpec(), EnteringContext);
    if (!DC || isa<EnumDecl>(DC)) {
      // If we could not compute the declaration context, it's because the
      // declaration context is dependent but does not refer to a class,
      // class template, or class template partial specialization. Complain
      // and return early, to avoid the coming semantic disaster.
      Diag(D.getIdentifierLoc(),
           diag::err_template_qualified_declarator_no_match)
        << D.getCXXScopeSpec().getScopeRep()
        << D.getCXXScopeSpec().getRange();
      return nullptr;
    }
    bool IsDependentContext = DC->isDependentContext();

    if (!IsDependentContext &&
        RequireCompleteDeclContext(D.getCXXScopeSpec(), DC))
      return nullptr;

    // If a class is incomplete, do not parse entities inside it.
    if (isa<CXXRecordDecl>(DC) && !cast<CXXRecordDecl>(DC)->hasDefinition()) {
      Diag(D.getIdentifierLoc(),
           diag::err_member_def_undefined_record)
        << Name << DC << D.getCXXScopeSpec().getRange();
      return nullptr;
    }
    if (!D.getDeclSpec().isFriendSpecified()) {
      if (diagnoseQualifiedDeclaration(D.getCXXScopeSpec(), DC,
                                       Name, D.getIdentifierLoc())) {
        if (DC->isRecord())
          return nullptr;

        D.setInvalidType();
      }
    }

    // Check whether we need to rebuild the type of the given
    // declaration in the current instantiation.
    if (EnteringContext && IsDependentContext &&
        TemplateParamLists.size() != 0) {
      ContextRAII SavedContext(*this, DC);
      if (RebuildDeclaratorInCurrentInstantiation(*this, D, Name))
        D.setInvalidType();
    }
  }

  TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
  QualType R = TInfo->getType();

  if (!R->isFunctionType() && DiagnoseClassNameShadow(DC, NameInfo))
    // If this is a typedef, we'll end up spewing multiple diagnostics.
    // Just return early; it's safer. If this is a function, let the
    // "constructor cannot have a return type" diagnostic handle it.
    if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
      return nullptr;

  if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
                                      UPPC_DeclarationType))
    D.setInvalidType();

  LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
                        ForRedeclaration);

  // See if this is a redefinition of a variable in the same scope.
  if (!D.getCXXScopeSpec().isSet()) {
    bool IsLinkageLookup = false;
    bool CreateBuiltins = false;

    // If the declaration we're planning to build will be a function
    // or object with linkage, then look for another declaration with
    // linkage (C99 6.2.2p4-5 and C++ [basic.link]p6).
    //
    // If the declaration we're planning to build will be declared with
    // external linkage in the translation unit, create any builtin with
    // the same name.
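    //
    // An illustrative example (not from the original source):
    //   void g() { extern int n; }  // linkage lookup may find a prior
    //                               // file-scope declaration of 'n'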
    if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
      /* Do nothing*/;
    else if (CurContext->isFunctionOrMethod() &&
             (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern ||
              R->isFunctionType())) {
      IsLinkageLookup = true;
      CreateBuiltins =
          CurContext->getEnclosingNamespaceContext()->isTranslationUnit();
    } else if (CurContext->getRedeclContext()->isTranslationUnit() &&
               D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)
      CreateBuiltins = true;

    if (IsLinkageLookup)
      Previous.clear(LookupRedeclarationWithLinkage);

    LookupName(Previous, S, CreateBuiltins);
  } else { // Something like "int foo::x;"
    LookupQualifiedName(Previous, DC);

    // C++ [dcl.meaning]p1:
    //   When the declarator-id is qualified, the declaration shall refer to a
    //   previously declared member of the class or namespace to which the
    //   qualifier refers (or, in the case of a namespace, of an element of the
    //   inline namespace set of that namespace (7.3.1)) or to a specialization
    //   thereof; [...]
    //
    // Note that we already checked the context above, and that we do not have
    // enough information to make sure that Previous contains the declaration
    // we want to match. For example, given:
    //
    //   class X {
    //     void f();
    //     void f(float);
    //   };
    //
    //   void X::f(int) { } // ill-formed
    //
    // In this case, Previous will point to the overload set
    // containing the two f's declared in X, but neither of them
    // matches.

    // C++ [dcl.meaning]p1:
    //   [...] the member shall not merely have been introduced by a
    //   using-declaration in the scope of the class or namespace nominated by
    //   the nested-name-specifier of the declarator-id.
    RemoveUsingDecls(Previous);
  }

  if (Previous.isSingleResult() &&
      Previous.getFoundDecl()->isTemplateParameter()) {
    // Maybe we will complain about the shadowed template parameter.
    if (!D.isInvalidType())
      DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
                                      Previous.getFoundDecl());

    // Just pretend that we didn't see the previous declaration.
    Previous.clear();
  }

  // In C++, the previous declaration we find might be a tag type
  // (class or enum). In this case, the new declaration will hide the
  // tag type. Note that this does not apply if we're declaring a
  // typedef (C++ [dcl.typedef]p4).
  if (Previous.isSingleTagDecl() &&
      D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef)
    Previous.clear();

  // Check that there are no default arguments other than in the parameters
  // of a function declaration (C++ only).
  if (getLangOpts().CPlusPlus)
    CheckExtraCXXDefaultArguments(D);

  // HLSL Change Starts
  if (getLangOpts().HLSL) {
    const bool IsParameterFalse = false;
    if (!DiagnoseHLSLDecl(D, DC, /*BitWidth*/ nullptr, TInfo,
                          IsParameterFalse)) {
      assert(D.isInvalidType() && "otherwise DiagnoseHLSLDecl failed but "
                                  "didn't invalidate declaration");
      return nullptr;
    }
  }
  // HLSL Change Ends

  NamedDecl *New;

  bool AddToScope = true;
  if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
    if (TemplateParamLists.size()) {
      Diag(D.getIdentifierLoc(), diag::err_template_typedef);
      return nullptr;
    }

    New = ActOnTypedefDeclarator(S, D, DC, TInfo, Previous);
  } else if (R->isFunctionType()) {
    New = ActOnFunctionDeclarator(S, D, DC, TInfo, Previous,
                                  TemplateParamLists,
                                  AddToScope);
  } else {
    New = ActOnVariableDeclarator(S, D, DC, TInfo, Previous,
                                  TemplateParamLists,
                                  AddToScope, MergeState); // HLSL Change - add merge state
  }

  if (!New)
    return nullptr;

  // If this has an identifier and is not an invalid redeclaration or
  // function template specialization, add it to the scope stack.
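  // [Added commentary; illustrative sketch, not part of the original source.]
  // e.g. a redeclared local 'extern int n;' is pushed on the scope chains
  // below but kept out of the enclosing DeclContext (addHiddenDecl), so
  // qualified lookup still finds only the first declaration of the entity.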
if (New->getDeclName() && AddToScope && !(D.isRedeclaration() && New->isInvalidDecl())) { // Only make a locally-scoped extern declaration visible if it is the first // declaration of this entity. Qualified lookup for such an entity should // only find this declaration if there is no visible declaration of it. bool AddToContext = !D.isRedeclaration() || !New->isLocalExternDecl(); PushOnScopeChains(New, S, AddToContext); if (!AddToContext) CurContext->addHiddenDecl(New); } return New; } /// Helper method to turn variable array types into constant array /// types in certain situations which would otherwise be errors (for /// GCC compatibility). static QualType TryToFixInvalidVariablyModifiedType(QualType T, ASTContext &Context, bool &SizeIsNegative, llvm::APSInt &Oversized) { // This method tries to turn a variable array into a constant // array even when the size isn't an ICE. This is necessary // for compatibility with code that depends on gcc's buggy // constant expression folding, like struct {char x[(int)(char*)2];} SizeIsNegative = false; Oversized = 0; if (T->isDependentType()) return QualType(); QualifierCollector Qs; const Type *Ty = Qs.strip(T); if (const PointerType* PTy = dyn_cast<PointerType>(Ty)) { QualType Pointee = PTy->getPointeeType(); QualType FixedType = TryToFixInvalidVariablyModifiedType(Pointee, Context, SizeIsNegative, Oversized); if (FixedType.isNull()) return FixedType; FixedType = Context.getPointerType(FixedType); return Qs.apply(Context, FixedType); } if (const ParenType* PTy = dyn_cast<ParenType>(Ty)) { QualType Inner = PTy->getInnerType(); QualType FixedType = TryToFixInvalidVariablyModifiedType(Inner, Context, SizeIsNegative, Oversized); if (FixedType.isNull()) return FixedType; FixedType = Context.getParenType(FixedType); return Qs.apply(Context, FixedType); } const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T); if (!VLATy) return QualType(); // FIXME: We should probably handle this case if (VLATy->getElementType()->isVariablyModifiedType()) return QualType(); llvm::APSInt Res; if (!VLATy->getSizeExpr() || !VLATy->getSizeExpr()->EvaluateAsInt(Res, Context)) return QualType(); // Check whether the array size is negative. if (Res.isSigned() && Res.isNegative()) { SizeIsNegative = true; return QualType(); } // Check whether the array is too large to be addressed. 
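  // [Added commentary; illustrative sketch, not part of the original source.]
  // For the GCC-compatibility case described above, a size expression like
  // '(int)(char*)2' is not an ICE but still folds via EvaluateAsInt, so
  //   struct S { char x[(int)(char*)2]; };
  // is rebuilt below as a ConstantArrayType of size 2 (callers then emit
  // warn_illegal_constant_array_size).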
unsigned ActiveSizeBits = ConstantArrayType::getNumAddressingBits(Context, VLATy->getElementType(), Res); if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) { Oversized = Res; return QualType(); } return Context.getConstantArrayType(VLATy->getElementType(), Res, ArrayType::Normal, 0); } static void FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) { SrcTL = SrcTL.getUnqualifiedLoc(); DstTL = DstTL.getUnqualifiedLoc(); if (PointerTypeLoc SrcPTL = SrcTL.getAs<PointerTypeLoc>()) { PointerTypeLoc DstPTL = DstTL.castAs<PointerTypeLoc>(); FixInvalidVariablyModifiedTypeLoc(SrcPTL.getPointeeLoc(), DstPTL.getPointeeLoc()); DstPTL.setStarLoc(SrcPTL.getStarLoc()); return; } if (ParenTypeLoc SrcPTL = SrcTL.getAs<ParenTypeLoc>()) { ParenTypeLoc DstPTL = DstTL.castAs<ParenTypeLoc>(); FixInvalidVariablyModifiedTypeLoc(SrcPTL.getInnerLoc(), DstPTL.getInnerLoc()); DstPTL.setLParenLoc(SrcPTL.getLParenLoc()); DstPTL.setRParenLoc(SrcPTL.getRParenLoc()); return; } ArrayTypeLoc SrcATL = SrcTL.castAs<ArrayTypeLoc>(); ArrayTypeLoc DstATL = DstTL.castAs<ArrayTypeLoc>(); TypeLoc SrcElemTL = SrcATL.getElementLoc(); TypeLoc DstElemTL = DstATL.getElementLoc(); DstElemTL.initializeFullCopy(SrcElemTL); DstATL.setLBracketLoc(SrcATL.getLBracketLoc()); DstATL.setSizeExpr(SrcATL.getSizeExpr()); DstATL.setRBracketLoc(SrcATL.getRBracketLoc()); } /// Helper method to turn variable array types into constant array /// types in certain situations which would otherwise be errors (for /// GCC compatibility). static TypeSourceInfo* TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo, ASTContext &Context, bool &SizeIsNegative, llvm::APSInt &Oversized) { QualType FixedTy = TryToFixInvalidVariablyModifiedType(TInfo->getType(), Context, SizeIsNegative, Oversized); if (FixedTy.isNull()) return nullptr; TypeSourceInfo *FixedTInfo = Context.getTrivialTypeSourceInfo(FixedTy); FixInvalidVariablyModifiedTypeLoc(TInfo->getTypeLoc(), FixedTInfo->getTypeLoc()); return FixedTInfo; } /// \brief Register the given locally-scoped extern "C" declaration so /// that it can be found later for redeclarations. We include any extern "C" /// declaration that is not visible in the translation unit here, not just /// function-scope declarations. void Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S) { if (!getLangOpts().CPlusPlus && ND->getLexicalDeclContext()->getRedeclContext()->isTranslationUnit()) // Don't need to track declarations in the TU in C. return; // Note that we have a locally-scoped external with this name. Context.getExternCContextDecl()->makeDeclVisibleInContext(ND); } NamedDecl *Sema::findLocallyScopedExternCDecl(DeclarationName Name) { // FIXME: We can have multiple results via __attribute__((overloadable)). auto Result = Context.getExternCContextDecl()->lookup(Name); return Result.empty() ? nullptr : *Result.begin(); } /// \brief Diagnose function specifiers on a declaration of an identifier that /// does not identify a function. 
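/// For example (illustrative, not part of the original source; pre-C++17
/// rules, where 'inline' variables do not exist):
/// \code
///   inline int a;   // err_inline_non_function
///   virtual int b;  // err_virtual_non_function
/// \endcode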
void Sema::DiagnoseFunctionSpecifiers(const DeclSpec &DS) { // FIXME: We should probably indicate the identifier in question to avoid // confusion for constructs like "inline int a(), b;" if (DS.isInlineSpecified()) Diag(DS.getInlineSpecLoc(), diag::err_inline_non_function); if (DS.isVirtualSpecified()) Diag(DS.getVirtualSpecLoc(), diag::err_virtual_non_function); if (DS.isExplicitSpecified()) Diag(DS.getExplicitSpecLoc(), diag::err_explicit_non_function); if (DS.isNoreturnSpecified()) Diag(DS.getNoreturnSpecLoc(), diag::err_noreturn_non_function); } NamedDecl* Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous) { // Typedef declarators cannot be qualified (C++ [dcl.meaning]p1). if (D.getCXXScopeSpec().isSet()) { Diag(D.getIdentifierLoc(), diag::err_qualified_typedef_declarator) << D.getCXXScopeSpec().getRange(); D.setInvalidType(); // Pretend we didn't see the scope specifier. DC = CurContext; Previous.clear(); } DiagnoseFunctionSpecifiers(D.getDeclSpec()); if (D.getDeclSpec().isConstexprSpecified()) Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr) << 1; if (D.getName().Kind != UnqualifiedId::IK_Identifier) { Diag(D.getName().StartLocation, diag::err_typedef_not_identifier) << D.getName().getSourceRange(); return nullptr; } TypedefDecl *NewTD = ParseTypedefDecl(S, D, TInfo->getType(), TInfo); if (!NewTD) return nullptr; // Handle attributes prior to checking for duplicates in MergeVarDecl ProcessDeclAttributes(S, NewTD, D); CheckTypedefForVariablyModifiedType(S, NewTD); bool Redeclaration = D.isRedeclaration(); NamedDecl *ND = ActOnTypedefNameDecl(S, DC, NewTD, Previous, Redeclaration); D.setRedeclaration(Redeclaration); TransferUnusualAttributes(D, ND); // HLSL Change return ND; } void Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) { // C99 6.7.7p2: If a typedef name specifies a variably modified type // then it shall have block scope. // Note that variably modified types must be fixed before merging the decl so // that redeclarations will match. TypeSourceInfo *TInfo = NewTD->getTypeSourceInfo(); QualType T = TInfo->getType(); if (T->isVariablyModifiedType()) { getCurFunction()->setHasBranchProtectedScope(); if (S->getFnParent() == nullptr) { bool SizeIsNegative; llvm::APSInt Oversized; TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context, SizeIsNegative, Oversized); if (FixedTInfo) { Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size); NewTD->setTypeSourceInfo(FixedTInfo); } else { if (SizeIsNegative) Diag(NewTD->getLocation(), diag::err_typecheck_negative_array_size); else if (T->isVariableArrayType()) Diag(NewTD->getLocation(), diag::err_vla_decl_in_file_scope); else if (Oversized.getBoolValue()) Diag(NewTD->getLocation(), diag::err_array_too_large) << Oversized.toString(10); else Diag(NewTD->getLocation(), diag::err_vm_decl_in_file_scope); NewTD->setInvalidDecl(); } } } } /// ActOnTypedefNameDecl - Perform semantic checking for a declaration which /// declares a typedef-name, either using the 'typedef' type specifier or via /// a C++0x [dcl.typedef]p2 alias-declaration: 'using T = A;'. NamedDecl* Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD, LookupResult &Previous, bool &Redeclaration) { // Merge the decl with the existing one if appropriate. If the decl is // in an outer scope, it isn't the same thing. 
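  // [Added commentary; illustrative sketch, not part of the original source.]
  // e.g. a second 'typedef int T;' after 'typedef int T;' is not an error;
  // it reaches MergeTypedefNameDecl below and is folded into a redeclaration.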
  FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/false,
                       /*AllowInlineNamespace*/false);
  filterNonConflictingPreviousTypedefDecls(*this, NewTD, Previous);
  if (!Previous.empty()) {
    Redeclaration = true;
    MergeTypedefNameDecl(NewTD, Previous);
  }

  // If this is the C FILE type, notify the AST context.
  if (IdentifierInfo *II = NewTD->getIdentifier())
    if (!NewTD->isInvalidDecl() &&
        NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
      if (II->isStr("FILE"))
        Context.setFILEDecl(NewTD);
      else if (II->isStr("jmp_buf"))
        Context.setjmp_bufDecl(NewTD);
      else if (II->isStr("sigjmp_buf"))
        Context.setsigjmp_bufDecl(NewTD);
      else if (II->isStr("ucontext_t"))
        Context.setucontext_tDecl(NewTD);
    }

  return NewTD;
}

/// \brief Determines whether the given declaration is an out-of-scope
/// previous declaration.
///
/// This routine should be invoked when name lookup has found a
/// previous declaration (PrevDecl) that is not in the scope where a
/// new declaration by the same name is being introduced. If the new
/// declaration occurs in a local scope, previous declarations with
/// linkage may still be considered previous declarations (C99
/// 6.2.2p4-5, C++ [basic.link]p6).
///
/// \param PrevDecl the previous declaration found by name
/// lookup
///
/// \param DC the context in which the new declaration is being
/// declared.
///
/// \returns true if PrevDecl is an out-of-scope previous declaration
/// for a new declaration with the same name.
static bool isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl,
                                            DeclContext *DC,
                                            ASTContext &Context) {
  if (!PrevDecl)
    return false;

  if (!PrevDecl->hasLinkage())
    return false;

  if (Context.getLangOpts().CPlusPlus) {
    // C++ [basic.link]p6:
    //   If there is a visible declaration of an entity with linkage
    //   having the same name and type, ignoring entities declared
    //   outside the innermost enclosing namespace scope, the block
    //   scope declaration declares that same entity and receives the
    //   linkage of the previous declaration.
    DeclContext *OuterContext = DC->getRedeclContext();
    if (!OuterContext->isFunctionOrMethod())
      // This rule only applies to block-scope declarations.
      return false;

    DeclContext *PrevOuterContext = PrevDecl->getDeclContext();
    if (PrevOuterContext->isRecord())
      // We found a member function: ignore it.
      return false;

    // Find the innermost enclosing namespace for the new and
    // previous declarations.
    OuterContext = OuterContext->getEnclosingNamespaceContext();
    PrevOuterContext = PrevOuterContext->getEnclosingNamespaceContext();

    // The previous declaration is in a different namespace, so it
    // isn't the same function.
    if (!OuterContext->Equals(PrevOuterContext))
      return false;
  }

  return true;
}

static void SetNestedNameSpecifier(DeclaratorDecl *DD, Declarator &D) {
  CXXScopeSpec &SS = D.getCXXScopeSpec();
  if (!SS.isSet())
    return;

  DD->setQualifierInfo(SS.getWithLocInContext(DD->getASTContext()));
}

bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
  QualType type = decl->getType();
  Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();

  if (lifetime == Qualifiers::OCL_Autoreleasing) {
    // Various kinds of declaration aren't allowed to be __autoreleasing.
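    // [Added commentary; illustrative sketch, not part of the original
    // source.] e.g. '__block __autoreleasing id x;' is diagnosed below with
    // err_arc_autoreleasing_var and kind 0 (__block); a global would be
    // kind 1, a field 2, an ivar 3.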
unsigned kind = -1U; if (VarDecl *var = dyn_cast<VarDecl>(decl)) { if (var->hasAttr<BlocksAttr>()) kind = 0; // __block else if (!var->hasLocalStorage()) kind = 1; // global } else if (isa<ObjCIvarDecl>(decl)) { kind = 3; // ivar } else if (isa<FieldDecl>(decl)) { kind = 2; // field } if (kind != -1U) { Diag(decl->getLocation(), diag::err_arc_autoreleasing_var) << kind; } } else if (lifetime == Qualifiers::OCL_None) { // Try to infer lifetime. if (!type->isObjCLifetimeType()) return false; lifetime = type->getObjCARCImplicitLifetime(); type = Context.getLifetimeQualifiedType(type, lifetime); decl->setType(type); } if (VarDecl *var = dyn_cast<VarDecl>(decl)) { // Thread-local variables cannot have lifetime. if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone && var->getTLSKind()) { Diag(var->getLocation(), diag::err_arc_thread_ownership) << var->getType(); return true; } } return false; } static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) { // Ensure that an auto decl is deduced otherwise the checks below might cache // the wrong linkage. assert(S.ParsingInitForAutoVars.count(&ND) == 0); // 'weak' only applies to declarations with external linkage. if (WeakAttr *Attr = ND.getAttr<WeakAttr>()) { if (!ND.isExternallyVisible()) { S.Diag(Attr->getLocation(), diag::err_attribute_weak_static); ND.dropAttr<WeakAttr>(); } } if (WeakRefAttr *Attr = ND.getAttr<WeakRefAttr>()) { if (ND.isExternallyVisible()) { S.Diag(Attr->getLocation(), diag::err_attribute_weakref_not_static); ND.dropAttr<WeakRefAttr>(); ND.dropAttr<AliasAttr>(); } } if (auto *VD = dyn_cast<VarDecl>(&ND)) { if (VD->hasInit()) { if (const auto *Attr = VD->getAttr<AliasAttr>()) { assert(VD->isThisDeclarationADefinition() && !VD->isExternallyVisible() && "Broken AliasAttr handled late!"); S.Diag(Attr->getLocation(), diag::err_alias_is_definition) << VD; VD->dropAttr<AliasAttr>(); } } } // 'selectany' only applies to externally visible variable declarations. // It does not apply to functions. if (SelectAnyAttr *Attr = ND.getAttr<SelectAnyAttr>()) { if (isa<FunctionDecl>(ND) || !ND.isExternallyVisible()) { S.Diag(Attr->getLocation(), diag::err_attribute_selectany_non_extern_data); ND.dropAttr<SelectAnyAttr>(); } } // dll attributes require external linkage. if (const InheritableAttr *Attr = getDLLAttr(&ND)) { if (!ND.isExternallyVisible()) { S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern) << &ND << Attr; ND.setInvalidDecl(); } } } static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl, NamedDecl *NewDecl, bool IsSpecialization) { if (TemplateDecl *OldTD = dyn_cast<TemplateDecl>(OldDecl)) OldDecl = OldTD->getTemplatedDecl(); if (TemplateDecl *NewTD = dyn_cast<TemplateDecl>(NewDecl)) NewDecl = NewTD->getTemplatedDecl(); if (!OldDecl || !NewDecl) return; const DLLImportAttr *OldImportAttr = OldDecl->getAttr<DLLImportAttr>(); const DLLExportAttr *OldExportAttr = OldDecl->getAttr<DLLExportAttr>(); const DLLImportAttr *NewImportAttr = NewDecl->getAttr<DLLImportAttr>(); const DLLExportAttr *NewExportAttr = NewDecl->getAttr<DLLExportAttr>(); // dllimport and dllexport are inheritable attributes so we have to exclude // inherited attribute instances. bool HasNewAttr = (NewImportAttr && !NewImportAttr->isInherited()) || (NewExportAttr && !NewExportAttr->isInherited()); // A redeclaration is not allowed to add a dllimport or dllexport attribute, // the only exception being explicit specializations. 
// Implicitly generated declarations are also excluded for now because there // is no other way to switch these to use dllimport or dllexport. bool AddsAttr = !(OldImportAttr || OldExportAttr) && HasNewAttr; if (AddsAttr && !IsSpecialization && !OldDecl->isImplicit()) { // Allow with a warning for free functions and global variables. bool JustWarn = false; if (!OldDecl->isCXXClassMember()) { auto *VD = dyn_cast<VarDecl>(OldDecl); if (VD && !VD->getDescribedVarTemplate()) JustWarn = true; auto *FD = dyn_cast<FunctionDecl>(OldDecl); if (FD && FD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) JustWarn = true; } // We cannot change a declaration that's been used because IR has already // been emitted. Dllimported functions will still work though (modulo // address equality) as they can use the thunk. if (OldDecl->isUsed()) if (!isa<FunctionDecl>(OldDecl) || !NewImportAttr) JustWarn = false; unsigned DiagID = JustWarn ? diag::warn_attribute_dll_redeclaration : diag::err_attribute_dll_redeclaration; S.Diag(NewDecl->getLocation(), DiagID) << NewDecl << (NewImportAttr ? (const Attr *)NewImportAttr : NewExportAttr); S.Diag(OldDecl->getLocation(), diag::note_previous_declaration); if (!JustWarn) { NewDecl->setInvalidDecl(); return; } } // A redeclaration is not allowed to drop a dllimport attribute, the only // exceptions being inline function definitions, local extern declarations, // and qualified friend declarations. // NB: MSVC converts such a declaration to dllexport. bool IsInline = false, IsStaticDataMember = false, IsQualifiedFriend = false; if (const auto *VD = dyn_cast<VarDecl>(NewDecl)) // Ignore static data because out-of-line definitions are diagnosed // separately. IsStaticDataMember = VD->isStaticDataMember(); else if (const auto *FD = dyn_cast<FunctionDecl>(NewDecl)) { IsInline = FD->isInlined(); IsQualifiedFriend = FD->getQualifier() && FD->getFriendObjectKind() == Decl::FOK_Declared; } if (OldImportAttr && !HasNewAttr && !IsInline && !IsStaticDataMember && !NewDecl->isLocalExternDecl() && !IsQualifiedFriend) { S.Diag(NewDecl->getLocation(), diag::warn_redeclaration_without_attribute_prev_attribute_ignored) << NewDecl << OldImportAttr; S.Diag(OldDecl->getLocation(), diag::note_previous_declaration); S.Diag(OldImportAttr->getLocation(), diag::note_previous_attribute); OldDecl->dropAttr<DLLImportAttr>(); NewDecl->dropAttr<DLLImportAttr>(); } else if (IsInline && OldImportAttr && !S.Context.getTargetInfo().getCXXABI().isMicrosoft()) { // In MinGW, seeing a function declared inline drops the dllimport attribute. OldDecl->dropAttr<DLLImportAttr>(); NewDecl->dropAttr<DLLImportAttr>(); S.Diag(NewDecl->getLocation(), diag::warn_dllimport_dropped_from_inline_function) << NewDecl << OldImportAttr; } } /// Given that we are within the definition of the given function, /// will that definition behave like C99's 'inline', where the /// definition is discarded except for optimization purposes? static bool isFunctionDefinitionDiscarded(Sema &S, FunctionDecl *FD) { // Try to avoid calling GetGVALinkageForFunction. // All cases of this require the 'inline' keyword. if (!FD->isInlined()) return false; // This is only possible in C++ with the gnu_inline attribute. if (S.getLangOpts().CPlusPlus && !FD->hasAttr<GNUInlineAttr>()) return false; // Okay, go ahead and call the relatively-more-expensive function. #ifndef NDEBUG // AST quite reasonably asserts that it's working on a function // definition. 
We don't really have a way to tell it that we're // currently defining the function, so just lie to it in +Asserts // builds. This is an awful hack. FD->setLazyBody(1); #endif bool isC99Inline = S.Context.GetGVALinkageForFunction(FD) == GVA_AvailableExternally; #ifndef NDEBUG FD->setLazyBody(0); #endif return isC99Inline; } /// Determine whether a variable is extern "C" prior to attaching /// an initializer. We can't just call isExternC() here, because that /// will also compute and cache whether the declaration is externally /// visible, which might change when we attach the initializer. /// /// This can only be used if the declaration is known to not be a /// redeclaration of an internal linkage declaration. /// /// For instance: /// /// auto x = []{}; /// /// Attaching the initializer here makes this declaration not externally /// visible, because its type has internal linkage. /// /// FIXME: This is a hack. template<typename T> static bool isIncompleteDeclExternC(Sema &S, const T *D) { if (S.getLangOpts().CPlusPlus) { // In C++, the overloadable attribute negates the effects of extern "C". if (!D->isInExternCContext() || D->template hasAttr<OverloadableAttr>()) return false; } return D->isExternC(); } static bool shouldConsiderLinkage(const VarDecl *VD) { const DeclContext *DC = VD->getDeclContext()->getRedeclContext(); if (DC->isFunctionOrMethod()) return VD->hasExternalStorage(); if (DC->isFileContext()) return true; if (DC->isRecord()) return false; llvm_unreachable("Unexpected context"); } static bool shouldConsiderLinkage(const FunctionDecl *FD) { const DeclContext *DC = FD->getDeclContext()->getRedeclContext(); if (DC->isFileContext() || DC->isFunctionOrMethod()) return true; if (DC->isRecord()) return false; llvm_unreachable("Unexpected context"); } static bool hasParsedAttr(Scope *S, const AttributeList *AttrList, AttributeList::Kind Kind) { for (const AttributeList *L = AttrList; L; L = L->getNext()) if (L->getKind() == Kind) return true; return false; } static bool hasParsedAttr(Scope *S, const Declarator &PD, AttributeList::Kind Kind) { // Check decl attributes on the DeclSpec. if (hasParsedAttr(S, PD.getDeclSpec().getAttributes().getList(), Kind)) return true; // Walk the declarator structure, checking decl attributes that were in a type // position to the decl itself. for (unsigned I = 0, E = PD.getNumTypeObjects(); I != E; ++I) { if (hasParsedAttr(S, PD.getTypeObject(I).getAttrs(), Kind)) return true; } // Finally, check attributes on the decl itself. return hasParsedAttr(S, PD.getAttributes(), Kind); } /// Adjust the \c DeclContext for a function or variable that might be a /// function-local external declaration. bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) { if (!DC->isFunctionOrMethod()) return false; // If this is a local extern function or variable declared within a function // template, don't add it into the enclosing namespace scope until it is // instantiated; it might have a dependent type right now. if (DC->isDependentContext()) return true; // C++11 [basic.link]p7: // When a block scope declaration of an entity with linkage is not found to // refer to some other declaration, then that entity is a member of the // innermost enclosing namespace. // // Per C++11 [namespace.def]p6, the innermost enclosing namespace is a // semantically-enclosing namespace, not a lexically-enclosing one. 
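  // [Added commentary; illustrative sketch, not part of the original source.]
  // e.g. given 'void f() { extern int n; }', the loop below hoists DC from
  // f's body up to the translation unit (or the enclosing namespace), so 'n'
  // is treated as a member of that scope.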
while (!DC->isFileContext() && !isa<LinkageSpecDecl>(DC)) DC = DC->getParent(); return true; } /// \brief Returns true if given declaration is TU-scoped and externally /// visible. static bool isDeclTUScopedExternallyVisible(const Decl *D) { if (auto *FD = dyn_cast<FunctionDecl>(D)) return (FD->getDeclContext()->isTranslationUnit() || FD->isExternC()) && FD->hasExternalFormalLinkage(); else if (auto *VD = dyn_cast<VarDecl>(D)) return (VD->getDeclContext()->isTranslationUnit() || VD->isExternC()) && VD->hasExternalFormalLinkage(); llvm_unreachable("Unknown type of decl!"); } NamedDecl * Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ShadowMergeState MergeState) { // HLSL Change - add merge state QualType R = TInfo->getType(); DeclarationName Name = GetNameForDeclarator(D).getName(); DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec(); StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec()); // dllimport globals without explicit storage class are treated as extern. We // have to change the storage class this early to get the right DeclContext. if (SC == SC_None && !DC->isRecord() && hasParsedAttr(S, D, AttributeList::AT_DLLImport) && !hasParsedAttr(S, D, AttributeList::AT_DLLExport)) SC = SC_Extern; DeclContext *OriginalDC = DC; bool IsLocalExternDecl = SC == SC_Extern && adjustContextForLocalExternDecl(DC); if (getLangOpts().OpenCL) { // OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed. QualType NR = R; while (NR->isPointerType()) { if (NR->isFunctionPointerType()) { Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer_variable); D.setInvalidType(); break; } NR = NR->getPointeeType(); } if (!getOpenCLOptions().cl_khr_fp16) { // OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and // half array type (unless the cl_khr_fp16 extension is enabled). if (Context.getBaseElementType(R)->isHalfType()) { Diag(D.getIdentifierLoc(), diag::err_opencl_half_declaration) << R; D.setInvalidType(); } } } if (SCSpec == DeclSpec::SCS_mutable) { // mutable can only appear on non-static class members, so it's always // an error here Diag(D.getIdentifierLoc(), diag::err_mutable_nonmember); D.setInvalidType(); SC = SC_None; } if (getLangOpts().CPlusPlus11 && SCSpec == DeclSpec::SCS_register && !D.getAsmLabel() && !getSourceManager().isInSystemMacro( D.getDeclSpec().getStorageClassSpecLoc())) { // In C++11, the 'register' storage class specifier is deprecated. // Suppress the warning in system macros, it's used in macros in some // popular C system headers, such as in glibc's htonl() macro. Diag(D.getDeclSpec().getStorageClassSpecLoc(), diag::warn_deprecated_register) << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); } IdentifierInfo *II = Name.getAsIdentifierInfo(); if (!II) { Diag(D.getIdentifierLoc(), diag::err_bad_variable_name) << Name; return nullptr; } DiagnoseFunctionSpecifiers(D.getDeclSpec()); if (!DC->isRecord() && S->getFnParent() == nullptr) { // C99 6.9p2: The storage-class specifiers auto and register shall not // appear in the declaration specifiers in an external declaration. // Global Register+Asm is a GNU extension we support. 
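    // [Added commentary; illustrative sketch, not part of the original
    // source.]
    //   auto int g;                 // in C, at file scope: rejected below
    //   register int r asm("ebx");  // accepted: GNU global register variable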
if (SC == SC_Auto || (SC == SC_Register && !D.getAsmLabel())) { Diag(D.getIdentifierLoc(), diag::err_typecheck_sclass_fscope); D.setInvalidType(); } } if (getLangOpts().OpenCL) { // Set up the special work-group-local storage class for variables in the // OpenCL __local address space. if (R.getAddressSpace() == LangAS::opencl_local) { SC = SC_OpenCLWorkGroupLocal; } // OpenCL v1.2 s6.9.b p4: // The sampler type cannot be used with the __local and __global address // space qualifiers. if (R->isSamplerT() && (R.getAddressSpace() == LangAS::opencl_local || R.getAddressSpace() == LangAS::opencl_global)) { Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace); } // OpenCL 1.2 spec, p6.9 r: // The event type cannot be used to declare a program scope variable. // The event type cannot be used with the __local, __constant and __global // address space qualifiers. if (R->isEventT()) { if (S->getParent() == nullptr) { Diag(D.getLocStart(), diag::err_event_t_global_var); D.setInvalidType(); } if (R.getAddressSpace()) { Diag(D.getLocStart(), diag::err_event_t_addr_space_qual); D.setInvalidType(); } } } bool IsExplicitSpecialization = false; bool IsVariableTemplateSpecialization = false; bool IsPartialSpecialization = false; bool IsVariableTemplate = false; VarDecl *NewVD = nullptr; VarTemplateDecl *NewTemplate = nullptr; TemplateParameterList *TemplateParams = nullptr; if (!getLangOpts().CPlusPlus) { NewVD = VarDecl::Create(Context, DC, D.getLocStart(), D.getIdentifierLoc(), II, R, TInfo, SC); if (D.isInvalidType()) NewVD->setInvalidDecl(); } else { bool Invalid = false; if (DC->isRecord() && !CurContext->isRecord()) { // This is an out-of-line definition of a static data member. switch (SC) { case SC_None: break; case SC_Static: if (!getLangOpts().HLSL) // HLSL Change: We had to set SC_Static for // static data member to distinguish it from // global constant variable. // So we ignore this warning. Diag(D.getDeclSpec().getStorageClassSpecLoc(), diag::err_static_out_of_line) << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); break; case SC_Auto: case SC_Register: case SC_Extern: // [dcl.stc] p2: The auto or register specifiers shall be applied only // to names of variables declared in a block or to function parameters. // [dcl.stc] p6: The extern specifier cannot be used in the declaration // of class members Diag(D.getDeclSpec().getStorageClassSpecLoc(), diag::err_storage_class_for_static_member) << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); break; case SC_PrivateExtern: llvm_unreachable("C storage class in c++!"); case SC_OpenCLWorkGroupLocal: llvm_unreachable("OpenCL storage class in c++!"); } } if (SC == SC_Static && CurContext->isRecord()) { if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) { if (RD->isLocalClass()) Diag(D.getIdentifierLoc(), diag::err_static_data_member_not_allowed_in_local_class) << Name << RD->getDeclName(); // C++98 [class.union]p1: If a union contains a static data member, // the program is ill-formed. C++11 drops this restriction. if (RD->isUnion()) Diag(D.getIdentifierLoc(), getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_static_data_member_in_union : diag::ext_static_data_member_in_union) << Name; // We conservatively disallow static data members in anonymous structs. 
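        // [Added commentary; illustrative sketch, not part of the original
        // source.] e.g. 'struct { static int s; } x;' lands in the branch
        // below.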
else if (!RD->getDeclName()) Diag(D.getIdentifierLoc(), diag::err_static_data_member_not_allowed_in_anon_struct) << Name << RD->isUnion(); } } // Match up the template parameter lists with the scope specifier, then // determine whether we have a template or a template specialization. TemplateParams = MatchTemplateParametersToScopeSpecifier( D.getDeclSpec().getLocStart(), D.getIdentifierLoc(), D.getCXXScopeSpec(), D.getName().getKind() == UnqualifiedId::IK_TemplateId ? D.getName().TemplateId : nullptr, TemplateParamLists, /*never a friend*/ false, IsExplicitSpecialization, Invalid); if (TemplateParams) { if (!TemplateParams->size() && D.getName().getKind() != UnqualifiedId::IK_TemplateId) { // There is an extraneous 'template<>' for this variable. Complain // about it, but allow the declaration of the variable. Diag(TemplateParams->getTemplateLoc(), diag::err_template_variable_noparams) << II << SourceRange(TemplateParams->getTemplateLoc(), TemplateParams->getRAngleLoc()); TemplateParams = nullptr; } else { if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) { // This is an explicit specialization or a partial specialization. // FIXME: Check that we can declare a specialization here. IsVariableTemplateSpecialization = true; IsPartialSpecialization = TemplateParams->size() > 0; } else { // if (TemplateParams->size() > 0) // This is a template declaration. IsVariableTemplate = true; // Check that we can declare a template here. if (CheckTemplateDeclScope(S, TemplateParams)) return nullptr; // Only C++1y supports variable templates (N3651). Diag(D.getIdentifierLoc(), getLangOpts().CPlusPlus14 ? diag::warn_cxx11_compat_variable_template : diag::ext_variable_template); } } } else { assert( (Invalid || D.getName().getKind() != UnqualifiedId::IK_TemplateId) && "should have a 'template<>' for this decl"); } if (IsVariableTemplateSpecialization) { SourceLocation TemplateKWLoc = TemplateParamLists.size() > 0 ? TemplateParamLists[0]->getTemplateLoc() : SourceLocation(); DeclResult Res = ActOnVarTemplateSpecialization( S, D, TInfo, TemplateKWLoc, TemplateParams, SC, IsPartialSpecialization); if (Res.isInvalid()) return nullptr; NewVD = cast<VarDecl>(Res.get()); AddToScope = false; } else NewVD = VarDecl::Create(Context, DC, D.getLocStart(), D.getIdentifierLoc(), II, R, TInfo, SC); // If this is supposed to be a variable template, create it as such. if (IsVariableTemplate) { NewTemplate = VarTemplateDecl::Create(Context, DC, D.getIdentifierLoc(), Name, TemplateParams, NewVD); NewVD->setDescribedVarTemplate(NewTemplate); } // If this decl has an auto type in need of deduction, make a note of the // Decl so we can diagnose uses of it in its own initializer. if (D.getDeclSpec().containsPlaceholderType() && R->getContainedAutoType()) ParsingInitForAutoVars.insert(NewVD); if (D.isInvalidType() || Invalid) { NewVD->setInvalidDecl(); if (NewTemplate) NewTemplate->setInvalidDecl(); } SetNestedNameSpecifier(NewVD, D); // If we have any template parameter lists that don't directly belong to // the variable (matching the scope specifier), store them. unsigned VDTemplateParamLists = TemplateParams ? 1 : 0; if (TemplateParamLists.size() > VDTemplateParamLists) NewVD->setTemplateParameterListsInfo( Context, TemplateParamLists.size() - VDTemplateParamLists, TemplateParamLists.data()); if (D.getDeclSpec().isConstexprSpecified()) NewVD->setConstexpr(true); } // Set the lexical context. If the declarator has a C++ scope specifier, the // lexical context will be different from the semantic context. 
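  // [Added commentary; illustrative sketch, not part of the original source.]
  // e.g. for an out-of-line definition 'int X::s;' written at file scope,
  // the semantic context is X while the lexical context set here is the
  // translation unit.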
NewVD->setLexicalDeclContext(CurContext); if (NewTemplate) NewTemplate->setLexicalDeclContext(CurContext); if (IsLocalExternDecl) NewVD->setLocalExternDecl(); bool EmitTLSUnsupportedError = false; if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) { // C++11 [dcl.stc]p4: // When thread_local is applied to a variable of block scope the // storage-class-specifier static is implied if it does not appear // explicitly. // Core issue: 'static' is not implied if the variable is declared // 'extern'. if (NewVD->hasLocalStorage() && (SCSpec != DeclSpec::SCS_unspecified || TSCS != DeclSpec::TSCS_thread_local || !DC->isFunctionOrMethod())) Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), diag::err_thread_non_global) << DeclSpec::getSpecifierName(TSCS); else if (!Context.getTargetInfo().isTLSSupported()) { if (getLangOpts().CUDA) { // Postpone error emission until we've collected attributes required to // figure out whether it's a host or device variable and whether the // error should be ignored. EmitTLSUnsupportedError = true; // We still need to mark the variable as TLS so it shows up in AST with // proper storage class for other tools to use even if we're not going // to emit any code for it. NewVD->setTSCSpec(TSCS); } else Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), diag::err_thread_unsupported); } else NewVD->setTSCSpec(TSCS); } // C99 6.7.4p3 // An inline definition of a function with external linkage shall // not contain a definition of a modifiable object with static or // thread storage duration... // We only apply this when the function is required to be defined // elsewhere, i.e. when the function is not 'extern inline'. Note // that a local variable with thread storage duration still has to // be marked 'static'. Also note that it's possible to get these // semantics in C++ using __attribute__((gnu_inline)). if (SC == SC_Static && S->getFnParent() != nullptr && !NewVD->getType().isConstQualified()) { FunctionDecl *CurFD = getCurFunctionDecl(); if (CurFD && isFunctionDefinitionDiscarded(*this, CurFD)) { Diag(D.getDeclSpec().getStorageClassSpecLoc(), diag::warn_static_local_in_extern_inline); MaybeSuggestAddingStaticToDecl(CurFD); } } if (D.getDeclSpec().isModulePrivateSpecified()) { if (IsVariableTemplateSpecialization) Diag(NewVD->getLocation(), diag::err_module_private_specialization) << (IsPartialSpecialization ? 1 : 0) << FixItHint::CreateRemoval( D.getDeclSpec().getModulePrivateSpecLoc()); else if (IsExplicitSpecialization) Diag(NewVD->getLocation(), diag::err_module_private_specialization) << 2 << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc()); else if (NewVD->hasLocalStorage()) Diag(NewVD->getLocation(), diag::err_module_private_local) << 0 << NewVD->getDeclName() << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc()) << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc()); else { NewVD->setModulePrivate(); if (NewTemplate) NewTemplate->setModulePrivate(); } } // Handle attributes prior to checking for duplicates in MergeVarDecl ProcessDeclAttributes(S, NewVD, D); if (getLangOpts().CUDA) { if (EmitTLSUnsupportedError && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), diag::err_thread_unsupported); // CUDA B.2.5: "__shared__ and __constant__ variables have implied static // storage [duration]." 
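    // [Added commentary; illustrative sketch, not part of the original
    // source.] e.g. '__shared__ int tile[64];' written inside a kernel has
    // no explicit storage class, so SC_Static is imposed below.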
    if (SC == SC_None && S->getFnParent() != nullptr &&
        (NewVD->hasAttr<CUDASharedAttr>() ||
         NewVD->hasAttr<CUDAConstantAttr>())) {
      NewVD->setStorageClass(SC_Static);
    }
  }

  // Ensure that dllimport globals without explicit storage class are treated
  // as extern. The storage class is set above using parsed attributes. Now we
  // can check the VarDecl itself.
  assert(!NewVD->hasAttr<DLLImportAttr>() ||
         NewVD->getAttr<DLLImportAttr>()->isInherited() ||
         NewVD->isStaticDataMember() || NewVD->getStorageClass() != SC_None);

  // In auto-retain/release, infer strong retention for variables of
  // retainable type.
  if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
    NewVD->setInvalidDecl();

  // Handle GNU asm-label extension (encoded as an attribute).
  if (Expr *E = (Expr*)D.getAsmLabel()) {
    // The parser guarantees this is a string.
    StringLiteral *SE = cast<StringLiteral>(E);
    StringRef Label = SE->getString();
    if (S->getFnParent() != nullptr) {
      switch (SC) {
      case SC_None:
      case SC_Auto:
        Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
        break;
      case SC_Register:
        // Local Named register
        if (!Context.getTargetInfo().isValidGCCRegisterName(Label))
          Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
        break;
      case SC_Static:
      case SC_Extern:
      case SC_PrivateExtern:
      case SC_OpenCLWorkGroupLocal:
        break;
      }
    } else if (SC == SC_Register) {
      // Global Named register
      if (!Context.getTargetInfo().isValidGCCRegisterName(Label))
        Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
      if (!R->isIntegralType(Context) && !R->isPointerType()) {
        Diag(D.getLocStart(), diag::err_asm_bad_register_type);
        NewVD->setInvalidDecl(true);
      }
    }
    NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
                                                Context, Label, 0));
  } else if (!ExtnameUndeclaredIdentifiers.empty() &&
             isDeclTUScopedExternallyVisible(NewVD)) {
    llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
      ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
    if (I != ExtnameUndeclaredIdentifiers.end()) {
      NewVD->addAttr(I->second);
      ExtnameUndeclaredIdentifiers.erase(I);
    }
  }

  // Diagnose shadowed variables before filtering for scope.
  if (D.getCXXScopeSpec().isEmpty())
    CheckShadow(S, NewVD, Previous);

  // Don't consider existing declarations that are in a different
  // scope and are out-of-semantic-context declarations (if the new
  // declaration has linkage).
  FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewVD),
                       D.getCXXScopeSpec().isNotEmpty() ||
                       IsExplicitSpecialization ||
                       IsVariableTemplateSpecialization);

  // Check whether the previous declaration is in the same block scope. This
  // affects whether we merge types with it, per C++11 [dcl.array]p3.
  if (getLangOpts().CPlusPlus &&
      NewVD->isLocalVarDecl() && NewVD->hasExternalStorage())
    NewVD->setPreviousDeclInSameBlockScope(
        Previous.isSingleResult() && !Previous.isShadowed() &&
        isDeclInScope(Previous.getFoundDecl(), OriginalDC, S, false));

  if (!getLangOpts().CPlusPlus) {
    D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
  } else {
    // If this is an explicit specialization of a static data member, check it.
    if (IsExplicitSpecialization && !NewVD->isInvalidDecl() &&
        CheckMemberSpecialization(NewVD, Previous))
      NewVD->setInvalidDecl();

    // Merge the decl with the existing one if appropriate.
    if (!Previous.empty()) {
      if (Previous.isSingleResult() &&
          isa<FieldDecl>(Previous.getFoundDecl()) &&
          D.getCXXScopeSpec().isSet()) {
        // The user tried to define a non-static data member
        // out-of-line (C++ [dcl.meaning]p1).
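        // [Added commentary; illustrative example, not part of the original
        // source.]
        //   struct S { int m; };
        //   int S::m;  // err_nonstatic_member_out_of_line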
Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line) << D.getCXXScopeSpec().getRange(); Previous.clear(); NewVD->setInvalidDecl(); } } else if (D.getCXXScopeSpec().isSet()) { // No previous declaration in the qualifying scope. Diag(D.getIdentifierLoc(), diag::err_no_member) << Name << computeDeclContext(D.getCXXScopeSpec(), true) << D.getCXXScopeSpec().getRange(); NewVD->setInvalidDecl(); } if (!IsVariableTemplateSpecialization) D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous, MergeState)); // HLSL Change - add merge state if (NewTemplate) { VarTemplateDecl *PrevVarTemplate = NewVD->getPreviousDecl() ? NewVD->getPreviousDecl()->getDescribedVarTemplate() : nullptr; // Check the template parameter list of this declaration, possibly // merging in the template parameter list from the previous variable // template declaration. if (CheckTemplateParameterList( TemplateParams, PrevVarTemplate ? PrevVarTemplate->getTemplateParameters() : nullptr, (D.getCXXScopeSpec().isSet() && DC && DC->isRecord() && DC->isDependentContext()) ? TPC_ClassTemplateMember : TPC_VarTemplate)) NewVD->setInvalidDecl(); // If we are providing an explicit specialization of a static variable // template, make a note of that. if (PrevVarTemplate && PrevVarTemplate->getInstantiatedFromMemberTemplate()) PrevVarTemplate->setMemberSpecialization(); } } ProcessPragmaWeak(S, NewVD); // If this is the first declaration of an extern C variable, update // the map of such variables. if (NewVD->isFirstDecl() && !NewVD->isInvalidDecl() && isIncompleteDeclExternC(*this, NewVD)) RegisterLocallyScopedExternCDecl(NewVD, S); if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) { Decl *ManglingContextDecl; if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext( NewVD->getDeclContext(), ManglingContextDecl)) { Context.setManglingNumber( NewVD, MCtx->getManglingNumber( NewVD, getMSManglingNumber(getLangOpts(), S))); Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD)); } } if (D.isRedeclaration() && !Previous.empty()) { checkDLLAttributeRedeclaration( *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewVD, IsExplicitSpecialization); } if (NewTemplate) { if (NewVD->isInvalidDecl()) NewTemplate->setInvalidDecl(); ActOnDocumentableDecl(NewTemplate); return NewTemplate; } TransferUnusualAttributes(D, NewVD); // HLSL Change return NewVD; } /// \brief Diagnose variable or built-in function shadowing. Implements /// -Wshadow. /// /// This method is called whenever a VarDecl is added to a "useful" /// scope. /// /// \param S the scope in which the shadowing name is being declared /// \param R the lookup of the name /// void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) { // Return if warning is ignored. if (Diags.isIgnored(diag::warn_decl_shadow, R.getNameLoc())) return; // Don't diagnose declarations at file scope. if (D->hasGlobalStorage()) return; DeclContext *NewDC = D->getDeclContext(); // Only diagnose if we're shadowing an unambiguous field or variable. if (R.getResultKind() != LookupResult::Found) return; NamedDecl* ShadowedDecl = R.getFoundDecl(); if (!isa<VarDecl>(ShadowedDecl) && !isa<FieldDecl>(ShadowedDecl)) return; // Fields are not shadowed by variables in C++ static methods. 
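  // [Added commentary; illustrative sketch, not part of the original source.]
  //   struct A { int m; void f() { int m; } };         // -Wshadow fires
  //   struct B { int m; static void g() { int m; } };  // exempt below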
if (isa<FieldDecl>(ShadowedDecl)) if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDC)) if (MD->isStatic()) return; if (VarDecl *shadowedVar = dyn_cast<VarDecl>(ShadowedDecl)) if (shadowedVar->isExternC()) { // For shadowing external vars, make sure that we point to the global // declaration, not a locally scoped extern declaration. for (auto I : shadowedVar->redecls()) if (I->isFileVarDecl()) { ShadowedDecl = I; break; } } DeclContext *OldDC = ShadowedDecl->getDeclContext(); // Only warn about certain kinds of shadowing for class members. if (NewDC && NewDC->isRecord()) { // In particular, don't warn about shadowing non-class members. if (!OldDC->isRecord()) return; // TODO: should we warn about static data members shadowing // static data members from base classes? // TODO: don't diagnose for inaccessible shadowed members. // This is hard to do perfectly because we might friend the // shadowing context, but that's just a false negative. } // Determine what kind of declaration we're shadowing. unsigned Kind; if (isa<RecordDecl>(OldDC)) { if (isa<FieldDecl>(ShadowedDecl)) Kind = 3; // field else Kind = 2; // static data member } else if (OldDC->isFileContext()) Kind = 1; // global else Kind = 0; // local DeclarationName Name = R.getLookupName(); // Emit warning and note. if (getSourceManager().isInSystemMacro(R.getNameLoc())) return; Diag(R.getNameLoc(), diag::warn_decl_shadow) << Name << Kind << OldDC; Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration); } /// \brief Check -Wshadow without the advantage of a previous lookup. void Sema::CheckShadow(Scope *S, VarDecl *D) { if (Diags.isIgnored(diag::warn_decl_shadow, D->getLocation())) return; LookupResult R(*this, D->getDeclName(), D->getLocation(), Sema::LookupOrdinaryName, Sema::ForRedeclaration); LookupName(R, S); CheckShadow(S, D, R); } /// Check for conflict between this global or extern "C" declaration and /// previous global or extern "C" declarations. This is only used in C++. template<typename T> static bool checkGlobalOrExternCConflict( Sema &S, const T *ND, bool IsGlobal, LookupResult &Previous) { assert(S.getLangOpts().CPlusPlus && "only C++ has extern \"C\""); NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName()); if (!Prev && IsGlobal && !isIncompleteDeclExternC(S, ND)) { // The common case: this global doesn't conflict with any extern "C" // declaration. return false; } if (Prev) { if (!IsGlobal || isIncompleteDeclExternC(S, ND)) { // Both the old and new declarations have C language linkage. This is a // redeclaration. Previous.clear(); Previous.addDecl(Prev); return true; } // This is a global, non-extern "C" declaration, and there is a previous // non-global extern "C" declaration. Diagnose if this is a variable // declaration. if (!isa<VarDecl>(ND)) return false; } else { // The declaration is extern "C". Check for any declaration in the // translation unit which might conflict. if (IsGlobal) { // We have already performed the lookup into the translation unit. 
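      // [Added commentary; illustrative sketch, not part of the original
      // source.] The kind of conflict this function guards against, per
      // C++ [dcl.link]p6:
      //   extern "C" { void f() { extern int n; } }  // 'n' has C linkage
      //   int n;  // same name at global scope: err_extern_c_global_conflict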
IsGlobal = false; for (LookupResult::iterator I = Previous.begin(), E = Previous.end(); I != E; ++I) { if (isa<VarDecl>(*I)) { Prev = *I; break; } } } else { DeclContext::lookup_result R = S.Context.getTranslationUnitDecl()->lookup(ND->getDeclName()); for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end(); I != E; ++I) { if (isa<VarDecl>(*I)) { Prev = *I; break; } // FIXME: If we have any other entity with this name in global scope, // the declaration is ill-formed, but that is a defect: it breaks the // 'stat' hack, for instance. Only variables can have mangled name // clashes with extern "C" declarations, so only they deserve a // diagnostic. } } if (!Prev) return false; } // Use the first declaration's location to ensure we point at something which // is lexically inside an extern "C" linkage-spec. assert(Prev && "should have found a previous declaration to diagnose"); if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Prev)) Prev = FD->getFirstDecl(); else Prev = cast<VarDecl>(Prev)->getFirstDecl(); S.Diag(ND->getLocation(), diag::err_extern_c_global_conflict) << IsGlobal << ND; S.Diag(Prev->getLocation(), diag::note_extern_c_global_conflict) << IsGlobal; return false; } /// Apply special rules for handling extern "C" declarations. Returns \c true /// if we have found that this is a redeclaration of some prior entity. /// /// Per C++ [dcl.link]p6: /// Two declarations [for a function or variable] with C language linkage /// with the same name that appear in different scopes refer to the same /// [entity]. An entity with C language linkage shall not be declared with /// the same name as an entity in global scope. template<typename T> static bool checkForConflictWithNonVisibleExternC(Sema &S, const T *ND, LookupResult &Previous) { if (!S.getLangOpts().CPlusPlus) { // In C, when declaring a global variable, look for a corresponding 'extern' // variable declared in function scope. We don't need this in C++, because // we find local extern decls in the surrounding file-scope DeclContext. if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit()) { if (NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName())) { Previous.clear(); Previous.addDecl(Prev); return true; } } return false; } // A declaration in the translation unit can conflict with an extern "C" // declaration. if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit()) return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/true, Previous); // An extern "C" declaration can conflict with a declaration in the // translation unit or can be a redeclaration of an extern "C" declaration // in another scope. if (isIncompleteDeclExternC(S,ND)) return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/false, Previous); // Neither global nor extern "C": nothing to do. return false; } void Sema::CheckVariableDeclarationType(VarDecl *NewVD) { // If the decl is already known invalid, don't check it. if (NewVD->isInvalidDecl()) return; TypeSourceInfo *TInfo = NewVD->getTypeSourceInfo(); QualType T = TInfo->getType(); // Defer checking an 'auto' type until its initializer is attached. if (T->isUndeducedType()) return; if (NewVD->hasAttrs()) CheckAlignasUnderalignment(NewVD); if (T->isObjCObjectType()) { Diag(NewVD->getLocation(), diag::err_statically_allocated_object) << FixItHint::CreateInsertion(NewVD->getLocation(), "*"); T = Context.getObjCObjectPointerType(T); NewVD->setType(T); } // Emit an error if an address space was applied to decl with local storage. 
// This includes arrays of objects with address space qualifiers, but not // automatic variables that point to other address spaces. // ISO/IEC TR 18037 S5.1.2 if (NewVD->hasLocalStorage() && T.getAddressSpace() != 0) { Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl); NewVD->setInvalidDecl(); return; } // OpenCL v1.2 s6.5 - All program scope variables must be declared in the // __constant address space. if (getLangOpts().OpenCL && NewVD->isFileVarDecl() && T.getAddressSpace() != LangAS::opencl_constant && !T->isSamplerT()){ Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space); NewVD->setInvalidDecl(); return; } // OpenCL v1.2 s6.8 -- The static qualifier is valid only in program // scope. if ((getLangOpts().OpenCLVersion >= 120) && NewVD->isStaticLocal()) { Diag(NewVD->getLocation(), diag::err_static_function_scope); NewVD->setInvalidDecl(); return; } if (NewVD->hasLocalStorage() && T.isObjCGCWeak() && !NewVD->hasAttr<BlocksAttr>()) { if (getLangOpts().getGC() != LangOptions::NonGC) Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local); else { assert(!getLangOpts().ObjCAutoRefCount); Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local); } } bool isVM = T->isVariablyModifiedType(); if (isVM || NewVD->hasAttr<CleanupAttr>() || NewVD->hasAttr<BlocksAttr>()) getCurFunction()->setHasBranchProtectedScope(); if ((isVM && NewVD->hasLinkage()) || (T->isVariableArrayType() && NewVD->hasGlobalStorage())) { bool SizeIsNegative; llvm::APSInt Oversized; TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context, SizeIsNegative, Oversized); if (!FixedTInfo && T->isVariableArrayType()) { const VariableArrayType *VAT = Context.getAsVariableArrayType(T); // FIXME: This won't give the correct result for // int a[10][n]; SourceRange SizeRange = VAT->getSizeExpr()->getSourceRange(); if (NewVD->isFileVarDecl()) Diag(NewVD->getLocation(), diag::err_vla_decl_in_file_scope) << SizeRange; else if (NewVD->isStaticLocal()) Diag(NewVD->getLocation(), diag::err_vla_decl_has_static_storage) << SizeRange; else Diag(NewVD->getLocation(), diag::err_vla_decl_has_extern_linkage) << SizeRange; NewVD->setInvalidDecl(); return; } if (!FixedTInfo) { if (NewVD->isFileVarDecl()) Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope); else Diag(NewVD->getLocation(), diag::err_vm_decl_has_extern_linkage); NewVD->setInvalidDecl(); return; } Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size); NewVD->setType(FixedTInfo->getType()); NewVD->setTypeSourceInfo(FixedTInfo); } if (T->isVoidType()) { // C++98 [dcl.stc]p5: The extern specifier can be applied only to the names // of objects and functions. if (NewVD->isThisDeclarationADefinition() || getLangOpts().CPlusPlus) { Diag(NewVD->getLocation(), diag::err_typecheck_decl_incomplete_type) << T; NewVD->setInvalidDecl(); return; } } if (!NewVD->hasLocalStorage() && NewVD->hasAttr<BlocksAttr>()) { Diag(NewVD->getLocation(), diag::err_block_on_nonlocal); NewVD->setInvalidDecl(); return; } if (isVM && NewVD->hasAttr<BlocksAttr>()) { Diag(NewVD->getLocation(), diag::err_block_on_vm); NewVD->setInvalidDecl(); return; } if (NewVD->isConstexpr() && !T->isDependentType() && RequireLiteralType(NewVD->getLocation(), T, diag::err_constexpr_var_non_literal)) { NewVD->setInvalidDecl(); return; } } /// \brief Perform semantic checking on a newly-created variable /// declaration. /// /// This routine performs all of the type-checking required for a /// variable declaration once it has been built. 
/// It is used both to
/// check variables after they have been parsed and their declarators
/// have been translated into a declaration, and to check variables
/// that have been instantiated from a template.
///
/// Sets NewVD->isInvalidDecl() if an error was encountered.
///
/// Returns true if the variable declaration is a redeclaration.
bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous,
                                    ShadowMergeState MergeState) { // HLSL Change - add merge state
  CheckVariableDeclarationType(NewVD);

  // If the decl is already known invalid, don't check it.
  if (NewVD->isInvalidDecl())
    return false;

  // If we did not find anything by this name, look for a non-visible
  // extern "C" declaration with the same name.
  if (Previous.empty() &&
      checkForConflictWithNonVisibleExternC(*this, NewVD, Previous))
    Previous.setShadowed();

  // Filter out any non-conflicting previous declarations.
  filterNonConflictingPreviousDecls(*this, NewVD, Previous);

  if (!Previous.empty()) {
    MergeVarDecl(NewVD, Previous, MergeState); // HLSL Change - add merge state
    return true;
  }

  return false;
}

/// \brief Data used with FindOverriddenMethod
struct FindOverriddenMethodData {
  Sema *S;
  CXXMethodDecl *Method;
};

/// \brief Member lookup function that determines whether a given C++
/// method overrides a method in a base class, to be used with
/// CXXRecordDecl::lookupInBases().
static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
                                 CXXBasePath &Path,
                                 void *UserData) {
  RecordDecl *BaseRecord =
    Specifier->getType()->getAs<RecordType>()->getDecl();

  FindOverriddenMethodData *Data =
    reinterpret_cast<FindOverriddenMethodData*>(UserData);

  DeclarationName Name = Data->Method->getDeclName();

  // FIXME: Do we care about other names here too?
  if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
    // We really want to find the base class destructor here.
    QualType T = Data->S->Context.getTypeDeclType(BaseRecord);
    CanQualType CT = Data->S->Context.getCanonicalType(T);
    Name = Data->S->Context.DeclarationNames.getCXXDestructorName(CT);
  }

  for (Path.Decls = BaseRecord->lookup(Name);
       !Path.Decls.empty();
       Path.Decls = Path.Decls.slice(1)) {
    NamedDecl *D = Path.Decls.front();
    if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
      if (MD->isVirtual() && !Data->S->IsOverload(Data->Method, MD, false))
        return true;
    }
  }

  return false;
}

namespace {
  enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
}

/// \brief Report an error regarding overriding, along with any relevant
/// overridden methods.
///
/// \param DiagID the primary error to report.
/// \param MD the overriding method.
/// \param OEK which overrides to include as notes.
static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
                            OverrideErrorKind OEK = OEK_All) {
  S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
  for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
                                      E = MD->end_overridden_methods();
       I != E; ++I) {
    // This check (& the OEK parameter) could be replaced by a predicate, but
    // without lambdas that would be overkill. This is still nicer than writing
    // out the diag loop 3 times.
    if ((OEK == OEK_All) ||
        (OEK == OEK_NonDeleted && !(*I)->isDeleted()) ||
        (OEK == OEK_Deleted && (*I)->isDeleted()))
      S.Diag((*I)->getLocation(), diag::note_overridden_virtual_function);
  }
}

/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
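/// For example (illustrative, not part of the original source):
/// \code
///   struct Base { virtual void f(); };
///   struct Derived : Base { void f(); };  // Base::f() is recorded as an
///                                         // overridden method of Derived::f
/// \endcode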
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) { // Look for methods in base classes that this method might override. CXXBasePaths Paths; FindOverriddenMethodData Data; Data.Method = MD; Data.S = this; bool hasDeletedOverridenMethods = false; bool hasNonDeletedOverridenMethods = false; bool AddedAny = false; if (DC->lookupInBases(&FindOverriddenMethod, &Data, Paths)) { for (auto *I : Paths.found_decls()) { if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(I)) { MD->addOverriddenMethod(OldMD->getCanonicalDecl()); if (!CheckOverridingFunctionReturnType(MD, OldMD) && !CheckOverridingFunctionAttributes(MD, OldMD) && !CheckOverridingFunctionExceptionSpec(MD, OldMD) && !CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) { hasDeletedOverridenMethods |= OldMD->isDeleted(); hasNonDeletedOverridenMethods |= !OldMD->isDeleted(); AddedAny = true; } } } } if (hasDeletedOverridenMethods && !MD->isDeleted()) { ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted); } if (hasNonDeletedOverridenMethods && MD->isDeleted()) { ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted); } return AddedAny; } namespace { // Struct for holding all of the extra arguments needed by // DiagnoseInvalidRedeclaration to call Sema::ActOnFunctionDeclarator. struct ActOnFDArgs { Scope *S; Declarator &D; MultiTemplateParamsArg TemplateParamLists; bool AddToScope; }; } namespace { // Callback to only accept typo corrections that have a non-zero edit distance. // Also only accept corrections that have the same parent decl. class DifferentNameValidatorCCC : public CorrectionCandidateCallback { public: DifferentNameValidatorCCC(ASTContext &Context, FunctionDecl *TypoFD, CXXRecordDecl *Parent) : Context(Context), OriginalFD(TypoFD), ExpectedParent(Parent ? Parent->getCanonicalDecl() : nullptr) {} bool ValidateCandidate(const TypoCorrection &candidate) override { if (candidate.getEditDistance() == 0) return false; SmallVector<unsigned, 1> MismatchedParams; for (TypoCorrection::const_decl_iterator CDecl = candidate.begin(), CDeclEnd = candidate.end(); CDecl != CDeclEnd; ++CDecl) { FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl); if (FD && !FD->hasBody() && hasSimilarParameters(Context, FD, OriginalFD, MismatchedParams)) { if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { CXXRecordDecl *Parent = MD->getParent(); if (Parent && Parent->getCanonicalDecl() == ExpectedParent) return true; } else if (!ExpectedParent) { return true; } } } return false; } private: ASTContext &Context; FunctionDecl *OriginalFD; CXXRecordDecl *ExpectedParent; }; } /// \brief Generate diagnostics for an invalid function redeclaration. /// /// This routine handles generating the diagnostic messages for an invalid /// function redeclaration, including finding possible similar declarations /// or performing typo correction if there are no previous declarations with /// the same name. /// /// Returns a NamedDecl iff typo correction was performed and substituting in /// the new declaration name does not cause new errors. static NamedDecl *DiagnoseInvalidRedeclaration( Sema &SemaRef, LookupResult &Previous, FunctionDecl *NewFD, ActOnFDArgs &ExtraArgs, bool IsLocalFriend, Scope *S) { DeclarationName Name = NewFD->getDeclName(); DeclContext *NewDC = NewFD->getDeclContext(); SmallVector<unsigned, 1> MismatchedParams; SmallVector<std::pair<FunctionDecl *, unsigned>, 1> NearMatches; TypoCorrection Correction; bool IsDefinition = ExtraArgs.D.isFunctionDefinition(); unsigned DiagMsg = IsLocalFriend ? 
diag::err_no_matching_local_friend : diag::err_member_decl_does_not_match; LookupResult Prev(SemaRef, Name, NewFD->getLocation(), IsLocalFriend ? Sema::LookupLocalFriendName : Sema::LookupOrdinaryName, Sema::ForRedeclaration); NewFD->setInvalidDecl(); if (IsLocalFriend) SemaRef.LookupName(Prev, S); else SemaRef.LookupQualifiedName(Prev, NewDC); assert(!Prev.isAmbiguous() && "Cannot have an ambiguity in previous-declaration lookup"); CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD); if (!Prev.empty()) { for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end(); Func != FuncEnd; ++Func) { FunctionDecl *FD = dyn_cast<FunctionDecl>(*Func); if (FD && hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) { // Add 1 to the index so that 0 can mean the mismatch didn't // involve a parameter unsigned ParamNum = MismatchedParams.empty() ? 0 : MismatchedParams.front() + 1; NearMatches.push_back(std::make_pair(FD, ParamNum)); } } // If the qualified name lookup yielded nothing, try typo correction } else if ((Correction = SemaRef.CorrectTypo( Prev.getLookupNameInfo(), Prev.getLookupKind(), S, &ExtraArgs.D.getCXXScopeSpec(), llvm::make_unique<DifferentNameValidatorCCC>( SemaRef.Context, NewFD, MD ? MD->getParent() : nullptr), Sema::CTK_ErrorRecovery, IsLocalFriend ? nullptr : NewDC))) { // Set up everything for the call to ActOnFunctionDeclarator ExtraArgs.D.SetIdentifier(Correction.getCorrectionAsIdentifierInfo(), ExtraArgs.D.getIdentifierLoc()); Previous.clear(); Previous.setLookupName(Correction.getCorrection()); for (TypoCorrection::decl_iterator CDecl = Correction.begin(), CDeclEnd = Correction.end(); CDecl != CDeclEnd; ++CDecl) { FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl); if (FD && !FD->hasBody() && hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) { Previous.addDecl(FD); } } bool wasRedeclaration = ExtraArgs.D.isRedeclaration(); NamedDecl *Result; // Retry building the function declaration with the new previous // declarations, and with errors suppressed. { // Trap errors. Sema::SFINAETrap Trap(SemaRef); // TODO: Refactor ActOnFunctionDeclarator so that we can call only the // pieces need to verify the typo-corrected C++ declaration and hopefully // eliminate the need for the parameter pack ExtraArgs. Result = SemaRef.ActOnFunctionDeclarator( ExtraArgs.S, ExtraArgs.D, Correction.getCorrectionDecl()->getDeclContext(), NewFD->getTypeSourceInfo(), Previous, ExtraArgs.TemplateParamLists, ExtraArgs.AddToScope); if (Trap.hasErrorOccurred()) Result = nullptr; } if (Result) { // Determine which correction we picked. Decl *Canonical = Result->getCanonicalDecl(); for (LookupResult::iterator I = Previous.begin(), E = Previous.end(); I != E; ++I) if ((*I)->getCanonicalDecl() == Canonical) Correction.setCorrectionDecl(*I); SemaRef.diagnoseTypo( Correction, SemaRef.PDiag(IsLocalFriend ? 
diag::err_no_matching_local_friend_suggest : diag::err_member_decl_does_not_match_suggest) << Name << NewDC << IsDefinition); return Result; } // Pretend the typo correction never occurred ExtraArgs.D.SetIdentifier(Name.getAsIdentifierInfo(), ExtraArgs.D.getIdentifierLoc()); ExtraArgs.D.setRedeclaration(wasRedeclaration); Previous.clear(); Previous.setLookupName(Name); } SemaRef.Diag(NewFD->getLocation(), DiagMsg) << Name << NewDC << IsDefinition << NewFD->getLocation(); bool NewFDisConst = false; if (CXXMethodDecl *NewMD = dyn_cast<CXXMethodDecl>(NewFD)) NewFDisConst = NewMD->isConst(); for (SmallVectorImpl<std::pair<FunctionDecl *, unsigned> >::iterator NearMatch = NearMatches.begin(), NearMatchEnd = NearMatches.end(); NearMatch != NearMatchEnd; ++NearMatch) { FunctionDecl *FD = NearMatch->first; CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD); bool FDisConst = MD && MD->isConst(); bool IsMember = MD || !IsLocalFriend; // FIXME: These notes are poorly worded for the local friend case. if (unsigned Idx = NearMatch->second) { ParmVarDecl *FDParam = FD->getParamDecl(Idx-1); SourceLocation Loc = FDParam->getTypeSpecStartLoc(); if (Loc.isInvalid()) Loc = FD->getLocation(); SemaRef.Diag(Loc, IsMember ? diag::note_member_def_close_param_match : diag::note_local_decl_close_param_match) << Idx << FDParam->getType() << NewFD->getParamDecl(Idx - 1)->getType(); } else if (FDisConst != NewFDisConst) { SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match) << NewFDisConst << FD->getSourceRange().getEnd(); } else SemaRef.Diag(FD->getLocation(), IsMember ? diag::note_member_def_close_match : diag::note_local_decl_close_match); } return nullptr; } static StorageClass getFunctionStorageClass(Sema &SemaRef, Declarator &D) { switch (D.getDeclSpec().getStorageClassSpec()) { default: llvm_unreachable("Unknown storage class!"); case DeclSpec::SCS_auto: case DeclSpec::SCS_register: case DeclSpec::SCS_mutable: SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(), diag::err_typecheck_sclass_func); D.setInvalidType(); break; case DeclSpec::SCS_unspecified: break; case DeclSpec::SCS_extern: if (D.getDeclSpec().isExternInLinkageSpec()) return SC_None; return SC_Extern; case DeclSpec::SCS_static: { if (SemaRef.CurContext->getRedeclContext()->isFunctionOrMethod()) { // C99 6.7.1p5: // The declaration of an identifier for a function that has // block scope shall have no explicit storage-class specifier // other than extern // See also (C++ [dcl.stc]p4). SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(), diag::err_static_block_func); break; } else return SC_Static; } case DeclSpec::SCS_private_extern: return SC_PrivateExtern; } // No explicit storage class has already been returned return SC_None; } static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D, DeclContext *DC, QualType &R, TypeSourceInfo *TInfo, StorageClass SC, bool &IsVirtualOkay) { DeclarationNameInfo NameInfo = SemaRef.GetNameForDeclarator(D); DeclarationName Name = NameInfo.getName(); FunctionDecl *NewFD = nullptr; bool isInline = D.getDeclSpec().isInlineSpecified(); if (!SemaRef.getLangOpts().CPlusPlus) { // Determine whether the function was written with a // prototype. This true when: // - there is a prototype in the declarator, or // - the type R of the function is some kind of typedef or other reference // to a type name (which eventually refers to a function type). 
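    // Illustrative example (annotation added here, not in the original
    // source); both declarations of 'f' below count as having a prototype:
    //
    //   void f(int);           // prototype written directly in the declarator
    //   typedef void fn(int);
    //   fn f;                  // prototype reached through the typedef 'fn'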
bool HasPrototype = (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) || (!isa<FunctionType>(R.getTypePtr()) && R->isFunctionProtoType()); NewFD = FunctionDecl::Create(SemaRef.Context, DC, D.getLocStart(), NameInfo, R, TInfo, SC, isInline, HasPrototype, false); if (D.isInvalidType()) NewFD->setInvalidDecl(); return NewFD; } bool isExplicit = D.getDeclSpec().isExplicitSpecified(); bool isConstexpr = D.getDeclSpec().isConstexprSpecified(); // Check that the return type is not an abstract class type. // For record types, this is done by the AbstractClassUsageDiagnoser once // the class has been completely parsed. if (!DC->isRecord() && SemaRef.RequireNonAbstractType( D.getIdentifierLoc(), R->getAs<FunctionType>()->getReturnType(), diag::err_abstract_type_in_decl, SemaRef.AbstractReturnType)) D.setInvalidType(); if (Name.getNameKind() == DeclarationName::CXXConstructorName) { // This is a C++ constructor declaration. assert(DC->isRecord() && "Constructors can only be declared in a member context"); R = SemaRef.CheckConstructorDeclarator(D, R, SC); return CXXConstructorDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC), D.getLocStart(), NameInfo, R, TInfo, isExplicit, isInline, /*isImplicitlyDeclared=*/false, isConstexpr); } else if (Name.getNameKind() == DeclarationName::CXXDestructorName) { // This is a C++ destructor declaration. if (DC->isRecord()) { R = SemaRef.CheckDestructorDeclarator(D, R, SC); CXXRecordDecl *Record = cast<CXXRecordDecl>(DC); CXXDestructorDecl *NewDD = CXXDestructorDecl::Create( SemaRef.Context, Record, D.getLocStart(), NameInfo, R, TInfo, isInline, /*isImplicitlyDeclared=*/false); // If the class is complete, then we now create the implicit exception // specification. If the class is incomplete or dependent, we can't do // it yet. if (SemaRef.getLangOpts().CPlusPlus11 && !Record->isDependentType() && Record->getDefinition() && !Record->isBeingDefined() && R->getAs<FunctionProtoType>()->getExceptionSpecType() == EST_None) { SemaRef.AdjustDestructorExceptionSpec(Record, NewDD); } IsVirtualOkay = true; return NewDD; } else { SemaRef.Diag(D.getIdentifierLoc(), diag::err_destructor_not_member); D.setInvalidType(); // Create a FunctionDecl to satisfy the function definition parsing // code path. return FunctionDecl::Create(SemaRef.Context, DC, D.getLocStart(), D.getIdentifierLoc(), Name, R, TInfo, SC, isInline, /*hasPrototype=*/true, isConstexpr); } } else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) { if (!DC->isRecord()) { SemaRef.Diag(D.getIdentifierLoc(), diag::err_conv_function_not_member); return nullptr; } SemaRef.CheckConversionDeclarator(D, R, SC); IsVirtualOkay = true; return CXXConversionDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC), D.getLocStart(), NameInfo, R, TInfo, isInline, isExplicit, isConstexpr, SourceLocation()); } else if (DC->isRecord()) { // If the name of the function is the same as the name of the record, // then this must be an invalid constructor that has a return type. // (The parser checks for a return type and makes the declarator a // constructor if it has no return type). if (Name.getAsIdentifierInfo() && Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){ SemaRef.Diag(D.getIdentifierLoc(), diag::err_constructor_return_type) << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc()) << SourceRange(D.getIdentifierLoc()); return nullptr; } // This is a C++ method declaration. 
    CXXMethodDecl *Ret = CXXMethodDecl::Create(SemaRef.Context,
                                               cast<CXXRecordDecl>(DC),
                                               D.getLocStart(), NameInfo, R,
                                               TInfo, SC, isInline,
                                               isConstexpr, SourceLocation());
    IsVirtualOkay = !Ret->isStatic();
    return Ret;
  } else {
    bool isFriend =
        SemaRef.getLangOpts().CPlusPlus && D.getDeclSpec().isFriendSpecified();
    if (!isFriend && SemaRef.CurContext->isRecord())
      return nullptr;

    // Determine whether the function was written with a
    // prototype. This is true when:
    // - we're in C++ (where every function has a prototype),
    return FunctionDecl::Create(SemaRef.Context, DC, D.getLocStart(),
                                NameInfo, R, TInfo, SC, isInline,
                                true/*HasPrototype*/, isConstexpr);
  }
}

enum OpenCLParamType {
  ValidKernelParam,
  PtrPtrKernelParam,
  PtrKernelParam,
  PrivatePtrKernelParam,
  InvalidKernelParam,
  RecordKernelParam
};

static OpenCLParamType getOpenCLKernelParameterType(QualType PT) {
  if (PT->isPointerType()) {
    QualType PointeeType = PT->getPointeeType();
    if (PointeeType->isPointerType())
      return PtrPtrKernelParam;
    return PointeeType.getAddressSpace() == 0 ? PrivatePtrKernelParam
                                              : PtrKernelParam;
  }

  // TODO: Forbid the other integer types (size_t, ptrdiff_t...) when they can
  // be used as builtin types.

  if (PT->isImageType())
    return PtrKernelParam;

  if (PT->isBooleanType())
    return InvalidKernelParam;

  if (PT->isEventT())
    return InvalidKernelParam;

  if (PT->isHalfType())
    return InvalidKernelParam;

  if (PT->isRecordType())
    return RecordKernelParam;

  return ValidKernelParam;
}

static void checkIsValidOpenCLKernelParameter(
  Sema &S,
  Declarator &D,
  ParmVarDecl *Param,
  llvm::SmallPtrSetImpl<const Type *> &ValidTypes) {
  QualType PT = Param->getType();

  // Cache the valid types we encounter to avoid rechecking structs that are
  // used again.
  if (ValidTypes.count(PT.getTypePtr()))
    return;

  switch (getOpenCLKernelParameterType(PT)) {
  case PtrPtrKernelParam:
    // OpenCL v1.2 s6.9.a:
    // A kernel function argument cannot be declared as a
    // pointer to a pointer type.
    S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
    D.setInvalidType();
    return;

  case PrivatePtrKernelParam:
    // OpenCL v1.2 s6.9.a:
    // A kernel function argument cannot be declared as a
    // pointer to the private address space.
    S.Diag(Param->getLocation(), diag::err_opencl_private_ptr_kernel_param);
    D.setInvalidType();
    return;

    // OpenCL v1.2 s6.9.k:
    // Arguments to kernel functions in a program cannot be declared with the
    // built-in scalar types bool, half, size_t, ptrdiff_t, intptr_t, and
    // uintptr_t or a struct and/or union that contain fields declared to be
    // one of these built-in scalar types.

  case InvalidKernelParam:
    // OpenCL v1.2 s6.8 n:
    // A kernel function argument cannot be declared
    // of event_t type.
    S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
    D.setInvalidType();
    return;

  case PtrKernelParam:
  case ValidKernelParam:
    ValidTypes.insert(PT.getTypePtr());
    return;

  case RecordKernelParam:
    break;
  }

  // Track nested structs we will inspect
  SmallVector<const Decl *, 4> VisitStack;

  // Track where we are in the nested structs. Items will migrate from
  // VisitStack to HistoryStack as we do the DFS for a bad field.
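  // Illustrative example (annotation added here, not in the original source),
  // in OpenCL C; the DFS below walks Outer -> Inner and rejects the kernel
  // because a field is a pointer into the private address space:
  //
  //   struct Inner { int *p; };
  //   struct Outer { struct Inner i; };
  //   kernel void k(struct Outer o);  // err_record_with_pointers_kernel_param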
SmallVector<const FieldDecl *, 4> HistoryStack; HistoryStack.push_back(nullptr); const RecordDecl *PD = PT->castAs<RecordType>()->getDecl(); VisitStack.push_back(PD); assert(VisitStack.back() && "First decl null?"); do { const Decl *Next = VisitStack.pop_back_val(); if (!Next) { assert(!HistoryStack.empty()); // Found a marker, we have gone up a level if (const FieldDecl *Hist = HistoryStack.pop_back_val()) ValidTypes.insert(Hist->getType().getTypePtr()); continue; } // Adds everything except the original parameter declaration (which is not a // field itself) to the history stack. const RecordDecl *RD; if (const FieldDecl *Field = dyn_cast<FieldDecl>(Next)) { HistoryStack.push_back(Field); RD = Field->getType()->castAs<RecordType>()->getDecl(); } else { RD = cast<RecordDecl>(Next); } // Add a null marker so we know when we've gone back up a level VisitStack.push_back(nullptr); for (const auto *FD : RD->fields()) { QualType QT = FD->getType(); if (ValidTypes.count(QT.getTypePtr())) continue; OpenCLParamType ParamType = getOpenCLKernelParameterType(QT); if (ParamType == ValidKernelParam) continue; if (ParamType == RecordKernelParam) { VisitStack.push_back(FD); continue; } // OpenCL v1.2 s6.9.p: // Arguments to kernel functions that are declared to be a struct or union // do not allow OpenCL objects to be passed as elements of the struct or // union. if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam || ParamType == PrivatePtrKernelParam) { S.Diag(Param->getLocation(), diag::err_record_with_pointers_kernel_param) << PT->isUnionType() << PT; } else { S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT; } S.Diag(PD->getLocation(), diag::note_within_field_of_type) << PD->getDeclName(); // We have an error, now let's go back up through history and show where // the offending field came from for (ArrayRef<const FieldDecl *>::const_iterator I = HistoryStack.begin() + 1, E = HistoryStack.end(); I != E; ++I) { const FieldDecl *OuterField = *I; S.Diag(OuterField->getLocation(), diag::note_within_field_of_type) << OuterField->getType(); } S.Diag(FD->getLocation(), diag::note_illegal_field_declared_here) << QT->isPointerType() << QT; D.setInvalidType(); return; } } while (!VisitStack.empty()); } NamedDecl* Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope) { QualType R = TInfo->getType(); assert(R.getTypePtr()->isFunctionType()); // TODO: consider using NameInfo for diagnostic. 
DeclarationNameInfo NameInfo = GetNameForDeclarator(D); DeclarationName Name = NameInfo.getName(); StorageClass SC = getFunctionStorageClass(*this, D); // HLSL Change - reject 'extern' functions if (getLangOpts().HLSL && SC == SC_Extern) { Diag(D.getLocStart(), diag::err_hlsl_varmodifierna) << "'extern'" << "function"; } if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(), diag::err_invalid_thread) << DeclSpec::getSpecifierName(TSCS); if (D.isFirstDeclarationOfMember()) adjustMemberFunctionCC(R, D.isStaticMember()); bool isFriend = false; FunctionTemplateDecl *FunctionTemplate = nullptr; bool isExplicitSpecialization = false; bool isFunctionTemplateSpecialization = false; bool isDependentClassScopeExplicitSpecialization = false; bool HasExplicitTemplateArgs = false; TemplateArgumentListInfo TemplateArgs; bool isVirtualOkay = false; DeclContext *OriginalDC = DC; bool IsLocalExternDecl = adjustContextForLocalExternDecl(DC); FunctionDecl *NewFD = CreateNewFunctionDecl(*this, D, DC, R, TInfo, SC, isVirtualOkay); if (!NewFD) return nullptr; if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer()) NewFD->setTopLevelDeclInObjCContainer(); // Set the lexical context. If this is a function-scope declaration, or has a // C++ scope specifier, or is the object of a friend declaration, the lexical // context will be different from the semantic context. NewFD->setLexicalDeclContext(CurContext); if (IsLocalExternDecl) NewFD->setLocalExternDecl(); if (getLangOpts().CPlusPlus) { bool isInline = D.getDeclSpec().isInlineSpecified(); bool isVirtual = D.getDeclSpec().isVirtualSpecified(); bool isExplicit = D.getDeclSpec().isExplicitSpecified(); bool isConstexpr = D.getDeclSpec().isConstexprSpecified(); isFriend = D.getDeclSpec().isFriendSpecified(); if (isFriend && !isInline && D.isFunctionDefinition()) { // C++ [class.friend]p5 // A function can be defined in a friend declaration of a // class . . . . Such a function is implicitly inline. NewFD->setImplicitlyInline(); } // If this is a method defined in an __interface, and is not a constructor // or an overloaded operator, then set the pure flag (isVirtual will already // return true). if (const CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(NewFD->getDeclContext())) { if (Parent->isInterface() && cast<CXXMethodDecl>(NewFD)->isUserProvided()) NewFD->setPure(true); // C++ [class.union]p2 // A union can have member functions, but not virtual functions. if (isVirtual && Parent->isUnion()) Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_in_union); } SetNestedNameSpecifier(NewFD, D); isExplicitSpecialization = false; isFunctionTemplateSpecialization = false; if (D.isInvalidType()) NewFD->setInvalidDecl(); // Match up the template parameter lists with the scope specifier, then // determine whether we have a template or a template specialization. bool Invalid = false; if (TemplateParameterList *TemplateParams = MatchTemplateParametersToScopeSpecifier( D.getDeclSpec().getLocStart(), D.getIdentifierLoc(), D.getCXXScopeSpec(), D.getName().getKind() == UnqualifiedId::IK_TemplateId ? D.getName().TemplateId : nullptr, TemplateParamLists, isFriend, isExplicitSpecialization, Invalid)) { if (TemplateParams->size() > 0) { // This is a function template // Check that we can declare a template here. if (CheckTemplateDeclScope(S, TemplateParams)) NewFD->setInvalidDecl(); // A destructor cannot be a template. 
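      // Illustrative example (annotation added here, not in the original
      // source) of what the check below rejects:
      //
      //   struct S {
      //     template <typename T> ~S();  // error: destructor cannot be a template
      //   };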
      if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
        Diag(NewFD->getLocation(), diag::err_destructor_template);
        NewFD->setInvalidDecl();
      }

      // If we're adding a template to a dependent context, we may need to
      // rebuild some of the types used within the template parameter list,
      // now that we know what the current instantiation is.
      if (DC->isDependentContext()) {
        ContextRAII SavedContext(*this, DC);
        if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
          Invalid = true;
      }

      FunctionTemplate = FunctionTemplateDecl::Create(Context, DC,
                                                      NewFD->getLocation(),
                                                      Name, TemplateParams,
                                                      NewFD);
      FunctionTemplate->setLexicalDeclContext(CurContext);
      NewFD->setDescribedFunctionTemplate(FunctionTemplate);

      // For source fidelity, store the other template param lists.
      if (TemplateParamLists.size() > 1) {
        NewFD->setTemplateParameterListsInfo(Context,
                                             TemplateParamLists.size() - 1,
                                             TemplateParamLists.data());
      }
    } else {
      // This is a function template specialization.
      isFunctionTemplateSpecialization = true;
      // For source fidelity, store all the template param lists.
      if (TemplateParamLists.size() > 0)
        NewFD->setTemplateParameterListsInfo(Context,
                                             TemplateParamLists.size(),
                                             TemplateParamLists.data());

      // C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
      if (isFriend) {
        // We want to remove the "template<>", found here.
        SourceRange RemoveRange = TemplateParams->getSourceRange();

        // If we remove the template<> and the name is not a
        // template-id, we're actually silently creating a problem:
        // the friend declaration will refer to an untemplated decl,
        // and clearly the user wants a template specialization.  So
        // we need to insert '<>' after the name.
        SourceLocation InsertLoc;
        if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
          InsertLoc = D.getName().getSourceRange().getEnd();
          InsertLoc = getLocForEndOfToken(InsertLoc);
        }

        Diag(D.getIdentifierLoc(), diag::err_template_spec_decl_friend)
          << Name << RemoveRange
          << FixItHint::CreateRemoval(RemoveRange)
          << FixItHint::CreateInsertion(InsertLoc, "<>");
      }
    }
  } else {
    // All template param lists were matched against the scope specifier:
    // this is NOT (an explicit specialization of) a template.
    if (TemplateParamLists.size() > 0)
      // For source fidelity, store all the template param lists.
      NewFD->setTemplateParameterListsInfo(Context,
                                           TemplateParamLists.size(),
                                           TemplateParamLists.data());
  }

  if (Invalid) {
    NewFD->setInvalidDecl();
    if (FunctionTemplate)
      FunctionTemplate->setInvalidDecl();
  }

  // C++ [dcl.fct.spec]p5:
  //   The virtual specifier shall only be used in declarations of
  //   nonstatic class member functions that appear within a
  //   member-specification of a class declaration; see 10.3.
  //
  if (isVirtual && !NewFD->isInvalidDecl()) {
    if (!isVirtualOkay) {
      Diag(D.getDeclSpec().getVirtualSpecLoc(),
           diag::err_virtual_non_function);
    } else if (!CurContext->isRecord()) {
      // 'virtual' was specified outside of the class.
      Diag(D.getDeclSpec().getVirtualSpecLoc(),
           diag::err_virtual_out_of_class)
        << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
    } else if (NewFD->getDescribedFunctionTemplate()) {
      // C++ [temp.mem]p3:
      //   A member function template shall not be virtual.
      Diag(D.getDeclSpec().getVirtualSpecLoc(),
           diag::err_virtual_member_function_template)
        << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
    } else {
      // Okay: Add virtual to the method.
NewFD->setVirtualAsWritten(true); } if (getLangOpts().CPlusPlus14 && NewFD->getReturnType()->isUndeducedType()) Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_auto_fn_virtual); } if (getLangOpts().CPlusPlus14 && (NewFD->isDependentContext() || (isFriend && CurContext->isDependentContext())) && NewFD->getReturnType()->isUndeducedType()) { // If the function template is referenced directly (for instance, as a // member of the current instantiation), pretend it has a dependent type. // This is not really justified by the standard, but is the only sane // thing to do. // FIXME: For a friend function, we have not marked the function as being // a friend yet, so 'isDependentContext' on the FD doesn't work. const FunctionProtoType *FPT = NewFD->getType()->castAs<FunctionProtoType>(); QualType Result = SubstAutoType(FPT->getReturnType(), Context.DependentTy); NewFD->setType(Context.getFunctionType(Result, FPT->getParamTypes(), FPT->getExtProtoInfo(), FPT->getParamMods())); // HLSL Change - add param mods } // C++ [dcl.fct.spec]p3: // The inline specifier shall not appear on a block scope function // declaration. if (isInline && !NewFD->isInvalidDecl()) { if (CurContext->isFunctionOrMethod()) { // 'inline' is not allowed on block scope function declaration. Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_declaration_block_scope) << Name << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc()); } } // C++ [dcl.fct.spec]p6: // The explicit specifier shall be used only in the declaration of a // constructor or conversion function within its class definition; // see 12.3.1 and 12.3.2. if (isExplicit && !NewFD->isInvalidDecl()) { if (!CurContext->isRecord()) { // 'explicit' was specified outside of the class. Diag(D.getDeclSpec().getExplicitSpecLoc(), diag::err_explicit_out_of_class) << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc()); } else if (!isa<CXXConstructorDecl>(NewFD) && !isa<CXXConversionDecl>(NewFD)) { // 'explicit' was specified on a function that wasn't a constructor // or conversion function. Diag(D.getDeclSpec().getExplicitSpecLoc(), diag::err_explicit_non_ctor_or_conv_function) << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc()); } } if (isConstexpr) { // C++11 [dcl.constexpr]p2: constexpr functions and constexpr constructors // are implicitly inline. NewFD->setImplicitlyInline(); // C++11 [dcl.constexpr]p3: functions declared constexpr are required to // be either constructors or to return a literal type. Therefore, // destructors cannot be declared constexpr. if (isa<CXXDestructorDecl>(NewFD)) Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor); } // If __module_private__ was specified, mark the function accordingly. if (D.getDeclSpec().isModulePrivateSpecified()) { if (isFunctionTemplateSpecialization) { SourceLocation ModulePrivateLoc = D.getDeclSpec().getModulePrivateSpecLoc(); Diag(ModulePrivateLoc, diag::err_module_private_specialization) << 0 << FixItHint::CreateRemoval(ModulePrivateLoc); } else { NewFD->setModulePrivate(); if (FunctionTemplate) FunctionTemplate->setModulePrivate(); } } if (isFriend) { if (FunctionTemplate) { FunctionTemplate->setObjectOfFriendDecl(); FunctionTemplate->setAccess(AS_public); } NewFD->setObjectOfFriendDecl(); NewFD->setAccess(AS_public); } // If a function is defined as defaulted or deleted, mark it as such now. // FIXME: Does this ever happen? ActOnStartOfFunctionDef forces the function // definition kind to FDK_Definition. 
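  // Illustrative example (annotation added here, not in the original source);
  // the switch below records declarations written as:
  //
  //   S() = default;         // FDK_Defaulted -> setDefaulted()
  //   void f(int) = delete;  // FDK_Deleted   -> setDeletedAsWritten()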
switch (D.getFunctionDefinitionKind()) { case FDK_Declaration: case FDK_Definition: break; case FDK_Defaulted: NewFD->setDefaulted(); break; case FDK_Deleted: NewFD->setDeletedAsWritten(); break; } if (isa<CXXMethodDecl>(NewFD) && DC == CurContext && D.isFunctionDefinition()) { // C++ [class.mfct]p2: // A member function may be defined (8.4) in its class definition, in // which case it is an inline member function (7.1.2) NewFD->setImplicitlyInline(); } // HLSL Change: We had to set SC_Static for static data member to // distinguish it from global constant variable. if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) && !CurContext->isRecord() && !getLangOpts().HLSL) { // C++ [class.static]p1: // A data or function member of a class may be declared static // in a class definition, in which case it is a static member of // the class. // Complain about the 'static' specifier if it's on an out-of-line // member function definition. Diag(D.getDeclSpec().getStorageClassSpecLoc(), diag::err_static_out_of_line) << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); } // C++11 [except.spec]p15: // A deallocation function with no exception-specification is treated // as if it were specified with noexcept(true). const FunctionProtoType *FPT = R->getAs<FunctionProtoType>(); if ((Name.getCXXOverloadedOperator() == OO_Delete || Name.getCXXOverloadedOperator() == OO_Array_Delete) && getLangOpts().CPlusPlus11 && FPT && !FPT->hasExceptionSpec()) NewFD->setType(Context.getFunctionType( FPT->getReturnType(), FPT->getParamTypes(), FPT->getExtProtoInfo().withExceptionSpec(EST_BasicNoexcept), FPT->getParamMods())); // HLSL Change - get param mods } // Filter out previous declarations that don't match the scope. FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewFD), D.getCXXScopeSpec().isNotEmpty() || isExplicitSpecialization || isFunctionTemplateSpecialization); // Handle GNU asm-label extension (encoded as an attribute). if (Expr *E = (Expr*) D.getAsmLabel()) { // The parser guarantees this is a string. StringLiteral *SE = cast<StringLiteral>(E); NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context, SE->getString(), 0)); } else if (!ExtnameUndeclaredIdentifiers.empty() && isDeclTUScopedExternallyVisible(NewFD)) { llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I = ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier()); if (I != ExtnameUndeclaredIdentifiers.end()) { NewFD->addAttr(I->second); ExtnameUndeclaredIdentifiers.erase(I); } } // Copy the parameter declarations from the declarator D to the function // declaration NewFD, if they are available. First scavenge them into Params. SmallVector<ParmVarDecl*, 16> Params; if (D.isFunctionDeclarator()) { DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo(); // Check for C99 6.7.5.3p10 - foo(void) is a non-varargs // function that takes no arguments, not a function that takes a // single void argument. // We let through "const void" here because Sema::GetTypeForDeclarator // already checks for that case. 
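    // Illustrative example (annotation added here, not in the original
    // source):
    //
    //   void f(void);        // zero parameters per C99 6.7.5.3p10
    //   void g(const void);  // already diagnosed in GetTypeForDeclarator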
if (FTIHasNonVoidParameters(FTI) && FTI.Params[0].Param) { for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) { ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param); assert(Param->getDeclContext() != NewFD && "Was set before ?"); Param->setDeclContext(NewFD); Params.push_back(Param); if (Param->isInvalidDecl()) NewFD->setInvalidDecl(); } } } else if (const FunctionProtoType *FT = R->getAs<FunctionProtoType>()) { // When we're declaring a function with a typedef, typeof, etc as in the // following example, we'll need to synthesize (unnamed) // parameters for use in the declaration. // // @code // typedef void fn(int); // fn f; // @endcode // Synthesize a parameter for each argument type. for (const auto &AI : FT->param_types()) { ParmVarDecl *Param = BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), AI); Param->setScopeInfo(0, Params.size()); Params.push_back(Param); } } else { assert(R->isFunctionNoProtoType() && NewFD->getNumParams() == 0 && "Should not need args for typedef of non-prototype fn"); } // Finally, we know we have the right number of parameters, install them. NewFD->setParams(Params); // Find all anonymous symbols defined during the declaration of this function // and add to NewFD. This lets us track decls such 'enum Y' in: // // void f(enum Y {AA} x) {} // // which would otherwise incorrectly end up in the translation unit scope. NewFD->setDeclsInPrototypeScope(DeclsInPrototypeScope); DeclsInPrototypeScope.clear(); if (D.getDeclSpec().isNoreturnSpecified()) NewFD->addAttr( ::new(Context) C11NoReturnAttr(D.getDeclSpec().getNoreturnSpecLoc(), Context, 0)); // Functions returning a variably modified type violate C99 6.7.5.2p2 // because all functions have linkage. if (!NewFD->isInvalidDecl() && NewFD->getReturnType()->isVariablyModifiedType()) { Diag(NewFD->getLocation(), diag::err_vm_func_decl); NewFD->setInvalidDecl(); } // Apply an implicit SectionAttr if #pragma code_seg is active. if (CodeSegStack.CurrentValue && D.isFunctionDefinition() && !NewFD->hasAttr<SectionAttr>()) { NewFD->addAttr( SectionAttr::CreateImplicit(Context, SectionAttr::Declspec_allocate, CodeSegStack.CurrentValue->getString(), CodeSegStack.CurrentPragmaLocation)); if (UnifySection(CodeSegStack.CurrentValue->getString(), ASTContext::PSF_Implicit | ASTContext::PSF_Execute | ASTContext::PSF_Read, NewFD)) NewFD->dropAttr<SectionAttr>(); } // Handle attributes. ProcessDeclAttributes(S, NewFD, D); if (getLangOpts().OpenCL) { // OpenCL v1.1 s6.5: Using an address space qualifier in a function return // type declaration will generate a compilation error. unsigned AddressSpace = NewFD->getReturnType().getAddressSpace(); if (AddressSpace == LangAS::opencl_local || AddressSpace == LangAS::opencl_global || AddressSpace == LangAS::opencl_constant) { Diag(NewFD->getLocation(), diag::err_opencl_return_value_with_address_space); NewFD->setInvalidDecl(); } } if (!getLangOpts().CPlusPlus) { // Perform semantic checking on the function declaration. bool isExplicitSpecialization=false; if (!NewFD->isInvalidDecl() && NewFD->isMain()) CheckMain(NewFD, D.getDeclSpec()); if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint()) CheckMSVCRTEntryPoint(NewFD); if (!NewFD->isInvalidDecl()) D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous, isExplicitSpecialization)); else if (!Previous.empty()) // Recover gracefully from an invalid redeclaration. 
D.setRedeclaration(true); assert((NewFD->isInvalidDecl() || !D.isRedeclaration() || Previous.getResultKind() != LookupResult::FoundOverloaded) && "previous declaration set still overloaded"); // Diagnose no-prototype function declarations with calling conventions that // don't support variadic calls. Only do this in C and do it after merging // possibly prototyped redeclarations. const FunctionType *FT = NewFD->getType()->castAs<FunctionType>(); if (isa<FunctionNoProtoType>(FT) && !D.isFunctionDefinition()) { CallingConv CC = FT->getExtInfo().getCC(); if (!supportsVariadicCall(CC)) { // Windows system headers sometimes accidentally use stdcall without // (void) parameters, so we relax this to a warning. int DiagID = CC == CC_X86StdCall ? diag::warn_cconv_knr : diag::err_cconv_knr; Diag(NewFD->getLocation(), DiagID) << FunctionType::getNameForCallConv(CC); } } } else { // C++11 [replacement.functions]p3: // The program's definitions shall not be specified as inline. // // N.B. We diagnose declarations instead of definitions per LWG issue 2340. // // Suppress the diagnostic if the function is __attribute__((used)), since // that forces an external definition to be emitted. if (D.getDeclSpec().isInlineSpecified() && NewFD->isReplaceableGlobalAllocationFunction() && !NewFD->hasAttr<UsedAttr>()) Diag(D.getDeclSpec().getInlineSpecLoc(), diag::ext_operator_new_delete_declared_inline) << NewFD->getDeclName(); // If the declarator is a template-id, translate the parser's template // argument list into our AST format. if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) { TemplateIdAnnotation *TemplateId = D.getName().TemplateId; TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc); TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc); ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); translateTemplateArguments(TemplateArgsPtr, TemplateArgs); HasExplicitTemplateArgs = true; if (NewFD->isInvalidDecl()) { HasExplicitTemplateArgs = false; } else if (FunctionTemplate) { // Function template with explicit template arguments. Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec) << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc); HasExplicitTemplateArgs = false; } else { assert((isFunctionTemplateSpecialization || D.getDeclSpec().isFriendSpecified()) && "should have a 'template<>' for this decl"); // "friend void foo<>(int);" is an implicit specialization decl. isFunctionTemplateSpecialization = true; } } else if (isFriend && isFunctionTemplateSpecialization) { // This combination is only possible in a recovery case; the user // wrote something like: // template <> friend void foo(int); // which we're recovering from as if the user had written: // friend void foo<>(int); // Go ahead and fake up a template id. HasExplicitTemplateArgs = true; TemplateArgs.setLAngleLoc(D.getIdentifierLoc()); TemplateArgs.setRAngleLoc(D.getIdentifierLoc()); } // If it's a friend (and only if it's a friend), it's possible // that either the specialized function type or the specialized // template is dependent, and therefore matching will fail. In // this case, don't check the specialization yet. 
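    // Illustrative example (annotation added here, not in the original
    // source); checking is deferred below for a dependent friend
    // specialization such as:
    //
    //   template <typename T> void foo(T);
    //   template <typename T> struct A {
    //     friend void foo<>(T);  // T is dependent, so matching must wait
    //   };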
bool InstantiationDependent = false; if (isFunctionTemplateSpecialization && isFriend && (NewFD->getType()->isDependentType() || DC->isDependentContext() || TemplateSpecializationType::anyDependentTemplateArguments( TemplateArgs.getArgumentArray(), TemplateArgs.size(), InstantiationDependent))) { assert(HasExplicitTemplateArgs && "friend function specialization without template args"); if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs, Previous)) NewFD->setInvalidDecl(); } else if (isFunctionTemplateSpecialization) { if (CurContext->isDependentContext() && CurContext->isRecord() && !isFriend) { isDependentClassScopeExplicitSpecialization = true; Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ? diag::ext_function_specialization_in_class : diag::err_function_specialization_in_class) << NewFD->getDeclName(); } else if (CheckFunctionTemplateSpecialization(NewFD, (HasExplicitTemplateArgs ? &TemplateArgs : nullptr), Previous)) NewFD->setInvalidDecl(); // C++ [dcl.stc]p1: // A storage-class-specifier shall not be specified in an explicit // specialization (14.7.3) FunctionTemplateSpecializationInfo *Info = NewFD->getTemplateSpecializationInfo(); if (Info && SC != SC_None) { if (SC != Info->getTemplate()->getTemplatedDecl()->getStorageClass()) Diag(NewFD->getLocation(), diag::err_explicit_specialization_inconsistent_storage_class) << SC << FixItHint::CreateRemoval( D.getDeclSpec().getStorageClassSpecLoc()); else Diag(NewFD->getLocation(), diag::ext_explicit_specialization_storage_class) << FixItHint::CreateRemoval( D.getDeclSpec().getStorageClassSpecLoc()); } } else if (isExplicitSpecialization && isa<CXXMethodDecl>(NewFD)) { if (CheckMemberSpecialization(NewFD, Previous)) NewFD->setInvalidDecl(); } // Perform semantic checking on the function declaration. if (!isDependentClassScopeExplicitSpecialization) { if (!getLangOpts().HLSL && !NewFD->isInvalidDecl() && NewFD->isMain()) // HLSL Change: no main() checks in HLSL CheckMain(NewFD, D.getDeclSpec()); if (!getLangOpts().HLSL && !NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint()) // HLSL Change: no entry point checks in HLSL CheckMSVCRTEntryPoint(NewFD); if (!NewFD->isInvalidDecl()) D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous, isExplicitSpecialization)); else if (!Previous.empty()) // Recover gracefully from an invalid redeclaration. D.setRedeclaration(true); } assert((NewFD->isInvalidDecl() || !D.isRedeclaration() || Previous.getResultKind() != LookupResult::FoundOverloaded) && "previous declaration set still overloaded"); NamedDecl *PrincipalDecl = (FunctionTemplate ? cast<NamedDecl>(FunctionTemplate) : NewFD); if (isFriend && D.isRedeclaration()) { AccessSpecifier Access = AS_public; if (!NewFD->isInvalidDecl()) Access = NewFD->getPreviousDecl()->getAccess(); NewFD->setAccess(Access); if (FunctionTemplate) FunctionTemplate->setAccess(Access); } if (NewFD->isOverloadedOperator() && !DC->isRecord() && PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary)) PrincipalDecl->setNonMemberOperator(); // If we have a function template, check the template parameter // list. This will check and merge default template arguments. if (FunctionTemplate) { FunctionTemplateDecl *PrevTemplate = FunctionTemplate->getPreviousDecl(); CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(), PrevTemplate ? PrevTemplate->getTemplateParameters() : nullptr, D.getDeclSpec().isFriendSpecified() ? (D.isFunctionDefinition() ? 
TPC_FriendFunctionTemplateDefinition : TPC_FriendFunctionTemplate) : (D.getCXXScopeSpec().isSet() && DC && DC->isRecord() && DC->isDependentContext()) ? TPC_ClassTemplateMember : TPC_FunctionTemplate); } if (NewFD->isInvalidDecl()) { // Ignore all the rest of this. } else if (!D.isRedeclaration()) { struct ActOnFDArgs ExtraArgs = { S, D, TemplateParamLists, AddToScope }; // Fake up an access specifier if it's supposed to be a class member. if (isa<CXXRecordDecl>(NewFD->getDeclContext())) NewFD->setAccess(AS_public); // Qualified decls generally require a previous declaration. if (D.getCXXScopeSpec().isSet()) { // ...with the major exception of templated-scope or // dependent-scope friend declarations. // TODO: we currently also suppress this check in dependent // contexts because (1) the parameter depth will be off when // matching friend templates and (2) we might actually be // selecting a friend based on a dependent factor. But there // are situations where these conditions don't apply and we // can actually do this check immediately. if (isFriend && (TemplateParamLists.size() || D.getCXXScopeSpec().getScopeRep()->isDependent() || CurContext->isDependentContext())) { // ignore these } else { // The user tried to provide an out-of-line definition for a // function that is a member of a class or namespace, but there // was no such member function declared (C++ [class.mfct]p2, // C++ [namespace.memdef]p2). For example: // // class X { // void f() const; // }; // // void X::f() { } // ill-formed // // Complain about this problem, and attempt to suggest close // matches (e.g., those that differ only in cv-qualifiers and // whether the parameter types are references). if (NamedDecl *Result = DiagnoseInvalidRedeclaration( *this, Previous, NewFD, ExtraArgs, false, nullptr)) { AddToScope = ExtraArgs.AddToScope; return Result; } } // Unqualified local friend declarations are required to resolve // to something. } else if (isFriend && cast<CXXRecordDecl>(CurContext)->isLocalClass()) { if (NamedDecl *Result = DiagnoseInvalidRedeclaration( *this, Previous, NewFD, ExtraArgs, true, S)) { AddToScope = ExtraArgs.AddToScope; return Result; } } } else if (!D.isFunctionDefinition() && isa<CXXMethodDecl>(NewFD) && NewFD->isOutOfLine() && !isFriend && !isFunctionTemplateSpecialization && !isExplicitSpecialization) { // An out-of-line member function declaration must also be a // definition (C++ [class.mfct]p2). // Note that this is not the case for explicit specializations of // function templates or member functions of class templates, per // C++ [temp.expl.spec]p2. We also allow these declarations as an // extension for compatibility with old SWIG code which likes to // generate them. Diag(NewFD->getLocation(), diag::ext_out_of_line_declaration) << D.getCXXScopeSpec().getRange(); } } ProcessPragmaWeak(S, NewFD); checkAttributesAfterMerging(*this, *NewFD); AddKnownFunctionAttributes(NewFD); if (NewFD->hasAttr<OverloadableAttr>() && !NewFD->getType()->getAs<FunctionProtoType>()) { Diag(NewFD->getLocation(), diag::err_attribute_overloadable_no_prototype) << NewFD; // Turn this into a variadic function with no parameters. 
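  // Illustrative example (annotation added here, not in the original source),
  // in C:
  //
  //   __attribute__((overloadable)) void f();  // error: needs a prototype;
  //                                            // recovered below as 'void f(...)'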
    const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
    FunctionProtoType::ExtProtoInfo EPI(
        Context.getDefaultCallingConvention(true, false));
    EPI.Variadic = true;
    EPI.ExtInfo = FT->getExtInfo();

    QualType R = Context.getFunctionType(FT->getReturnType(), None, EPI,
                                         None); // HLSL Change - all defaults are in
    NewFD->setType(R);
  }

  // If there's a #pragma GCC visibility in scope, and this isn't a class
  // member, set the visibility of this function.
  if (!DC->isRecord() && NewFD->isExternallyVisible())
    AddPushedVisibilityAttribute(NewFD);

  // If there's a #pragma clang arc_cf_code_audited in scope, consider
  // marking the function.
  AddCFAuditedAttribute(NewFD);

  // If this is a function definition, check if we have to apply optnone due to
  // a pragma.
  if (D.isFunctionDefinition())
    AddRangeBasedOptnone(NewFD);

  // If this is the first declaration of an extern "C" function, update
  // the map of such declarations.
  if (NewFD->isFirstDecl() && !NewFD->isInvalidDecl() &&
      isIncompleteDeclExternC(*this, NewFD))
    RegisterLocallyScopedExternCDecl(NewFD, S);

  // Set this FunctionDecl's range up to the right paren.
  NewFD->setRangeEnd(D.getSourceRange().getEnd());

  if (D.isRedeclaration() && !Previous.empty()) {
    checkDLLAttributeRedeclaration(
        *this, dyn_cast<NamedDecl>(Previous.getRepresentativeDecl()), NewFD,
        isExplicitSpecialization || isFunctionTemplateSpecialization);
  }

  if (getLangOpts().CPlusPlus) {
    if (FunctionTemplate) {
      if (NewFD->isInvalidDecl())
        FunctionTemplate->setInvalidDecl();
      return FunctionTemplate;
    }
  }

  if (NewFD->hasAttr<OpenCLKernelAttr>()) {
    // OpenCL v1.2 s6.8 static is invalid for kernel functions.
    if ((getLangOpts().OpenCLVersion >= 120) && (SC == SC_Static)) {
      Diag(D.getIdentifierLoc(), diag::err_static_kernel);
      D.setInvalidType();
    }
    // OpenCL v1.2, s6.9 -- Kernels can only have return type void.
    if (!NewFD->getReturnType()->isVoidType()) {
      SourceRange RTRange = NewFD->getReturnTypeSourceRange();
      Diag(D.getIdentifierLoc(), diag::err_expected_kernel_void_return_type)
          << (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "void")
                                : FixItHint());
      D.setInvalidType();
    }

    llvm::SmallPtrSet<const Type *, 16> ValidTypes;
    for (auto Param : NewFD->params())
      checkIsValidOpenCLKernelParameter(*this, D, Param, ValidTypes);
  }

  MarkUnusedFileScopedDecl(NewFD);

  if (getLangOpts().CUDA)
    if (IdentifierInfo *II = NewFD->getIdentifier())
      if (!NewFD->isInvalidDecl() &&
          NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
        if (II->isStr("cudaConfigureCall")) {
          if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
            Diag(NewFD->getLocation(), diag::err_config_scalar_return);

          Context.setcudaConfigureCallDecl(NewFD);
        }
      }

  // Here we have a function template explicit specialization at class scope.
  // The actual specialization will be postponed to template instantiation
  // time via the ClassScopeFunctionSpecializationDecl node.
  if (isDependentClassScopeExplicitSpecialization) {
    ClassScopeFunctionSpecializationDecl *NewSpec =
        ClassScopeFunctionSpecializationDecl::Create(
            Context, CurContext, SourceLocation(), cast<CXXMethodDecl>(NewFD),
            HasExplicitTemplateArgs, TemplateArgs);
    CurContext->addDecl(NewSpec);
    AddToScope = false;
  }

  // HLSL Change Starts
  TransferUnusualAttributes(D, NewFD);
  if (getLangOpts().HLSL && D.isFunctionDefinition() && D.hasName() &&
      NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
    hlsl::DiagnoseEntry(*this, NewFD);
  }
  // HLSL Change Ends

  return NewFD;
}

/// \brief Perform semantic checking of a new function declaration.
/// /// Performs semantic analysis of the new function declaration /// NewFD. This routine performs all semantic checking that does not /// require the actual declarator involved in the declaration, and is /// used both for the declaration of functions as they are parsed /// (called via ActOnDeclarator) and for the declaration of functions /// that have been instantiated via C++ template instantiation (called /// via InstantiateDecl). /// /// \param IsExplicitSpecialization whether this new function declaration is /// an explicit specialization of the previous declaration. /// /// This sets NewFD->isInvalidDecl() to true if there was an error. /// /// \returns true if the function declaration is a redeclaration. bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization) { assert(!NewFD->getReturnType()->isVariablyModifiedType() && "Variably modified return types are not handled here"); // Determine whether the type of this function should be merged with // a previous visible declaration. This never happens for functions in C++, // and always happens in C if the previous declaration was visible. bool MergeTypeWithPrevious = !getLangOpts().CPlusPlus && !Previous.isShadowed(); // Filter out any non-conflicting previous declarations. filterNonConflictingPreviousDecls(*this, NewFD, Previous); bool Redeclaration = false; NamedDecl *OldDecl = nullptr; // Merge or overload the declaration with an existing declaration of // the same name, if appropriate. if (!Previous.empty()) { // Determine whether NewFD is an overload of PrevDecl or // a declaration that requires merging. If it's an overload, // there's no more work to do here; we'll just add the new // function to the scope. if (!AllowOverloadingOfFunction(Previous, Context)) { NamedDecl *Candidate = Previous.getFoundDecl(); if (shouldLinkPossiblyHiddenDecl(Candidate, NewFD)) { Redeclaration = true; OldDecl = Candidate; } } else { switch (CheckOverload(S, NewFD, Previous, OldDecl, /*NewIsUsingDecl*/ false)) { case Ovl_Match: Redeclaration = true; break; case Ovl_NonFunction: Redeclaration = true; break; case Ovl_Overload: Redeclaration = false; break; } if (!getLangOpts().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) { // If a function name is overloadable in C, then every function // with that name must be marked "overloadable". Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing) << Redeclaration << NewFD; NamedDecl *OverloadedDecl = nullptr; if (Redeclaration) OverloadedDecl = OldDecl; else if (!Previous.empty()) OverloadedDecl = Previous.getRepresentativeDecl(); if (OverloadedDecl) Diag(OverloadedDecl->getLocation(), diag::note_attribute_overloadable_prev_overload); NewFD->addAttr(OverloadableAttr::CreateImplicit(Context)); } } } // Check for a previous extern "C" declaration with this name. if (!Redeclaration && checkForConflictWithNonVisibleExternC(*this, NewFD, Previous)) { filterNonConflictingPreviousDecls(*this, NewFD, Previous); if (!Previous.empty()) { // This is an extern "C" declaration with the same name as a previous // declaration, and thus redeclares that entity... Redeclaration = true; OldDecl = Previous.getFoundDecl(); MergeTypeWithPrevious = false; // ... except in the presence of __attribute__((overloadable)). 
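    // Illustrative example (annotation added here, not in the original
    // source), in C:
    //
    //   __attribute__((overloadable)) void f(int);
    //   void f(float);  // must also be marked overloadable; diagnosed below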
if (OldDecl->hasAttr<OverloadableAttr>()) { if (!getLangOpts().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) { Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing) << Redeclaration << NewFD; Diag(Previous.getFoundDecl()->getLocation(), diag::note_attribute_overloadable_prev_overload); NewFD->addAttr(OverloadableAttr::CreateImplicit(Context)); } if (IsOverload(NewFD, cast<FunctionDecl>(OldDecl), false)) { Redeclaration = false; OldDecl = nullptr; } } } } // HLSL Change Starts // Rather than fix for inout parameters, comment out - this is N/A for HLSL #if 0 // HLSL Change Ends // C++11 [dcl.constexpr]p8: // A constexpr specifier for a non-static member function that is not // a constructor declares that member function to be const. // // This needs to be delayed until we know whether this is an out-of-line // definition of a static member function. // // This rule is not present in C++1y, so we produce a backwards // compatibility warning whenever it happens in C++11. CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD); if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() && !MD->isStatic() && !isa<CXXConstructorDecl>(MD) && (MD->getTypeQualifiers() & Qualifiers::Const) == 0) { CXXMethodDecl *OldMD = nullptr; if (OldDecl) OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction()); if (!OldMD || !OldMD->isStatic()) { const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); EPI.TypeQuals |= Qualifiers::Const; MD->setType(Context.getFunctionType(FPT->getReturnType(), FPT->getParamTypes(), EPI)); // Warn that we did this, if we're not performing template instantiation. // In that case, we'll have warned already when the template was defined. if (ActiveTemplateInstantiations.empty()) { SourceLocation AddConstLoc; if (FunctionTypeLoc FTL = MD->getTypeSourceInfo()->getTypeLoc() .IgnoreParens().getAs<FunctionTypeLoc>()) AddConstLoc = getLocForEndOfToken(FTL.getRParenLoc()); Diag(MD->getLocation(), diag::warn_cxx14_compat_constexpr_not_const) << FixItHint::CreateInsertion(AddConstLoc, " const"); } } } #endif // HLSL Change Ends if (Redeclaration) { // NewFD and OldDecl represent declarations that need to be // merged. if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious)) { NewFD->setInvalidDecl(); return Redeclaration; } Previous.clear(); Previous.addDecl(OldDecl); if (FunctionTemplateDecl *OldTemplateDecl = dyn_cast<FunctionTemplateDecl>(OldDecl)) { NewFD->setPreviousDeclaration(OldTemplateDecl->getTemplatedDecl()); FunctionTemplateDecl *NewTemplateDecl = NewFD->getDescribedFunctionTemplate(); assert(NewTemplateDecl && "Template/non-template mismatch"); if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewTemplateDecl->getTemplatedDecl())) { Method->setAccess(OldTemplateDecl->getAccess()); NewTemplateDecl->setAccess(OldTemplateDecl->getAccess()); } // If this is an explicit specialization of a member that is a function // template, mark it as a member specialization. if (IsExplicitSpecialization && NewTemplateDecl->getInstantiatedFromMemberTemplate()) { NewTemplateDecl->setMemberSpecialization(); assert(OldTemplateDecl->isMemberSpecialization()); } } else { // This needs to happen first so that 'inline' propagates. NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl)); if (isa<CXXMethodDecl>(NewFD)) NewFD->setAccess(OldDecl->getAccess()); } } // Semantic checking for this function declaration (in isolation). if (getLangOpts().CPlusPlus) { // C++-specific checks. 
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) { CheckConstructor(Constructor); } else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(NewFD)) { CXXRecordDecl *Record = Destructor->getParent(); QualType ClassType = Context.getTypeDeclType(Record); // FIXME: Shouldn't we be able to perform this check even when the class // type is dependent? Both gcc and edg can handle that. if (!ClassType->isDependentType()) { DeclarationName Name = Context.DeclarationNames.getCXXDestructorName( Context.getCanonicalType(ClassType)); if (NewFD->getDeclName() != Name) { Diag(NewFD->getLocation(), diag::err_destructor_name); NewFD->setInvalidDecl(); return Redeclaration; } } } else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(NewFD)) { ActOnConversionDeclarator(Conversion); } // Find any virtual functions that this function overrides. if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD)) { if (!Method->isFunctionTemplateSpecialization() && !Method->getDescribedFunctionTemplate() && Method->isCanonicalDecl()) { if (AddOverriddenMethods(Method->getParent(), Method)) { // If the function was marked as "static", we have a problem. if (NewFD->getStorageClass() == SC_Static) { ReportOverrides(*this, diag::err_static_overrides_virtual, Method); } } } if (Method->isStatic()) checkThisInStaticMemberFunctionType(Method); } // Extra checking for C++ overloaded operators (C++ [over.oper]). if (NewFD->isOverloadedOperator() && CheckOverloadedOperatorDeclaration(NewFD)) { NewFD->setInvalidDecl(); return Redeclaration; } // Extra checking for C++0x literal operators (C++0x [over.literal]). if (NewFD->getLiteralIdentifier() && CheckLiteralOperatorDeclaration(NewFD)) { NewFD->setInvalidDecl(); return Redeclaration; } // In C++, check default arguments now that we have merged decls. Unless // the lexical context is the class, because in this case this is done // during delayed parsing anyway. if (!CurContext->isRecord()) CheckCXXDefaultArguments(NewFD); // If this function declares a builtin function, check the type of this // declaration against the expected type for the builtin. if (unsigned BuiltinID = NewFD->getBuiltinID()) { ASTContext::GetBuiltinTypeError Error; LookupPredefedObjCSuperType(*this, S, NewFD->getIdentifier()); QualType T = Context.GetBuiltinType(BuiltinID, Error); if (!T.isNull() && !Context.hasSameType(T, NewFD->getType())) { // The type of this function differs from the type of the builtin, // so forget about the builtin entirely. Context.BuiltinInfo.ForgetBuiltin(BuiltinID, Context.Idents); } } // If this function is declared as being extern "C", then check to see if // the function returns a UDT (class, struct, or union type) that is not C // compatible, and if it does, warn the user. // But, issue any diagnostic on the first declaration only. if (Previous.empty() && NewFD->isExternC()) { QualType R = NewFD->getReturnType(); if (R->isIncompleteType() && !R->isVoidType()) Diag(NewFD->getLocation(), diag::warn_return_value_udt_incomplete) << NewFD << R; else if (!R.isPODType(Context) && !R->isVoidType() && !R->isObjCObjectPointerType()) Diag(NewFD->getLocation(), diag::warn_return_value_udt) << NewFD << R; } } return Redeclaration; } void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) { // C++11 [basic.start.main]p3: // A program that [...] declares main to be inline, static or // constexpr is ill-formed. // C11 6.7.4p4: In a hosted environment, no function specifier(s) shall // appear in a declaration of main. 
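  // Illustrative examples (annotation added here, not in the original
  // source):
  //
  //   inline int main() { return 0; }  // error: 'main' cannot be inline
  //   static int main() { return 0; }  // C++: error; C99: warning only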
// static main is not an error under C99, but we should warn about it. // We accept _Noreturn main as an extension. if (FD->getStorageClass() == SC_Static) Diag(DS.getStorageClassSpecLoc(), getLangOpts().CPlusPlus ? diag::err_static_main : diag::warn_static_main) << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc()); if (FD->isInlineSpecified()) Diag(DS.getInlineSpecLoc(), diag::err_inline_main) << FixItHint::CreateRemoval(DS.getInlineSpecLoc()); if (DS.isNoreturnSpecified()) { SourceLocation NoreturnLoc = DS.getNoreturnSpecLoc(); SourceRange NoreturnRange(NoreturnLoc, getLocForEndOfToken(NoreturnLoc)); Diag(NoreturnLoc, diag::ext_noreturn_main); Diag(NoreturnLoc, diag::note_main_remove_noreturn) << FixItHint::CreateRemoval(NoreturnRange); } if (FD->isConstexpr()) { Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main) << FixItHint::CreateRemoval(DS.getConstexprSpecLoc()); FD->setConstexpr(false); } if (getLangOpts().OpenCL) { Diag(FD->getLocation(), diag::err_opencl_no_main) << FD->hasAttr<OpenCLKernelAttr>(); FD->setInvalidDecl(); return; } QualType T = FD->getType(); assert(T->isFunctionType() && "function decl is not of function type"); const FunctionType* FT = T->castAs<FunctionType>(); if (getLangOpts().GNUMode && !getLangOpts().CPlusPlus) { // In C with GNU extensions we allow main() to have non-integer return // type, but we should warn about the extension, and we disable the // implicit-return-zero rule. // GCC in C mode accepts qualified 'int'. if (Context.hasSameUnqualifiedType(FT->getReturnType(), Context.IntTy)) FD->setHasImplicitReturnZero(true); else { Diag(FD->getTypeSpecStartLoc(), diag::ext_main_returns_nonint); SourceRange RTRange = FD->getReturnTypeSourceRange(); if (RTRange.isValid()) Diag(RTRange.getBegin(), diag::note_main_change_return_type) << FixItHint::CreateReplacement(RTRange, "int"); } } else { // In C and C++, main magically returns 0 if you fall off the end; // set the flag which tells us that. // This is C++ [basic.start.main]p5 and C99 5.1.2.2.3. // All the standards say that main() should return 'int'. if (Context.hasSameType(FT->getReturnType(), Context.IntTy)) FD->setHasImplicitReturnZero(true); else { // Otherwise, this is just a flat-out error. SourceRange RTRange = FD->getReturnTypeSourceRange(); Diag(FD->getTypeSpecStartLoc(), diag::err_main_returns_nonint) << (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "int") : FixItHint()); FD->setInvalidDecl(true); } } // Treat protoless main() as nullary. if (isa<FunctionNoProtoType>(FT)) return; const FunctionProtoType* FTP = cast<const FunctionProtoType>(FT); unsigned nparams = FTP->getNumParams(); assert(FD->getNumParams() == nparams); bool HasExtraParameters = (nparams > 3); if (FTP->isVariadic()) { Diag(FD->getLocation(), diag::ext_variadic_main); // FIXME: if we had information about the location of the ellipsis, we // could add a FixIt hint to remove it as a parameter. } // Darwin passes an undocumented fourth argument of type char**. If // other platforms start sprouting these, the logic below will start // getting shifty. if (nparams == 4 && Context.getTargetInfo().getTriple().isOSDarwin()) HasExtraParameters = false; if (HasExtraParameters) { Diag(FD->getLocation(), diag::err_main_surplus_args) << nparams; FD->setInvalidDecl(true); nparams = 3; } // FIXME: a lot of the following diagnostics would be improved // if we had some location information about types. 
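// For illustration, the parameter checks below accept the usual forms and
// reject mismatches (hypothetical declarations, not from this file):
//   int main(int argc, char **argv);        // matches Expected[0] and [1]
//   int main(int argc, char *const *argv);  // ok via the qualified char** extension
//   int main(float bogus);                  // err_main_arg_wrong for parameter 0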
QualType CharPP = Context.getPointerType(Context.getPointerType(Context.CharTy)); QualType Expected[] = { Context.IntTy, CharPP, CharPP, CharPP }; for (unsigned i = 0; i < nparams; ++i) { QualType AT = FTP->getParamType(i); bool mismatch = true; if (Context.hasSameUnqualifiedType(AT, Expected[i])) mismatch = false; else if (Expected[i] == CharPP) { // As an extension, the following forms are okay: // char const ** // char const * const * // char * const * QualifierCollector qs; const PointerType* PT; if ((PT = qs.strip(AT)->getAs<PointerType>()) && (PT = qs.strip(PT->getPointeeType())->getAs<PointerType>()) && Context.hasSameType(QualType(qs.strip(PT->getPointeeType()), 0), Context.CharTy)) { qs.removeConst(); mismatch = !qs.empty(); } } if (mismatch) { Diag(FD->getLocation(), diag::err_main_arg_wrong) << i << Expected[i]; // TODO: suggest replacing given type with expected type FD->setInvalidDecl(true); } } if (nparams == 1 && !FD->isInvalidDecl()) { Diag(FD->getLocation(), diag::warn_main_one_arg); } if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) { Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD; FD->setInvalidDecl(); } } void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) { QualType T = FD->getType(); assert(T->isFunctionType() && "function decl is not of function type"); const FunctionType *FT = T->castAs<FunctionType>(); // Set an implicit return of 'zero' if the function can return some integral, // enumeration, pointer or nullptr type. if (FT->getReturnType()->isIntegralOrEnumerationType() || FT->getReturnType()->isAnyPointerType() || FT->getReturnType()->isNullPtrType()) // DllMain is exempt because a return value of zero means it failed. if (FD->getName() != "DllMain") FD->setHasImplicitReturnZero(true); if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) { Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD; FD->setInvalidDecl(); } } bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) { // FIXME: Need strict checking. In C89, we need to check for // any assignment, increment, decrement, function-calls, or // commas outside of a sizeof. In C99, it's the same list, // except that the aforementioned are allowed in unevaluated // expressions. Everything else falls under the // "may accept other forms of constant expressions" exception. // (We never end up here for C++, so the constant expression // rules there don't matter.) const Expr *Culprit; if (Init->isConstantInitializer(Context, false, &Culprit)) return false; Diag(Culprit->getExprLoc(), diag::err_init_element_not_constant) << Culprit->getSourceRange(); return true; } namespace { // Visits an initialization expression to see if OrigDecl is evaluated in // its own initialization and throws a warning if it does. class SelfReferenceChecker : public EvaluatedExprVisitor<SelfReferenceChecker> { Sema &S; Decl *OrigDecl; bool isRecordType; bool isPODType; bool isReferenceType; bool isInitList; llvm::SmallVector<unsigned, 4> InitFieldIndex; public: typedef EvaluatedExprVisitor<SelfReferenceChecker> Inherited; SelfReferenceChecker(Sema &S, Decl *OrigDecl) : Inherited(S.Context), S(S), OrigDecl(OrigDecl) { isPODType = false; isRecordType = false; isReferenceType = false; isInitList = false; if (ValueDecl *VD = dyn_cast<ValueDecl>(OrigDecl)) { isPODType = VD->getType().isPODType(S.Context); isRecordType = VD->getType()->isRecordType(); isReferenceType = VD->getType()->isReferenceType(); } } // For most expressions, just call the visitor. 
For initializer lists, // track the index of the field being initialized since fields are // initialized in order allowing use of previously initialized fields. void CheckExpr(Expr *E) { InitListExpr *InitList = dyn_cast<InitListExpr>(E); if (!InitList) { Visit(E); return; } // Track and increment the index here. isInitList = true; InitFieldIndex.push_back(0); for (auto Child : InitList->children()) { CheckExpr(cast<Expr>(Child)); ++InitFieldIndex.back(); } InitFieldIndex.pop_back(); } // Returns true if MemberExpr is checked and no further checking is needed. // Returns false if additional checking is required. bool CheckInitListMemberExpr(MemberExpr *E, bool CheckReference) { llvm::SmallVector<FieldDecl*, 4> Fields; Expr *Base = E; bool ReferenceField = false; // Get the field members used. while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) { FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()); if (!FD) return false; Fields.push_back(FD); if (FD->getType()->isReferenceType()) ReferenceField = true; Base = ME->getBase()->IgnoreParenImpCasts(); } // Keep checking only if the base Decl is the same. DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base); if (!DRE || DRE->getDecl() != OrigDecl) return false; // A reference field can be bound to an uninitialized field. if (CheckReference && !ReferenceField) return true; // Convert FieldDecls to their index number. llvm::SmallVector<unsigned, 4> UsedFieldIndex; for (auto I = Fields.rbegin(), E = Fields.rend(); I != E; ++I) { UsedFieldIndex.push_back((*I)->getFieldIndex()); } // See if a warning is needed by checking the first difference in index // numbers. If the field being used has an index less than the field being // initialized, then the use is safe. for (auto UsedIter = UsedFieldIndex.begin(), UsedEnd = UsedFieldIndex.end(), OrigIter = InitFieldIndex.begin(), OrigEnd = InitFieldIndex.end(); UsedIter != UsedEnd && OrigIter != OrigEnd; ++UsedIter, ++OrigIter) { if (*UsedIter < *OrigIter) return true; if (*UsedIter > *OrigIter) break; } // TODO: Add a different warning which will print the field names. HandleDeclRefExpr(DRE); return true; } // For most expressions, the cast is directly above the DeclRefExpr. // For conditional operators, the cast can be outside the conditional // operator if both expressions are DeclRefExpr's. void HandleValue(Expr *E) { E = E->IgnoreParens(); if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(E)) { HandleDeclRefExpr(DRE); return; } if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) { Visit(CO->getCond()); HandleValue(CO->getTrueExpr()); HandleValue(CO->getFalseExpr()); return; } if (BinaryConditionalOperator *BCO = dyn_cast<BinaryConditionalOperator>(E)) { Visit(BCO->getCond()); HandleValue(BCO->getFalseExpr()); return; } if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) { HandleValue(OVE->getSourceExpr()); return; } if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { if (BO->getOpcode() == BO_Comma) { Visit(BO->getLHS()); HandleValue(BO->getRHS()); return; } } if (isa<MemberExpr>(E)) { if (isInitList) { if (CheckInitListMemberExpr(cast<MemberExpr>(E), false /*CheckReference*/)) return; } Expr *Base = E->IgnoreParenImpCasts(); while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) { // Check for static member variables and don't warn on them.
if (!isa<FieldDecl>(ME->getMemberDecl())) return; Base = ME->getBase()->IgnoreParenImpCasts(); } if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) HandleDeclRefExpr(DRE); return; } Visit(E); } // Reference types not handled in HandleValue are handled here since all // uses of references are bad, not just r-value uses. void VisitDeclRefExpr(DeclRefExpr *E) { if (isReferenceType) HandleDeclRefExpr(E); } void VisitImplicitCastExpr(ImplicitCastExpr *E) { if (E->getCastKind() == CK_LValueToRValue) { HandleValue(E->getSubExpr()); return; } Inherited::VisitImplicitCastExpr(E); } void VisitMemberExpr(MemberExpr *E) { if (isInitList) { if (CheckInitListMemberExpr(E, true /*CheckReference*/)) return; } // Don't warn on arrays since they can be treated as pointers. if (E->getType()->canDecayToPointerType()) return; // Warn when a non-static method call is followed by non-static member // field accesses, which is followed by a DeclRefExpr. CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl()); bool Warn = (MD && !MD->isStatic()); Expr *Base = E->getBase()->IgnoreParenImpCasts(); while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) { if (!isa<FieldDecl>(ME->getMemberDecl())) Warn = false; Base = ME->getBase()->IgnoreParenImpCasts(); } if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) { if (Warn) HandleDeclRefExpr(DRE); return; } // The base of a MemberExpr is not a MemberExpr or a DeclRefExpr. // Visit that expression. Visit(Base); } void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) { Expr *Callee = E->getCallee(); if (isa<UnresolvedLookupExpr>(Callee)) return Inherited::VisitCXXOperatorCallExpr(E); Visit(Callee); for (auto Arg: E->arguments()) HandleValue(Arg->IgnoreParenImpCasts()); } void VisitUnaryOperator(UnaryOperator *E) { // For POD record types, addresses of its own members are well-defined. if (E->getOpcode() == UO_AddrOf && isRecordType && isa<MemberExpr>(E->getSubExpr()->IgnoreParens())) { if (!isPODType) HandleValue(E->getSubExpr()); return; } if (E->isIncrementDecrementOp()) { HandleValue(E->getSubExpr()); return; } Inherited::VisitUnaryOperator(E); } void VisitObjCMessageExpr(ObjCMessageExpr *E) { return; } void VisitCXXConstructExpr(CXXConstructExpr *E) { if (E->getConstructor()->isCopyConstructor()) { Expr *ArgExpr = E->getArg(0); if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr)) if (ILE->getNumInits() == 1) ArgExpr = ILE->getInit(0); if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr)) if (ICE->getCastKind() == CK_NoOp) ArgExpr = ICE->getSubExpr(); HandleValue(ArgExpr); return; } Inherited::VisitCXXConstructExpr(E); } void VisitCallExpr(CallExpr *E) { // Treat std::move as a use. if (E->getNumArgs() == 1) { if (FunctionDecl *FD = E->getDirectCallee()) { if (FD->isInStdNamespace() && FD->getIdentifier() && FD->getIdentifier()->isStr("move")) { HandleValue(E->getArg(0)); return; } } } Inherited::VisitCallExpr(E); } void VisitBinaryOperator(BinaryOperator *E) { if (E->isCompoundAssignmentOp()) { HandleValue(E->getLHS()); Visit(E->getRHS()); return; } Inherited::VisitBinaryOperator(E); } // A custom visitor for BinaryConditionalOperator is needed because the // regular visitor would check the condition and true expression separately // but both point to the same place giving duplicate diagnostics. 
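// For illustration, in the GNU binary conditional the condition and the
// "true" arm are the same expression, so visiting both would diagnose the
// self-reference in a global such as the following twice:
//   int x = x ?: 0;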
void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) { Visit(E->getCond()); Visit(E->getFalseExpr()); } void HandleDeclRefExpr(DeclRefExpr *DRE) { Decl* ReferenceDecl = DRE->getDecl(); if (OrigDecl != ReferenceDecl) return; unsigned diag; if (isReferenceType) { diag = diag::warn_uninit_self_reference_in_reference_init; } else if (cast<VarDecl>(OrigDecl)->isStaticLocal()) { diag = diag::warn_static_self_reference_in_init; } else if (isa<TranslationUnitDecl>(OrigDecl->getDeclContext()) || isa<NamespaceDecl>(OrigDecl->getDeclContext()) || DRE->getDecl()->getType()->isRecordType()) { diag = diag::warn_uninit_self_reference_in_init; } else { // Local variables will be handled by the CFG analysis. return; } S.DiagRuntimeBehavior(DRE->getLocStart(), DRE, S.PDiag(diag) << DRE->getNameInfo().getName() << OrigDecl->getLocation() << DRE->getSourceRange()); } }; /// CheckSelfReference - Warns if OrigDecl is used in expression E. static void CheckSelfReference(Sema &S, Decl* OrigDecl, Expr *E, bool DirectInit) { // Parameter arguments are occasionally constructed with themselves, // for instance, in recursive functions. Skip them. if (isa<ParmVarDecl>(OrigDecl)) return; E = E->IgnoreParens(); // Skip checking T a = a where T is not a record or reference type. // Doing so is a way to silence uninitialized warnings. if (!DirectInit && !cast<VarDecl>(OrigDecl)->getType()->isRecordType()) if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) if (ICE->getCastKind() == CK_LValueToRValue) if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) if (DRE->getDecl() == OrigDecl) return; SelfReferenceChecker(S, OrigDecl).CheckExpr(E); } } /// AddInitializerToDecl - Adds the initializer Init to the /// declaration dcl. If DirectInit is true, this is C++ direct /// initialization rather than copy initialization. void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit, bool TypeMayContainAuto) { // If there is no declaration, there was an error parsing it. Just ignore // the initializer. if (!RealDecl || RealDecl->isInvalidDecl()) { CorrectDelayedTyposInExpr(Init, dyn_cast_or_null<VarDecl>(RealDecl)); return; } if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) { // Pure-specifiers are handled in ActOnPureSpecifier. Diag(Method->getLocation(), diag::err_member_function_initialization) << Method->getDeclName() << Init->getSourceRange(); Method->setInvalidDecl(); return; } VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl); if (!VDecl) { assert(!isa<FieldDecl>(RealDecl) && "field init shouldn't get here"); Diag(RealDecl->getLocation(), diag::err_illegal_initializer); RealDecl->setInvalidDecl(); return; } ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init); // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for. if (TypeMayContainAuto && VDecl->getType()->isUndeducedType()) { // Attempt typo correction early so that the type of the init expression can // be deduced based on the chosen correction if the original init contains a // TypoExpr. ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl); if (!Res.isUsable()) { RealDecl->setInvalidDecl(); return; } if (Res.get() != Init) { Init = Res.get(); if (CXXDirectInit) CXXDirectInit = dyn_cast<ParenListExpr>(Init); } Expr *DeduceInit = Init; // Initializer could be a C++ direct-initializer. Deduction only works if it // contains exactly one expression.
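// For illustration (hypothetical declarations):
//   auto a(42);    // exactly one expression: deduces 'int'
//   auto b(1, 2);  // diagnosed below as err_auto_var_init_multiple_expressions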
if (CXXDirectInit) { if (CXXDirectInit->getNumExprs() == 0) { // It isn't possible to write this directly, but it is possible to // end up in this situation with "auto x(some_pack...);" Diag(CXXDirectInit->getLocStart(), VDecl->isInitCapture() ? diag::err_init_capture_no_expression : diag::err_auto_var_init_no_expression) << VDecl->getDeclName() << VDecl->getType() << VDecl->getSourceRange(); RealDecl->setInvalidDecl(); return; } else if (CXXDirectInit->getNumExprs() > 1) { Diag(CXXDirectInit->getExpr(1)->getLocStart(), VDecl->isInitCapture() ? diag::err_init_capture_multiple_expressions : diag::err_auto_var_init_multiple_expressions) << VDecl->getDeclName() << VDecl->getType() << VDecl->getSourceRange(); RealDecl->setInvalidDecl(); return; } else { DeduceInit = CXXDirectInit->getExpr(0); if (isa<InitListExpr>(DeduceInit)) Diag(CXXDirectInit->getLocStart(), diag::err_auto_var_init_paren_braces) << VDecl->getDeclName() << VDecl->getType() << VDecl->getSourceRange(); } } // Expressions default to 'id' when we're in a debugger. bool DefaultedToAuto = false; if (getLangOpts().DebuggerCastResultToId && Init->getType() == Context.UnknownAnyTy) { ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType()); if (Result.isInvalid()) { VDecl->setInvalidDecl(); return; } Init = Result.get(); DefaultedToAuto = true; } QualType DeducedType; if (DeduceAutoType(VDecl->getTypeSourceInfo(), DeduceInit, DeducedType) == DAR_Failed) DiagnoseAutoDeductionFailure(VDecl, DeduceInit); if (DeducedType.isNull()) { RealDecl->setInvalidDecl(); return; } VDecl->setType(DeducedType); assert(VDecl->isLinkageValid()); // In ARC, infer lifetime. if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl)) VDecl->setInvalidDecl(); // Warn if we deduced 'id'. 'auto' usually implies type-safety, but using // 'id' instead of a specific object type prevents most of our usual checks. // We only want to warn outside of template instantiations, though: // inside a template, the 'id' could have come from a parameter. if (ActiveTemplateInstantiations.empty() && !DefaultedToAuto && DeducedType->isObjCIdType()) { SourceLocation Loc = VDecl->getTypeSourceInfo()->getTypeLoc().getBeginLoc(); Diag(Loc, diag::warn_auto_var_is_id) << VDecl->getDeclName() << DeduceInit->getSourceRange(); } // If this is a redeclaration, check that the type we just deduced matches // the previously declared type. assert(!getLangOpts().HLSL && "auto types are not supported - merge type below is inconsequential"); // HLSL Change if (VarDecl *Old = VDecl->getPreviousDecl()) { // We never need to merge the type, because we cannot form an incomplete // array of auto, nor deduce such a type. ShadowMergeState MergeState = ShadowMergeState_Disallowed; // HLSL Change - add merge state MergeVarDeclTypes(VDecl, Old, /*MergeTypeWithPrevious*/false, MergeState); // HLSL Change - add merge state } // Check the deduced type is valid for a variable declaration. CheckVariableDeclarationType(VDecl); if (VDecl->isInvalidDecl()) return; // If all looks well, warn if this is a case that will change meaning when // we implement N3922. if (DirectInit && !CXXDirectInit && isa<InitListExpr>(Init)) { Diag(Init->getLocStart(), diag::warn_auto_var_direct_list_init) << FixItHint::CreateInsertion(Init->getLocStart(), "="); } } // dllimport cannot be used on variable definitions. 
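// For illustration (hypothetical declarations; static data members are
// exempt from the check below and are handled during finalization):
//   __declspec(dllimport) int bad = 1;   // err_attribute_dllimport_data_definition
//   __declspec(dllimport) extern int ok; // declaration only, accepted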
if (VDecl->hasAttr<DLLImportAttr>() && !VDecl->isStaticDataMember()) { Diag(VDecl->getLocation(), diag::err_attribute_dllimport_data_definition); VDecl->setInvalidDecl(); return; } if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) { // C99 6.7.8p5. C++ has no such restriction, but that is a defect. Diag(VDecl->getLocation(), diag::err_block_extern_cant_init); VDecl->setInvalidDecl(); return; } if (!VDecl->getType()->isDependentType()) { // A definition must end up with a complete type, which means it must be // complete with the restriction that an array type might be completed by // the initializer; note that later code assumes this restriction. QualType BaseDeclType = VDecl->getType(); if (const ArrayType *Array = Context.getAsIncompleteArrayType(BaseDeclType)) BaseDeclType = Array->getElementType(); if (RequireCompleteType(VDecl->getLocation(), BaseDeclType, diag::err_typecheck_decl_incomplete_type)) { RealDecl->setInvalidDecl(); return; } // The variable can not have an abstract class type. if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(), diag::err_abstract_type_in_decl, AbstractVariableType)) VDecl->setInvalidDecl(); } VarDecl *Def; if ((Def = VDecl->getDefinition()) && Def != VDecl) { NamedDecl *Hidden = nullptr; if (!hasVisibleDefinition(Def, &Hidden) && (VDecl->getFormalLinkage() == InternalLinkage || VDecl->getDescribedVarTemplate() || VDecl->getNumTemplateParameterLists() || VDecl->getDeclContext()->isDependentContext())) { // The previous definition is hidden, and multiple definitions are // permitted (in separate TUs). Form another definition of it. } else { Diag(VDecl->getLocation(), diag::err_redefinition) << VDecl->getDeclName(); Diag(Def->getLocation(), diag::note_previous_definition); VDecl->setInvalidDecl(); return; } } if (getLangOpts().CPlusPlus) { // C++ [class.static.data]p4 // If a static data member is of const integral or const // enumeration type, its declaration in the class definition can // specify a constant-initializer which shall be an integral // constant expression (5.19). In that case, the member can appear // in integral constant expressions. The member shall still be // defined in a namespace scope if it is used in the program and the // namespace scope definition shall not contain an initializer. // // We already performed a redefinition check above, but for static // data members we also need to check whether there was an in-class // declaration with an initializer. if (VDecl->isStaticDataMember() && VDecl->getCanonicalDecl()->hasInit()) { Diag(Init->getExprLoc(), diag::err_static_data_member_reinitialization) << VDecl->getDeclName(); Diag(VDecl->getCanonicalDecl()->getInit()->getExprLoc(), diag::note_previous_initializer) << 0; return; } if (VDecl->hasLocalStorage()) getCurFunction()->setHasBranchProtectedScope(); if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer)) { VDecl->setInvalidDecl(); return; } } // OpenCL 1.1 6.5.2: "Variables allocated in the __local address space inside // a kernel function cannot be initialized." if (VDecl->getStorageClass() == SC_OpenCLWorkGroupLocal) { Diag(VDecl->getLocation(), diag::err_local_cant_init); VDecl->setInvalidDecl(); return; } // Get the decl's type and save a reference for later, since // CheckInitializerTypes may change it.
QualType DclT = VDecl->getType(), SavT = DclT; // HLSL Change begin // When initializing an HLSL resource type we should diagnose mismatches in // globally coherent annotations _unless_ the source is a dynamic resource // placeholder type where we safely infer the globallycoherent annotation. DiagnoseGloballyCoherentMismatch(Init, DclT, Init->getExprLoc()); // HLSL Change end // Expressions default to 'id' when we're in a debugger // and we are assigning it to a variable of Objective-C pointer type. if (getLangOpts().DebuggerCastResultToId && DclT->isObjCObjectPointerType() && Init->getType() == Context.UnknownAnyTy) { ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType()); if (Result.isInvalid()) { VDecl->setInvalidDecl(); return; } Init = Result.get(); } // Perform the initialization. if (!VDecl->isInvalidDecl()) { InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl); InitializationKind Kind = DirectInit ? CXXDirectInit ? InitializationKind::CreateDirect(VDecl->getLocation(), Init->getLocStart(), Init->getLocEnd()) : InitializationKind::CreateDirectList( VDecl->getLocation()) : InitializationKind::CreateCopy(VDecl->getLocation(), Init->getLocStart()); MultiExprArg Args = Init; if (CXXDirectInit) Args = MultiExprArg(CXXDirectInit->getExprs(), CXXDirectInit->getNumExprs()); // Try to correct any TypoExprs in the initialization arguments. for (size_t Idx = 0; Idx < Args.size(); ++Idx) { ExprResult Res = CorrectDelayedTyposInExpr( Args[Idx], VDecl, [this, Entity, Kind](Expr *E) { InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E)); return Init.Failed() ? ExprError() : E; }); if (Res.isInvalid()) { VDecl->setInvalidDecl(); } else if (Res.get() != Args[Idx]) { Args[Idx] = Res.get(); } } if (VDecl->isInvalidDecl()) return; InitializationSequence InitSeq(*this, Entity, Kind, Args); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT); if (Result.isInvalid()) { VDecl->setInvalidDecl(); return; } Init = Result.getAs<Expr>(); assert(Init != nullptr && "otherwise result should be invalid"); // HLSL Change } // Check for self-references within variable initializers. // Variables declared within a function/method body (except for references) // are handled by a dataflow analysis. if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() || VDecl->getType()->isReferenceType()) { CheckSelfReference(*this, RealDecl, Init, DirectInit); } // If the type changed, it means we had an incomplete type that was // completed by the initializer. For example: // int ary[] = { 1, 3, 5 }; // "ary" transitions from an IncompleteArrayType to a ConstantArrayType. if (!VDecl->isInvalidDecl() && (DclT != SavT)) VDecl->setType(DclT); #if 0 // HLSL Change Starts - no ObjC support if (!VDecl->isInvalidDecl()) { checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init); if (VDecl->hasAttr<BlocksAttr>()) checkRetainCycles(VDecl, Init); // It is safe to assign a weak reference into a strong variable. // Although this code can still have problems: // id x = self.weakProp; // id y = self.weakProp; // we do not warn, to avoid warning spuriously when 'x' and 'y' are on separate // paths through the function. This should be revisited if // -Wrepeated-use-of-weak is made flow-sensitive.
if (VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong && !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Init->getLocStart())) getCurFunction()->markSafeWeakUse(Init); } #endif // HLSL Change Ends - no ObjC support // The initialization is usually a full-expression. // // FIXME: If this is a braced initialization of an aggregate, it is not // an expression, and each individual field initializer is a separate // full-expression. For instance, in: // // struct Temp { ~Temp(); }; // struct S { S(Temp); }; // struct T { S a, b; } t = { Temp(), Temp() }; // // we should destroy the first Temp before constructing the second. ExprResult Result = ActOnFinishFullExpr(Init, VDecl->getLocation(), false, VDecl->isConstexpr()); if (Result.isInvalid()) { VDecl->setInvalidDecl(); return; } Init = Result.get(); // Attach the initializer to the decl. VDecl->setInit(Init); if (VDecl->isLocalVarDecl()) { // C99 6.7.8p4: All the expressions in an initializer for an object that has // static storage duration shall be constant expressions or string literals. // C++ does not have this restriction. if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl()) { const Expr *Culprit; if (VDecl->getStorageClass() == SC_Static) CheckForConstantInitializer(Init, DclT); // C89 is stricter than C99 for non-static aggregate types. // C89 6.5.7p3: All the expressions [...] in an initializer list // for an object that has aggregate or union type shall be // constant expressions. else if (!getLangOpts().C99 && VDecl->getType()->isAggregateType() && isa<InitListExpr>(Init) && !Init->isConstantInitializer(Context, false, &Culprit)) Diag(Culprit->getExprLoc(), diag::ext_aggregate_init_not_constant) << Culprit->getSourceRange(); } } else if (VDecl->isStaticDataMember() && VDecl->getLexicalDeclContext()->isRecord()) { // This is an in-class initialization for a static data member, e.g., // // struct S { // static const int value = 17; // }; // C++ [class.mem]p4: // A member-declarator can contain a constant-initializer only // if it declares a static member (9.4) of const integral or // const enumeration type, see 9.4.2. // // C++11 [class.static.data]p3: // If a non-volatile const static data member is of integral or // enumeration type, its declaration in the class definition can // specify a brace-or-equal-initializer in which every initializer-clause // that is an assignment-expression is a constant expression. A static // data member of literal type can be declared in the class definition // with the constexpr specifier; if so, its declaration shall specify a // brace-or-equal-initializer in which every initializer-clause that is // an assignment-expression is a constant expression. // Do nothing on dependent types. if (DclT->isDependentType()) { // Allow any 'static constexpr' members, whether or not they are of literal // type. We separately check that every constexpr variable is of literal // type. } else if (VDecl->isConstexpr()) { // Require constness. } else if (!DclT.isConstQualified()) { Diag(VDecl->getLocation(), diag::err_in_class_initializer_non_const) << Init->getSourceRange(); VDecl->setInvalidDecl(); // We allow integer constant expressions in all cases. } else if (DclT->isIntegralOrEnumerationType()) { // Check whether the expression is a constant expression. SourceLocation Loc; if (getLangOpts().CPlusPlus11 && DclT.isVolatileQualified()) // In C++11, a non-constexpr const static data member with an // in-class initializer cannot be volatile.
Diag(VDecl->getLocation(), diag::err_in_class_initializer_volatile); else if (Init->isValueDependent()) ; // Nothing to check. else if (Init->isIntegerConstantExpr(Context, &Loc)) ; // Ok, it's an ICE! else if (Init->isEvaluatable(Context)) { // If we can constant fold the initializer through heroics, accept it, // but report this as a use of an extension for -pedantic. Diag(Loc, diag::ext_in_class_initializer_non_constant) << Init->getSourceRange(); } else { // Otherwise, this is some crazy unknown case. Report the issue at the // location provided by the isIntegerConstantExpr failed check. Diag(Loc, diag::err_in_class_initializer_non_constant) << Init->getSourceRange(); VDecl->setInvalidDecl(); } // We allow foldable floating-point constants as an extension. } else if (DclT->isFloatingType()) { // also permits complex, which is ok // In C++98, this is a GNU extension. In C++11, it is not, but we support // it anyway and provide a fixit to add the 'constexpr'. if (getLangOpts().CPlusPlus11) { Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type_cxx11) << DclT << Init->getSourceRange(); Diag(VDecl->getLocStart(), diag::note_in_class_initializer_float_type_cxx11) << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr "); } else { Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type) << DclT << Init->getSourceRange(); if (!Init->isValueDependent() && !Init->isEvaluatable(Context)) { Diag(Init->getExprLoc(), diag::err_in_class_initializer_non_constant) << Init->getSourceRange(); VDecl->setInvalidDecl(); } } // Suggest adding 'constexpr' in C++11 for literal types. } else if (getLangOpts().CPlusPlus11 && DclT->isLiteralType(Context)) { Diag(VDecl->getLocation(), diag::err_in_class_initializer_literal_type) << DclT << Init->getSourceRange() << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr "); VDecl->setConstexpr(true); } else { Diag(VDecl->getLocation(), diag::err_in_class_initializer_bad_type) << DclT << Init->getSourceRange(); VDecl->setInvalidDecl(); } } else if (VDecl->isFileVarDecl()) { if (VDecl->getStorageClass() == SC_Extern && (!getLangOpts().CPlusPlus || !VDecl->isExternC()) && !isTemplateInstantiation(VDecl->getTemplateSpecializationKind())) Diag(VDecl->getLocation(), diag::warn_extern_init); // C99 6.7.8p4. All file scoped initializers need to be constant. if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl()) CheckForConstantInitializer(Init, DclT); } // We will represent direct-initialization similarly to copy-initialization: // int x(1); -as-> int x = 1; // ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c); // // Clients that want to distinguish between the two forms, can check for // direct initializer using VarDecl::getInitStyle(). // A major benefit is that clients that don't particularly care about which // exactly form was it (like the CodeGen) can handle both cases without // special case code. // C++ 8.5p11: // The form of initialization (using parentheses or '=') is generally // insignificant, but does matter when the entity being initialized has a // class type. if (CXXDirectInit) { assert(DirectInit && "Call-style initializer must be direct init."); VDecl->setInitStyle(VarDecl::CallInit); } else if (DirectInit) { // This must be list-initialization. No other way is direct-initialization. 
VDecl->setInitStyle(VarDecl::ListInit); } CheckCompleteVariableDeclaration(VDecl); } /// ActOnInitializerError - Given that there was an error parsing an /// initializer for the given declaration, try to return to some form /// of sanity. void Sema::ActOnInitializerError(Decl *D) { // Our main concern here is re-establishing invariants like "a // variable's type is either dependent or complete". if (!D || D->isInvalidDecl()) return; VarDecl *VD = dyn_cast<VarDecl>(D); if (!VD) return; // Auto types are meaningless if we can't make sense of the initializer. if (ParsingInitForAutoVars.count(D)) { D->setInvalidDecl(); return; } QualType Ty = VD->getType(); if (Ty->isDependentType()) return; // Require a complete type. if (RequireCompleteType(VD->getLocation(), Context.getBaseElementType(Ty), diag::err_typecheck_decl_incomplete_type)) { VD->setInvalidDecl(); return; } // Require a non-abstract type. if (RequireNonAbstractType(VD->getLocation(), Ty, diag::err_abstract_type_in_decl, AbstractVariableType)) { VD->setInvalidDecl(); return; } // Don't bother complaining about constructors or destructors, // though. } void Sema::ActOnUninitializedDecl(Decl *RealDecl, bool TypeMayContainAuto) { // If there is no declaration, there was an error parsing it. Just ignore it. if (!RealDecl) return; if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) { QualType Type = Var->getType(); // C++11 [dcl.spec.auto]p3 if (TypeMayContainAuto && Type->getContainedAutoType()) { Diag(Var->getLocation(), diag::err_auto_var_requires_init) << Var->getDeclName() << Type; Var->setInvalidDecl(); return; } // C++11 [class.static.data]p3: A static data member can be declared with // the constexpr specifier; if so, its declaration shall specify // a brace-or-equal-initializer. // C++11 [dcl.constexpr]p1: The constexpr specifier shall be applied only to // the definition of a variable [...] or the declaration of a static data // member. if (Var->isConstexpr() && !Var->isThisDeclarationADefinition()) { if (Var->isStaticDataMember()) Diag(Var->getLocation(), diag::err_constexpr_static_mem_var_requires_init) << Var->getDeclName(); else Diag(Var->getLocation(), diag::err_invalid_constexpr_var_decl); Var->setInvalidDecl(); return; } // OpenCL v1.1 s6.5.3: variables declared in the constant address space must // be initialized. if (!Var->isInvalidDecl() && Var->getType().getAddressSpace() == LangAS::opencl_constant && Var->getStorageClass() != SC_Extern && !Var->getInit()) { Diag(Var->getLocation(), diag::err_opencl_constant_no_init); Var->setInvalidDecl(); return; } switch (Var->isThisDeclarationADefinition()) { case VarDecl::Definition: if (!Var->isStaticDataMember() || !Var->getAnyInitializer()) break; // We have an out-of-line definition of a static data member // that has an in-class initializer, so we type-check this like // a declaration. // LLVM_FALLTHROUGH; // HLSL Change case VarDecl::DeclarationOnly: // It's only a declaration. // Block scope. C99 6.7p7: If an identifier for an object is // declared with no linkage (C99 6.2.2p6), the type for the // object shall be complete. if (!Type->isDependentType() && Var->isLocalVarDecl() && !Var->hasLinkage() && !Var->isInvalidDecl() && RequireCompleteType(Var->getLocation(), Type, diag::err_typecheck_decl_incomplete_type)) Var->setInvalidDecl(); // Make sure that the type is not abstract. 
if (!Type->isDependentType() && !Var->isInvalidDecl() && RequireNonAbstractType(Var->getLocation(), Type, diag::err_abstract_type_in_decl, AbstractVariableType)) Var->setInvalidDecl(); if (!Type->isDependentType() && !Var->isInvalidDecl() && Var->getStorageClass() == SC_PrivateExtern) { Diag(Var->getLocation(), diag::warn_private_extern); Diag(Var->getLocation(), diag::note_private_extern); } return; case VarDecl::TentativeDefinition: // File scope. C99 6.9.2p2: A declaration of an identifier for an // object that has file scope without an initializer, and without a // storage-class specifier or with the storage-class specifier "static", // constitutes a tentative definition. Note: A tentative definition with // external linkage is valid (C99 6.2.2p5). if (!Var->isInvalidDecl()) { if (const IncompleteArrayType *ArrayT = Context.getAsIncompleteArrayType(Type)) { if (RequireCompleteType(Var->getLocation(), ArrayT->getElementType(), diag::err_illegal_decl_array_incomplete_type)) Var->setInvalidDecl(); } else if (Var->getStorageClass() == SC_Static) { // C99 6.9.2p3: If the declaration of an identifier for an object is // a tentative definition and has internal linkage (C99 6.2.2p3), the // declared type shall not be an incomplete type. // NOTE: code such as the following // static struct s; // struct s { int a; }; // is accepted by gcc. Hence here we issue a warning instead of // an error and we do not invalidate the static declaration. // NOTE: to avoid multiple warnings, only check the first declaration. if (Var->isFirstDecl()) RequireCompleteType(Var->getLocation(), Type, diag::ext_typecheck_decl_incomplete_type); } } // Record the tentative definition; we're done. if (!Var->isInvalidDecl()) TentativeDefinitions.push_back(Var); return; } // Provide a specific diagnostic for uninitialized variable // definitions with incomplete array type. if (Type->isIncompleteArrayType() // HLSL Change Starts // Allow incomplete resource array. && !hlsl::IsIncompleteHLSLResourceArrayType(getASTContext(), Type) // Allow incomplete ConstantBufferView array. && !(IsOnHLSLBufferView()) // HLSL Change Ends ) { Diag(Var->getLocation(), diag::err_typecheck_incomplete_array_needs_initializer); Var->setInvalidDecl(); return; } // Provide a specific diagnostic for uninitialized variable // definitions with reference type. if (Type->isReferenceType()) { Diag(Var->getLocation(), diag::err_reference_var_requires_init) << Var->getDeclName() << SourceRange(Var->getLocation(), Var->getLocation()); Var->setInvalidDecl(); return; } // Do not attempt to type-check the default initializer for a // variable with dependent type. if (Type->isDependentType()) return; if (Var->isInvalidDecl()) return; if (!Var->hasAttr<AliasAttr>()) { if (RequireCompleteType(Var->getLocation(), Context.getBaseElementType(Type), diag::err_typecheck_decl_incomplete_type)) { Var->setInvalidDecl(); return; } } else { return; } // The variable can not have an abstract class type. if (RequireNonAbstractType(Var->getLocation(), Type, diag::err_abstract_type_in_decl, AbstractVariableType)) { Var->setInvalidDecl(); return; } // Check for jumps past the implicit initializer. C++0x // clarifies that this applies to a "variable with automatic // storage duration", not a "local variable". 
// C++11 [stmt.dcl]p3 // A program that jumps from a point where a variable with automatic // storage duration is not in scope to a point where it is in scope is // ill-formed unless the variable has scalar type, class type with a // trivial default constructor and a trivial destructor, a cv-qualified // version of one of these types, or an array of one of the preceding // types and is declared without an initializer. if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) { if (const RecordType *Record = Context.getBaseElementType(Type)->getAs<RecordType>()) { CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl()); // Mark the function for further checking even if the looser rules of // C++11 do not require such checks, so that we can diagnose // incompatibilities with C++98. if (!CXXRecord->isPOD()) getCurFunction()->setHasBranchProtectedScope(); } } // C++03 [dcl.init]p9: // If no initializer is specified for an object, and the // object is of (possibly cv-qualified) non-POD class type (or // array thereof), the object shall be default-initialized; if // the object is of const-qualified type, the underlying class // type shall have a user-declared default // constructor. Otherwise, if no initializer is specified for // a non-static object, the object and its subobjects, if // any, have an indeterminate initial value; if the object // or any of its subobjects are of const-qualified type, the // program is ill-formed. // C++0x [dcl.init]p11: // If no initializer is specified for an object, the object is // default-initialized; [...]. InitializedEntity Entity = InitializedEntity::InitializeVariable(Var); InitializationKind Kind = InitializationKind::CreateDefault(Var->getLocation()); InitializationSequence InitSeq(*this, Entity, Kind, None); ExprResult Init = InitSeq.Perform(*this, Entity, Kind, None); if (Init.isInvalid()) Var->setInvalidDecl(); else if (Init.get()) { Var->setInit(MaybeCreateExprWithCleanups(Init.get())); // This is important for template substitution. Var->setInitStyle(VarDecl::CallInit); } CheckCompleteVariableDeclaration(Var); } } void Sema::ActOnCXXForRangeDecl(Decl *D) { VarDecl *VD = dyn_cast<VarDecl>(D); if (!VD) { Diag(D->getLocation(), diag::err_for_range_decl_must_be_var); D->setInvalidDecl(); return; } VD->setCXXForRangeDecl(true); // for-range-declaration cannot be given a storage class specifier.
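// For illustration, each storage class below selects the corresponding
// %select index of err_for_range_storage_class, e.g. (hypothetical):
//   for (static int x : xs) { }  // diagnosed with Error == 1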
int Error = -1; switch (VD->getStorageClass()) { case SC_None: break; case SC_Extern: Error = 0; break; case SC_Static: Error = 1; break; case SC_PrivateExtern: Error = 2; break; case SC_Auto: Error = 3; break; case SC_Register: Error = 4; break; case SC_OpenCLWorkGroupLocal: llvm_unreachable("Unexpected storage class"); } if (Error != -1) { Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class) << VD->getDeclName() << Error; D->setInvalidDecl(); } } StmtResult Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd) { // C++1y [stmt.iter]p1: // A range-based for statement of the form // for ( for-range-identifier : for-range-initializer ) statement // is equivalent to // for ( auto&& for-range-identifier : for-range-initializer ) statement DeclSpec DS(Attrs.getPool().getFactory()); const char *PrevSpec; unsigned DiagID; DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID, getPrintingPolicy()); Declarator D(DS, Declarator::ForContext); D.SetIdentifier(Ident, IdentLoc); D.takeAttributes(Attrs, AttrEnd); ParsedAttributes EmptyAttrs(Attrs.getPool().getFactory()); D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/false), EmptyAttrs, IdentLoc); Decl *Var = ActOnDeclarator(S, D); cast<VarDecl>(Var)->setCXXForRangeDecl(true); FinalizeDeclaration(Var); return ActOnDeclStmt(FinalizeDeclaratorGroup(S, DS, Var), IdentLoc, AttrEnd.isValid() ? AttrEnd : IdentLoc); } void Sema::CheckCompleteVariableDeclaration(VarDecl *var) { if (var->isInvalidDecl()) return; // In ARC, don't allow jumps past the implicit initialization of a // local retaining variable. if (getLangOpts().ObjCAutoRefCount && var->hasLocalStorage()) { switch (var->getType().getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: break; case Qualifiers::OCL_Weak: case Qualifiers::OCL_Strong: getCurFunction()->setHasBranchProtectedScope(); break; } } // Warn about externally-visible variables being defined without a // prior declaration. We only want to do this for global // declarations, but we also specifically need to avoid doing it for // class members because the linkage of an anonymous class can // change if it's later given a typedef name. if (var->isThisDeclarationADefinition() && var->getDeclContext()->getRedeclContext()->isFileContext() && var->isExternallyVisible() && var->hasLinkage() && !getDiagnostics().isIgnored(diag::warn_missing_variable_declarations, var->getLocation())) { // Find a previous declaration that's not a definition. VarDecl *prev = var->getPreviousDecl(); while (prev && prev->isThisDeclarationADefinition()) prev = prev->getPreviousDecl(); if (!prev) Diag(var->getLocation(), diag::warn_missing_variable_declarations) << var; } if (var->getTLSKind() == VarDecl::TLS_Static) { const Expr *Culprit; if (var->getType().isDestructedType()) { // GNU C++98 edits for __thread, [basic.start.term]p3: // The type of an object with thread storage duration shall not // have a non-trivial destructor. Diag(var->getLocation(), diag::err_thread_nontrivial_dtor); if (getLangOpts().CPlusPlus11) Diag(var->getLocation(), diag::note_use_thread_local); } else if (getLangOpts().CPlusPlus && var->hasInit() && !var->getInit()->isConstantInitializer( Context, var->getType()->isReferenceType(), &Culprit)) { // GNU C++98 edits for __thread, [basic.start.init]p4: // An object of thread storage duration shall not require dynamic // initialization. 
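// For illustration (hypothetical declarations):
//   __thread std::string s;      // err_thread_nontrivial_dtor
//   __thread int n = compute();  // err_thread_dynamic_init at 'compute()'
//   __thread int z = 0;          // constant initializer: accepted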
// FIXME: Need strict checking here. Diag(Culprit->getExprLoc(), diag::err_thread_dynamic_init) << Culprit->getSourceRange(); if (getLangOpts().CPlusPlus11) Diag(var->getLocation(), diag::note_use_thread_local); } } // Apply section attributes and pragmas to global variables. bool GlobalStorage = var->hasGlobalStorage(); if (GlobalStorage && var->isThisDeclarationADefinition() && ActiveTemplateInstantiations.empty()) { PragmaStack<StringLiteral *> *Stack = nullptr; int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read; if (var->getType().isConstQualified() // HLSL Change: for HLSL, const initialized isn't the same, unless static && (!getLangOpts().HLSL || var->getTLSKind() == VarDecl::TLS_Static) ) Stack = &ConstSegStack; else if (!var->getInit()) { Stack = &BSSSegStack; SectionFlags |= ASTContext::PSF_Write; } else { Stack = &DataSegStack; SectionFlags |= ASTContext::PSF_Write; } if (Stack->CurrentValue && !var->hasAttr<SectionAttr>()) { var->addAttr(SectionAttr::CreateImplicit( Context, SectionAttr::Declspec_allocate, Stack->CurrentValue->getString(), Stack->CurrentPragmaLocation)); } if (const SectionAttr *SA = var->getAttr<SectionAttr>()) if (UnifySection(SA->getName(), SectionFlags, var)) var->dropAttr<SectionAttr>(); // Apply the init_seg attribute if this has an initializer. If the // initializer turns out to not be dynamic, we'll end up ignoring this // attribute. if (CurInitSeg && var->getInit()) var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(), CurInitSegLoc)); } // All the following checks are C++ only. if (!getLangOpts().CPlusPlus) return; QualType type = var->getType(); if (type->isDependentType()) return; // __block variables might require us to capture a copy-initializer. if (var->hasAttr<BlocksAttr>()) { // It's currently invalid to ever have a __block variable with an // array type; should we diagnose that here? // Regardless, we don't want to ignore array nesting when // constructing this copy. if (type->isStructureOrClassType()) { EnterExpressionEvaluationContext scope(*this, PotentiallyEvaluated); SourceLocation poi = var->getLocation(); Expr *varRef = new (Context) DeclRefExpr(var, false, type, VK_LValue, poi); ExprResult result = PerformMoveOrCopyInitialization( InitializedEntity::InitializeBlock(poi, type, false), var, var->getType(), varRef, /*AllowNRVO=*/true); if (!result.isInvalid()) { result = MaybeCreateExprWithCleanups(result); Expr *init = result.getAs<Expr>(); Context.setBlockVarCopyInits(var, init); } } } Expr *Init = var->getInit(); bool IsGlobal = GlobalStorage && !var->isStaticLocal(); QualType baseType = Context.getBaseElementType(type); if (!var->getDeclContext()->isDependentContext() && Init && !Init->isValueDependent()) { if (IsGlobal && !var->isConstexpr() && !getDiagnostics().isIgnored(diag::warn_global_constructor, var->getLocation())) { // Warn about globals which don't have a constant initializer. Don't // warn about globals with a non-trivial destructor because we already // warned about them.
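// For illustration ('Widget' is a hypothetical class type with a
// non-constant constructor but a trivial destructor):
//   Widget g = makeWidget();  // warn_global_constructor
//   int n = 42;               // constant initializer: no warning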
CXXRecordDecl *RD = baseType->getAsCXXRecordDecl(); if (!(RD && !RD->hasTrivialDestructor()) && !Init->isConstantInitializer(Context, baseType->isReferenceType())) Diag(var->getLocation(), diag::warn_global_constructor) << Init->getSourceRange(); } if (var->isConstexpr()) { SmallVector<PartialDiagnosticAt, 8> Notes; if (!var->evaluateValue(Notes) || !var->isInitICE()) { SourceLocation DiagLoc = var->getLocation(); // If the note doesn't add any useful information other than a source // location, fold it into the primary diagnostic. if (Notes.size() == 1 && Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr) { DiagLoc = Notes[0].first; Notes.clear(); } Diag(DiagLoc, diag::err_constexpr_var_requires_const_init) << var << Init->getSourceRange(); for (unsigned I = 0, N = Notes.size(); I != N; ++I) Diag(Notes[I].first, Notes[I].second); } } else if (var->isUsableInConstantExpressions(Context)) { // Check whether the initializer of a const variable of integral or // enumeration type is an ICE now, since we can't tell whether it was // initialized by a constant expression if we check later. var->checkInitIsICE(); } } // Require the destructor. if (const RecordType *recordType = baseType->getAs<RecordType>()) FinalizeVarWithDestructor(var, recordType); } /// \brief Determines if a variable's alignment is dependent. static bool hasDependentAlignment(VarDecl *VD) { if (VD->getType()->isDependentType()) return true; for (auto *I : VD->specific_attrs<AlignedAttr>()) if (I->isAlignmentDependent()) return true; return false; } /// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform /// any semantic actions necessary after any initializer has been attached. void Sema::FinalizeDeclaration(Decl *ThisDecl) { // Note that we are no longer parsing the initializer for this declaration. ParsingInitForAutoVars.erase(ThisDecl); VarDecl *VD = dyn_cast_or_null<VarDecl>(ThisDecl); if (!VD) return; checkAttributesAfterMerging(*this, *VD); // Perform TLS alignment check here after attributes attached to the variable // which may affect the alignment have been processed. Only perform the check // if the target has a maximum TLS alignment (zero means no constraints). if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) { // Protect the check so that it's not performed on dependent types and // dependent alignments (we can't determine the alignment in that case). if (VD->getTLSKind() && !hasDependentAlignment(VD)) { CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign); if (Context.getDeclAlign(VD) > MaxAlignChars) { Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum) << (unsigned)Context.getDeclAlign(VD).getQuantity() << VD << (unsigned)MaxAlignChars.getQuantity(); } } } // Static locals inherit dll attributes from their function. if (VD->isStaticLocal()) { if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) { if (Attr *A = getDLLAttr(FD)) { auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext())); NewAttr->setInherited(true); VD->addAttr(NewAttr); } } } // Grab the dllimport or dllexport attribute off of the VarDecl. const InheritableAttr *DLLAttr = getDLLAttr(VD); // Imported static data members cannot be defined out-of-line. if (const auto *IA = dyn_cast_or_null<DLLImportAttr>(DLLAttr)) { if (VD->isStaticDataMember() && VD->isOutOfLine() && VD->isThisDeclarationADefinition()) { // We allow definitions of dllimport class template static data members // with a warning. 
CXXRecordDecl *Context = cast<CXXRecordDecl>(VD->getFirstDecl()->getDeclContext()); bool IsClassTemplateMember = isa<ClassTemplatePartialSpecializationDecl>(Context) || Context->getDescribedClassTemplate(); Diag(VD->getLocation(), IsClassTemplateMember ? diag::warn_attribute_dllimport_static_field_definition : diag::err_attribute_dllimport_static_field_definition); Diag(IA->getLocation(), diag::note_attribute); if (!IsClassTemplateMember) VD->setInvalidDecl(); } } // dllimport/dllexport variables cannot be thread local, their TLS index // isn't exported with the variable. if (DLLAttr && VD->getTLSKind()) { Diag(VD->getLocation(), diag::err_attribute_dll_thread_local) << VD << DLLAttr; VD->setInvalidDecl(); } if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) { if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) { Diag(Attr->getLocation(), diag::warn_attribute_ignored) << Attr; VD->dropAttr<UsedAttr>(); } } const DeclContext *DC = VD->getDeclContext(); // If there's a #pragma GCC visibility in scope, and this isn't a class // member, set the visibility of this variable. if (DC->getRedeclContext()->isFileContext() && VD->isExternallyVisible()) AddPushedVisibilityAttribute(VD); // FIXME: Warn on unused templates. if (VD->isFileVarDecl() && !VD->getDescribedVarTemplate() && !isa<VarTemplatePartialSpecializationDecl>(VD)) MarkUnusedFileScopedDecl(VD); // Now we have parsed the initializer and can update the table of magic // tag values. if (!VD->hasAttr<TypeTagForDatatypeAttr>() || !VD->getType()->isIntegralOrEnumerationType()) return; for (const auto *I : ThisDecl->specific_attrs<TypeTagForDatatypeAttr>()) { const Expr *MagicValueExpr = VD->getInit(); if (!MagicValueExpr) { continue; } llvm::APSInt MagicValueInt; if (!MagicValueExpr->isIntegerConstantExpr(MagicValueInt, Context)) { Diag(I->getRange().getBegin(), diag::err_type_tag_for_datatype_not_ice) << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange(); continue; } if (MagicValueInt.getActiveBits() > 64) { Diag(I->getRange().getBegin(), diag::err_type_tag_for_datatype_too_large) << LangOpts.CPlusPlus << MagicValueExpr->getSourceRange(); continue; } uint64_t MagicValue = MagicValueInt.getZExtValue(); RegisterTypeTagForDatatype(I->getArgumentKind(), MagicValue, I->getMatchingCType(), I->getLayoutCompatible(), I->getMustBeNull()); } } Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group) { SmallVector<Decl*, 8> Decls; if (DS.isTypeSpecOwned()) Decls.push_back(DS.getRepAsDecl()); DeclaratorDecl *FirstDeclaratorInGroup = nullptr; for (unsigned i = 0, e = Group.size(); i != e; ++i) if (Decl *D = Group[i]) { if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) if (!FirstDeclaratorInGroup) FirstDeclaratorInGroup = DD; Decls.push_back(D); } if (DeclSpec::isDeclRep(DS.getTypeSpecType())) { if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) { handleTagNumbering(Tag, S); if (!Tag->hasNameForLinkage() && !Tag->hasDeclaratorForAnonDecl()) Tag->setDeclaratorForAnonDecl(FirstDeclaratorInGroup); } } return BuildDeclaratorGroup(Decls, DS.containsPlaceholderType()); } /// BuildDeclaratorGroup - convert a list of declarations into a declaration /// group, performing any necessary semantic checking. Sema::DeclGroupPtrTy Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto) { // C++0x [dcl.spec.auto]p7: // If the type deduced for the template parameter U is not the same in each // deduction, the program is ill-formed. 
// FIXME: When initializer-list support is added, a distinction is needed // between the deduced type U and the deduced type which 'auto' stands for. // auto a = 0, b = { 1, 2, 3 }; // is legal because the deduced type U is 'int' in both cases. if (TypeMayContainAuto && Group.size() > 1) { QualType Deduced; CanQualType DeducedCanon; VarDecl *DeducedDecl = nullptr; for (unsigned i = 0, e = Group.size(); i != e; ++i) { if (VarDecl *D = dyn_cast<VarDecl>(Group[i])) { AutoType *AT = D->getType()->getContainedAutoType(); // Don't reissue diagnostics when instantiating a template. if (AT && D->isInvalidDecl()) break; QualType U = AT ? AT->getDeducedType() : QualType(); if (!U.isNull()) { CanQualType UCanon = Context.getCanonicalType(U); if (Deduced.isNull()) { Deduced = U; DeducedCanon = UCanon; DeducedDecl = D; } else if (DeducedCanon != UCanon) { Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(), diag::err_auto_different_deductions) << (AT->isDecltypeAuto() ? 1 : 0) << Deduced << DeducedDecl->getDeclName() << U << D->getDeclName() << DeducedDecl->getInit()->getSourceRange() << D->getInit()->getSourceRange(); D->setInvalidDecl(); break; } } } } } ActOnDocumentableDecls(Group); return DeclGroupPtrTy::make( DeclGroupRef::Create(Context, Group.data(), Group.size())); } void Sema::ActOnDocumentableDecl(Decl *D) { ActOnDocumentableDecls(D); } void Sema::ActOnDocumentableDecls(ArrayRef<Decl *> Group) { // Don't parse the comment if Doxygen diagnostics are ignored. if (Group.empty() || !Group[0]) return; if (Diags.isIgnored(diag::warn_doc_param_not_found, Group[0]->getLocation()) && Diags.isIgnored(diag::warn_unknown_comment_command_name, Group[0]->getLocation())) return; if (Group.size() >= 2) { // This is a decl group. Normally it will contain only declarations // produced from the declarator list. But in case we have any definitions or // additional declaration references: // 'typedef struct S {} S;' // 'typedef struct S *S;' // 'struct S *pS;' // FinalizeDeclaratorGroup adds these as separate declarations. Decl *MaybeTagDecl = Group[0]; if (MaybeTagDecl && isa<TagDecl>(MaybeTagDecl)) { Group = Group.slice(1); } } // See if there are any new comments that are not attached to a decl. ArrayRef<RawComment *> Comments = Context.getRawCommentList().getComments(); if (!Comments.empty() && !Comments.back()->isAttached()) { // There is at least one comment that is not attached to a decl. // Maybe it should be attached to one of these decls? // // Note that this way we pick up not only comments that precede the // declaration, but also comments that *follow* the declaration -- thanks to // the lookahead in the lexer: we've consumed the semicolon and looked // ahead through comments. for (unsigned i = 0, e = Group.size(); i != e; ++i) Context.getCommentForDecl(Group[i], &PP); } } /// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator() /// to introduce parameters into function prototype scope. Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) { const DeclSpec &DS = D.getDeclSpec(); // Verify C99 6.7.5.3p2: The only SCS allowed is 'register'. // C++03 [dcl.stc]p2 also permits 'auto'.
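// For illustration (hypothetical prototypes; 'auto' here is the C++03
// storage class, not C++11 type deduction):
//   void f(register int r);  // accepted: SC_Register
//   void g(auto int a);      // accepted in C++ only: SC_Auto
//   void h(static int s);    // err_invalid_storage_class_in_func_decl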
  StorageClass SC = SC_None;
  if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
    SC = SC_Register;
  } else if (getLangOpts().CPlusPlus &&
             DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
    SC = SC_Auto;
  } else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
    Diag(DS.getStorageClassSpecLoc(),
         diag::err_invalid_storage_class_in_func_decl);
    D.getMutableDeclSpec().ClearStorageClassSpecs();
  }

  if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
    Diag(DS.getThreadStorageClassSpecLoc(), diag::err_invalid_thread)
        << DeclSpec::getSpecifierName(TSCS);
  if (DS.isConstexprSpecified())
    Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr)
        << 0;

  DiagnoseFunctionSpecifiers(DS);

  TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
  QualType parmDeclType = TInfo->getType();

  if (getLangOpts().CPlusPlus) {
    // Check that there are no default arguments inside the type of this
    // parameter.
    CheckExtraCXXDefaultArguments(D);

    // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
    if (D.getCXXScopeSpec().isSet()) {
      Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
          << D.getCXXScopeSpec().getRange();
      D.getCXXScopeSpec().clear();
    }
  }

  // Ensure we have a valid name.
  IdentifierInfo *II = nullptr;
  if (D.hasName()) {
    II = D.getIdentifier();
    if (!II) {
      Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
          << GetNameForDeclarator(D).getName();
      D.setInvalidType(true);
    }
  }

  // Check for redeclaration of parameters, e.g. int foo(int x, int x);
  if (II) {
    LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
                   ForRedeclaration);
    LookupName(R, S);
    if (R.isSingleResult()) {
      NamedDecl *PrevDecl = R.getFoundDecl();
      if (PrevDecl->isTemplateParameter()) {
        // Maybe we will complain about the shadowed template parameter.
        DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
        // Just pretend that we didn't see the previous declaration.
        PrevDecl = nullptr;
      } else if (S->isDeclScope(PrevDecl)) {
        Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
        Diag(PrevDecl->getLocation(), diag::note_previous_declaration);

        // Recover by removing the name.
        II = nullptr;
        D.SetIdentifier(nullptr, D.getIdentifierLoc());
        D.setInvalidType(true);
      }
    }
  }

  // HLSL Change Starts
  if (getLangOpts().HLSL) {
    const bool IsParm = true;
    if (hlsl::IsStringType(parmDeclType)) {
      static const unsigned selectParamIdx = 1;
      Diag(D.getLocStart(), diag::err_hlsl_unsupported_string_decl)
          << selectParamIdx;
      D.setInvalidType();
    }
    if (!DiagnoseHLSLDecl(D, Context.getTranslationUnitDecl(),
                          /*BitWidth*/ nullptr, TInfo, IsParm)) {
      assert(D.isInvalidType() && "otherwise DiagnoseHLSLDecl failed but "
                                  "didn't invalidate declaration");
    }
  }
  // HLSL Change Ends

  // Temporarily put parameter variables in the translation unit, not
  // the enclosing context. This prevents them from accidentally
  // looking like class members in C++.
  ParmVarDecl *New =
      CheckParameter(Context.getTranslationUnitDecl(), D.getLocStart(),
                     D.getIdentifierLoc(), II, parmDeclType, TInfo, SC,
                     hlsl::ParamModFromAttributeList(
                         DS.getAttributes().getList())); // HLSL Change

  if (D.isInvalidType())
    New->setInvalidDecl();

  TransferUnusualAttributes(D, New); // HLSL Change

  assert(S->isFunctionPrototypeScope());
  assert(S->getFunctionPrototypeDepth() >= 1);
  New->setScopeInfo(S->getFunctionPrototypeDepth() - 1,
                    S->getNextFunctionPrototypeIndex());

  // Add the parameter declaration into this scope.
  S->AddDecl(New);
  if (II)
    IdResolver.AddDecl(New);

  ProcessDeclAttributes(S, New, D);

  if (D.getDeclSpec().isModulePrivateSpecified())
    Diag(New->getLocation(), diag::err_module_private_local)
        << 1 << New->getDeclName()
        << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
        << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());

  if (New->hasAttr<BlocksAttr>()) {
    Diag(New->getLocation(), diag::err_block_on_nonlocal);
  }
  return New;
}

/// \brief Synthesizes a variable for a parameter arising from a
/// typedef.
ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
                                              SourceLocation Loc,
                                              QualType T) {
  /* FIXME: setting StartLoc == Loc.
     Would it be worth to modify callers so as to provide proper source
     location for the unnamed parameters, embedding the parameter's type? */
  ParmVarDecl *Param =
      ParmVarDecl::Create(Context, DC, Loc, Loc, nullptr, T,
                          Context.getTrivialTypeSourceInfo(T, Loc),
                          SC_None, nullptr);
  Param->setImplicit();
  return Param;
}

void Sema::DiagnoseUnusedParameters(ParmVarDecl * const *Param,
                                    ParmVarDecl * const *ParamEnd) {
  // Don't diagnose unused-parameter errors in template instantiations; we
  // will already have done so in the template itself.
  if (!ActiveTemplateInstantiations.empty())
    return;

  for (; Param != ParamEnd; ++Param) {
    if (!(*Param)->isReferenced() && (*Param)->getDeclName() &&
        !(*Param)->hasAttr<UnusedAttr>()) {
      Diag((*Param)->getLocation(), diag::warn_unused_parameter)
          << (*Param)->getDeclName();
    }
  }
}

void Sema::DiagnoseSizeOfParametersAndReturnValue(
    ParmVarDecl * const *Param, ParmVarDecl * const *ParamEnd,
    QualType ReturnTy, NamedDecl *D) {
  if (LangOpts.NumLargeByValueCopy == 0) // No check.
    return;

  // Warn if the return value is pass-by-value and larger than the specified
  // threshold.
  if (!ReturnTy->isDependentType() && ReturnTy.isPODType(Context)) {
    unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
    if (Size > LangOpts.NumLargeByValueCopy)
      Diag(D->getLocation(), diag::warn_return_value_size)
          << D->getDeclName() << Size;
  }

  // Warn if any parameter is pass-by-value and larger than the specified
  // threshold.
  for (; Param != ParamEnd; ++Param) {
    QualType T = (*Param)->getType();
    if (T->isDependentType() || !T.isPODType(Context))
      continue;
    unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
    if (Size > LangOpts.NumLargeByValueCopy)
      Diag((*Param)->getLocation(), diag::warn_parameter_size)
          << (*Param)->getDeclName() << Size;
  }
}

ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                                  SourceLocation NameLoc,
                                  IdentifierInfo *Name, QualType T,
                                  TypeSourceInfo *TSInfo, StorageClass SC,
                                  hlsl::ParameterModifier ParamMod) { // HLSL Change - add param mod
  // In ARC, infer a lifetime qualifier for appropriate parameter types.
  if (getLangOpts().ObjCAutoRefCount &&
      T.getObjCLifetime() == Qualifiers::OCL_None &&
      T->isObjCLifetimeType()) {

    Qualifiers::ObjCLifetime lifetime;

    // Special cases for arrays:
    //   - if it's const, use __unsafe_unretained
    //   - otherwise, it's an error
    if (T->isArrayType()) {
      if (!T.isConstQualified()) {
        DelayedDiagnostics.add(
            sema::DelayedDiagnostic::makeForbiddenType(
                NameLoc, diag::err_arc_array_param_no_ownership, T, false));
      }
      lifetime = Qualifiers::OCL_ExplicitNone;
    } else {
      lifetime = T->getObjCARCImplicitLifetime();
    }
    T = Context.getLifetimeQualifiedType(T, lifetime);
  }

  ParmVarDecl *New = ParmVarDecl::Create(Context, DC, StartLoc, NameLoc, Name,
                                         Context.getAdjustedParameterType(T),
                                         TSInfo, SC, nullptr, ParamMod);
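  // Note: getAdjustedParameterType above performs the usual parameter
  // adjustments, e.g. an 'int a[4]' parameter becomes 'int *a' and a
  // function-typed parameter becomes a function pointer (C99 6.7.5.3p7-p8).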
  // Parameters cannot be abstract class types.
  // For record types, this is done by the AbstractClassUsageDiagnoser once
  // the class has been completely parsed.
  if (!CurContext->isRecord() &&
      RequireNonAbstractType(NameLoc, T, diag::err_abstract_type_in_decl,
                             AbstractParamType))
    New->setInvalidDecl();

  // Parameter declarators cannot be interface types. All ObjC objects are
  // passed by reference.
  if (T->isObjCObjectType()) {
    SourceLocation TypeEndLoc = TSInfo->getTypeLoc().getLocEnd();
    Diag(NameLoc, diag::err_object_cannot_be_passed_returned_by_value)
        << 1 << T
        << FixItHint::CreateInsertion(TypeEndLoc, "*");
    T = Context.getObjCObjectPointerType(T);
    New->setType(T);
  }

  // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
  // duration shall not be qualified by an address-space qualifier."
  // Since all parameters have automatic store duration, they cannot have
  // an address space.
  if (T.getAddressSpace() != 0) {
    // OpenCL allows function arguments declared to be an array of a type
    // to be qualified with an address space.
    if (!(getLangOpts().OpenCL && T->isArrayType())) {
      Diag(NameLoc, diag::err_arg_with_address_space);
      New->setInvalidDecl();
    }
  }

  return New;
}

void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                           SourceLocation LocAfterDecls) {
  DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();

  // Verify 6.9.1p6: 'every identifier in the identifier list shall be
  // declared' for a K&R function.
  if (!FTI.hasPrototype) {
    for (int i = FTI.NumParams; i != 0; /* decrement in loop */) {
      --i;
      if (FTI.Params[i].Param == nullptr) {
        SmallString<256> Code;
        llvm::raw_svector_ostream(Code)
            << " int " << FTI.Params[i].Ident->getName() << ";\n";
        Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
            << FTI.Params[i].Ident
            << FixItHint::CreateInsertion(LocAfterDecls, Code);

        // Implicitly declare the argument as type 'int' for lack of a better
        // type.
        AttributeFactory attrs;
        DeclSpec DS(attrs);
        const char* PrevSpec; // unused
        unsigned DiagID; // unused
        DS.SetTypeSpecType(DeclSpec::TST_int, FTI.Params[i].IdentLoc,
                           PrevSpec, DiagID, Context.getPrintingPolicy());
        // Use the identifier location for the type source range.
        DS.SetRangeStart(FTI.Params[i].IdentLoc);
        DS.SetRangeEnd(FTI.Params[i].IdentLoc);
        Declarator ParamD(DS, Declarator::KNRTypeListContext);
        ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
        FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
      }
    }
  }
}

Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D) {
  assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
  assert(D.isFunctionDeclarator() && "Not a function declarator!");
  Scope *ParentScope = FnBodyScope->getParent();

  D.setFunctionDefinitionKind(FDK_Definition);
  Decl *DP = HandleDeclarator(ParentScope, D, MultiTemplateParamsArg());
  return ActOnStartOfFunctionDef(FnBodyScope, DP);
}

void Sema::ActOnFinishInlineMethodDef(CXXMethodDecl *D) {
  Consumer.HandleInlineMethodDefinition(D);
}

static bool ShouldWarnAboutMissingPrototype(
    const FunctionDecl *FD, const FunctionDecl*& PossibleZeroParamPrototype) {
  // Don't warn about invalid declarations.
  if (FD->isInvalidDecl())
    return false;

  // Or declarations that aren't global.
  if (!FD->isGlobal())
    return false;

  // Don't warn about C++ member functions.
  if (isa<CXXMethodDecl>(FD))
    return false;

  // Don't warn about 'main'.
  if (FD->isMain())
    return false;

  // Don't warn about inline functions.
  if (FD->isInlined())
    return false;

  // Don't warn about function templates.
  if (FD->getDescribedFunctionTemplate())
    return false;

  // Don't warn about function template specializations.
  if (FD->isFunctionTemplateSpecialization())
    return false;

  // Don't warn for OpenCL kernels.
  if (FD->hasAttr<OpenCLKernelAttr>())
    return false;

  // Don't warn on explicitly deleted functions.
  if (FD->isDeleted())
    return false;

  bool MissingPrototype = true;
  for (const FunctionDecl *Prev = FD->getPreviousDecl();
       Prev; Prev = Prev->getPreviousDecl()) {
    // Ignore any declarations that occur in function or method
    // scope, because they aren't visible from the header.
    if (Prev->getLexicalDeclContext()->isFunctionOrMethod())
      continue;

    MissingPrototype = !Prev->getType()->isFunctionProtoType();
    if (FD->getNumParams() == 0)
      PossibleZeroParamPrototype = Prev;
    break;
  }

  return MissingPrototype;
}

void
Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
                                   const FunctionDecl *EffectiveDefinition) {
  // Don't complain if we're in GNU89 mode and the previous definition
  // was an extern inline function.
  const FunctionDecl *Definition = EffectiveDefinition;
  if (!Definition)
    if (!FD->isDefined(Definition))
      return;

  if (canRedefineFunction(Definition, getLangOpts()))
    return;

  // If we don't have a visible definition of the function, and it's inline or
  // a template, it's OK to form another definition of it.
  //
  // FIXME: Should we skip the body of the function and use the old definition
  // in this case? That may be necessary for functions that return local types
  // through a deduced return type, or instantiate templates with local types.
  if (!hasVisibleDefinition(Definition) &&
      (Definition->getFormalLinkage() == InternalLinkage ||
       Definition->isInlined() ||
       Definition->getDescribedFunctionTemplate() ||
       Definition->getNumTemplateParameterLists()))
    return;

  if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
      Definition->getStorageClass() == SC_Extern)
    Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
        << FD->getDeclName() << getLangOpts().CPlusPlus;
  else
    Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();

  Diag(Definition->getLocation(), diag::note_previous_definition);
  FD->setInvalidDecl();
}

static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
                                   Sema &S) {
  CXXRecordDecl *const LambdaClass = CallOperator->getParent();

  LambdaScopeInfo *LSI = S.PushLambdaScope();
  LSI->CallOperator = CallOperator;
  LSI->Lambda = LambdaClass;
  LSI->ReturnType = CallOperator->getReturnType();
  const LambdaCaptureDefault LCD = LambdaClass->getLambdaCaptureDefault();

  if (LCD == LCD_None)
    LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_None;
  else if (LCD == LCD_ByCopy)
    LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByval;
  else if (LCD == LCD_ByRef)
    LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByref;
  DeclarationNameInfo DNI = CallOperator->getNameInfo();

  LSI->IntroducerRange = DNI.getCXXOperatorNameRange();
  LSI->Mutable = !CallOperator->isConst();

  // Add the captures to the LSI so they can be noted as already
  // captured within tryCaptureVar.
  auto I = LambdaClass->field_begin();
  for (const auto &C : LambdaClass->captures()) {
    if (C.capturesVariable()) {
      VarDecl *VD = C.getCapturedVar();
      if (VD->isInitCapture())
        S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
      QualType CaptureType = VD->getType();
      const bool ByRef = C.getCaptureKind() == LCK_ByRef;
      LSI->addCapture(VD, /*IsBlock*/false, ByRef,
          /*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
          /*EllipsisLoc*/C.isPackExpansion() ?
                         C.getEllipsisLoc() : SourceLocation(),
          CaptureType, /*Expr*/ nullptr);
    } else if (C.capturesThis()) {
      LSI->addThisCapture(/*Nested*/ false, C.getLocation(),
                          S.getCurrentThisType(), /*Expr*/ nullptr);
    } else {
      LSI->addVLATypeCapture(C.getLocation(), I->getType());
    }
    ++I;
  }
}

Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
  // Clear the last template instantiation error context.
  LastTemplateInstantiationErrorContext = ActiveTemplateInstantiation();

  if (!D)
    return D;
  FunctionDecl *FD = nullptr;

  if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
    FD = FunTmpl->getTemplatedDecl();
  else
    FD = cast<FunctionDecl>(D);

  // If we are instantiating a generic lambda call operator, push
  // a LambdaScopeInfo onto the function stack. But use the information
  // that's already been calculated (ActOnLambdaExpr) to prime the current
  // LambdaScopeInfo.
  // When the template operator is being specialized, the LambdaScopeInfo
  // has to be properly restored so that tryCaptureVariable doesn't try
  // and capture any new variables. In addition when calculating potential
  // captures during transformation of nested lambdas, it is necessary to
  // have the LSI properly restored.
  if (isGenericLambdaCallOperatorSpecialization(FD)) {
    assert(ActiveTemplateInstantiations.size() &&
           "There should be an active template instantiation on the stack "
           "when instantiating a generic lambda!");
    RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D), *this);
  } else
    // Enter a new function scope
    PushFunctionScope();

  // See if this is a redefinition.
  if (!FD->isLateTemplateParsed())
    CheckForFunctionRedefinition(FD);

  // Builtin functions cannot be defined.
  if (unsigned BuiltinID = FD->getBuiltinID()) {
    if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
        !Context.BuiltinInfo.isPredefinedRuntimeFunction(BuiltinID)) {
      Diag(FD->getLocation(), diag::err_builtin_definition) << FD;
      FD->setInvalidDecl();
    }
  }

  // The return type of a function definition must be complete
  // (C99 6.9.1p3, C++ [dcl.fct]p6).
  QualType ResultType = FD->getReturnType();
  if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
      !FD->isInvalidDecl() &&
      RequireCompleteType(FD->getLocation(), ResultType,
                          diag::err_func_def_incomplete_result))
    FD->setInvalidDecl();

  if (FnBodyScope)
    PushDeclContext(FnBodyScope, FD);

  // Check the validity of our function parameters
  CheckParmsForFunctionDef(FD->param_begin(), FD->param_end(),
                           /*CheckParameterNames=*/true);

  // Introduce our parameters into the function scope
  for (auto Param : FD->params()) {
    Param->setOwningFunction(FD);

    // If this has an identifier, add it to the scope stack.
    if (Param->getIdentifier() && FnBodyScope) {
      CheckShadow(FnBodyScope, Param);

      PushOnScopeChains(Param, FnBodyScope);
    }
  }

  // If we had any tags defined in the function prototype,
  // introduce them into the function scope.
  if (FnBodyScope) {
    for (ArrayRef<NamedDecl *>::iterator
             I = FD->getDeclsInPrototypeScope().begin(),
             E = FD->getDeclsInPrototypeScope().end();
         I != E; ++I) {
      NamedDecl *D = *I;

      // Some of these decls (like enums) may have been pinned to the
      // translation unit for lack of a real context earlier. If so, remove
      // from the translation unit and reattach to the current context.
      if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
        // Is the decl actually in the context?
        for (const auto *DI : Context.getTranslationUnitDecl()->decls()) {
          if (DI == D) {
            Context.getTranslationUnitDecl()->removeDecl(D);
            break;
          }
        }
        // Either way, reassign the lexical decl context to our FunctionDecl.
        D->setLexicalDeclContext(CurContext);
      }

      // If the decl has a non-null name, make accessible in the current
      // scope.
      if (!D->getName().empty())
        PushOnScopeChains(D, FnBodyScope, /*AddToContext=*/false);

      // Similarly, dive into enums and fish their constants out, making them
      // accessible in this scope.
      if (auto *ED = dyn_cast<EnumDecl>(D)) {
        for (auto *EI : ED->enumerators())
          PushOnScopeChains(EI, FnBodyScope, /*AddToContext=*/false);
      }
    }
  }

  // Ensure that the function's exception specification is instantiated.
  if (const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>())
    ResolveExceptionSpec(D->getLocation(), FPT);

  // dllimport cannot be applied to non-inline function definitions.
  if (FD->hasAttr<DLLImportAttr>() && !FD->isInlined() &&
      !FD->isTemplateInstantiation()) {
    assert(!FD->hasAttr<DLLExportAttr>());
    Diag(FD->getLocation(), diag::err_attribute_dllimport_function_definition);
    FD->setInvalidDecl();
    return D;
  }
  // We want to attach documentation to original Decl (which might be
  // a function template).
  ActOnDocumentableDecl(D);
  if (getCurLexicalContext()->isObjCContainer() &&
      getCurLexicalContext()->getDeclKind() != Decl::ObjCCategoryImpl &&
      getCurLexicalContext()->getDeclKind() != Decl::ObjCImplementation)
    Diag(FD->getLocation(), diag::warn_function_def_in_objc_container);

  return D;
}

/// \brief Given the set of return statements within a function body,
/// compute the variables that are subject to the named return value
/// optimization.
///
/// Each of the variables that is subject to the named return value
/// optimization will be marked as NRVO variables in the AST, and any
/// return statement that has a marked NRVO variable as its NRVO candidate can
/// use the named return value optimization.
///
/// This function applies a very simplistic algorithm for NRVO: if every
/// return statement in the scope of a variable has the same NRVO candidate,
/// that candidate is an NRVO variable.
void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
  ReturnStmt **Returns = Scope->Returns.data();
  for (unsigned I = 0, E = Scope->Returns.size(); I != E; ++I) {
    if (const VarDecl *NRVOCandidate = Returns[I]->getNRVOCandidate()) {
      if (!NRVOCandidate->isNRVOVariable())
        Returns[I]->setNRVOCandidate(nullptr);
    }
  }
}

bool Sema::canDelayFunctionBody(const Declarator &D) {
  // We can't delay parsing the body of a constexpr function template (yet).
  if (D.getDeclSpec().isConstexprSpecified())
    return false;

  // We can't delay parsing the body of a function template with a deduced
  // return type (yet).
  if (D.getDeclSpec().containsPlaceholderType()) {
    // If the placeholder introduces a non-deduced trailing return type,
    // we can still delay parsing it.
    if (D.getNumTypeObjects()) {
      const auto &Outer = D.getTypeObject(D.getNumTypeObjects() - 1);
      if (Outer.Kind == DeclaratorChunk::Function &&
          Outer.Fun.hasTrailingReturnType()) {
        QualType Ty = GetTypeFromParser(Outer.Fun.getTrailingReturnType());
        return Ty.isNull() || !Ty->isUndeducedType();
      }
    }
    return false;
  }
  return true;
}
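// Example for the delay check above: 'auto f() -> int { ... }' carries a
// placeholder but its trailing return type is already non-deduced, so the
// body may still be delayed; a plain deduced-return 'auto g() { ... }' or a
// constexpr template may not (yet).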
bool Sema::canSkipFunctionBody(Decl *D) {
  // We cannot skip the body of a function (or function template) which is
  // constexpr, since we may need to evaluate its body in order to parse the
  // rest of the file.
  // We cannot skip the body of a function with an undeduced return type,
  // because any callers of that function need to know the type.
  if (const FunctionDecl *FD = D->getAsFunction())
    if (FD->isConstexpr() || FD->getReturnType()->isUndeducedType())
      return false;
  return Consumer.shouldSkipFunctionBody(D);
}

Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
  if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Decl))
    FD->setHasSkippedBody();
  else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(Decl))
    MD->setHasSkippedBody();
  return ActOnFinishFunctionBody(Decl, nullptr);
}

Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
  return ActOnFinishFunctionBody(D, BodyArg, false);
}

Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
                                    bool IsInstantiation) {
  FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;

  sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
  sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;

  if (FD) {
    FD->setBody(Body);

    if (getLangOpts().CPlusPlus14 && !FD->isInvalidDecl() && Body &&
        !FD->isDependentContext() && FD->getReturnType()->isUndeducedType()) {
      // If the function has a deduced result type but contains no 'return'
      // statements, the result type as written must be exactly 'auto', and
      // the deduced result type is 'void'.
      if (!FD->getReturnType()->getAs<AutoType>()) {
        Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
            << FD->getReturnType();
        FD->setInvalidDecl();
      } else {
        // Substitute 'void' for the 'auto' in the type.
        TypeLoc ResultType = getReturnTypeLoc(FD);
        Context.adjustDeducedFunctionResultType(
            FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
      }
    }
#if 0 // HLSL Change Start - no lambda support
    else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
      auto *LSI = getCurLambda();
      if (LSI->HasImplicitReturnType) {
        deduceClosureReturnType(*LSI);

        // C++11 [expr.prim.lambda]p4:
        //   [...] if there are no return statements in the
        //   compound-statement [the deduced type is] the type void
        QualType RetType =
            LSI->ReturnType.isNull() ? Context.VoidTy : LSI->ReturnType;

        // Update the return type to the deduced type.
        const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>();
        FD->setType(Context.getFunctionType(RetType, Proto->getParamTypes(),
                                            Proto->getExtProtoInfo()));
      }
    }
#endif // HLSL Change End - no lambda support

    // The only way to be included in UndefinedButUsed is if there is an
    // ODR use before the definition. Avoid the expensive map lookup if this
    // is the first declaration.
    if (!FD->isFirstDecl() && FD->getPreviousDecl()->isUsed()) {
      if (!FD->isExternallyVisible())
        UndefinedButUsed.erase(FD);
      else if (FD->isInlined() &&
               !LangOpts.GNUInline &&
               (!FD->getPreviousDecl()->hasAttr<GNUInlineAttr>()))
        UndefinedButUsed.erase(FD);
    }

    // If the function implicitly returns zero (like 'main') or is naked,
    // don't complain about missing return statements.
    if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
      WP.disableCheckFallThrough();

    // MSVC permits the use of pure specifier (=0) on function definition,
    // defined at class scope, warn about this non-standard construct.
    if (getLangOpts().MicrosoftExt && FD->isPure() && FD->isCanonicalDecl())
      Diag(FD->getLocation(), diag::ext_pure_function_definition);

    if (!FD->isInvalidDecl()) {
      // Don't diagnose unused parameters of defaulted or deleted functions.
      if (!FD->isDeleted() && !FD->isDefaulted())
        DiagnoseUnusedParameters(FD->param_begin(), FD->param_end());
      DiagnoseSizeOfParametersAndReturnValue(FD->param_begin(),
                                             FD->param_end(),
                                             FD->getReturnType(), FD);
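      // For instance, building with a flag like -Wlarge-by-value-copy=64
      // sets LangOpts.NumLargeByValueCopy, and the call above then flags any
      // POD parameter or return value larger than that many bytes.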
      // If this is a structor, we need a vtable.
      if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
        MarkVTableUsed(FD->getLocation(), Constructor->getParent());
      else if (CXXDestructorDecl *Destructor =
                   dyn_cast<CXXDestructorDecl>(FD))
        MarkVTableUsed(FD->getLocation(), Destructor->getParent());

      // Try to apply the named return value optimization. We have to check
      // if we can do this here because lambdas keep return statements around
      // to deduce an implicit return type.
      if (getLangOpts().CPlusPlus && FD->getReturnType()->isRecordType() &&
          !FD->isDependentContext())
        computeNRVO(Body, getCurFunction());
    }

    // GNU warning -Wmissing-prototypes:
    //   Warn if a global function is defined without a previous
    //   prototype declaration. This warning is issued even if the
    //   definition itself provides a prototype. The aim is to detect
    //   global functions that fail to be declared in header files.
    const FunctionDecl *PossibleZeroParamPrototype = nullptr;
    if (ShouldWarnAboutMissingPrototype(FD, PossibleZeroParamPrototype)) {
      Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;

      if (PossibleZeroParamPrototype) {
        // We found a declaration that is not a prototype,
        // but that could be a zero-parameter prototype.
        if (TypeSourceInfo *TI =
                PossibleZeroParamPrototype->getTypeSourceInfo()) {
          TypeLoc TL = TI->getTypeLoc();
          if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>())
            Diag(PossibleZeroParamPrototype->getLocation(),
                 diag::note_declaration_not_a_prototype)
                << PossibleZeroParamPrototype
                << FixItHint::CreateInsertion(FTL.getRParenLoc(), "void");
        }
      }
    }

    if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
      const CXXMethodDecl *KeyFunction;
      if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) &&
          MD->isVirtual() &&
          (KeyFunction = Context.getCurrentKeyFunction(MD->getParent())) &&
          MD == KeyFunction->getCanonicalDecl()) {
        // Update the key-function state if necessary for this ABI.
        if (FD->isInlined() &&
            !Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
          Context.setNonKeyFunction(MD);

          // If the newly-chosen key function is already defined, then we
          // need to mark the vtable as used retroactively.
          KeyFunction = Context.getCurrentKeyFunction(MD->getParent());
          const FunctionDecl *Definition;
          if (KeyFunction && KeyFunction->isDefined(Definition))
            MarkVTableUsed(Definition->getLocation(), MD->getParent(), true);
        } else {
          // We just defined the key function; mark the vtable as used.
          MarkVTableUsed(FD->getLocation(), MD->getParent(), true);
        }
      }
    }

    assert(
        (FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
        "Function parsing confused");
#if 0 // HLSL Change Starts
  } else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
    assert(MD == getCurMethodDecl() && "Method parsing confused");
    MD->setBody(Body);
    if (!MD->isInvalidDecl()) {
      DiagnoseUnusedParameters(MD->param_begin(), MD->param_end());
      DiagnoseSizeOfParametersAndReturnValue(MD->param_begin(),
                                             MD->param_end(),
                                             MD->getReturnType(), MD);

      if (Body)
        computeNRVO(Body, getCurFunction());
    }
    if (getCurFunction()->ObjCShouldCallSuper) {
      Diag(MD->getLocEnd(), diag::warn_objc_missing_super_call)
          << MD->getSelector().getAsString();
      getCurFunction()->ObjCShouldCallSuper = false;
    }
    if (getCurFunction()->ObjCWarnForNoDesignatedInitChain) {
      const ObjCMethodDecl *InitMethod = nullptr;
      bool isDesignated =
          MD->isDesignatedInitializerForTheInterface(&InitMethod);
      assert(isDesignated && InitMethod);
      (void)isDesignated;

      auto superIsNSObject = [&](const ObjCMethodDecl *MD) {
        auto IFace = MD->getClassInterface();
        if (!IFace)
          return false;
        auto SuperD = IFace->getSuperClass();
        if (!SuperD)
          return false;
        return SuperD->getIdentifier() ==
            NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
      };
      // Don't issue this warning for unavailable inits or direct subclasses
      // of NSObject.
      if (!MD->isUnavailable() && !superIsNSObject(MD)) {
        Diag(MD->getLocation(),
             diag::warn_objc_designated_init_missing_super_call);
        Diag(InitMethod->getLocation(),
             diag::note_objc_designated_init_marked_here);
      }
      getCurFunction()->ObjCWarnForNoDesignatedInitChain = false;
    }
    if (getCurFunction()->ObjCWarnForNoInitDelegation) {
      // Don't issue this warning for unavailable inits.
      if (!MD->isUnavailable())
        Diag(MD->getLocation(),
             diag::warn_objc_secondary_init_missing_init_call);
      getCurFunction()->ObjCWarnForNoInitDelegation = false;
    }
#endif // HLSL Change Ends
  } else {
    return nullptr;
  }

  assert(!getCurFunction()->ObjCShouldCallSuper &&
         "This should only be set for ObjC methods, which should have been "
         "handled in the block above.");

  // Verify and clean out per-function state.
  if (Body && (!FD || !FD->isDefaulted())) {
    // C++ constructors that have function-try-blocks can't have return
    // statements in the handlers of that block. (C++ [except.handle]p14)
    // Verify this.
    if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
      DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));

    // Verify that gotos and switch cases don't jump into scopes illegally.
    if (getCurFunction()->NeedsScopeChecking() &&
        !PP.isCodeCompletionEnabled())
      DiagnoseInvalidJumps(Body);

    if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
      if (!Destructor->getParent()->isDependentType())
        CheckDestructor(Destructor);

      MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
                                             Destructor->getParent());
    }

    // If any errors have occurred, clear out any temporaries that may have
    // been leftover. This ensures that these temporaries won't be picked up
    // for deletion in some later function.
    if (getDiagnostics().hasErrorOccurred() ||
        getDiagnostics().getSuppressAllDiagnostics()) {
      DiscardCleanupsInEvaluationContext();
    }
    if (!getDiagnostics().hasUncompilableErrorOccurred() &&
        !isa<FunctionTemplateDecl>(dcl)) {
      // Since the body is valid, issue any analysis-based warnings that are
      // enabled.
      ActivePolicy = &WP;
    }

    if (!IsInstantiation && FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
        (!CheckConstexprFunctionDecl(FD) ||
         !CheckConstexprFunctionBody(FD, Body)))
      FD->setInvalidDecl();

    if (FD && FD->hasAttr<NakedAttr>()) {
      for (const Stmt *S : Body->children()) {
        if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
          Diag(S->getLocStart(), diag::err_non_asm_stmt_in_naked_function);
          Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
          FD->setInvalidDecl();
          break;
        }
      }
    }

    assert(ExprCleanupObjects.size() ==
               ExprEvalContexts.back().NumCleanupObjects &&
           "Leftover temporaries in function");
    assert(!ExprNeedsCleanups && "Unaccounted cleanups in function");
    assert(MaybeODRUseExprs.empty() &&
           "Leftover expressions for odr-use checking");
  }

  if (!IsInstantiation)
    PopDeclContext();

  PopFunctionScopeInfo(ActivePolicy, dcl);

  // If any errors have occurred, clear out any temporaries that may have
  // been leftover. This ensures that these temporaries won't be picked up
  // for deletion in some later function.
  if (getDiagnostics().hasErrorOccurred()) {
    DiscardCleanupsInEvaluationContext();
  }

  return dcl;
}

/// When we finish delayed parsing of an attribute, we must attach it to the
/// relevant Decl.
void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
                                       ParsedAttributes &Attrs) {
  // Always attach attributes to the underlying decl.
  if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
    D = TD->getTemplatedDecl();
  ProcessDeclAttributeList(S, D, Attrs.getList());

  if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(D))
    if (Method->isStatic())
      checkThisInStaticMemberFunctionAttributes(Method);
}

/// ImplicitlyDefineFunction - An undeclared identifier was used in a function
/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
                                          IdentifierInfo &II, Scope *S) {
  // Before we produce a declaration for an implicitly defined
  // function, see whether there was a locally-scoped declaration of
  // this name as a function or variable. If so, use that
  // (non-visible) declaration, and complain about it.
  if (NamedDecl *ExternCPrev = findLocallyScopedExternCDecl(&II)) {
    Diag(Loc, diag::warn_use_out_of_scope_declaration) << ExternCPrev;
    Diag(ExternCPrev->getLocation(), diag::note_previous_declaration);
    return ExternCPrev;
  }

  // Extension in C99. Legal in C90, but warn about it.
  unsigned diag_id;
  if (II.getName().startswith("__builtin_"))
    diag_id = diag::warn_builtin_unknown;
  else if (getLangOpts().C99)
    diag_id = diag::ext_implicit_function_decl;
  else
    diag_id = diag::warn_implicit_function_decl;
  Diag(Loc, diag_id) << &II;

  // Because typo correction is expensive, only do it if the implicit
  // function declaration is going to be treated as an error.
  if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
    TypoCorrection Corrected;
    if (S && (Corrected = CorrectTypo(
                  DeclarationNameInfo(&II, Loc), LookupOrdinaryName, S,
                  nullptr, llvm::make_unique<DeclFilterCCC<FunctionDecl>>(),
                  CTK_NonError)))
      diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
                   /*ErrorRecovery*/false);
  }

  // Set a Declarator for the implicit definition: int foo();
  const char *Dummy;
  AttributeFactory attrFactory;
  DeclSpec DS(attrFactory);
  unsigned DiagID;
  bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID,
                                  Context.getPrintingPolicy());
  (void)Error; // Silence warning.
  assert(!Error && "Error setting up implicit decl!");
  SourceLocation NoLoc;
  Declarator D(DS, Declarator::BlockContext);
  D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
                                             /*IsAmbiguous=*/false,
                                             /*LParenLoc=*/NoLoc,
                                             /*Params=*/nullptr,
                                             /*NumParams=*/0,
                                             /*EllipsisLoc=*/NoLoc,
                                             /*RParenLoc=*/NoLoc,
                                             /*TypeQuals=*/0,
                                             /*RefQualifierIsLvalueRef=*/true,
                                             /*RefQualifierLoc=*/NoLoc,
                                             /*ConstQualifierLoc=*/NoLoc,
                                             /*VolatileQualifierLoc=*/NoLoc,
                                             /*RestrictQualifierLoc=*/NoLoc,
                                             /*MutableLoc=*/NoLoc,
                                             EST_None,
                                             /*ESpecLoc=*/NoLoc,
                                             /*Exceptions=*/nullptr,
                                             /*ExceptionRanges=*/nullptr,
                                             /*NumExceptions=*/0,
                                             /*NoexceptExpr=*/nullptr,
                                             /*ExceptionSpecTokens=*/nullptr,
                                             Loc, Loc, D),
                DS.getAttributes(),
                SourceLocation());
  D.SetIdentifier(&II, Loc);

  // Insert this function into translation-unit scope.
  DeclContext *PrevDC = CurContext;
  CurContext = Context.getTranslationUnitDecl();

  FunctionDecl *FD = cast<FunctionDecl>(ActOnDeclarator(TUScope, D));
  FD->setImplicit();

  CurContext = PrevDC;

  AddKnownFunctionAttributes(FD);

  return FD;
}

/// \brief Adds any function attributes that we know a priori based on
/// the declaration of this function.
///
/// These attributes can apply both to implicitly-declared builtins
/// (like __builtin___printf_chk) or to library-declared functions
/// like NSLog or printf.
///
/// We need to check for duplicate attributes both here and where user-written
/// attributes are applied to declarations.
void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
  if (FD->isInvalidDecl())
    return;

  // If this is a built-in function, map its builtin attributes to
  // actual attributes.
  if (unsigned BuiltinID = FD->getBuiltinID()) {
    // Handle printf-formatting attributes.
    unsigned FormatIdx;
    bool HasVAListArg;
    if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
      if (!FD->hasAttr<FormatAttr>()) {
        const char *fmt = "printf";
        unsigned int NumParams = FD->getNumParams();
        if (FormatIdx < NumParams && // NumParams may be 0 (e.g. vfprintf)
            FD->getParamDecl(FormatIdx)->getType()->isObjCObjectPointerType())
          fmt = "NSString";
        FD->addAttr(FormatAttr::CreateImplicit(Context,
                                               &Context.Idents.get(fmt),
                                               FormatIdx+1,
                                               HasVAListArg ? 0 : FormatIdx+2,
                                               FD->getLocation()));
      }
    }
    if (Context.BuiltinInfo.isScanfLike(BuiltinID, FormatIdx,
                                        HasVAListArg)) {
      if (!FD->hasAttr<FormatAttr>())
        FD->addAttr(FormatAttr::CreateImplicit(Context,
                                               &Context.Idents.get("scanf"),
                                               FormatIdx+1,
                                               HasVAListArg ? 0 : FormatIdx+2,
                                               FD->getLocation()));
    }
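    // For example, the builtin 'printf' reports FormatIdx == 0 with no
    // va_list argument, so the implicit attribute added above is equivalent
    // to __attribute__((format(printf, 1, 2))).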
    // Mark const if we don't care about errno and that is the only
    // thing preventing the function from being const. This allows
    // IRgen to use LLVM intrinsics for such functions.
    if (!getLangOpts().MathErrno &&
        Context.BuiltinInfo.isConstWithoutErrno(BuiltinID)) {
      if (!FD->hasAttr<ConstAttr>())
        FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
    }

    if (Context.BuiltinInfo.isReturnsTwice(BuiltinID) &&
        !FD->hasAttr<ReturnsTwiceAttr>())
      FD->addAttr(ReturnsTwiceAttr::CreateImplicit(Context,
                                                   FD->getLocation()));
    if (Context.BuiltinInfo.isNoThrow(BuiltinID) &&
        !FD->hasAttr<NoThrowAttr>())
      FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
    if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->hasAttr<ConstAttr>())
      FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
  }

  IdentifierInfo *Name = FD->getIdentifier();
  if (!Name)
    return;
  if ((!getLangOpts().CPlusPlus &&
       FD->getDeclContext()->isTranslationUnit()) ||
      (isa<LinkageSpecDecl>(FD->getDeclContext()) &&
       cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
           LinkageSpecDecl::lang_c)) {
    // Okay: this could be a libc/libm/Objective-C function we know
    // about.
  } else
    return;

  if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
    // FIXME: asprintf and vasprintf aren't C99 functions. Should they be
    // target-specific builtins, perhaps?
    if (!FD->hasAttr<FormatAttr>())
      FD->addAttr(FormatAttr::CreateImplicit(Context,
                                             &Context.Idents.get("printf"), 2,
                                             Name->isStr("vasprintf") ? 0 : 3,
                                             FD->getLocation()));
  }

  if (Name->isStr("__CFStringMakeConstantString")) {
    // We already have a __builtin___CFStringMakeConstantString,
    // but builds that use -fno-constant-cfstrings don't go through that.
    if (!FD->hasAttr<FormatArgAttr>())
      FD->addAttr(FormatArgAttr::CreateImplicit(Context, 1,
                                                FD->getLocation()));
  }
}

TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                                    TypeSourceInfo *TInfo) {
  assert(D.getIdentifier() &&
         "Wrong callback for declspec without declarator");
  assert(!T.isNull() && "GetTypeForDeclarator() returned null type");

  if (!TInfo) {
    assert(D.isInvalidType() && "no declarator info for valid type");
    TInfo = Context.getTrivialTypeSourceInfo(T);
  }

  // Scope manipulation handled by caller.
  TypedefDecl *NewTD = TypedefDecl::Create(Context, CurContext,
                                           D.getLocStart(),
                                           D.getIdentifierLoc(),
                                           D.getIdentifier(),
                                           TInfo);

  // Bail out immediately if we have an invalid declaration.
  if (D.isInvalidType()) {
    NewTD->setInvalidDecl();
    return NewTD;
  }

  if (D.getDeclSpec().isModulePrivateSpecified()) {
    if (CurContext->isFunctionOrMethod())
      Diag(NewTD->getLocation(), diag::err_module_private_local)
          << 2 << NewTD->getDeclName()
          << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
          << FixItHint::CreateRemoval(
                 D.getDeclSpec().getModulePrivateSpecLoc());
    else
      NewTD->setModulePrivate();
  }

  // C++ [dcl.typedef]p8:
  //   If the typedef declaration defines an unnamed class (or
  //   enum), the first typedef-name declared by the declaration
  //   to be that class type (or enum type) is used to denote the
  //   class type (or enum type) for linkage purposes only.
  // We need to check whether the type was declared in the declaration.
  switch (D.getDeclSpec().getTypeSpecType()) {
  case TST_enum:
  case TST_struct:
  case TST_interface:
  case TST_union:
  case TST_class: {
    TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
    setTagNameForLinkagePurposes(tagFromDeclSpec, NewTD);
    break;
  }

  default:
    break;
  }

  return NewTD;
}
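// For example, 'enum E : float' is rejected by the check below, while any
// (possibly cv-qualified) integral underlying type such as 'unsigned short'
// is accepted; dependent underlying types are deferred until instantiation.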
/// \brief Check that this is a valid underlying type for an enum declaration.
bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
  SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
  QualType T = TI->getType();

  if (T->isDependentType())
    return false;

  if (const BuiltinType *BT = T->getAs<BuiltinType>())
    if (BT->isInteger())
      return false;

  Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
  return true;
}

/// Check whether this is a valid redeclaration of a previous enumeration.
/// \return true if the redeclaration was invalid.
bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                                  QualType EnumUnderlyingTy,
                                  const EnumDecl *Prev) {
  bool IsFixed = !EnumUnderlyingTy.isNull();

  if (IsScoped != Prev->isScoped()) {
    Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
        << Prev->isScoped();
    Diag(Prev->getLocation(), diag::note_previous_declaration);
    return true;
  }

  if (IsFixed && Prev->isFixed()) {
    if (!EnumUnderlyingTy->isDependentType() &&
        !Prev->getIntegerType()->isDependentType() &&
        !Context.hasSameUnqualifiedType(EnumUnderlyingTy,
                                        Prev->getIntegerType())) {
      // TODO: Highlight the underlying type of the redeclaration.
      Diag(EnumLoc, diag::err_enum_redeclare_type_mismatch)
          << EnumUnderlyingTy << Prev->getIntegerType();
      Diag(Prev->getLocation(), diag::note_previous_declaration)
          << Prev->getIntegerTypeRange();
      return true;
    }
  } else if (IsFixed != Prev->isFixed()) {
    Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
        << Prev->isFixed();
    Diag(Prev->getLocation(), diag::note_previous_declaration);
    return true;
  }

  return false;
}

/// \brief Get diagnostic %select index for tag kind for
/// redeclaration diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
/// \returns diagnostic %select index.
static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
  switch (Tag) {
  case TTK_Struct: return 0;
  case TTK_Interface: return 1;
  case TTK_Class: return 2;
  default: llvm_unreachable("Invalid tag kind for redecl diagnostic!");
  }
}

/// \brief Determine if tag kind is a class-key compatible with
/// class for redeclaration (class, struct, or __interface).
///
/// \returns true iff the tag kind is compatible.
static bool isClassCompatTagKind(TagTypeKind Tag) {
  return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface;
}

/// \brief Determine whether a tag with a given kind is acceptable
/// as a redeclaration of the given tag declaration.
///
/// \returns true if the new tag kind is acceptable, false otherwise.
bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
                                        TagTypeKind NewTag, bool isDefinition,
                                        SourceLocation NewTagLoc,
                                        const IdentifierInfo *Name) {
  // C++ [dcl.type.elab]p3:
  //   The class-key or enum keyword present in the
  //   elaborated-type-specifier shall agree in kind with the
  //   declaration to which the name in the elaborated-type-specifier
  //   refers. This rule also applies to the form of
  //   elaborated-type-specifier that declares a class-name or
  //   friend class since it can be construed as referring to the
  //   definition of the class. Thus, in any
  //   elaborated-type-specifier, the enum keyword shall be used to
  //   refer to an enumeration (7.2), the union class-key shall be
  //   used to refer to a union (clause 9), and either the class or
  //   struct class-key shall be used to refer to a class (clause 9)
  //   declared using the class or struct class-key.
  TagTypeKind OldTag = Previous->getTagKind();
  if (!isDefinition || !isClassCompatTagKind(NewTag))
    if (OldTag == NewTag)
      return true;
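  // For example, 'class S;' later redeclared as 'struct S' is accepted by the
  // branch below, but produces a struct/class tag-mismatch warning, usually
  // with a fix-it that rewrites the class-key.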
  if (isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)) {
    // Warn about the struct/class tag mismatch.
    bool isTemplate = false;
    if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
      isTemplate = Record->getDescribedClassTemplate();

    if (!ActiveTemplateInstantiations.empty()) {
      // In a template instantiation, do not offer fix-its for tag mismatches
      // since they usually mess up the template instead of fixing the
      // problem.
      Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
          << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
          << getRedeclDiagFromTagKind(OldTag);
      return true;
    }

    if (isDefinition) {
      // On definitions, check previous tags and issue a fix-it for each
      // one that doesn't match the current tag.
      if (Previous->getDefinition()) {
        // Don't suggest fix-its for redefinitions.
        return true;
      }

      bool previousMismatch = false;
      for (auto I : Previous->redecls()) {
        if (I->getTagKind() != NewTag) {
          if (!previousMismatch) {
            previousMismatch = true;
            Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
                << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
                << getRedeclDiagFromTagKind(I->getTagKind());
          }
          Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
              << getRedeclDiagFromTagKind(NewTag)
              << FixItHint::CreateReplacement(
                     I->getInnerLocStart(),
                     TypeWithKeyword::getTagTypeKindName(NewTag));
        }
      }
      return true;
    }

    // Check for a previous definition. If current tag and definition
    // are same type, do nothing. If no definition, but disagree with
    // previous tag type, give a warning, but no fix-it.
    const TagDecl *Redecl = Previous->getDefinition() ?
                            Previous->getDefinition() : Previous;
    if (Redecl->getTagKind() == NewTag) {
      return true;
    }

    Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
        << getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
        << getRedeclDiagFromTagKind(OldTag);
    Diag(Redecl->getLocation(), diag::note_previous_use);

    // If there is a previous definition, suggest a fix-it.
    if (Previous->getDefinition()) {
      Diag(NewTagLoc, diag::note_struct_class_suggestion)
          << getRedeclDiagFromTagKind(Redecl->getTagKind())
          << FixItHint::CreateReplacement(
                 SourceRange(NewTagLoc),
                 TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
    }

    return true;
  }
  return false;
}

/// Add a minimal nested name specifier fixit hint to allow lookup of a tag
/// name from an outer enclosing namespace or file scope inside a friend
/// declaration. This should provide the commented out code in the following
/// snippet:
///   namespace N {
///     struct X;
///     namespace M {
///       struct Y { friend struct /*N::*/ X; };
///     }
///   }
static FixItHint createFriendTagNNSFixIt(Sema &SemaRef, NamedDecl *ND,
                                         Scope *S, SourceLocation NameLoc) {
  // While the decl is in a namespace, do repeated lookup of that name and see
  // if we get the same namespace back. If we do not, continue until
  // translation unit scope, at which point we have a fully qualified NNS.
  SmallVector<IdentifierInfo *, 4> Namespaces;
  DeclContext *DC = ND->getDeclContext()->getRedeclContext();
  for (; !DC->isTranslationUnit(); DC = DC->getParent()) {
    // This tag should be declared in a namespace, which can only be enclosed
    // by other namespaces. Bail if there's an anonymous namespace in the
    // chain.
    NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(DC);
    if (!Namespace || Namespace->isAnonymousNamespace())
      return FixItHint();
    IdentifierInfo *II = Namespace->getIdentifier();
    Namespaces.push_back(II);
    NamedDecl *Lookup = SemaRef.LookupSingleName(
        S, II, NameLoc, Sema::LookupNestedNameSpecifierName);
    if (Lookup == Namespace)
      break;
  }

  // Once we have all the namespaces, reverse them to go outermost first, and
  // build an NNS.
  SmallString<64> Insertion;
  llvm::raw_svector_ostream OS(Insertion);
  if (DC->isTranslationUnit())
    OS << "::";
  std::reverse(Namespaces.begin(), Namespaces.end());
  for (auto *II : Namespaces)
    OS << II->getName() << "::";
  OS.flush();
  return FixItHint::CreateInsertion(NameLoc, Insertion);
}

/// \brief Determine whether a tag originally declared in context \p OldDC can
/// be redeclared with an unqualified name in \p NewDC (assuming name lookup
/// found a declaration in \p OldDC as a previous decl, perhaps through a
/// using-declaration).
static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
                                         DeclContext *NewDC) {
  OldDC = OldDC->getRedeclContext();
  NewDC = NewDC->getRedeclContext();

  if (OldDC->Equals(NewDC))
    return true;

  // In MSVC mode, we allow a redeclaration if the contexts are related
  // (either encloses the other).
  if (S.getLangOpts().MSVCCompat &&
      (OldDC->Encloses(NewDC) || NewDC->Encloses(OldDC)))
    return true;

  return false;
}

/// \brief This is invoked when we see 'struct foo' or 'struct {'. In the
/// former case, Name will be non-null. In the later case, Name will be null.
/// TagSpec indicates what kind of tag this is. TUK indicates whether this is
/// a reference/declaration/definition of a tag.
///
/// \param IsTypeSpecifier \c true if this is a type-specifier (or
/// trailing-type-specifier) other than one in an alias-declaration.
///
/// \param SkipBody If non-null, will be set to indicate if the caller should
/// skip the definition of this tag and treat it as if it were a declaration.
Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                     SourceLocation KWLoc, CXXScopeSpec &SS,
                     IdentifierInfo *Name, SourceLocation NameLoc,
                     AttributeList *Attr, AccessSpecifier AS,
                     SourceLocation ModulePrivateLoc,
                     MultiTemplateParamsArg TemplateParameterLists,
                     bool &OwnedDecl, bool &IsDependent,
                     SourceLocation ScopedEnumKWLoc,
                     bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                     bool IsTypeSpecifier, SkipBodyInfo *SkipBody) {
  // If this is not a definition, it must have a name.
  IdentifierInfo *OrigName = Name;
  assert((Name != nullptr || TUK == TUK_Definition) &&
         "Nameless record must be a definition!");
  assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);

  OwnedDecl = false;
  TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
  bool ScopedEnum = ScopedEnumKWLoc.isValid();

  // FIXME: Check explicit specializations more carefully.
  bool isExplicitSpecialization = false;
  bool Invalid = false;

  // We only need to do this matching if we have template parameters
  // or a scope specifier, which also conveniently avoids this work
  // for non-C++ cases.
  if (TemplateParameterLists.size() > 0 ||
      (SS.isNotEmpty() && TUK != TUK_Reference)) {
    if (TemplateParameterList *TemplateParams =
            MatchTemplateParametersToScopeSpecifier(
                KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
                TUK == TUK_Friend, isExplicitSpecialization, Invalid)) {
      if (Kind == TTK_Enum) {
        Diag(KWLoc, diag::err_enum_template);
        return nullptr;
      }

      if (TemplateParams->size() > 0) {
        // This is a declaration or definition of a class template (which may
        // be a member of another template).

        if (Invalid)
          return nullptr;

        OwnedDecl = false;
        DeclResult Result = CheckClassTemplate(
            S, TagSpec, TUK, KWLoc, SS, Name, NameLoc, Attr,
            TemplateParams, AS, ModulePrivateLoc,
            /*FriendLoc*/SourceLocation(),
            TemplateParameterLists.size()-1,
            TemplateParameterLists.data(), SkipBody);
        return Result.get();
      } else {
        // The "template<>" header is extraneous.
        Diag(TemplateParams->getTemplateLoc(),
             diag::err_template_tag_noparams)
            << TypeWithKeyword::getTagTypeKindName(Kind) << Name;
        isExplicitSpecialization = true;
      }
    }
  }

  // Figure out the underlying type if this is an enum declaration. We need
  // to do this early, because it's needed to detect if this is an
  // incompatible redeclaration.
  llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;

  if (Kind == TTK_Enum) {
    if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum))
      // No underlying type explicitly specified, or we failed to parse the
      // type, default to int.
      EnumUnderlying = Context.IntTy.getTypePtr();
    else if (UnderlyingType.get()) {
      // C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
      // integral type; any cv-qualification is ignored.
      TypeSourceInfo *TI = nullptr;
      GetTypeFromParser(UnderlyingType.get(), &TI);
      EnumUnderlying = TI;

      if (CheckEnumUnderlyingType(TI))
        // Recover by falling back to int.
        EnumUnderlying = Context.IntTy.getTypePtr();

      if (DiagnoseUnexpandedParameterPack(TI->getTypeLoc().getBeginLoc(), TI,
                                          UPPC_FixedUnderlyingType))
        EnumUnderlying = Context.IntTy.getTypePtr();
    } else if (getLangOpts().MSVCCompat)
      // Microsoft enums are always of int type.
      EnumUnderlying = Context.IntTy.getTypePtr();
  }

  // HLSL Change Starts
  if (getLangOpts().HLSLVersion == hlsl::LangStd::v2015 &&
      TUK == TUK_Declaration) {
    Diag(NameLoc, diag::err_hlsl_unsupported_construct)
        << "struct declaration without definition";
  }
  // HLSL Change Ends

  DeclContext *SearchDC = CurContext;
  DeclContext *DC = CurContext;
  bool isStdBadAlloc = false;

  RedeclarationKind Redecl = ForRedeclaration;
  if (TUK == TUK_Friend || TUK == TUK_Reference)
    Redecl = NotForRedeclaration;

  LookupResult Previous(*this, Name, NameLoc, LookupTagName, Redecl);
  if (Name && SS.isNotEmpty()) {
    // We have a nested-name tag ('struct foo::bar').

    // Check for invalid 'foo::'.
    if (SS.isInvalid()) {
      Name = nullptr;
      goto CreateNewDecl;
    }

    // If this is a friend or a reference to a class in a dependent
    // context, don't try to make a decl for it.
    if (TUK == TUK_Friend || TUK == TUK_Reference) {
      DC = computeDeclContext(SS, false);
      if (!DC) {
        IsDependent = true;
        return nullptr;
      }
    } else {
      DC = computeDeclContext(SS, true);
      if (!DC) {
        Diag(SS.getRange().getBegin(), diag::err_dependent_nested_name_spec)
            << SS.getRange();
        return nullptr;
      }
    }

    if (RequireCompleteDeclContext(SS, DC))
      return nullptr;

    SearchDC = DC;
    // Look-up name inside 'foo::'.
    LookupQualifiedName(Previous, DC);

    if (Previous.isAmbiguous())
      return nullptr;

    if (Previous.empty()) {
      // Name lookup did not find anything. However, if the
      // nested-name-specifier refers to the current instantiation,
      // and that current instantiation has any dependent base
      // classes, we might find something at instantiation time: treat
      // this as a dependent elaborated-type-specifier.
      // But this only makes any sense for reference-like lookups.
      if (Previous.wasNotFoundInCurrentInstantiation() &&
          (TUK == TUK_Reference || TUK == TUK_Friend)) {
        IsDependent = true;
        return nullptr;
      }

      // A tag 'foo::bar' must already exist.
      Diag(NameLoc, diag::err_not_tag_in_scope)
          << Kind << Name << DC << SS.getRange();
      Name = nullptr;
      Invalid = true;
      goto CreateNewDecl;
    }
  } else if (Name) {
    // C++14 [class.mem]p14:
    //   If T is the name of a class, then each of the following shall have a
    //   name different from T:
    //     -- every member of class T that is itself a type
    if (TUK != TUK_Reference && TUK != TUK_Friend &&
        DiagnoseClassNameShadow(SearchDC,
                                DeclarationNameInfo(Name, NameLoc)))
      return nullptr;

    // If this is a named struct, check to see if there was a previous
    // forward declaration or definition.
    // FIXME: We're looking into outer scopes here, even when we
    // shouldn't be. Doing so can result in ambiguities that we
    // shouldn't be diagnosing.
    LookupName(Previous, S);

    // When declaring or defining a tag, ignore ambiguities introduced
    // by types using'ed into this scope.
    if (Previous.isAmbiguous() &&
        (TUK == TUK_Definition || TUK == TUK_Declaration)) {
      LookupResult::Filter F = Previous.makeFilter();
      while (F.hasNext()) {
        NamedDecl *ND = F.next();
        if (ND->getDeclContext()->getRedeclContext() != SearchDC)
          F.erase();
      }
      F.done();
    }

    // C++11 [namespace.memdef]p3:
    //   If the name in a friend declaration is neither qualified nor
    //   a template-id and the declaration is a function or an
    //   elaborated-type-specifier, the lookup to determine whether
    //   the entity has been previously declared shall not consider
    //   any scopes outside the innermost enclosing namespace.
    //
    // MSVC doesn't implement the above rule for types, so a friend tag
    // declaration may be a redeclaration of a type declared in an enclosing
    // scope. They do implement this rule for friend functions.
    //
    // Does it matter that this should be by scope instead of by
    // semantic context?
    if (!Previous.empty() && TUK == TUK_Friend) {
      DeclContext *EnclosingNS = SearchDC->getEnclosingNamespaceContext();
      LookupResult::Filter F = Previous.makeFilter();
      bool FriendSawTagOutsideEnclosingNamespace = false;
      while (F.hasNext()) {
        NamedDecl *ND = F.next();
        DeclContext *DC = ND->getDeclContext()->getRedeclContext();
        if (DC->isFileContext() &&
            !EnclosingNS->Encloses(ND->getDeclContext())) {
          if (getLangOpts().MSVCCompat)
            FriendSawTagOutsideEnclosingNamespace = true;
          else
            F.erase();
        }
      }
      F.done();

      // Diagnose this MSVC extension in the easy case where lookup would
      // have unambiguously found something outside the enclosing namespace.
      if (Previous.isSingleResult() &&
          FriendSawTagOutsideEnclosingNamespace) {
        NamedDecl *ND = Previous.getFoundDecl();
        Diag(NameLoc, diag::ext_friend_tag_redecl_outside_namespace)
            << createFriendTagNNSFixIt(*this, ND, S, NameLoc);
      }
    }

    // Note: there used to be some attempt at recovery here.
    if (Previous.isAmbiguous())
      return nullptr;

    if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
      // FIXME: This makes sure that we ignore the contexts associated
      // with C structs, unions, and enums when looking for a matching
      // tag declaration or definition. See the similar lookup tweak
      // in Sema::LookupName; is there a better way to deal with this?
      while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
        SearchDC = SearchDC->getParent();
    }
  }

  if (Previous.isSingleResult() &&
      Previous.getFoundDecl()->isTemplateParameter()) {
    // Maybe we will complain about the shadowed template parameter.
    DiagnoseTemplateParameterShadow(NameLoc, Previous.getFoundDecl());
    // Just pretend that we didn't see the previous declaration.
    Previous.clear();
  }

  if (getLangOpts().CPlusPlus && Name && DC && StdNamespace &&
      DC->Equals(getStdNamespace()) && Name->isStr("bad_alloc")) {
    // This is a declaration of or a reference to "std::bad_alloc".
    isStdBadAlloc = true;

    if (Previous.empty() && StdBadAlloc) {
      // std::bad_alloc has been implicitly declared (but made invisible to
      // name lookup). Fill in this implicit declaration as the previous
      // declaration, so that the declarations get chained appropriately.
      Previous.addDecl(getStdBadAlloc());
    }
  }

  // If we didn't find a previous declaration, and this is a reference
  // (or friend reference), move to the correct scope. In C++, we
  // also need to do a redeclaration lookup there, just in case
  // there's a shadow friend decl.
  if (Name && Previous.empty() &&
      (TUK == TUK_Reference || TUK == TUK_Friend)) {
    if (Invalid) goto CreateNewDecl;
    assert(SS.isEmpty());

    if (TUK == TUK_Reference) {
      // C++ [basic.scope.pdecl]p5:
      //   -- for an elaborated-type-specifier of the form
      //
      //          class-key identifier
      //
      //      if the elaborated-type-specifier is used in the
      //      decl-specifier-seq or parameter-declaration-clause of a
      //      function defined in namespace scope, the identifier is
      //      declared as a class-name in the namespace that contains
      //      the declaration; otherwise, except as a friend
      //      declaration, the identifier is declared in the smallest
      //      non-class, non-function-prototype scope that contains the
      //      declaration.
      //
      // C99 6.7.2.3p8 has a similar (but not identical!) provision for
      // C structs and unions.
      //
      // It is an error in C++ to declare (rather than define) an enum
      // type, including via an elaborated type specifier. We'll
      // diagnose that later; for now, declare the enum in the same
      // scope as we would have picked for any other tag type.
      //
      // GNU C also supports this behavior as part of its incomplete
      // enum types extension, while GNU C++ does not.
      //
      // Find the context where we'll be declaring the tag.
      // FIXME: We would like to maintain the current DeclContext as the
      // lexical context,
      while (!SearchDC->isFileContext() && !SearchDC->isFunctionOrMethod())
        SearchDC = SearchDC->getParent();

      // Find the scope where we'll be declaring the tag.
      while (S->isClassScope() ||
             (getLangOpts().CPlusPlus &&
              S->isFunctionPrototypeScope()) ||
             ((S->getFlags() & Scope::DeclScope) == 0) ||
             (S->getEntity() && S->getEntity()->isTransparentContext()))
        S = S->getParent();
    } else {
      assert(TUK == TUK_Friend);
      // C++ [namespace.memdef]p3:
      //   If a friend declaration in a non-local class first declares a
      //   class or function, the friend class or function is a member of
      //   the innermost enclosing namespace.
      SearchDC = SearchDC->getEnclosingNamespaceContext();
    }

    // In C++, we need to do a redeclaration lookup to properly
    // diagnose some problems.
    if (getLangOpts().CPlusPlus) {
      Previous.setRedeclarationKind(ForRedeclaration);
      LookupQualifiedName(Previous, SearchDC);
    }
  }

  // If we have a known previous declaration to use, then use it.
  if (Previous.empty() && SkipBody && SkipBody->Previous)
    Previous.addDecl(SkipBody->Previous);

  if (!Previous.empty()) {
    NamedDecl *PrevDecl = Previous.getFoundDecl();
    NamedDecl *DirectPrevDecl = Previous.getRepresentativeDecl();

    // It's okay to have a tag decl in the same scope as a typedef
    // which hides a tag decl in the same scope. Finding this
    // insanity with a redeclaration lookup can only actually happen
    // in C++.
// // This is also okay for elaborated-type-specifiers, which is // technically forbidden by the current standard but which is // okay according to the likely resolution of an open issue; // see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407 if (getLangOpts().CPlusPlus) { if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) { if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) { TagDecl *Tag = TT->getDecl(); if (Tag->getDeclName() == Name && Tag->getDeclContext()->getRedeclContext() ->Equals(TD->getDeclContext()->getRedeclContext())) { PrevDecl = Tag; Previous.clear(); Previous.addDecl(Tag); Previous.resolveKind(); } } } } // If this is a redeclaration of a using shadow declaration, it must // declare a tag in the same context. In MSVC mode, we allow a // redefinition if either context is within the other. if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) { auto *OldTag = dyn_cast<TagDecl>(PrevDecl); if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend && isDeclInScope(Shadow, SearchDC, S, isExplicitSpecialization) && !(OldTag && isAcceptableTagRedeclContext( *this, OldTag->getDeclContext(), SearchDC))) { Diag(KWLoc, diag::err_using_decl_conflict_reverse); Diag(Shadow->getTargetDecl()->getLocation(), diag::note_using_decl_target); Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl) << 0; // Recover by ignoring the old declaration. Previous.clear(); goto CreateNewDecl; } } if (TagDecl *PrevTagDecl = dyn_cast<TagDecl>(PrevDecl)) { // If this is a use of a previous tag, or if the tag is already declared // in the same scope (so that the definition/declaration completes or // rementions the tag), reuse the decl. if (TUK == TUK_Reference || TUK == TUK_Friend || isDeclInScope(DirectPrevDecl, SearchDC, S, SS.isNotEmpty() || isExplicitSpecialization)) { // Make sure that this wasn't declared as an enum and now used as a // struct or something similar. if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind, TUK == TUK_Definition, KWLoc, Name)) { bool SafeToContinue = (PrevTagDecl->getTagKind() != TTK_Enum && Kind != TTK_Enum); if (SafeToContinue) Diag(KWLoc, diag::err_use_with_wrong_tag) << Name << FixItHint::CreateReplacement(SourceRange(KWLoc), PrevTagDecl->getKindName()); else Diag(KWLoc, diag::err_use_with_wrong_tag) << Name; Diag(PrevTagDecl->getLocation(), diag::note_previous_use); if (SafeToContinue) Kind = PrevTagDecl->getTagKind(); else { // Recover by making this an anonymous redefinition. Name = nullptr; Previous.clear(); Invalid = true; } } if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) { const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl); // If this is an elaborated-type-specifier for a scoped enumeration, // the 'class' keyword is not necessary and not permitted. if (TUK == TUK_Reference || TUK == TUK_Friend) { if (ScopedEnum) Diag(ScopedEnumKWLoc, diag::err_enum_class_reference) << PrevEnum->isScoped() << FixItHint::CreateRemoval(ScopedEnumKWLoc); return PrevTagDecl; } QualType EnumUnderlyingTy; if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>()) EnumUnderlyingTy = TI->getType().getUnqualifiedType(); else if (const Type *T = EnumUnderlying.dyn_cast<const Type*>()) EnumUnderlyingTy = QualType(T, 0); // All conflicts with previous declarations are recovered by // returning the previous declaration, unless this is a definition, // in which case we want the caller to bail out. if (CheckEnumRedeclaration(NameLoc.isValid() ? 
NameLoc : KWLoc, ScopedEnum, EnumUnderlyingTy, PrevEnum)) return TUK == TUK_Declaration ? PrevTagDecl : nullptr; } // C++11 [class.mem]p1: // A member shall not be declared twice in the member-specification, // except that a nested class or member class template can be declared // and then later defined. if (TUK == TUK_Declaration && PrevDecl->isCXXClassMember() && S->isDeclScope(PrevDecl)) { Diag(NameLoc, diag::ext_member_redeclared); Diag(PrevTagDecl->getLocation(), diag::note_previous_declaration); } if (!Invalid) { // If this is a use, just return the declaration we found, unless // we have attributes. // FIXME: In the future, return a variant or some other clue // for the consumer of this Decl to know it doesn't own it. // For our current ASTs this shouldn't be a problem, but will // need to be changed with DeclGroups. if (!Attr && ((TUK == TUK_Reference && (!PrevTagDecl->getFriendObjectKind() || getLangOpts().MicrosoftExt)) || TUK == TUK_Friend)) return PrevTagDecl; // Diagnose attempts to redefine a tag. if (TUK == TUK_Definition) { if (NamedDecl *Def = PrevTagDecl->getDefinition()) { // If we're defining a specialization and the previous definition // is from an implicit instantiation, don't emit an error // here; we'll catch this in the general case below. bool IsExplicitSpecializationAfterInstantiation = false; if (isExplicitSpecialization) { if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def)) IsExplicitSpecializationAfterInstantiation = RD->getTemplateSpecializationKind() != TSK_ExplicitSpecialization; else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def)) IsExplicitSpecializationAfterInstantiation = ED->getTemplateSpecializationKind() != TSK_ExplicitSpecialization; } NamedDecl *Hidden = nullptr; if (SkipBody && getLangOpts().CPlusPlus && !hasVisibleDefinition(Def, &Hidden)) { // There is a definition of this tag, but it is not visible. We // explicitly make use of C++'s one definition rule here, and // assume that this definition is identical to the hidden one // we already have. Make the existing definition visible and // use it in place of this one. SkipBody->ShouldSkip = true; makeMergedDefinitionVisible(Hidden, KWLoc); return Def; } else if (!IsExplicitSpecializationAfterInstantiation) { // A redeclaration in function prototype scope in C isn't // visible elsewhere, so merely issue a warning. if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope()) Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name; else Diag(NameLoc, diag::err_redefinition) << Name; Diag(Def->getLocation(), diag::note_previous_definition); // If this is a redefinition, recover by making this // struct be anonymous, which will make any later // references get the previous definition. Name = nullptr; Previous.clear(); Invalid = true; } } else { // If the type is currently being defined, complain // about a nested redefinition. auto *TD = Context.getTagDeclType(PrevTagDecl)->getAsTagDecl(); if (TD->isBeingDefined()) { Diag(NameLoc, diag::err_nested_redefinition) << Name; Diag(PrevTagDecl->getLocation(), diag::note_previous_definition); Name = nullptr; Previous.clear(); Invalid = true; } } // Okay, this is definition of a previously declared or referenced // tag. We're going to create a new Decl for it. } // Okay, we're going to make a redeclaration. If this is some kind // of reference, make sure we build the redeclaration in the same DC // as the original, and ignore the current access specifier. 
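      // A sketch of the situation handled here (hypothetical user input, not
      // part of this file):
      //
      //   struct Inner;                          // declared at file scope
      //   class Outer { friend struct Inner; };  // redeclares ::Inner, so the
      //                                          // new decl is built in
      //                                          // Inner's original context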
if (TUK == TUK_Friend || TUK == TUK_Reference) { SearchDC = PrevTagDecl->getDeclContext(); AS = AS_none; } } // If we get here we have (another) forward declaration or we // have a definition. Just create a new decl. } else { // If we get here, this is a definition of a new tag type in a nested // scope, e.g. "struct foo; void bar() { struct foo; }", just create a // new decl/type. We set PrevDecl to NULL so that the entities // have distinct types. Previous.clear(); } // If we get here, we're going to create a new Decl. If PrevDecl // is non-NULL, it's a definition of the tag declared by // PrevDecl. If it's NULL, we have a new definition. // Otherwise, PrevDecl is not a tag, but was found with tag // lookup. This is only actually possible in C++, where a few // things like templates still live in the tag namespace. } else { // Use a better diagnostic if an elaborated-type-specifier // found the wrong kind of type on the first // (non-redeclaration) lookup. if ((TUK == TUK_Reference || TUK == TUK_Friend) && !Previous.isForRedeclaration()) { unsigned Kind = 0; if (isa<TypedefDecl>(PrevDecl)) Kind = 1; else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2; else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3; Diag(NameLoc, diag::err_tag_reference_non_tag) << Kind; Diag(PrevDecl->getLocation(), diag::note_declared_at); Invalid = true; // Otherwise, only diagnose if the declaration is in scope. } else if (!isDeclInScope(DirectPrevDecl, SearchDC, S, SS.isNotEmpty() || isExplicitSpecialization)) { // do nothing // Diagnose implicit declarations introduced by elaborated types. } else if (TUK == TUK_Reference || TUK == TUK_Friend) { unsigned Kind = 0; if (isa<TypedefDecl>(PrevDecl)) Kind = 1; else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2; else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3; Diag(NameLoc, diag::err_tag_reference_conflict) << Kind; Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl; Invalid = true; // Otherwise it's a declaration. Call out a particularly common // case here. } else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(PrevDecl)) { unsigned Kind = 0; if (isa<TypeAliasDecl>(PrevDecl)) Kind = 1; Diag(NameLoc, diag::err_tag_definition_of_typedef) << Name << Kind << TND->getUnderlyingType(); Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl; Invalid = true; // Otherwise, diagnose. } else { // The tag name clashes with something else in the target scope, // issue an error and recover by making this tag be anonymous. Diag(NameLoc, diag::err_redefinition_different_kind) << Name; Diag(PrevDecl->getLocation(), diag::note_previous_definition); Name = nullptr; Invalid = true; } // The existing declaration isn't relevant to us; we're in a // new scope, so clear out the previous declaration. Previous.clear(); } } CreateNewDecl: TagDecl *PrevDecl = nullptr; if (Previous.isSingleResult()) PrevDecl = cast<TagDecl>(Previous.getFoundDecl()); // If there is an identifier, use the location of the identifier as the // location of the decl, otherwise use the location of the struct/union // keyword. SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc; // Otherwise, create a new declaration. If there is a previous // declaration of the same entity, the two will be linked via // PrevDecl. TagDecl *New; bool IsForwardReference = false; if (Kind == TTK_Enum) { // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.: // enum X { A, B, C } D; D should chain to X. 
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, cast_or_null<EnumDecl>(PrevDecl), ScopedEnum, ScopedEnumUsesClassTag, !EnumUnderlying.isNull()); // If this is an undefined enum, warn. if (TUK != TUK_Definition && !Invalid) { TagDecl *Def; if ((getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) && cast<EnumDecl>(New)->isFixed()) { // C++0x: 7.2p2: opaque-enum-declaration. // Conflicts are diagnosed above. Do nothing. } else if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) { Diag(Loc, diag::ext_forward_ref_enum_def) << New; Diag(Def->getLocation(), diag::note_previous_definition); } else { unsigned DiagID = diag::ext_forward_ref_enum; if (getLangOpts().MSVCCompat) DiagID = diag::ext_ms_forward_ref_enum; else if (getLangOpts().CPlusPlus) DiagID = diag::err_forward_ref_enum; Diag(Loc, DiagID); // If this is a forward-declared reference to an enumeration, make a // note of it; we won't actually be introducing the declaration into // the declaration context. if (TUK == TUK_Reference) IsForwardReference = true; } } if (EnumUnderlying) { EnumDecl *ED = cast<EnumDecl>(New); if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>()) ED->setIntegerTypeSourceInfo(TI); else ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0)); ED->setPromotionType(ED->getIntegerType()); } } else { // struct/union/class // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.: // struct X { int A; } D; D should chain to X. if (getLangOpts().CPlusPlus) { // FIXME: Look for a way to use RecordDecl for simple structs. New = CXXRecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name, cast_or_null<CXXRecordDecl>(PrevDecl)); if (isStdBadAlloc && (!StdBadAlloc || getStdBadAlloc()->isImplicit())) StdBadAlloc = cast<CXXRecordDecl>(New); } else New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name, cast_or_null<RecordDecl>(PrevDecl)); } // C++11 [dcl.type]p3: // A type-specifier-seq shall not define a class or enumeration [...]. if (getLangOpts().CPlusPlus && IsTypeSpecifier && TUK == TUK_Definition) { Diag(New->getLocation(), diag::err_type_defined_in_type_specifier) << Context.getTagDeclType(New); Invalid = true; } // Maybe add qualifier info. if (SS.isNotEmpty()) { if (SS.isSet()) { // If this is either a declaration or a definition, check the // nested-name-specifier against the current context. We don't do this // for explicit specializations, because they have similar checking // (with more specific diagnostics) in the call to // CheckMemberSpecialization, below. if (!isExplicitSpecialization && (TUK == TUK_Definition || TUK == TUK_Declaration) && diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc)) Invalid = true; New->setQualifierInfo(SS.getWithLocInContext(Context)); if (TemplateParameterLists.size() > 0) { New->setTemplateParameterListsInfo(Context, TemplateParameterLists.size(), TemplateParameterLists.data()); } } else Invalid = true; } if (RecordDecl *RD = dyn_cast<RecordDecl>(New)) { // Add alignment attributes if necessary; these attributes are checked when // the ASTContext lays out the structure. // // It is important for implementing the correct semantics that this // happen here (in act on tag decl). The #pragma pack stack is // maintained as a result of parser callbacks which can occur at // many points during the parsing of a struct declaration (because // the #pragma tokens are effectively skipped over during the // parsing of the struct). 
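    // For illustration (hypothetical user input, not part of this file), the
    // pack state recorded here is what makes the following lay out 'i' at
    // offset 1:
    //
    //   #pragma pack(push, 1)
    //   struct Packed { char c; int i; };
    //   #pragma pack(pop)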
if (TUK == TUK_Definition) { AddAlignmentAttributesForRecord(RD); AddMsStructLayoutForRecord(RD); } } if (ModulePrivateLoc.isValid()) { if (isExplicitSpecialization) Diag(New->getLocation(), diag::err_module_private_specialization) << 2 << FixItHint::CreateRemoval(ModulePrivateLoc); // __module_private__ does not apply to local classes. However, we only // diagnose this as an error when the declaration specifiers are // freestanding. Here, we just ignore the __module_private__. else if (!SearchDC->isFunctionOrMethod()) New->setModulePrivate(); } // If this is a specialization of a member class (of a class template), // check the specialization. if (isExplicitSpecialization && CheckMemberSpecialization(New, Previous)) Invalid = true; // If we're declaring or defining a tag in function prototype scope in C, // note that this type can only be used within the function and add it to // the list of decls to inject into the function definition scope. if ((Name || Kind == TTK_Enum) && getNonFieldDeclScope(S)->isFunctionPrototypeScope()) { if (getLangOpts().CPlusPlus) { // C++ [dcl.fct]p6: // Types shall not be defined in return or parameter types. if (TUK == TUK_Definition && !IsTypeSpecifier) { Diag(Loc, diag::err_type_defined_in_param_type) << Name; Invalid = true; } } else { Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New); } DeclsInPrototypeScope.push_back(New); } if (Invalid) New->setInvalidDecl(); if (Attr) ProcessDeclAttributeList(S, New, Attr); // Set the lexical context. If the tag has a C++ scope specifier, the // lexical context will be different from the semantic context. New->setLexicalDeclContext(CurContext); // Mark this as a friend decl if applicable. // In Microsoft mode, a friend declaration also acts as a forward // declaration so we always pass true to setObjectOfFriendDecl to make // the tag name visible. if (TUK == TUK_Friend) New->setObjectOfFriendDecl(getLangOpts().MSVCCompat); // Set the access specifier. if (!Invalid && SearchDC->isRecord()) SetMemberAccessSpecifier(New, PrevDecl, AS); if (TUK == TUK_Definition) New->startDefinition(); // If this has an identifier, add it to the scope stack. if (TUK == TUK_Friend) { // We might be replacing an existing declaration in the lookup tables; // if so, borrow its access specifier. if (PrevDecl) New->setAccess(PrevDecl->getAccess()); DeclContext *DC = New->getDeclContext()->getRedeclContext(); DC->makeDeclVisibleInContext(New); if (Name) // can be null along some error paths if (Scope *EnclosingScope = getScopeForDeclContext(S, DC)) PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false); } else if (Name) { S = getNonFieldDeclScope(S); PushOnScopeChains(New, S, !IsForwardReference); if (IsForwardReference) SearchDC->makeDeclVisibleInContext(New); } else { CurContext->addDecl(New); } // If this is the C FILE type, notify the AST context. if (IdentifierInfo *II = New->getIdentifier()) if (!New->isInvalidDecl() && New->getDeclContext()->getRedeclContext()->isTranslationUnit() && II->isStr("FILE")) Context.setFILEDecl(New); if (PrevDecl) mergeDeclAttributes(New, PrevDecl); // If there's a #pragma GCC visibility in scope, set the visibility of this // record. AddPushedVisibilityAttribute(New); OwnedDecl = true; // In C++, don't return an invalid declaration. We can't recover well from // the cases where we make the type anonymous. return (Invalid && getLangOpts().CPlusPlus) ? 
nullptr : New;
}

void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
  AdjustDeclIfTemplate(TagD);
  TagDecl *Tag = cast<TagDecl>(TagD);

  // Enter the tag context.
  PushDeclContext(S, Tag);

  ActOnDocumentableDecl(TagD);

  // If there's a #pragma GCC visibility in scope, set the visibility of this
  // record.
  AddPushedVisibilityAttribute(Tag);
}

Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
  assert(isa<ObjCContainerDecl>(IDecl) &&
         "ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
  DeclContext *OCD = cast<DeclContext>(IDecl);
  assert(getContainingDC(OCD) == CurContext &&
         "The next DeclContext should be lexically contained in the current one.");
  CurContext = OCD;
  return IDecl;
}

void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
                                           SourceLocation FinalLoc,
                                           bool IsFinalSpelledSealed,
                                           SourceLocation LBraceLoc) {
  AdjustDeclIfTemplate(TagD);
  CXXRecordDecl *Record = cast<CXXRecordDecl>(TagD);

  FieldCollector->StartClass();

  if (!Record->getIdentifier())
    return;

  if (FinalLoc.isValid())
    Record->addAttr(new (Context)
                        FinalAttr(FinalLoc, Context, IsFinalSpelledSealed));

  // C++ [class]p2:
  //   [...] The class-name is also inserted into the scope of the
  //   class itself; this is known as the injected-class-name. For
  //   purposes of access checking, the injected-class-name is treated
  //   as if it were a public member name.
  CXXRecordDecl *InjectedClassName =
      CXXRecordDecl::Create(Context, Record->getTagKind(), CurContext,
                            Record->getLocStart(), Record->getLocation(),
                            Record->getIdentifier(),
                            /*PrevDecl=*/nullptr,
                            /*DelayTypeCreation=*/true);
  Context.getTypeDeclType(InjectedClassName, Record);
  InjectedClassName->setImplicit();
  InjectedClassName->setAccess(AS_public);
  if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
    InjectedClassName->setDescribedClassTemplate(Template);
  PushOnScopeChains(InjectedClassName, S);
  assert(InjectedClassName->isInjectedClassName() &&
         "Broken injected-class-name");
}

void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
                                    SourceLocation RBraceLoc) {
  AdjustDeclIfTemplate(TagD);
  TagDecl *Tag = cast<TagDecl>(TagD);
  Tag->setRBraceLoc(RBraceLoc);

  // Make sure we "complete" the definition even if it is invalid.
  if (Tag->isBeingDefined()) {
    assert(Tag->isInvalidDecl() && "We should already have completed it");
    if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
      RD->completeDefinition();
  }

  if (isa<CXXRecordDecl>(Tag))
    FieldCollector->FinishClass();

  // Exit this scope of this tag's definition.
  PopDeclContext();

  if (getCurLexicalContext()->isObjCContainer() &&
      Tag->getDeclContext()->isFileContext())
    Tag->setTopLevelDeclInObjCContainer();

  // Notify the consumer that we've defined a tag.
  if (!Tag->isInvalidDecl())
    Consumer.HandleTagDeclDefinition(Tag);
}

void Sema::ActOnObjCContainerFinishDefinition() {
  // Exit this scope of this interface definition.
  PopDeclContext();
}

void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
  assert(DC == CurContext && "Mismatch of container contexts");
  OriginalLexicalContext = DC;
  ActOnObjCContainerFinishDefinition();
}

void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
  ActOnObjCContainerStartDefinition(cast<Decl>(DC));
  OriginalLexicalContext = nullptr;
}

void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
  AdjustDeclIfTemplate(TagD);
  TagDecl *Tag = cast<TagDecl>(TagD);
  Tag->setInvalidDecl();

  // Make sure we "complete" the definition even if it is invalid.
if (Tag->isBeingDefined()) { if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag)) RD->completeDefinition(); } // We're undoing ActOnTagStartDefinition here, not // ActOnStartCXXMemberDeclarations, so we don't have to mess with // the FieldCollector. PopDeclContext(); } // Note that FieldName may be null for anonymous bitfields. ExprResult Sema::VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth) { // Default to true; that shouldn't confuse checks for emptiness if (ZeroWidth) *ZeroWidth = true; // C99 6.7.2.1p4 - verify the field type. // C++ 9.6p3: A bit-field shall have integral or enumeration type. if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) { // Handle incomplete types with specific error. if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete)) return ExprError(); if (FieldName) return Diag(FieldLoc, diag::err_not_integral_type_bitfield) << FieldName << FieldTy << BitWidth->getSourceRange(); return Diag(FieldLoc, diag::err_not_integral_type_anon_bitfield) << FieldTy << BitWidth->getSourceRange(); } else if (DiagnoseUnexpandedParameterPack(const_cast<Expr *>(BitWidth), UPPC_BitFieldWidth)) return ExprError(); // If the bit-width is type- or value-dependent, don't try to check // it now. if (BitWidth->isValueDependent() || BitWidth->isTypeDependent()) return BitWidth; llvm::APSInt Value; ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value); if (ICE.isInvalid()) return ICE; BitWidth = ICE.get(); if (Value != 0 && ZeroWidth) *ZeroWidth = false; // Zero-width bitfield is ok for anonymous field. if (Value == 0 && FieldName) return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName; if (Value.isSigned() && Value.isNegative()) { if (FieldName) return Diag(FieldLoc, diag::err_bitfield_has_negative_width) << FieldName << Value.toString(10); return Diag(FieldLoc, diag::err_anon_bitfield_has_negative_width) << Value.toString(10); } if (!FieldTy->isDependentType()) { uint64_t TypeSize = Context.getTypeSize(FieldTy); if (Value.getZExtValue() > TypeSize) { if (!getLangOpts().CPlusPlus || IsMsStruct || Context.getTargetInfo().getCXXABI().isMicrosoft()) { if (FieldName) return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_size) << FieldName << (unsigned)Value.getZExtValue() << (unsigned)TypeSize; return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_size) << (unsigned)Value.getZExtValue() << (unsigned)TypeSize; } if (FieldName) Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_size) << FieldName << (unsigned)Value.getZExtValue() << (unsigned)TypeSize; else Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_size) << (unsigned)Value.getZExtValue() << (unsigned)TypeSize; } } return BitWidth; } /// ActOnField - Each field of a C struct/union is passed into this in order /// to create a FieldDecl object for it. Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth) { FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD), DeclStart, D, static_cast<Expr*>(BitfieldWidth), /*InitStyle=*/ICIS_NoInit, AS_public); return Res; } /// HandleField - Analyze a field of a C struct or a C++ data member. 
///
FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
                             SourceLocation DeclStart,
                             Declarator &D, Expr *BitWidth,
                             InClassInitStyle InitStyle,
                             AccessSpecifier AS) {
  IdentifierInfo *II = D.getIdentifier();
  SourceLocation Loc = DeclStart;
  if (II) Loc = D.getIdentifierLoc();

  TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
  QualType T = TInfo->getType();
  if (getLangOpts().CPlusPlus) {
    CheckExtraCXXDefaultArguments(D);

    if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
                                        UPPC_DataMemberType)) {
      D.setInvalidType();
      T = Context.IntTy;
      TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
    }
  }

  // HLSL Changes Start
  if (getLangOpts().HLSL) {
    // DiagnoseHLSLDecl emits its own diagnostics and invalidates the
    // declarator on failure; don't return nullptr here, so that declaration
    // processing can recover and continue.
    DiagnoseHLSLDecl(D, CurContext, BitWidth, TInfo, /*isParameter=*/false);
  }
  // HLSL Changes End

  // TR 18037 does not allow fields to be declared with address spaces.
  if (T.getQualifiers().hasAddressSpace()) {
    Diag(Loc, diag::err_field_with_address_space);
    D.setInvalidType();
  }

  // OpenCL 1.2 spec, s6.9 r:
  // The event type cannot be used to declare a structure or union field.
  if (LangOpts.OpenCL && T->isEventT()) {
    Diag(Loc, diag::err_event_t_struct_field);
    D.setInvalidType();
  }

  DiagnoseFunctionSpecifiers(D.getDeclSpec());

  if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
    Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
         diag::err_invalid_thread)
      << DeclSpec::getSpecifierName(TSCS);

  // Check to see if this name was declared as a member previously
  NamedDecl *PrevDecl = nullptr;
  LookupResult Previous(*this, II, Loc, LookupMemberName, ForRedeclaration);
  LookupName(Previous, S);
  switch (Previous.getResultKind()) {
  case LookupResult::Found:
  case LookupResult::FoundUnresolvedValue:
    PrevDecl = Previous.getAsSingle<NamedDecl>();
    break;

  case LookupResult::FoundOverloaded:
    PrevDecl = Previous.getRepresentativeDecl();
    break;

  case LookupResult::NotFound:
  case LookupResult::NotFoundInCurrentInstantiation:
  case LookupResult::Ambiguous:
    break;
  }
  Previous.suppressDiagnostics();

  if (PrevDecl && PrevDecl->isTemplateParameter()) {
    // Maybe we will complain about the shadowed template parameter.
    DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
    // Just pretend that we didn't see the previous declaration.
    PrevDecl = nullptr;
  }

  if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
    PrevDecl = nullptr;

  bool Mutable =
      (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
  SourceLocation TSSL = D.getLocStart();
  FieldDecl *NewFD =
      CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, InitStyle,
                     TSSL, AS, PrevDecl, &D);

  if (NewFD->isInvalidDecl())
    Record->setInvalidDecl();

  TransferUnusualAttributes(D, NewFD); // HLSL Change

  if (D.getDeclSpec().isModulePrivateSpecified())
    NewFD->setModulePrivate();

  if (NewFD->isInvalidDecl() && PrevDecl) {
    // Don't introduce NewFD into scope; there's already something
    // with the same name in the same scope.
  } else if (II) {
    PushOnScopeChains(NewFD, S);
  } else
    Record->addDecl(NewFD);

  return NewFD;
}

/// \brief Build a new FieldDecl and check its well-formedness.
///
/// This routine builds a new FieldDecl given the field's name, type,
/// record, etc. \p PrevDecl should refer to any previous declaration
/// with the same name and in the same scope as the field to be
/// created.
///
/// \returns a new FieldDecl.
/// /// \todo The Declarator argument is a hack. It will be removed once FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D) { IdentifierInfo *II = Name.getAsIdentifierInfo(); bool InvalidDecl = false; if (D) InvalidDecl = D->isInvalidType(); // If we receive a broken type, recover by assuming 'int' and // marking this declaration as invalid. if (T.isNull()) { InvalidDecl = true; T = Context.IntTy; } QualType EltTy = Context.getBaseElementType(T); if (!EltTy->isDependentType()) { if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) { // Fields of incomplete type force their record to be invalid. Record->setInvalidDecl(); InvalidDecl = true; } else { NamedDecl *Def; EltTy->isIncompleteType(&Def); if (Def && Def->isInvalidDecl()) { Record->setInvalidDecl(); InvalidDecl = true; } } } // OpenCL v1.2 s6.9.c: bitfields are not supported. if (BitWidth && getLangOpts().OpenCL) { Diag(Loc, diag::err_opencl_bitfields); InvalidDecl = true; } // C99 6.7.2.1p8: A member of a structure or union may have any type other // than a variably modified type. if (!InvalidDecl && T->isVariablyModifiedType()) { bool SizeIsNegative; llvm::APSInt Oversized; TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context, SizeIsNegative, Oversized); if (FixedTInfo) { Diag(Loc, diag::warn_illegal_constant_array_size); TInfo = FixedTInfo; T = FixedTInfo->getType(); } else { if (SizeIsNegative) Diag(Loc, diag::err_typecheck_negative_array_size); else if (Oversized.getBoolValue()) Diag(Loc, diag::err_array_too_large) << Oversized.toString(10); else Diag(Loc, diag::err_typecheck_field_variable_size); InvalidDecl = true; } } // Fields can not have abstract class types if (!InvalidDecl && RequireNonAbstractType(Loc, T, diag::err_abstract_type_in_decl, AbstractFieldType)) InvalidDecl = true; bool ZeroWidth = false; if (InvalidDecl) BitWidth = nullptr; // If this is declared as a bit-field, check the bit-field. if (BitWidth) { BitWidth = VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth, &ZeroWidth).get(); if (!BitWidth) { InvalidDecl = true; BitWidth = nullptr; ZeroWidth = false; } } // Check that 'mutable' is consistent with the type of the declaration. if (!InvalidDecl && Mutable) { unsigned DiagID = 0; if (T->isReferenceType()) DiagID = getLangOpts().MSVCCompat ? diag::ext_mutable_reference : diag::err_mutable_reference; else if (T.isConstQualified()) DiagID = diag::err_mutable_const; if (DiagID) { SourceLocation ErrLoc = Loc; if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid()) ErrLoc = D->getDeclSpec().getStorageClassSpecLoc(); Diag(ErrLoc, DiagID); if (DiagID != diag::ext_mutable_reference) { Mutable = false; InvalidDecl = true; } } } // C++11 [class.union]p8 (DR1460): // At most one variant member of a union may have a // brace-or-equal-initializer. 
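  // For example (hypothetical user input, not part of this file): the check
  // below diagnoses a second brace-or-equal-initializer in a union, as in
  //
  //   union U {
  //     int a = 1;
  //     int b = 2;   // error: C++11 [class.union]p8 (DR1460)
  //   };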
  if (InitStyle != ICIS_NoInit)
    checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Record), Loc);

  FieldDecl *NewFD = FieldDecl::Create(Context, Record, TSSL, Loc, II, T,
                                       TInfo, BitWidth, Mutable, InitStyle);
  if (InvalidDecl)
    NewFD->setInvalidDecl();

  if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
    Diag(Loc, diag::err_duplicate_member) << II;
    Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
    NewFD->setInvalidDecl();
  }

  if (!InvalidDecl && getLangOpts().CPlusPlus) {
    if (Record->isUnion()) {
      if (const RecordType *RT = EltTy->getAs<RecordType>()) {
        CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
        if (RDecl->getDefinition()) {
          // C++ [class.union]p1: An object of a class with a non-trivial
          // constructor, a non-trivial copy constructor, a non-trivial
          // destructor, or a non-trivial copy assignment operator
          // cannot be a member of a union, nor can an array of such
          // objects.
          if (CheckNontrivialField(NewFD))
            NewFD->setInvalidDecl();
        }
      }

      // C++ [class.union]p1: If a union contains a member of reference type,
      // the program is ill-formed, except when compiling with MSVC extensions
      // enabled.
      if (EltTy->isReferenceType()) {
        Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
                                    diag::ext_union_member_of_reference_type :
                                    diag::err_union_member_of_reference_type)
          << NewFD->getDeclName() << EltTy;
        if (!getLangOpts().MicrosoftExt)
          NewFD->setInvalidDecl();
      }
    }
  }

  // FIXME: We need to pass in the attributes given an AST
  // representation, not a parser representation.
  if (D) {
    // FIXME: The current scope is almost... but not entirely... correct here.
    ProcessDeclAttributes(getCurScope(), NewFD, *D);

    if (NewFD->hasAttrs())
      CheckAlignasUnderalignment(NewFD);
  }

  // In auto-retain/release, infer strong retention for fields of
  // retainable type.
  if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
    NewFD->setInvalidDecl();

  if (T.isObjCGCWeak())
    Diag(Loc, diag::warn_attribute_weak_on_field);

  NewFD->setAccess(AS);
  return NewFD;
}

bool Sema::CheckNontrivialField(FieldDecl *FD) {
  assert(FD);
  assert(getLangOpts().CPlusPlus && "valid check only for C++");

  if (FD->isInvalidDecl() || FD->getType()->isDependentType())
    return false;

  QualType EltTy = Context.getBaseElementType(FD->getType());
  if (const RecordType *RT = EltTy->getAs<RecordType>()) {
    CXXRecordDecl *RDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (RDecl->getDefinition()) {
      // We check for copy constructors before constructors
      // because otherwise we'll never get complaints about
      // copy constructors.

      CXXSpecialMember member = CXXInvalid;
      // We're required to check for any non-trivial constructors. Since the
      // implicit default constructor is suppressed if there are any
      // user-declared constructors, we just need to check that there is a
      // trivial default constructor and a trivial copy constructor. (We don't
      // worry about move constructors here, since this is a C++98 check.)
      if (RDecl->hasNonTrivialCopyConstructor())
        member = CXXCopyConstructor;
      else if (!RDecl->hasTrivialDefaultConstructor())
        member = CXXDefaultConstructor;
      else if (RDecl->hasNonTrivialCopyAssignment())
        member = CXXCopyAssignment;
      else if (RDecl->hasNonTrivialDestructor())
        member = CXXDestructor;

      if (member != CXXInvalid) {
        if (!getLangOpts().CPlusPlus11 &&
            getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
          // Objective-C++ ARC: it is an error to have a non-trivial field of
          // a union.
          // However, system headers in Objective-C programs
          // occasionally have Objective-C lifetime objects within unions,
          // and rather than cause the program to fail, we make those
          // members unavailable.
          SourceLocation Loc = FD->getLocation();
          if (getSourceManager().isInSystemHeader(Loc)) {
            if (!FD->hasAttr<UnavailableAttr>())
              FD->addAttr(UnavailableAttr::CreateImplicit(Context,
                                "this system field has retaining ownership",
                                Loc));
            return false;
          }
        }

        Diag(FD->getLocation(), getLangOpts().CPlusPlus11 ?
               diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
               diag::err_illegal_union_or_anon_struct_member)
          << (int)FD->getParent()->isUnion() << FD->getDeclName() << member;
        DiagnoseNontrivial(RDecl, member);
        return !getLangOpts().CPlusPlus11;
      }
    }
  }

  return false;
}

/// TranslateIvarVisibility - Translate visibility from a token ID to an
/// AST enum value.
static ObjCIvarDecl::AccessControl
TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
  switch (ivarVisibility) {
  default: llvm_unreachable("Unknown visibility kind");
  case tok::objc_private: return ObjCIvarDecl::Private;
  case tok::objc_public: return ObjCIvarDecl::Public;
  case tok::objc_protected: return ObjCIvarDecl::Protected;
  case tok::objc_package: return ObjCIvarDecl::Package;
  }
}

/// ActOnIvar - Each ivar field of an objective-c class is passed into this
/// in order to create an IvarDecl object for it.
Decl *Sema::ActOnIvar(Scope *S,
                      SourceLocation DeclStart,
                      Declarator &D, Expr *BitfieldWidth,
                      tok::ObjCKeywordKind Visibility) {

  IdentifierInfo *II = D.getIdentifier();
  Expr *BitWidth = (Expr*)BitfieldWidth;
  SourceLocation Loc = DeclStart;
  if (II) Loc = D.getIdentifierLoc();

  // FIXME: Unnamed fields can be handled in various different ways, for
  // example, unnamed unions inject all members into the struct namespace!

  TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
  QualType T = TInfo->getType();

  if (BitWidth) {
    // 6.7.2.1p3, 6.7.2.1p4
    BitWidth = VerifyBitField(Loc, II, T, /*IsMsStruct*/false, BitWidth).get();
    if (!BitWidth)
      D.setInvalidType();
  } else {
    // Not a bitfield.

    // validate II.
  }
  if (T->isReferenceType()) {
    Diag(Loc, diag::err_ivar_reference_type);
    D.setInvalidType();
  }
  // C99 6.7.2.1p8: A member of a structure or union may have any type other
  // than a variably modified type.
  else if (T->isVariablyModifiedType()) {
    Diag(Loc, diag::err_typecheck_ivar_variable_size);
    D.setInvalidType();
  }

  // Get the visibility (access control) for this ivar.
  ObjCIvarDecl::AccessControl ac =
    Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
                                        : ObjCIvarDecl::None;
  // Must set ivar's DeclContext to its enclosing interface.
  ObjCContainerDecl *EnclosingDecl = cast<ObjCContainerDecl>(CurContext);
  if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
    return nullptr;
  ObjCContainerDecl *EnclosingContext;
  if (ObjCImplementationDecl *IMPDecl =
      dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
    if (LangOpts.ObjCRuntime.isFragile()) {
      // Case of ivar declared in an implementation. Context is that of its
      // class.
      EnclosingContext = IMPDecl->getClassInterface();
      assert(EnclosingContext && "Implementation has no class interface!");
    }
    else
      EnclosingContext = EnclosingDecl;
  } else {
    if (ObjCCategoryDecl *CDecl =
        dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
      if (LangOpts.ObjCRuntime.isFragile() || !CDecl->IsClassExtension()) {
        Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
        return nullptr;
      }
    }
    EnclosingContext = EnclosingDecl;
  }

  // Construct the decl.
ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, EnclosingContext, DeclStart, Loc, II, T, TInfo, ac, (Expr *)BitfieldWidth); if (II) { NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName, ForRedeclaration); if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S) && !isa<TagDecl>(PrevDecl)) { Diag(Loc, diag::err_duplicate_member) << II; Diag(PrevDecl->getLocation(), diag::note_previous_declaration); NewID->setInvalidDecl(); } } // Process attributes attached to the ivar. ProcessDeclAttributes(S, NewID, D); if (D.isInvalidType()) NewID->setInvalidDecl(); // In ARC, infer 'retaining' for ivars of retainable type. if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID)) NewID->setInvalidDecl(); if (D.getDeclSpec().isModulePrivateSpecified()) NewID->setModulePrivate(); if (II) { // FIXME: When interfaces are DeclContexts, we'll need to add // these to the interface. S->AddDecl(NewID); IdResolver.AddDecl(NewID); } if (LangOpts.ObjCRuntime.isNonFragile() && !NewID->isInvalidDecl() && isa<ObjCInterfaceDecl>(EnclosingDecl)) Diag(Loc, diag::warn_ivars_in_interface); return NewID; } /// ActOnLastBitfield - This routine handles synthesized bitfields rules for /// class and class extensions. For every class \@interface and class /// extension \@interface, if the last ivar is a bitfield of any type, /// then add an implicit `char :0` ivar to the end of that interface. void Sema::ActOnLastBitfield(SourceLocation DeclLoc, SmallVectorImpl<Decl *> &AllIvarDecls) { if (LangOpts.ObjCRuntime.isFragile() || AllIvarDecls.empty()) return; Decl *ivarDecl = AllIvarDecls[AllIvarDecls.size()-1]; ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(ivarDecl); if (!Ivar->isBitField() || Ivar->getBitWidthValue(Context) == 0) return; ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CurContext); if (!ID) { if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CurContext)) { if (!CD->IsClassExtension()) return; } // No need to add this to end of @implementation. else return; } // All conditions are met. Add a new bitfield to the tail end of ivars. llvm::APInt Zero(Context.getTypeSize(Context.IntTy), 0); Expr * BW = IntegerLiteral::Create(Context, Zero, Context.IntTy, DeclLoc); Ivar = ObjCIvarDecl::Create(Context, cast<ObjCContainerDecl>(CurContext), DeclLoc, DeclLoc, nullptr, Context.CharTy, Context.getTrivialTypeSourceInfo(Context.CharTy, DeclLoc), ObjCIvarDecl::Private, BW, true); AllIvarDecls.push_back(Ivar); } void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *Attr) { assert(EnclosingDecl && "missing record or interface decl"); // If this is an Objective-C @implementation or category and we have // new fields here we should reset the layout of the interface since // it will now change. if (!Fields.empty() && isa<ObjCContainerDecl>(EnclosingDecl)) { ObjCContainerDecl *DC = cast<ObjCContainerDecl>(EnclosingDecl); switch (DC->getKind()) { default: break; case Decl::ObjCCategory: Context.ResetObjCLayout(cast<ObjCCategoryDecl>(DC)->getClassInterface()); break; case Decl::ObjCImplementation: Context. ResetObjCLayout(cast<ObjCImplementationDecl>(DC)->getClassInterface()); break; } } RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl); // Start counting up the number of named members; make sure to include // members of anonymous structs and unions in the total. 
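  // For instance (hypothetical user input, not part of this file): given
  //
  //   struct S { struct { int x; }; int y; };
  //
  // both 'x' (found via its IndirectFieldDecl) and 'y' are counted as named
  // members of S.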
  unsigned NumNamedMembers = 0;
  if (Record) {
    for (const auto *I : Record->decls()) {
      if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
        if (IFD->getDeclName())
          ++NumNamedMembers;
    }
  }

  // Verify that all the fields are okay.
  SmallVector<FieldDecl*, 32> RecFields;

  bool ARCErrReported = false;
  for (ArrayRef<Decl *>::iterator i = Fields.begin(), end = Fields.end();
       i != end; ++i) {
    FieldDecl *FD = cast<FieldDecl>(*i);

    // Get the type for the field.
    const Type *FDTy = FD->getType().getTypePtr();

    if (!FD->isAnonymousStructOrUnion()) {
      // Remember all fields written by the user.
      RecFields.push_back(FD);
    }

    // If the field is already invalid for some reason, don't emit more
    // diagnostics about it.
    if (FD->isInvalidDecl()) {
      EnclosingDecl->setInvalidDecl();
      continue;
    }

    // C99 6.7.2.1p2:
    //   A structure or union shall not contain a member with
    //   incomplete or function type (hence, a structure shall not
    //   contain an instance of itself, but may contain a pointer to
    //   an instance of itself), except that the last member of a
    //   structure with more than one named member may have incomplete
    //   array type; such a structure (and any union containing,
    //   possibly recursively, a member that is such a structure)
    //   shall not be a member of a structure or an element of an
    //   array.
    if (FDTy->isFunctionType()) {
      // Field declared as a function.
      Diag(FD->getLocation(), diag::err_field_declared_as_function)
        << FD->getDeclName();
      FD->setInvalidDecl();
      EnclosingDecl->setInvalidDecl();
      continue;
    } else if (FDTy->isIncompleteArrayType() && Record &&
               ((i + 1 == Fields.end() && !Record->isUnion()) ||
                ((getLangOpts().MicrosoftExt ||
                  getLangOpts().CPlusPlus) &&
                 (i + 1 == Fields.end() || Record->isUnion())))) {
      // Flexible array member.
      // Microsoft and g++ are more permissive regarding flexible arrays.
      // They will accept a flexible array in a union and also
      // as the sole element of a struct/class.
      unsigned DiagID = 0;
      if (Record->isUnion())
        DiagID = getLangOpts().MicrosoftExt
                   ? diag::ext_flexible_array_union_ms
                   : getLangOpts().CPlusPlus
                       ? diag::ext_flexible_array_union_gnu
                       : diag::err_flexible_array_union;
      else if (Fields.size() == 1)
        DiagID = getLangOpts().MicrosoftExt
                   ? diag::ext_flexible_array_empty_aggregate_ms
                   : getLangOpts().CPlusPlus
                       ? diag::ext_flexible_array_empty_aggregate_gnu
                       : NumNamedMembers < 1
                           ? diag::err_flexible_array_empty_aggregate
                           : 0;

      if (DiagID)
        Diag(FD->getLocation(), DiagID) << FD->getDeclName()
                                        << Record->getTagKind();
      // While the layout of types that contain virtual bases is not specified
      // by the C++ standard, both the Itanium and Microsoft C++ ABIs place
      // virtual bases after the derived members. This would make a flexible
      // array member declared at the end of an object not adjacent to the end
      // of the type.
      if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record))
        if (RD->getNumVBases() != 0)
          Diag(FD->getLocation(), diag::err_flexible_array_virtual_base)
            << FD->getDeclName() << Record->getTagKind();
      if (!getLangOpts().C99)
        Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
          << FD->getDeclName() << Record->getTagKind();

      // If the element type has a non-trivial destructor, we would not
      // implicitly destroy the elements, so disallow it for now.
      //
      // FIXME: GCC allows this. We should probably either implicitly delete
      // the destructor of the containing class, or just allow this.
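      // A sketch of what this rejects (hypothetical C++ user input, not part
      // of this file):
      //
      //   struct S { int n; std::string tail[]; };   // error: the elements
      //                                              // of 'tail' would never
      //                                              // be destroyed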
QualType BaseElem = Context.getBaseElementType(FD->getType()); if (!BaseElem->isDependentType() && BaseElem.isDestructedType()) { Diag(FD->getLocation(), diag::err_flexible_array_has_nontrivial_dtor) << FD->getDeclName() << FD->getType(); FD->setInvalidDecl(); EnclosingDecl->setInvalidDecl(); continue; } // Okay, we have a legal flexible array member at the end of the struct. Record->setHasFlexibleArrayMember(true); } else if (!FDTy->isDependentType() && RequireCompleteType(FD->getLocation(), FD->getType(), diag::err_field_incomplete)) { // Incomplete type FD->setInvalidDecl(); EnclosingDecl->setInvalidDecl(); continue; } else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) { if (Record && FDTTy->getDecl()->hasFlexibleArrayMember()) { // A type which contains a flexible array member is considered to be a // flexible array member. Record->setHasFlexibleArrayMember(true); if (!Record->isUnion()) { // If this is a struct/class and this is not the last element, reject // it. Note that GCC supports variable sized arrays in the middle of // structures. if (i + 1 != Fields.end()) Diag(FD->getLocation(), diag::ext_variable_sized_type_in_struct) << FD->getDeclName() << FD->getType(); else { // We support flexible arrays at the end of structs in // other structs as an extension. Diag(FD->getLocation(), diag::ext_flexible_array_in_struct) << FD->getDeclName(); } } } if (isa<ObjCContainerDecl>(EnclosingDecl) && RequireNonAbstractType(FD->getLocation(), FD->getType(), diag::err_abstract_type_in_decl, AbstractIvarType)) { // Ivars can not have abstract class types FD->setInvalidDecl(); } if (Record && FDTTy->getDecl()->hasObjectMember()) Record->setHasObjectMember(true); if (Record && FDTTy->getDecl()->hasVolatileMember()) Record->setHasVolatileMember(true); } else if (FDTy->isObjCObjectType()) { /// A field cannot be an Objective-c object Diag(FD->getLocation(), diag::err_statically_allocated_object) << FixItHint::CreateInsertion(FD->getLocation(), "*"); QualType T = Context.getObjCObjectPointerType(FD->getType()); FD->setType(T); } else if (getLangOpts().ObjCAutoRefCount && Record && !ARCErrReported && (!getLangOpts().CPlusPlus || Record->isUnion())) { // It's an error in ARC if a field has lifetime. // We don't want to report this in a system header, though, // so we just make the field unavailable. // FIXME: that's really not sufficient; we need to make the type // itself invalid to, say, initialize or copy. 
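      // For example (hypothetical ARC user input, not part of this file):
      // outside a system header, this path diagnoses
      //
      //   struct S { id obj; };   // error: 'obj' has __strong lifetime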
      QualType T = FD->getType();
      Qualifiers::ObjCLifetime lifetime = T.getObjCLifetime();
      if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone) {
        SourceLocation loc = FD->getLocation();
        if (getSourceManager().isInSystemHeader(loc)) {
          if (!FD->hasAttr<UnavailableAttr>()) {
            FD->addAttr(UnavailableAttr::CreateImplicit(Context,
                              "this system field has retaining ownership",
                              loc));
          }
        } else {
          Diag(FD->getLocation(), diag::err_arc_objc_object_in_tag)
            << T->isBlockPointerType() << Record->getTagKind();
        }
        ARCErrReported = true;
      }
    } else if (getLangOpts().ObjC1 &&
               getLangOpts().getGC() != LangOptions::NonGC &&
               Record && !Record->hasObjectMember()) {
      if (FD->getType()->isObjCObjectPointerType() ||
          FD->getType().isObjCGCStrong())
        Record->setHasObjectMember(true);
      else if (Context.getAsArrayType(FD->getType())) {
        QualType BaseType = Context.getBaseElementType(FD->getType());
        if (BaseType->isRecordType() &&
            BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
          Record->setHasObjectMember(true);
        else if (BaseType->isObjCObjectPointerType() ||
                 BaseType.isObjCGCStrong())
          Record->setHasObjectMember(true);
      }
    }
    if (Record && FD->getType().isVolatileQualified())
      Record->setHasVolatileMember(true);
    // Keep track of the number of named members.
    if (FD->getIdentifier())
      ++NumNamedMembers;
  }

  // Okay, we successfully defined 'Record'.
  if (Record) {
    bool Completed = false;
    if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
      if (!CXXRecord->isInvalidDecl()) {
        // Set access bits correctly on the directly-declared conversions.
        for (CXXRecordDecl::conversion_iterator
               I = CXXRecord->conversion_begin(),
               E = CXXRecord->conversion_end(); I != E; ++I)
          I.setAccess((*I)->getAccess());

        if (!CXXRecord->isDependentType()) {
          if (CXXRecord->hasUserDeclaredDestructor()) {
            // Adjust user-defined destructor exception spec.
            if (getLangOpts().CPlusPlus11)
              AdjustDestructorExceptionSpec(CXXRecord,
                                            CXXRecord->getDestructor());
          }

          // Add any implicitly-declared members to this class.
          AddImplicitlyDeclaredMembersToClass(CXXRecord);

          // If we have virtual base classes, we may end up finding multiple
          // final overriders for a given virtual function. Check for this
          // problem now.
          if (CXXRecord->getNumVBases()) {
            CXXFinalOverriderMap FinalOverriders;
            CXXRecord->getFinalOverriders(FinalOverriders);

            for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
                                             MEnd = FinalOverriders.end();
                 M != MEnd; ++M) {
              for (OverridingMethods::iterator SO = M->second.begin(),
                                            SOEnd = M->second.end();
                   SO != SOEnd; ++SO) {
                assert(SO->second.size() > 0 &&
                       "Virtual function without overriding functions?");
                if (SO->second.size() == 1)
                  continue;

                // C++ [class.virtual]p2:
                //   In a derived class, if a virtual member function of a base
                //   class subobject has more than one final overrider the
                //   program is ill-formed.
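                // For example (hypothetical user input, not part of this
                // file):
                //
                //   struct A { virtual void f(); };
                //   struct B : virtual A { void f() override; };
                //   struct C : virtual A { void f() override; };
                //   struct D : B, C {};   // error: two final overriders
                //                         // of A::f() in D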
                Diag(Record->getLocation(),
                     diag::err_multiple_final_overriders)
                  << (const NamedDecl *)M->first << Record;
                Diag(M->first->getLocation(),
                     diag::note_overridden_virtual_function);
                for (OverridingMethods::overriding_iterator
                       OM = SO->second.begin(),
                       OMEnd = SO->second.end();
                     OM != OMEnd; ++OM)
                  Diag(OM->Method->getLocation(), diag::note_final_overrider)
                    << (const NamedDecl *)M->first << OM->Method->getParent();

                Record->setInvalidDecl();
              }
            }
            CXXRecord->completeDefinition(&FinalOverriders);
            Completed = true;
          }
        }
      }
    }

    if (!Completed)
      Record->completeDefinition();

    if (Record->hasAttrs()) {
      CheckAlignasUnderalignment(Record);

      if (const MSInheritanceAttr *IA = Record->getAttr<MSInheritanceAttr>())
        checkMSInheritanceAttrOnDefinition(cast<CXXRecordDecl>(Record),
                                           IA->getRange(), IA->getBestCase(),
                                           IA->getSemanticSpelling());
    }

    // Check if the structure/union declaration is a type that can have zero
    // size in C. For C this is a language extension, for C++ it may cause
    // compatibility problems.
    bool CheckForZeroSize;
    if (!getLangOpts().CPlusPlus) {
      CheckForZeroSize = true;
    } else {
      // For C++ filter out types that cannot be referenced in C code.
      CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record);
      CheckForZeroSize =
          CXXRecord->getLexicalDeclContext()->isExternCContext() &&
          !CXXRecord->isDependentType() &&
          CXXRecord->isCLike();
    }
    if (CheckForZeroSize) {
      bool ZeroSize = true;
      bool IsEmpty = true;
      unsigned NonBitFields = 0;
      for (RecordDecl::field_iterator I = Record->field_begin(),
                                      E = Record->field_end();
           (NonBitFields == 0 || ZeroSize) && I != E; ++I) {
        IsEmpty = false;
        if (I->isUnnamedBitfield()) {
          if (I->getBitWidthValue(Context) > 0)
            ZeroSize = false;
        } else {
          ++NonBitFields;
          QualType FieldType = I->getType();
          if (FieldType->isIncompleteType() ||
              !Context.getTypeSizeInChars(FieldType).isZero())
            ZeroSize = false;
        }
      }

      // Empty structs are an extension in C (C99 6.7.2.1p7). They are
      // allowed in C++, but warn if the declaration is inside an
      // extern "C" block.
      if (ZeroSize) {
        Diag(RecLoc, getLangOpts().CPlusPlus ?
                         diag::warn_zero_size_struct_union_in_extern_c :
                         diag::warn_zero_size_struct_union_compat)
          << IsEmpty << Record->isUnion() << (NonBitFields > 1);
      }

      // Structs without named members are an extension in C (C99 6.7.2.1p7),
      // but are accepted by GCC.
      if (NonBitFields == 0 && !getLangOpts().CPlusPlus) {
        Diag(RecLoc, IsEmpty ? diag::ext_empty_struct_union :
                               diag::ext_no_named_members_in_struct_union)
          << Record->isUnion();
      }
    }
  } else {
    ObjCIvarDecl **ClsFields =
      reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
    if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
      ID->setEndOfDefinitionLoc(RBrac);
      // Add ivar's to class's DeclContext.
      for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
        ClsFields[i]->setLexicalDeclContext(ID);
        ID->addDecl(ClsFields[i]);
      }
      // Must enforce the rule that ivars in the base classes may not be
      // duplicates.
      if (ID->getSuperClass())
        DiagnoseDuplicateIvars(ID, ID->getSuperClass());
    } else if (ObjCImplementationDecl *IMPDecl =
                  dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
      assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
      for (unsigned I = 0, N = RecFields.size(); I != N; ++I)
        // An ivar declared in an @implementation never belongs to the
        // implementation; it is only in the implementation's lexical context.
        ClsFields[I]->setLexicalDeclContext(IMPDecl);
      CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
      IMPDecl->setIvarLBraceLoc(LBrac);
      IMPDecl->setIvarRBraceLoc(RBrac);
    } else if (ObjCCategoryDecl *CDecl =
                  dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
      // case of ivars in class extension; all other cases have been
      // reported as errors elsewhere.
      // FIXME. Class extension does not have a LocEnd field.
      // CDecl->setLocEnd(RBrac);
      // Add ivar's to class extension's DeclContext.
      // Diagnose redeclaration of private ivars.
      ObjCInterfaceDecl *IDecl = CDecl->getClassInterface();
      for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
        if (IDecl) {
          if (const ObjCIvarDecl *ClsIvar =
              IDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
            Diag(ClsFields[i]->getLocation(),
                 diag::err_duplicate_ivar_declaration);
            Diag(ClsIvar->getLocation(), diag::note_previous_definition);
            continue;
          }
          for (const auto *Ext : IDecl->known_extensions()) {
            if (const ObjCIvarDecl *ClsExtIvar
                  = Ext->getIvarDecl(ClsFields[i]->getIdentifier())) {
              Diag(ClsFields[i]->getLocation(),
                   diag::err_duplicate_ivar_declaration);
              Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
              continue;
            }
          }
        }
        ClsFields[i]->setLexicalDeclContext(CDecl);
        CDecl->addDecl(ClsFields[i]);
      }
      CDecl->setIvarLBraceLoc(LBrac);
      CDecl->setIvarRBraceLoc(RBrac);
    }
  }

  if (Attr)
    ProcessDeclAttributeList(S, Record, Attr);
}

/// \brief Determine whether the given integral value is representable within
/// the given type T.
static bool isRepresentableIntegerValue(ASTContext &Context,
                                        llvm::APSInt &Value,
                                        QualType T) {
  assert(T->isIntegralType(Context) && "Integral type required!");
  unsigned BitWidth = Context.getIntWidth(T);

  if (Value.isUnsigned() || Value.isNonNegative()) {
    if (T->isSignedIntegerOrEnumerationType())
      --BitWidth;
    return Value.getActiveBits() <= BitWidth;
  }
  return Value.getMinSignedBits() <= BitWidth;
}

// \brief Given an integral type, return the next larger integral type
// (or a NULL type if no such type exists).
static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) {
  // FIXME: Int128/UInt128 support, which also needs to be introduced into
  // enum checking below.
  assert(T->isIntegralType(Context) && "Integral type required!");
  const unsigned NumTypes = 4;
  QualType SignedIntegralTypes[NumTypes] = {
    Context.ShortTy, Context.IntTy, Context.LongTy, Context.LongLongTy
  };
  QualType UnsignedIntegralTypes[NumTypes] = {
    Context.UnsignedShortTy, Context.UnsignedIntTy, Context.UnsignedLongTy,
    Context.UnsignedLongLongTy
  };

  unsigned BitWidth = Context.getTypeSize(T);
  QualType *Types = T->isSignedIntegerOrEnumerationType()?
SignedIntegralTypes : UnsignedIntegralTypes; for (unsigned I = 0; I != NumTypes; ++I) if (Context.getTypeSize(Types[I]) > BitWidth) return Types[I]; return QualType(); } EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *Val) { unsigned IntWidth = Context.getTargetInfo().getIntWidth(); llvm::APSInt EnumVal(IntWidth); QualType EltTy; if (Val && DiagnoseUnexpandedParameterPack(Val, UPPC_EnumeratorValue)) Val = nullptr; if (Val) Val = DefaultLvalueConversion(Val).get(); if (Val) { if (Enum->isDependentType() || Val->isTypeDependent()) EltTy = Context.DependentTy; else { SourceLocation ExpLoc; // HLSL Change - check constant expression for enum if ((getLangOpts().HLSLVersion >= hlsl::LangStd::v2017 || getLangOpts().CPlusPlus11) && Enum->isFixed() && !getLangOpts().MSVCCompat) { // C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the // constant-expression in the enumerator-definition shall be a converted // constant expression of the underlying type. EltTy = Enum->getIntegerType(); ExprResult Converted = CheckConvertedConstantExpression(Val, EltTy, EnumVal, CCEK_Enumerator); if (Converted.isInvalid()) Val = nullptr; else Val = Converted.get(); } else if (!Val->isValueDependent() && !(Val = VerifyIntegerConstantExpression(Val, &EnumVal).get())) { // C99 6.7.2.2p2: Make sure we have an integer constant expression. } else { if (Enum->isFixed()) { EltTy = Enum->getIntegerType(); // In Obj-C and Microsoft mode, require the enumeration value to be // representable in the underlying type of the enumeration. In C++11, // we perform a non-narrowing conversion as part of converted constant // expression checking. if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) { if (getLangOpts().MSVCCompat) { Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy; Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).get(); } else Diag(IdLoc, diag::err_enumerator_too_large) << EltTy; } else Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).get(); } else if (getLangOpts().CPlusPlus) { // C++11 [dcl.enum]p5: // If the underlying type is not fixed, the type of each enumerator // is the type of its initializing value: // - If an initializer is specified for an enumerator, the // initializing value has the same type as the expression. EltTy = Val->getType(); } else { // C99 6.7.2.2p2: // The expression that defines the value of an enumeration constant // shall be an integer constant expression that has a value // representable as an int. // Complain if the value is not representable in an int. if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy)) Diag(IdLoc, diag::ext_enum_value_not_int) << EnumVal.toString(10) << Val->getSourceRange() << (EnumVal.isUnsigned() || EnumVal.isNonNegative()); else if (!Context.hasSameType(Val->getType(), Context.IntTy)) { // Force the type of the expression to 'int'. Val = ImpCastExprToType(Val, Context.IntTy, CK_IntegralCast).get(); } EltTy = Val->getType(); } } } } if (!Val) { if (Enum->isDependentType()) EltTy = Context.DependentTy; else if (!LastEnumConst) { // C++0x [dcl.enum]p5: // If the underlying type is not fixed, the type of each enumerator // is the type of its initializing value: // - If no initializer is specified for the first enumerator, the // initializing value has an unspecified integral type. // // GCC uses 'int' for its unspecified integral type, as does // C99 6.7.2.2p3. 
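    // For example (hypothetical user input, not part of this file): in
    //
    //   enum E { A, B };
    //
    // 'A' has no initializer, so when the enum's underlying type is not
    // fixed it gets this unspecified integral type (int here, matching GCC
    // and C99).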
if (Enum->isFixed()) { EltTy = Enum->getIntegerType(); } else { EltTy = Context.IntTy; } } else { // Assign the last value + 1. EnumVal = LastEnumConst->getInitVal(); ++EnumVal; EltTy = LastEnumConst->getType(); // Check for overflow on increment. if (EnumVal < LastEnumConst->getInitVal()) { // C++0x [dcl.enum]p5: // If the underlying type is not fixed, the type of each enumerator // is the type of its initializing value: // // - Otherwise the type of the initializing value is the same as // the type of the initializing value of the preceding enumerator // unless the incremented value is not representable in that type, // in which case the type is an unspecified integral type // sufficient to contain the incremented value. If no such type // exists, the program is ill-formed. QualType T = getNextLargerIntegralType(Context, EltTy); if (T.isNull() || Enum->isFixed()) { // There is no integral type large enough to represent this // value. Complain, then allow the value to wrap around. EnumVal = LastEnumConst->getInitVal(); EnumVal = EnumVal.zext(EnumVal.getBitWidth() * 2); ++EnumVal; if (Enum->isFixed()) // When the underlying type is fixed, this is ill-formed. Diag(IdLoc, diag::err_enumerator_wrapped) << EnumVal.toString(10) << EltTy; else Diag(IdLoc, diag::ext_enumerator_increment_too_large) << EnumVal.toString(10); } else { EltTy = T; } // Retrieve the last enumerator's value, extend that type to the // type that is supposed to be large enough to represent the incremented // value, then increment. EnumVal = LastEnumConst->getInitVal(); EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType()); EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy)); ++EnumVal; // If we're not in C++, diagnose the overflow of enumerator values, // which in C99 means that the enumerator value is not representable in // an int (C99 6.7.2.2p2). However, we support GCC's extension that // permits enumerator values that are representable in some larger // integral type. if (!getLangOpts().CPlusPlus && !T.isNull()) Diag(IdLoc, diag::warn_enum_value_overflow); } else if (!getLangOpts().CPlusPlus && !isRepresentableIntegerValue(Context, EnumVal, EltTy)) { // Enforce C99 6.7.2.2p2 even when we compute the next value. Diag(IdLoc, diag::ext_enum_value_not_int) << EnumVal.toString(10) << 1; } } } if (!EltTy->isDependentType()) { // Make the enumerator value match the signedness and size of the // enumerator's type. EnumVal = EnumVal.extOrTrunc(Context.getIntWidth(EltTy)); EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType()); } return EnumConstantDecl::Create(Context, Enum, IdLoc, Id, EltTy, Val, EnumVal); } Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc) { if (!(getLangOpts().Modules || getLangOpts().ModulesLocalVisibility) || !getLangOpts().CPlusPlus) return SkipBodyInfo(); // We have an anonymous enum definition. Look up the first enumerator to // determine if we should merge the definition with an existing one and // skip the body.
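// Illustrative example (added note): when modules are enabled and a header // containing 'enum { kRed, kGreen };' has already been loaded from a module, // the lookup below lets us merge with that definition instead of redefining it.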
NamedDecl *PrevDecl = LookupSingleName(S, II, IILoc, LookupOrdinaryName, ForRedeclaration); auto *PrevECD = dyn_cast_or_null<EnumConstantDecl>(PrevDecl); NamedDecl *Hidden; if (PrevECD && !hasVisibleDefinition(cast<NamedDecl>(PrevECD->getDeclContext()), &Hidden)) { SkipBodyInfo Skip; Skip.Previous = Hidden; return Skip; } return SkipBodyInfo(); } Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attr, SourceLocation EqualLoc, Expr *Val) { EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl); EnumConstantDecl *LastEnumConst = cast_or_null<EnumConstantDecl>(lastEnumConst); // The scope passed in may not be a decl scope. Zip up the scope tree until // we find one that is. S = getNonFieldDeclScope(S); // Verify that there isn't already something declared with this name in this // scope. NamedDecl *PrevDecl = LookupSingleName(S, Id, IdLoc, LookupOrdinaryName, ForRedeclaration); if (PrevDecl && PrevDecl->isTemplateParameter()) { // Maybe we will complain about the shadowed template parameter. DiagnoseTemplateParameterShadow(IdLoc, PrevDecl); // Just pretend that we didn't see the previous declaration. PrevDecl = nullptr; } if (PrevDecl) { // When in C++, we may get a TagDecl with the same name; in this case the // enum constant will 'hide' the tag. assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) && "Received TagDecl when not in C++!"); if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S)) { if (isa<EnumConstantDecl>(PrevDecl)) Diag(IdLoc, diag::err_redefinition_of_enumerator) << Id; else Diag(IdLoc, diag::err_redefinition) << Id; Diag(PrevDecl->getLocation(), diag::note_previous_definition); return nullptr; } } // C++ [class.mem]p15: // If T is the name of a class, then each of the following shall have a name // different from T: // - every enumerator of every member of class T that is an unscoped // enumerated type if (!TheEnumDecl->isScoped()) DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(), DeclarationNameInfo(Id, IdLoc)); EnumConstantDecl *New = CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val); if (New) { // Process attributes. if (Attr) ProcessDeclAttributeList(S, New, Attr); // Register this decl in the current scope stack. New->setAccess(TheEnumDecl->getAccess()); PushOnScopeChains(New, S); } ActOnDocumentableDecl(New); return New; } // Returns true when the enum constant should participate in the duplicate-value // warning, and false when its initializer matches one of a few exempted // patterns: // Element2 = Element1 // Element2 = Element1 + 1 // Element2 = Element1 - 1 // where Element2 and Element1 are from the same enum. static bool ValidDuplicateEnum(EnumConstantDecl *ECD, EnumDecl *Enum) { Expr *InitExpr = ECD->getInitExpr(); if (!InitExpr) return true; InitExpr = InitExpr->IgnoreImpCasts(); if (BinaryOperator *BO = dyn_cast<BinaryOperator>(InitExpr)) { if (!BO->isAdditiveOp()) return true; IntegerLiteral *IL = dyn_cast<IntegerLiteral>(BO->getRHS()); if (!IL) return true; if (IL->getValue() != 1) return true; InitExpr = BO->getLHS(); } // This checks if the elements are from the same enum.
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InitExpr); if (!DRE) return true; EnumConstantDecl *EnumConstant = dyn_cast<EnumConstantDecl>(DRE->getDecl()); if (!EnumConstant) return true; if (cast<EnumDecl>(TagDecl::castFromDeclContext(ECD->getDeclContext())) != Enum) return true; return false; } struct DupKey { int64_t val; bool isTombstoneOrEmptyKey; DupKey(int64_t val, bool isTombstoneOrEmptyKey) : val(val), isTombstoneOrEmptyKey(isTombstoneOrEmptyKey) {} }; static DupKey GetDupKey(const llvm::APSInt& Val) { return DupKey(Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue(), false); } struct DenseMapInfoDupKey { static DupKey getEmptyKey() { return DupKey(0, true); } static DupKey getTombstoneKey() { return DupKey(1, true); } static unsigned getHashValue(const DupKey Key) { return (unsigned)(Key.val * 37); } static bool isEqual(const DupKey& LHS, const DupKey& RHS) { return LHS.isTombstoneOrEmptyKey == RHS.isTombstoneOrEmptyKey && LHS.val == RHS.val; } }; // Emits a warning when an element is implicitly given a value that // a previous element has already been assigned. static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements, EnumDecl *Enum, QualType EnumType) { if (S.Diags.isIgnored(diag::warn_duplicate_enum_values, Enum->getLocation())) return; // Avoid anonymous enums if (!Enum->getIdentifier()) return; // Only check for small enums. if (Enum->getNumPositiveBits() > 63 || Enum->getNumNegativeBits() > 64) return; typedef SmallVector<EnumConstantDecl *, 3> ECDVector; typedef SmallVector<ECDVector *, 3> DuplicatesVector; typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector; typedef llvm::DenseMap<DupKey, DeclOrVector, DenseMapInfoDupKey> ValueToVectorMap; DuplicatesVector DupVector; ValueToVectorMap EnumMap; // Populate the EnumMap with all values represented by enum constants without // an initializer. for (unsigned i = 0, e = Elements.size(); i != e; ++i) { EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]); // Null EnumConstantDecl means a previous diagnostic has been emitted for // this constant. Skip this enum since it may be ill-formed. if (!ECD) { return; } if (ECD->getInitExpr()) continue; DupKey Key = GetDupKey(ECD->getInitVal()); DeclOrVector &Entry = EnumMap[Key]; // First time encountering this value. if (Entry.isNull()) Entry = ECD; } // Create vectors for any values that have duplicates. for (unsigned i = 0, e = Elements.size(); i != e; ++i) { EnumConstantDecl *ECD = cast<EnumConstantDecl>(Elements[i]); if (!ValidDuplicateEnum(ECD, Enum)) continue; DupKey Key = GetDupKey(ECD->getInitVal()); DeclOrVector& Entry = EnumMap[Key]; if (Entry.isNull()) continue; if (EnumConstantDecl *D = Entry.dyn_cast<EnumConstantDecl*>()) { // Ensure constants are different. if (D == ECD) continue; // Create new vector and push values onto it. ECDVector *Vec = new ECDVector(); Vec->push_back(D); Vec->push_back(ECD); // Update entry to point to the duplicates vector. Entry = Vec; // Store the vector somewhere we can consult later for quick emission of // diagnostics. DupVector.push_back(Vec); continue; } ECDVector *Vec = Entry.get<ECDVector*>(); // Make sure constants are not added more than once. if (*Vec->begin() == ECD) continue; Vec->push_back(ECD); } // Emit diagnostics.
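// Illustrative example (added note): for 'enum E { A, B = 0 };' the implicit // value of A collides with B, so the loop below emits the warning at A and a // note at B.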
for (DuplicatesVector::iterator DupVectorIter = DupVector.begin(), DupVectorEnd = DupVector.end(); DupVectorIter != DupVectorEnd; ++DupVectorIter) { ECDVector *Vec = *DupVectorIter; assert(Vec->size() > 1 && "ECDVector should have at least 2 elements."); // Emit warning for one enum constant. ECDVector::iterator I = Vec->begin(); S.Diag((*I)->getLocation(), diag::warn_duplicate_enum_values) << (*I)->getName() << (*I)->getInitVal().toString(10) << (*I)->getSourceRange(); ++I; // Emit one note for each of the remaining enum constants with // the same value. for (ECDVector::iterator E = Vec->end(); I != E; ++I) S.Diag((*I)->getLocation(), diag::note_duplicate_element) << (*I)->getName() << (*I)->getInitVal().toString(10) << (*I)->getSourceRange(); delete Vec; } } bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const { FlagEnumAttr *FEAttr = ED->getAttr<FlagEnumAttr>(); assert(FEAttr && "looking for value in non-flag enum"); llvm::APInt FlagMask = ~FEAttr->getFlagBits(); unsigned Width = FlagMask.getBitWidth(); // We will try a zero-extended value for the regular check first. llvm::APInt ExtVal = Val.zextOrSelf(Width); // A value is in a flag enum if either its bits are a subset of the enum's // flag bits (the first condition) or we are allowing masks and the same is // true of its complement (the second condition). When masks are allowed, we // allow the common idiom of ~(enum1 | enum2) to be a valid enum value. // // While it's true that any value could be used as a mask, the assumption is // that a mask will have all of the insignificant bits set. Anything else is // likely a logic error. if (!(FlagMask & ExtVal)) return true; if (AllowMask) { // Try a one-extended value instead. This can happen if the enum is wider // than the constant used, in C with extensions to allow for wider enums. // The mask will still have the correct behaviour, so we give the user the // benefit of the doubt. // // FIXME: This heuristic can cause weird results if the enum was extended // to a larger type and is signed, because then bit-masks of smaller types // that get extended will fall out of range (e.g. ~0x1u). We currently don't // detect that case and will get a false positive for it. In most cases, // though, it can be fixed by making it a signed type (e.g. ~0x1), so it may // be fine just to accept this as a warning. ExtVal |= llvm::APInt::getHighBitsSet(Width, Width - Val.getBitWidth()); if (!(FlagMask & ~ExtVal)) return true; } return false; } void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDeclX, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr) { EnumDecl *Enum = cast<EnumDecl>(EnumDeclX); QualType EnumType = Context.getTypeDeclType(Enum); if (Attr) ProcessDeclAttributeList(S, Enum, Attr); if (Enum->isDependentType()) { for (unsigned i = 0, e = Elements.size(); i != e; ++i) { EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]); if (!ECD) continue; ECD->setType(EnumType); } Enum->completeDefinition(Context.DependentTy, Context.DependentTy, 0, 0); return; } // TODO: If the result value doesn't fit in an int, it must be a long or long // long value. ISO C does not support this, but GCC does as an extension, // emit a warning. 
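// Illustrative example (added note): 'enum { Big = 5000000000 };' does not fit // in a 32-bit int; C99 requires int-representable enumerators, but the code // below accepts it as an extension and gives the enum a wider type.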
unsigned IntWidth = Context.getTargetInfo().getIntWidth(); unsigned CharWidth = Context.getTargetInfo().getCharWidth(); unsigned ShortWidth = Context.getTargetInfo().getShortWidth(); // Verify that all the values are okay, compute the size of the values, and // reverse the list. unsigned NumNegativeBits = 0; unsigned NumPositiveBits = 0; // Keep track of whether all elements have type int. bool AllElementsInt = true; for (unsigned i = 0, e = Elements.size(); i != e; ++i) { EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]); if (!ECD) continue; // Already issued a diagnostic. const llvm::APSInt &InitVal = ECD->getInitVal(); // Keep track of the size of positive and negative values. if (InitVal.isUnsigned() || InitVal.isNonNegative()) NumPositiveBits = std::max(NumPositiveBits, (unsigned)InitVal.getActiveBits()); else NumNegativeBits = std::max(NumNegativeBits, (unsigned)InitVal.getMinSignedBits()); // Keep track of whether every enum element has type int (very common). if (AllElementsInt) AllElementsInt = ECD->getType() == Context.IntTy; } // Figure out the type that should be used for this enum. QualType BestType; unsigned BestWidth; // C++0x N3000 [conv.prom]p3: // An rvalue of an unscoped enumeration type whose underlying // type is not fixed can be converted to an rvalue of the first // of the following types that can represent all the values of // the enumeration: int, unsigned int, long int, unsigned long // int, long long int, or unsigned long long int. // C99 6.4.4.3p2: // An identifier declared as an enumeration constant has type int. // The C99 rule is modified by a gcc extension. QualType BestPromotionType; bool Packed = Enum->hasAttr<PackedAttr>(); // -fshort-enums is the equivalent to specifying the packed attribute on all // enum definitions. if (LangOpts.ShortEnums) Packed = true; if (Enum->isFixed()) { BestType = Enum->getIntegerType(); if (BestType->isPromotableIntegerType()) BestPromotionType = Context.getPromotedIntegerType(BestType); else BestPromotionType = BestType; BestWidth = Context.getIntWidth(BestType); } else if (NumNegativeBits) { // If there is a negative value, figure out the smallest integer type (of // int/long/longlong) that fits. // If it's packed, check also if it fits a char or a short. if (Packed && NumNegativeBits <= CharWidth && NumPositiveBits < CharWidth) { BestType = Context.SignedCharTy; BestWidth = CharWidth; } else if (Packed && NumNegativeBits <= ShortWidth && NumPositiveBits < ShortWidth) { BestType = Context.ShortTy; BestWidth = ShortWidth; } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) { BestType = Context.IntTy; BestWidth = IntWidth; } else { BestWidth = Context.getTargetInfo().getLongWidth(); if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) { BestType = Context.LongTy; } else { BestWidth = Context.getTargetInfo().getLongLongWidth(); if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth) Diag(Enum->getLocation(), diag::ext_enum_too_large); BestType = Context.LongLongTy; } } BestPromotionType = (BestWidth <= IntWidth ? Context.IntTy : BestType); } else { // If there is no negative value, figure out the smallest type that fits // all of the enumerator values. // If it's packed, check also if it fits a char or a short.
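// Illustrative example (added note): with the packed attribute, 'enum E { A = 0, // B = 200 };' has NumPositiveBits == 8, so the branch below lays it out as // 'unsigned char'.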
if (Packed && NumPositiveBits <= CharWidth) { BestType = Context.UnsignedCharTy; BestPromotionType = Context.IntTy; BestWidth = CharWidth; } else if (Packed && NumPositiveBits <= ShortWidth) { BestType = Context.UnsignedShortTy; BestPromotionType = Context.IntTy; BestWidth = ShortWidth; } else if (NumPositiveBits <= IntWidth) { BestType = Context.UnsignedIntTy; BestWidth = IntWidth; BestPromotionType = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus) ? Context.UnsignedIntTy : Context.IntTy; } else if (NumPositiveBits <= (BestWidth = Context.getTargetInfo().getLongWidth())) { BestType = Context.UnsignedLongTy; BestPromotionType = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus) ? Context.UnsignedLongTy : Context.LongTy; } else { BestWidth = Context.getTargetInfo().getLongLongWidth(); assert(NumPositiveBits <= BestWidth && "How could an initializer get larger than ULL?"); BestType = Context.UnsignedLongLongTy; BestPromotionType = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus) ? Context.UnsignedLongLongTy : Context.LongLongTy; } } FlagEnumAttr *FEAttr = Enum->getAttr<FlagEnumAttr>(); if (FEAttr) FEAttr->getFlagBits() = llvm::APInt(BestWidth, 0); // Loop over all of the enumerator constants, changing their types to match // the type of the enum if needed. If we have a flag type, we also prepare the // FlagBits cache. for (auto *D : Elements) { auto *ECD = cast_or_null<EnumConstantDecl>(D); if (!ECD) continue; // Already issued a diagnostic. // Standard C says the enumerators have int type, but we allow, as an // extension, the enumerators to be larger than int size. If each // enumerator value fits in an int, type it as an int, otherwise type it the // same as the enumerator decl itself. This means that in "enum { X = 1U }" // that X has type 'int', not 'unsigned'. // Determine whether the value fits into an int. llvm::APSInt InitVal = ECD->getInitVal(); // If it fits into an integer type, force it. Otherwise force it to match // the enum decl type. QualType NewTy; unsigned NewWidth; bool NewSign; if (!getLangOpts().CPlusPlus && !Enum->isFixed() && isRepresentableIntegerValue(Context, InitVal, Context.IntTy)) { NewTy = Context.IntTy; NewWidth = IntWidth; NewSign = true; } else if (ECD->getType() == BestType) { // Already the right type! if (getLangOpts().CPlusPlus) // C++ [dcl.enum]p4: Following the closing brace of an // enum-specifier, each enumerator has the type of its // enumeration. ECD->setType(EnumType); goto flagbits; } else { NewTy = BestType; NewWidth = BestWidth; NewSign = BestType->isSignedIntegerOrEnumerationType(); } // Adjust the APSInt value. InitVal = InitVal.extOrTrunc(NewWidth); InitVal.setIsSigned(NewSign); ECD->setInitVal(InitVal); // Adjust the Expr initializer and type. if (ECD->getInitExpr() && !Context.hasSameType(NewTy, ECD->getInitExpr()->getType())) ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy, CK_IntegralCast, ECD->getInitExpr(), /*base paths*/ nullptr, VK_RValue)); if (getLangOpts().CPlusPlus) // C++ [dcl.enum]p4: Following the closing brace of an // enum-specifier, each enumerator has the type of its // enumeration. ECD->setType(EnumType); else ECD->setType(NewTy); flagbits: // Check to see if we have a constant with exactly one bit set. Note that x // & (x - 1) will be nonzero if and only if x has more than one bit set. 
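// Illustrative example (added note): x = 0b0100 gives x - 1 = 0b0011 and // x & (x - 1) == 0 (exactly one bit set); x = 0b0110 gives x - 1 = 0b0101 and // x & (x - 1) == 0b0100 != 0 (more than one bit set).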
if (FEAttr) { llvm::APInt ExtVal = InitVal.zextOrSelf(BestWidth); if (ExtVal != 0 && !(ExtVal & (ExtVal - 1))) { FEAttr->getFlagBits() |= ExtVal; } } } if (FEAttr) { for (Decl *D : Elements) { EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(D); if (!ECD) continue; // Already issued a diagnostic. llvm::APSInt InitVal = ECD->getInitVal(); if (InitVal != 0 && !IsValueInFlagEnum(Enum, InitVal, true)) Diag(ECD->getLocation(), diag::warn_flag_enum_constant_out_of_range) << ECD << Enum; } } Enum->completeDefinition(BestType, BestPromotionType, NumPositiveBits, NumNegativeBits); CheckForDuplicateEnumValues(*this, Elements, Enum, EnumType); // Now that the enum type is defined, ensure it's not been underaligned. if (Enum->hasAttrs()) CheckAlignasUnderalignment(Enum); } Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr, SourceLocation StartLoc, SourceLocation EndLoc) { #if 1 // HLSL Change llvm_unreachable("HLSL parser does not produce file scope asm decl"); #else StringLiteral *AsmString = cast<StringLiteral>(expr); FileScopeAsmDecl *New = FileScopeAsmDecl::Create(Context, CurContext, AsmString, StartLoc, EndLoc); CurContext->addDecl(New); return New; #endif // HLSL Change } static void checkModuleImportContext(Sema &S, Module *M, SourceLocation ImportLoc, DeclContext *DC) { if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) { switch (LSD->getLanguage()) { case LinkageSpecDecl::lang_c: if (!M->IsExternC) { S.Diag(ImportLoc, diag::err_module_import_in_extern_c) << M->getFullModuleName(); S.Diag(LSD->getLocStart(), diag::note_module_import_in_extern_c); return; } break; case LinkageSpecDecl::lang_cxx: break; } DC = LSD->getParent(); } while (isa<LinkageSpecDecl>(DC)) DC = DC->getParent(); if (!isa<TranslationUnitDecl>(DC)) { S.Diag(ImportLoc, diag::err_module_import_not_at_top_level) << M->getFullModuleName() << DC; S.Diag(cast<Decl>(DC)->getLocStart(), diag::note_module_import_not_at_top_level) << DC; } } DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path) { Module *Mod = getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible, /*IsIncludeDirective=*/false); if (!Mod) return true; VisibleModules.setVisible(Mod, ImportLoc); checkModuleImportContext(*this, Mod, ImportLoc, CurContext); // FIXME: we should support importing a submodule within a different submodule // of the same top-level module. Until we do, make it an error rather than // silently ignoring the import. if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule) Diag(ImportLoc, diag::err_module_self_import) << Mod->getFullModuleName() << getLangOpts().CurrentModule; else if (Mod->getTopLevelModuleName() == getLangOpts().ImplementationOfModule) Diag(ImportLoc, diag::err_module_import_in_implementation) << Mod->getFullModuleName() << getLangOpts().ImplementationOfModule; SmallVector<SourceLocation, 2> IdentifierLocs; Module *ModCheck = Mod; for (unsigned I = 0, N = Path.size(); I != N; ++I) { // If we've run out of module parents, just drop the remaining identifiers. // We need the length to be consistent. if (!ModCheck) break; ModCheck = ModCheck->Parent; IdentifierLocs.push_back(Path[I].second); } ImportDecl *Import = ImportDecl::Create(Context, Context.getTranslationUnitDecl(), AtLoc.isValid()? 
AtLoc : ImportLoc, Mod, IdentifierLocs); Context.getTranslationUnitDecl()->addDecl(Import); return Import; } void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) { checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext); // Determine whether we're in the #include buffer for a module. The #includes // in that buffer do not qualify as module imports; they're just an // implementation detail of us building the module. // // FIXME: Should we even get ActOnModuleInclude calls for those? bool IsInModuleIncludes = TUKind == TU_Module && getSourceManager().isWrittenInMainFile(DirectiveLoc); // If this module import was due to an inclusion directive, create an // implicit import declaration to capture it in the AST. if (!IsInModuleIncludes) { TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl(); ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU, DirectiveLoc, Mod, DirectiveLoc); TU->addDecl(ImportD); Consumer.HandleImplicitImportDecl(ImportD); } getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc); VisibleModules.setVisible(Mod, DirectiveLoc); } void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) { checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext); if (getLangOpts().ModulesLocalVisibility) VisibleModulesStack.push_back(std::move(VisibleModules)); VisibleModules.setVisible(Mod, DirectiveLoc); } void Sema::ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod) { checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext); if (getLangOpts().ModulesLocalVisibility) { VisibleModules = std::move(VisibleModulesStack.back()); VisibleModulesStack.pop_back(); VisibleModules.setVisible(Mod, DirectiveLoc); } } void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod) { // Bail if we're not allowed to implicitly import a module here. if (isSFINAEContext() || !getLangOpts().ModulesErrorRecovery) return; // Create the implicit import declaration. TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl(); ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU, Loc, Mod, Loc); TU->addDecl(ImportD); Consumer.HandleImplicitImportDecl(ImportD); // Make the module visible. getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, Loc); VisibleModules.setVisible(Mod, Loc); } void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation NameLoc, SourceLocation AliasNameLoc) { NamedDecl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName); AsmLabelAttr *Attr = AsmLabelAttr::CreateImplicit(Context, AliasName->getName(), AliasNameLoc); // If a declaration that: // 1) declares a function or a variable // 2) has external linkage // already exists, add a label attribute to it. if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)) && PrevDecl->hasExternalFormalLinkage()) PrevDecl->addAttr(Attr); // Otherwise, add a label attribute to ExtnameUndeclaredIdentifiers.
else (void)ExtnameUndeclaredIdentifiers.insert(std::make_pair(Name, Attr)); } void Sema::ActOnPragmaWeakID(IdentifierInfo* Name, SourceLocation PragmaLoc, SourceLocation NameLoc) { Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName); if (PrevDecl) { PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc)); } else { (void)WeakUndeclaredIdentifiers.insert( std::pair<IdentifierInfo*,WeakInfo> (Name, WeakInfo((IdentifierInfo*)nullptr, NameLoc))); } } void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation NameLoc, SourceLocation AliasNameLoc) { Decl *PrevDecl = LookupSingleName(TUScope, AliasName, AliasNameLoc, LookupOrdinaryName); WeakInfo W = WeakInfo(Name, NameLoc); if (PrevDecl) { if (!PrevDecl->hasAttr<AliasAttr>()) if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl)) DeclApplyPragmaWeak(TUScope, ND, W); } else { (void)WeakUndeclaredIdentifiers.insert( std::pair<IdentifierInfo*,WeakInfo>(AliasName, W)); } } Decl *Sema::getObjCDeclContext() const { return (dyn_cast_or_null<ObjCContainerDecl>(CurContext)); } AvailabilityResult Sema::getCurContextAvailability() const { const Decl *D = cast_or_null<Decl>(getCurObjCLexicalContext()); if (!D) return AR_Available; // If we are within an Objective-C method, we should consult // both the availability of the method as well as the // enclosing class. If the class is (say) deprecated, // the entire method is considered deprecated from the // purpose of checking if the current context is deprecated. if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { AvailabilityResult R = MD->getAvailability(); if (R != AR_Available) return R; D = MD->getClassInterface(); } // If we are within an Objective-c @implementation, it // gets the same availability context as the @interface. else if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) { D = ID->getClassInterface(); } // Recover from user error. return D ? D->getAvailability() : AR_Available; } // HLSL Change Begin void Sema::DiagnoseSemanticDecl(hlsl::SemanticDecl *Decl) { StringRef SemName = Decl->SemanticName; StringRef BaseSemName; // The 'FOO' in 'FOO1' uint32_t SemIndex; // The '1' in 'FOO1' // Split name and index. hlsl::Semantic::DecomposeNameAndIndex(SemName, &BaseSemName, &SemIndex); // The valid semantic indices for SV_Target[n] are 0 <= n <= 7. if (BaseSemName.equals("SV_Target") && SemIndex > 7) { Diag(Decl->Loc, diag::err_hlsl_unsupported_semantic_index) << SemName << SemIndex << "7"; } } // HLSL Change Ends
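The SV_Target check above relies on hlsl::Semantic::DecomposeNameAndIndex splitting a semantic string into its base name and trailing index. A minimal sketch of such a decomposition, under the assumption that an unindexed semantic maps to index 0; the helper name decomposeSemantic is hypothetical, not the DXC implementation:

#include <cctype>
#include <cstdint>
#include <string>

// Hypothetical helper: split "SV_Target3" into base "SV_Target" and index 3.
// A name with no trailing digits (e.g. "SV_Target") is assumed to get index 0.
static void decomposeSemantic(const std::string &Name, std::string &Base,
                              uint32_t &Index) {
  size_t End = Name.size();
  while (End > 0 && std::isdigit(static_cast<unsigned char>(Name[End - 1])))
    --End;
  Base = Name.substr(0, End);
  Index = End < Name.size()
              ? static_cast<uint32_t>(std::stoul(Name.substr(End)))
              : 0;
}

// Under this split, "SV_Target8" yields ("SV_Target", 8) and would trip the
// index > 7 diagnostic above, while "SV_Target7" is accepted.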
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/MultiplexExternalSemaSource.cpp
//===--- MultiplexExternalSemaSource.cpp ---------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the event dispatching to the subscribed clients. // //===----------------------------------------------------------------------===// #include "clang/Sema/MultiplexExternalSemaSource.h" #include "clang/AST/DeclContextInternals.h" #include "clang/Sema/Lookup.h" // // /////////////////////////////////////////////////////////////////////////////// using namespace clang; ///\brief Constructs a new multiplexing external sema source and appends the /// given element to it. /// ///\param[in] source - An ExternalSemaSource. /// MultiplexExternalSemaSource::MultiplexExternalSemaSource(ExternalSemaSource &s1, ExternalSemaSource &s2){ Sources.push_back(&s1); Sources.push_back(&s2); } // pin the vtable here. MultiplexExternalSemaSource::~MultiplexExternalSemaSource() {} ///\brief Appends new source to the source list. /// ///\param[in] source - An ExternalSemaSource. /// void MultiplexExternalSemaSource::addSource(ExternalSemaSource &source) { Sources.push_back(&source); } //===----------------------------------------------------------------------===// // ExternalASTSource. //===----------------------------------------------------------------------===// Decl *MultiplexExternalSemaSource::GetExternalDecl(uint32_t ID) { for(size_t i = 0; i < Sources.size(); ++i) if (Decl *Result = Sources[i]->GetExternalDecl(ID)) return Result; return nullptr; } void MultiplexExternalSemaSource::CompleteRedeclChain(const Decl *D) { for (size_t i = 0; i < Sources.size(); ++i) Sources[i]->CompleteRedeclChain(D); } Selector MultiplexExternalSemaSource::GetExternalSelector(uint32_t ID) { Selector Sel; for(size_t i = 0; i < Sources.size(); ++i) { Sel = Sources[i]->GetExternalSelector(ID); if (!Sel.isNull()) return Sel; } return Sel; } uint32_t MultiplexExternalSemaSource::GetNumExternalSelectors() { uint32_t total = 0; for(size_t i = 0; i < Sources.size(); ++i) total += Sources[i]->GetNumExternalSelectors(); return total; } Stmt *MultiplexExternalSemaSource::GetExternalDeclStmt(uint64_t Offset) { for(size_t i = 0; i < Sources.size(); ++i) if (Stmt *Result = Sources[i]->GetExternalDeclStmt(Offset)) return Result; return nullptr; } CXXBaseSpecifier *MultiplexExternalSemaSource::GetExternalCXXBaseSpecifiers( uint64_t Offset){ for(size_t i = 0; i < Sources.size(); ++i) if (CXXBaseSpecifier *R = Sources[i]->GetExternalCXXBaseSpecifiers(Offset)) return R; return nullptr; } CXXCtorInitializer ** MultiplexExternalSemaSource::GetExternalCXXCtorInitializers(uint64_t Offset) { for (auto *S : Sources) if (auto *R = S->GetExternalCXXCtorInitializers(Offset)) return R; return nullptr; } bool MultiplexExternalSemaSource:: FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name) { bool AnyDeclsFound = false; for (size_t i = 0; i < Sources.size(); ++i) AnyDeclsFound |= Sources[i]->FindExternalVisibleDeclsByName(DC, Name); return AnyDeclsFound; } void MultiplexExternalSemaSource::completeVisibleDeclsMap(const DeclContext *DC){ for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->completeVisibleDeclsMap(DC); } ExternalLoadResult MultiplexExternalSemaSource:: FindExternalLexicalDecls(const DeclContext *DC, bool (*isKindWeWant)(Decl::Kind), SmallVectorImpl<Decl*> &Result) { for(size_t 
i = 0; i < Sources.size(); ++i) // FIXME: The semantics of the return result is unclear to me... Sources[i]->FindExternalLexicalDecls(DC, isKindWeWant, Result); return ELR_Success; } void MultiplexExternalSemaSource::FindFileRegionDecls(FileID File, unsigned Offset, unsigned Length, SmallVectorImpl<Decl *> &Decls){ for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->FindFileRegionDecls(File, Offset, Length, Decls); } void MultiplexExternalSemaSource::CompleteType(TagDecl *Tag) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->CompleteType(Tag); } void MultiplexExternalSemaSource::CompleteType(ObjCInterfaceDecl *Class) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->CompleteType(Class); } void MultiplexExternalSemaSource::ReadComments() { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadComments(); } void MultiplexExternalSemaSource::StartedDeserializing() { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->StartedDeserializing(); } void MultiplexExternalSemaSource::FinishedDeserializing() { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->FinishedDeserializing(); } void MultiplexExternalSemaSource::StartTranslationUnit(ASTConsumer *Consumer) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->StartTranslationUnit(Consumer); } void MultiplexExternalSemaSource::PrintStats() { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->PrintStats(); } bool MultiplexExternalSemaSource::layoutRecordType(const RecordDecl *Record, uint64_t &Size, uint64_t &Alignment, llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets, llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets, llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets){ for(size_t i = 0; i < Sources.size(); ++i) if (Sources[i]->layoutRecordType(Record, Size, Alignment, FieldOffsets, BaseOffsets, VirtualBaseOffsets)) return true; return false; } void MultiplexExternalSemaSource:: getMemoryBufferSizes(MemoryBufferSizes &sizes) const { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->getMemoryBufferSizes(sizes); } //===----------------------------------------------------------------------===// // ExternalSemaSource. 
//===----------------------------------------------------------------------===// void MultiplexExternalSemaSource::InitializeSema(Sema &S) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->InitializeSema(S); } void MultiplexExternalSemaSource::ForgetSema() { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ForgetSema(); } void MultiplexExternalSemaSource::ReadMethodPool(Selector Sel) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadMethodPool(Sel); } void MultiplexExternalSemaSource::ReadKnownNamespaces( SmallVectorImpl<NamespaceDecl*> &Namespaces){ for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadKnownNamespaces(Namespaces); } void MultiplexExternalSemaSource::ReadUndefinedButUsed( llvm::DenseMap<NamedDecl*, SourceLocation> &Undefined){ for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadUndefinedButUsed(Undefined); } void MultiplexExternalSemaSource::ReadMismatchingDeleteExpressions( llvm::MapVector<FieldDecl *, llvm::SmallVector<std::pair<SourceLocation, bool>, 4>> & Exprs) { for (auto &Source : Sources) Source->ReadMismatchingDeleteExpressions(Exprs); } bool MultiplexExternalSemaSource::LookupUnqualified(LookupResult &R, Scope *S){ for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->LookupUnqualified(R, S); return !R.empty(); } void MultiplexExternalSemaSource::ReadTentativeDefinitions( SmallVectorImpl<VarDecl*> &TentativeDefs) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadTentativeDefinitions(TentativeDefs); } void MultiplexExternalSemaSource::ReadUnusedFileScopedDecls( SmallVectorImpl<const DeclaratorDecl*> &Decls) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadUnusedFileScopedDecls(Decls); } void MultiplexExternalSemaSource::ReadDelegatingConstructors( SmallVectorImpl<CXXConstructorDecl*> &Decls) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadDelegatingConstructors(Decls); } void MultiplexExternalSemaSource::ReadExtVectorDecls( SmallVectorImpl<TypedefNameDecl*> &Decls) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadExtVectorDecls(Decls); } void MultiplexExternalSemaSource::ReadUnusedLocalTypedefNameCandidates( llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadUnusedLocalTypedefNameCandidates(Decls); } void MultiplexExternalSemaSource::ReadReferencedSelectors( SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadReferencedSelectors(Sels); } void MultiplexExternalSemaSource::ReadWeakUndeclaredIdentifiers( SmallVectorImpl<std::pair<IdentifierInfo*, WeakInfo> > &WI) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadWeakUndeclaredIdentifiers(WI); } void MultiplexExternalSemaSource::ReadUsedVTables( SmallVectorImpl<ExternalVTableUse> &VTables) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadUsedVTables(VTables); } void MultiplexExternalSemaSource::ReadPendingInstantiations( SmallVectorImpl<std::pair<ValueDecl*, SourceLocation> > &Pending) { for(size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadPendingInstantiations(Pending); } void MultiplexExternalSemaSource::ReadLateParsedTemplates( llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> &LPTMap) { for (size_t i = 0; i < Sources.size(); ++i) Sources[i]->ReadLateParsedTemplates(LPTMap); } TypoCorrection MultiplexExternalSemaSource::CorrectTypo( const DeclarationNameInfo &Typo, int LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext 
*MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT) { for (size_t I = 0, E = Sources.size(); I < E; ++I) { if (TypoCorrection C = Sources[I]->CorrectTypo(Typo, LookupKind, S, SS, CCC, MemberContext, EnteringContext, OPT)) return C; } return TypoCorrection(); } bool MultiplexExternalSemaSource::MaybeDiagnoseMissingCompleteType( SourceLocation Loc, QualType T) { for (size_t I = 0, E = Sources.size(); I < E; ++I) { if (Sources[I]->MaybeDiagnoseMissingCompleteType(Loc, T)) return true; } return false; }
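A brief usage sketch of the multiplexer above (the stub source types are placeholders, not part of clang): single-result queries such as GetExternalDecl return the first source's non-null answer in registration order, while notification-style calls like InitializeSema fan out to every source.

#include "clang/Sema/MultiplexExternalSemaSource.h"
using namespace clang;

// Stub sources for illustration only; real ones (e.g. an ASTReader or a
// custom lookup source) override the lookup callbacks.
struct StubSourceA : ExternalSemaSource {};
struct StubSourceB : ExternalSemaSource {};

void multiplexDemo() {
  StubSourceA A;
  StubSourceB B;
  MultiplexExternalSemaSource Multiplex(A, B); // queries try A before B
  StubSourceB Extra;
  Multiplex.addSource(Extra);                  // appended sources are tried last
  // First non-null result wins for single-answer queries:
  if (Decl *D = Multiplex.GetExternalDecl(/*ID=*/42))
    (void)D;
}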
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaExprMember.cpp
//===--- SemaExprMember.cpp - Semantic Analysis for Expressions -----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for member access expressions. // //===----------------------------------------------------------------------===// #include "clang/Sema/Overload.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change using namespace clang; using namespace sema; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 4> BaseSet; static bool BaseIsNotInSet(const CXXRecordDecl *Base, void *BasesPtr) { const BaseSet &Bases = *reinterpret_cast<const BaseSet*>(BasesPtr); return !Bases.count(Base->getCanonicalDecl()); } /// Determines if the given class is provably not derived from all of /// the prospective base classes. static bool isProvablyNotDerivedFrom(Sema &SemaRef, CXXRecordDecl *Record, const BaseSet &Bases) { void *BasesPtr = const_cast<void*>(reinterpret_cast<const void*>(&Bases)); return BaseIsNotInSet(Record, BasesPtr) && Record->forallBases(BaseIsNotInSet, BasesPtr); } enum IMAKind { /// The reference is definitely not an instance member access. IMA_Static, /// The reference may be an implicit instance member access. IMA_Mixed, /// The reference may be to an instance member, but it might be invalid if /// so, because the context is not an instance method. IMA_Mixed_StaticContext, /// The reference may be to an instance member, but it is invalid if /// so, because the context is from an unrelated class. IMA_Mixed_Unrelated, /// The reference is definitely an implicit instance member access. IMA_Instance, /// The reference may be to an unresolved using declaration. IMA_Unresolved, /// The reference is a contextually-permitted abstract member reference. IMA_Abstract, /// The reference may be to an unresolved using declaration and the /// context is not an instance method. IMA_Unresolved_StaticContext, // The reference refers to a field which is not a member of the containing // class, which is allowed because we're in C++11 mode and the context is // unevaluated. IMA_Field_Uneval_Context, /// All possible referents are instance members and the current /// context is not an instance method. IMA_Error_StaticContext, /// All possible referents are instance members of an unrelated /// class. IMA_Error_Unrelated }; /// The given lookup names class member(s) and is not being used for /// an address-of-member expression. Classify the type of access /// according to whether it's possible that this reference names an /// instance member. This is best-effort in dependent contexts; it is okay to /// conservatively answer "yes", in which case some errors will simply /// not be caught until template-instantiation.
static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef, const LookupResult &R) { assert(!R.empty() && (*R.begin())->isCXXClassMember()); DeclContext *DC = SemaRef.getFunctionLevelDeclContext(); bool isStaticContext = SemaRef.CXXThisTypeOverride.isNull() && (!isa<CXXMethodDecl>(DC) || cast<CXXMethodDecl>(DC)->isStatic()); if (R.isUnresolvableResult()) return isStaticContext ? IMA_Unresolved_StaticContext : IMA_Unresolved; // Collect all the declaring classes of instance members we find. bool hasNonInstance = false; bool isField = false; BaseSet Classes; for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { NamedDecl *D = *I; if (D->isCXXInstanceMember()) { isField |= isa<FieldDecl>(D) || isa<MSPropertyDecl>(D) || isa<IndirectFieldDecl>(D); CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext()); Classes.insert(R->getCanonicalDecl()); } else hasNonInstance = true; } // If we didn't find any instance members, it can't be an implicit // member reference. if (Classes.empty()) return IMA_Static; // C++11 [expr.prim.general]p12: // An id-expression that denotes a non-static data member or non-static // member function of a class can only be used: // (...) // - if that id-expression denotes a non-static data member and it // appears in an unevaluated operand. // // This rule is specific to C++11. However, we also permit this form // in unevaluated inline assembly operands, like the operand to a SIZE. IMAKind AbstractInstanceResult = IMA_Static; // happens to be 'false' assert(!AbstractInstanceResult); switch (SemaRef.ExprEvalContexts.back().Context) { case Sema::Unevaluated: if (isField && SemaRef.getLangOpts().CPlusPlus11) AbstractInstanceResult = IMA_Field_Uneval_Context; break; case Sema::UnevaluatedAbstract: AbstractInstanceResult = IMA_Abstract; break; case Sema::ConstantEvaluated: case Sema::PotentiallyEvaluated: case Sema::PotentiallyEvaluatedIfUsed: break; } // If the current context is not an instance method, it can't be // an implicit member reference. if (isStaticContext) { if (hasNonInstance) return IMA_Mixed_StaticContext; return AbstractInstanceResult ? AbstractInstanceResult : IMA_Error_StaticContext; } CXXRecordDecl *contextClass; if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC)) contextClass = MD->getParent()->getCanonicalDecl(); else contextClass = cast<CXXRecordDecl>(DC); // [class.mfct.non-static]p3: // ...is used in the body of a non-static member function of class X, // if name lookup (3.4.1) resolves the name in the id-expression to a // non-static non-type member of some class C [...] // ...if C is not X or a base class of X, the class member access expression // is ill-formed. if (R.getNamingClass() && contextClass->getCanonicalDecl() != R.getNamingClass()->getCanonicalDecl()) { // If the naming class is not the current context, this was a qualified // member name lookup, and it's sufficient to check that we have the naming // class as a base class. Classes.clear(); Classes.insert(R.getNamingClass()->getCanonicalDecl()); } // If we can prove that the current context is unrelated to all the // declaring classes, it can't be an implicit member reference (in // which case it's an error if any of those members are selected). if (isProvablyNotDerivedFrom(SemaRef, contextClass, Classes)) return hasNonInstance ? IMA_Mixed_Unrelated : AbstractInstanceResult ? AbstractInstanceResult : IMA_Error_Unrelated; return (hasNonInstance ? IMA_Mixed : IMA_Instance); } /// Diagnose a reference to a field with no object available. 
static void diagnoseInstanceReference(Sema &SemaRef, const CXXScopeSpec &SS, NamedDecl *Rep, const DeclarationNameInfo &nameInfo) { SourceLocation Loc = nameInfo.getLoc(); SourceRange Range(Loc); if (SS.isSet()) Range.setBegin(SS.getRange().getBegin()); // Look through using shadow decls and aliases. Rep = Rep->getUnderlyingDecl(); DeclContext *FunctionLevelDC = SemaRef.getFunctionLevelDeclContext(); CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FunctionLevelDC); CXXRecordDecl *ContextClass = Method ? Method->getParent() : nullptr; CXXRecordDecl *RepClass = dyn_cast<CXXRecordDecl>(Rep->getDeclContext()); bool InStaticMethod = Method && Method->isStatic(); bool IsField = isa<FieldDecl>(Rep) || isa<IndirectFieldDecl>(Rep); if (IsField && InStaticMethod) // "invalid use of member 'x' in static member function" SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method) << Range << nameInfo.getName(); else if (ContextClass && RepClass && SS.isEmpty() && !InStaticMethod && !RepClass->Equals(ContextClass) && RepClass->Encloses(ContextClass)) // Unqualified lookup in a non-static member function found a member of an // enclosing class. SemaRef.Diag(Loc, diag::err_nested_non_static_member_use) << IsField << RepClass << nameInfo.getName() << ContextClass << Range; else if (IsField) SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use) << nameInfo.getName() << Range; else SemaRef.Diag(Loc, diag::err_member_call_without_object) << Range; } /// Builds an expression which might be an implicit member expression. ExprResult Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs) { switch (ClassifyImplicitMemberAccess(*this, R)) { case IMA_Instance: return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true); case IMA_Mixed: case IMA_Mixed_Unrelated: case IMA_Unresolved: return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false); case IMA_Field_Uneval_Context: Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use) << R.getLookupNameInfo().getName(); LLVM_FALLTHROUGH; // HLSL Change case IMA_Static: case IMA_Abstract: case IMA_Mixed_StaticContext: case IMA_Unresolved_StaticContext: if (TemplateArgs || TemplateKWLoc.isValid()) return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs); return BuildDeclarationNameExpr(SS, R, false); case IMA_Error_StaticContext: case IMA_Error_Unrelated: diagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(), R.getLookupNameInfo()); return ExprError(); } llvm_unreachable("unexpected instance member access kind"); } /// Check an ext-vector component access expression. /// /// VK should be set in advance to the value kind of the base /// expression. static QualType CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK, SourceLocation OpLoc, const IdentifierInfo *CompName, SourceLocation CompLoc) { // FIXME: Share logic with ExtVectorElementExpr::containsDuplicateElements, // see FIXME there. // // FIXME: This logic can be greatly simplified by splitting it along // halving/not halving and reworking the component checking. const ExtVectorType *vecType = baseType->getAs<ExtVectorType>(); // The vector accessor can't exceed the number of elements. const char *compStr = CompName->getNameStart(); // This flag determines whether or not the component is one of the four // special names that indicate a subset of exactly half the elements are // to be selected. 
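// (The four special names, checked below, are "hi", "lo", "even", and "odd"; // each selects exactly half of the vector's elements.)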
bool HalvingSwizzle = false; // This flag determines whether or not CompName has an 's' char prefix, // indicating that it is a string of hex values to be used as vector indices. bool HexSwizzle = (*compStr == 's' || *compStr == 'S') && compStr[1]; bool HasRepeated = false; bool HasIndex[16] = {}; int Idx; // Check that we've found one of the special components, or that all of the // component names come from the same set. if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") || !strcmp(compStr, "even") || !strcmp(compStr, "odd")) { HalvingSwizzle = true; } else if (!HexSwizzle && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1) { ExtVectorType::AccessorSet currentSet = vecType->getPointAccessorSet(*compStr); // HLSL Change do { if (HasIndex[Idx]) HasRepeated = true; // HLSL Change Starts if (currentSet != vecType->getPointAccessorSet(*compStr)) { S.Diag(OpLoc, diag::err_ext_vector_component_name_mixedsets) << SourceRange(CompLoc); return QualType(); } // HLSL Change Ends HasIndex[Idx] = true; compStr++; } while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1); } else { // HLSL Note - is this parsing digits? if (HexSwizzle) compStr++; while ((Idx = vecType->getNumericAccessorIdx(*compStr)) != -1) { if (HasIndex[Idx]) HasRepeated = true; HasIndex[Idx] = true; compStr++; } } if (!HalvingSwizzle && *compStr) { // We didn't get to the end of the string. This means the component names // didn't come from the same set *or* we encountered an illegal name. S.Diag(OpLoc, diag::err_ext_vector_component_name_illegal) << StringRef(compStr, 1) << SourceRange(CompLoc); return QualType(); } // Ensure no component accessor exceeds the width of the vector type it // operates on. if (!HalvingSwizzle) { compStr = CompName->getNameStart(); if (HexSwizzle) compStr++; while (*compStr) { if (!vecType->isAccessorWithinNumElements(*compStr++)) { S.Diag(OpLoc, diag::err_ext_vector_component_exceeds_length) << baseType << SourceRange(CompLoc); return QualType(); } } } // The component accessor looks fine - now we need to compute the actual type. // The vector type is implied by the component accessor. For example, // vec4.b is a float, vec4.xy is a vec2, vec4.rgb is a vec3, etc. // vec4.s0 is a float, vec4.s23 is a vec2, etc. // vec4.hi, vec4.lo, vec4.e, and vec4.o all return vec2. unsigned CompSize = HalvingSwizzle ? (vecType->getNumElements() + 1) / 2 : CompName->getLength(); if (HexSwizzle) CompSize--; if (CompSize == 1) return vecType->getElementType(); if (HasRepeated) VK = VK_RValue; QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize); // Now look up the TypeDefDecl from the vector type. Without this, // diagnostics look bad. We want extended vector types to appear built-in. for (Sema::ExtVectorDeclsType::iterator I = S.ExtVectorDecls.begin(S.getExternalSource()), E = S.ExtVectorDecls.end(); I != E; ++I) { if ((*I)->getUnderlyingType() == VT) return S.Context.getTypedefType(*I); } return VT; // should never get here (a typedef type should always be found).
} static Decl *FindGetterSetterNameDeclFromProtocolList(const ObjCProtocolDecl*PDecl, IdentifierInfo *Member, const Selector &Sel, ASTContext &Context) { if (Member) if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Member)) return PD; if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Sel)) return OMD; for (const auto *I : PDecl->protocols()) { if (Decl *D = FindGetterSetterNameDeclFromProtocolList(I, Member, Sel, Context)) return D; } return nullptr; } static Decl *FindGetterSetterNameDecl(const ObjCObjectPointerType *QIdTy, IdentifierInfo *Member, const Selector &Sel, ASTContext &Context) { // Check protocols on qualified interfaces. Decl *GDecl = nullptr; for (const auto *I : QIdTy->quals()) { if (Member) if (ObjCPropertyDecl *PD = I->FindPropertyDeclaration(Member)) { GDecl = PD; break; } // Also must look for a getter or setter name which uses property syntax. if (ObjCMethodDecl *OMD = I->getInstanceMethod(Sel)) { GDecl = OMD; break; } } if (!GDecl) { for (const auto *I : QIdTy->quals()) { // Search in the protocol-qualifier list of current protocol. GDecl = FindGetterSetterNameDeclFromProtocolList(I, Member, Sel, Context); if (GDecl) return GDecl; } } return GDecl; } ExprResult Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs) { // Even in dependent contexts, try to diagnose base expressions with // obviously wrong types, e.g.: // // T* t; // t.f; // // In Obj-C++, however, the above expression is valid, since it could be // accessing the 'f' property if T is an Obj-C interface. The extra check // allows this, while still reporting an error if T is a struct pointer. if (!IsArrow) { const PointerType *PT = BaseType->getAs<PointerType>(); if (PT && (!getLangOpts().ObjC1 || PT->getPointeeType()->isRecordType())) { assert(BaseExpr && "cannot happen with implicit member accesses"); Diag(OpLoc, diag::err_typecheck_member_reference_struct_union) << BaseType << BaseExpr->getSourceRange() << NameInfo.getSourceRange(); return ExprError(); } } assert(BaseType->isDependentType() || NameInfo.getName().isDependentName() || isDependentScopeSpecifier(SS)); // Get the type being accessed in BaseType. If this is an arrow, the BaseExpr // must have pointer type, and the accessed type is the pointee. return CXXDependentScopeMemberExpr::Create( Context, BaseExpr, BaseType, IsArrow, OpLoc, SS.getWithLocInContext(Context), TemplateKWLoc, FirstQualifierInScope, NameInfo, TemplateArgs); } /// We know that the given qualified member reference points only to /// declarations which do not belong to the static type of the base /// expression. Diagnose the problem. static void DiagnoseQualifiedMemberReference(Sema &SemaRef, Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, NamedDecl *rep, const DeclarationNameInfo &nameInfo) { // If this is an implicit member access, use a different set of // diagnostics. if (!BaseExpr) return diagnoseInstanceReference(SemaRef, SS, rep, nameInfo); SemaRef.Diag(nameInfo.getLoc(), diag::err_qualified_member_of_unrelated) << SS.getRange() << rep << BaseType; } // Check whether the declarations we found through a nested-name // specifier in a member expression are actually members of the base // type. The restriction here is: // // C++ [expr.ref]p2: // ... 
In these cases, the id-expression shall name a // member of the class or of one of its base classes. // // So it's perfectly legitimate for the nested-name specifier to name // an unrelated class, and for us to find an overload set including // decls from classes which are not superclasses, as long as the decl // we actually pick through overload resolution is from a superclass. bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R) { BaseType = BaseType.getNonReferenceType(); // HLSL Change CXXRecordDecl *BaseRecord = cast_or_null<CXXRecordDecl>(computeDeclContext(BaseType)); if (!BaseRecord) { // We can't check this yet because the base type is still // dependent. assert(BaseType->isDependentType()); return false; } for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { // If this is an implicit member reference and we find a // non-instance member, it's not an error. if (!BaseExpr && !(*I)->isCXXInstanceMember()) return false; // Note that we use the DC of the decl, not the underlying decl. DeclContext *DC = (*I)->getDeclContext(); while (DC->isTransparentContext()) DC = DC->getParent(); if (!DC->isRecord()) continue; CXXRecordDecl *MemberRecord = cast<CXXRecordDecl>(DC)->getCanonicalDecl(); if (BaseRecord->getCanonicalDecl() == MemberRecord || !BaseRecord->isProvablyNotDerivedFrom(MemberRecord)) return false; } DiagnoseQualifiedMemberReference(*this, BaseExpr, BaseType, SS, R.getRepresentativeDecl(), R.getLookupNameInfo()); return true; } namespace { // Callback to only accept typo corrections that are either a ValueDecl or a // FunctionTemplateDecl and are declared in the current record or, for C++ // classes, one of its base classes. class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback { public: explicit RecordMemberExprValidatorCCC(const RecordType *RTy) : Record(RTy->getDecl()) { // Don't add bare keywords to the consumer since they will always fail // validation by virtue of not being associated with any decls. WantTypeSpecifiers = false; WantExpressionKeywords = false; WantCXXNamedCasts = false; WantFunctionLikeCasts = false; WantRemainingKeywords = false; } bool ValidateCandidate(const TypoCorrection &candidate) override { NamedDecl *ND = candidate.getCorrectionDecl(); // Don't accept candidates that cannot be member functions, constants, // variables, or templates. if (!ND || !(isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND))) return false; // Accept candidates that occur in the current record. if (Record->containsDecl(ND)) return true; if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record)) { // Accept candidates that occur in any of the current class' base classes. for (const auto &BS : RD->bases()) { if (const RecordType *BSTy = dyn_cast_or_null<RecordType>(BS.getType().getTypePtrOrNull())) { if (BSTy->getDecl()->containsDecl(ND)) return true; } } } return false; } private: const RecordDecl *const Record; }; } static bool LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R, Expr *BaseExpr, const RecordType *RTy, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, bool HasTemplateArgs, TypoExpr *&TE) { SourceRange BaseRange = BaseExpr ?
BaseExpr->getSourceRange() : SourceRange(); RecordDecl *RDecl = RTy->getDecl(); if (!SemaRef.isThisOutsideMemberFunctionBody(QualType(RTy, 0)) && SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0), diag::err_typecheck_incomplete_tag, BaseRange)) return true; if (HasTemplateArgs) { // LookupTemplateName doesn't expect these both to exist simultaneously. QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0); bool MOUS; SemaRef.LookupTemplateName(R, nullptr, SS, ObjectType, false, MOUS); return false; } DeclContext *DC = RDecl; if (SS.isSet()) { // If the member name was a qualified-id, look into the // nested-name-specifier. DC = SemaRef.computeDeclContext(SS, false); if (SemaRef.RequireCompleteDeclContext(SS, DC)) { SemaRef.Diag(SS.getRange().getEnd(), diag::err_typecheck_incomplete_tag) << SS.getRange() << DC; return true; } assert(DC && "Cannot handle non-computable dependent contexts in lookup"); if (!isa<TypeDecl>(DC)) { SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_nonclass) << DC << SS.getRange(); return true; } } // The record definition is complete, now look up the member. SemaRef.LookupQualifiedName(R, DC, SS); if (!R.empty()) return false; DeclarationName Typo = R.getLookupName(); SourceLocation TypoLoc = R.getNameLoc(); TE = SemaRef.CorrectTypoDelayed( R.getLookupNameInfo(), R.getLookupKind(), nullptr, &SS, llvm::make_unique<RecordMemberExprValidatorCCC>(RTy), [=, &SemaRef](const TypoCorrection &TC) { if (TC) { assert(!TC.isKeyword() && "Got a keyword as a correction for a member!"); bool DroppedSpecifier = TC.WillReplaceSpecifier() && Typo.getAsString() == TC.getAsString(SemaRef.getLangOpts()); SemaRef.diagnoseTypo(TC, SemaRef.PDiag(diag::err_no_member_suggest) << Typo << DC << DroppedSpecifier << SS.getRange()); } else { SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << DC << BaseRange; } }, [=](Sema &SemaRef, TypoExpr *TE, TypoCorrection TC) mutable { R.clear(); // Ensure there's no decls lingering in the shared state. R.suppressDiagnostics(); R.setLookupName(TC.getCorrection()); for (NamedDecl *ND : TC) R.addDecl(ND); R.resolveKind(); return SemaRef.BuildMemberReferenceExpr( BaseExpr, BaseExpr->getType(), OpLoc, IsArrow, SS, SourceLocation(), nullptr, R, nullptr); }, Sema::CTK_ErrorRecovery, DC); return false; } static ExprResult LookupMemberExpr(Sema &S, LookupResult &R, ExprResult &BaseExpr, bool &IsArrow, SourceLocation OpLoc, CXXScopeSpec &SS, Decl *ObjCImpDecl, bool HasTemplateArgs); ExprResult Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs) { if (BaseType->isDependentType() || (SS.isSet() && isDependentScopeSpecifier(SS))) return ActOnDependentMemberExpr(Base, BaseType, IsArrow, OpLoc, SS, TemplateKWLoc, FirstQualifierInScope, NameInfo, TemplateArgs); LookupResult R(*this, NameInfo, LookupMemberName); // Implicit member accesses. if (!Base) { TypoExpr *TE = nullptr; QualType RecordTy = BaseType; if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType(); RecordTy = RecordTy.getNonReferenceType(); // HLSL Change - implicit this is a reference. if (LookupMemberExprInRecord(*this, R, nullptr, RecordTy->getAs<RecordType>(), OpLoc, IsArrow, SS, TemplateArgs != nullptr, TE)) return ExprError(); if (TE) return TE; // Explicit member accesses. 
} else { ExprResult BaseResult = Base; ExprResult Result = LookupMemberExpr( *this, R, BaseResult, IsArrow, OpLoc, SS, ExtraArgs ? ExtraArgs->ObjCImpDecl : nullptr, TemplateArgs != nullptr); if (BaseResult.isInvalid()) return ExprError(); Base = BaseResult.get(); if (Result.isInvalid()) return ExprError(); if (Result.get()) return Result; // LookupMemberExpr can modify Base, and thus change BaseType BaseType = Base->getType(); } return BuildMemberReferenceExpr(Base, BaseType, OpLoc, IsArrow, SS, TemplateKWLoc, FirstQualifierInScope, R, TemplateArgs, false, ExtraArgs); } static ExprResult BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS, SourceLocation loc, IndirectFieldDecl *indirectField, DeclAccessPair foundDecl, Expr *baseObjectExpr, SourceLocation opLoc) { // First, build the expression that refers to the base object. bool baseObjectIsPointer = false; Qualifiers baseQuals; // Case 1: the base of the indirect field is not a field. VarDecl *baseVariable = indirectField->getVarDecl(); CXXScopeSpec EmptySS; if (baseVariable) { assert(baseVariable->getType()->isRecordType()); // In principle we could have a member access expression that // accesses an anonymous struct/union that's a static member of // the base object's class. However, under the current standard, // static data members cannot be anonymous structs or unions. // Supporting this is as easy as building a MemberExpr here. assert(!baseObjectExpr && "anonymous struct/union is static data member?"); DeclarationNameInfo baseNameInfo(DeclarationName(), loc); ExprResult result = BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable); if (result.isInvalid()) return ExprError(); baseObjectExpr = result.get(); baseObjectIsPointer = false; baseQuals = baseObjectExpr->getType().getQualifiers(); // Case 2: the base of the indirect field is a field and the user // wrote a member expression. } else if (baseObjectExpr) { // The caller provided the base object expression. Determine // whether its a pointer and whether it adds any qualifiers to the // anonymous struct/union fields we're looking into. QualType objectType = baseObjectExpr->getType(); if (const PointerType *ptr = objectType->getAs<PointerType>()) { baseObjectIsPointer = true; objectType = ptr->getPointeeType(); } else { baseObjectIsPointer = false; } baseQuals = objectType.getQualifiers(); // Case 3: the base of the indirect field is a field and we should // build an implicit member access. } else { // We've found a member of an anonymous struct/union that is // inside a non-anonymous struct/union, so in a well-formed // program our base object expression is "this". QualType ThisTy = getCurrentThisType(); if (ThisTy.isNull()) { Diag(loc, diag::err_invalid_member_use_in_static_method) << indirectField->getDeclName(); return ExprError(); } // Our base object expression is "this". CheckCXXThisCapture(loc); baseObjectExpr = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/ true); baseObjectIsPointer = true; baseQuals = ThisTy->castAs<PointerType>()->getPointeeType().getQualifiers(); } // Build the implicit member references to the field of the // anonymous struct/union. 
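// The IndirectFieldDecl carries the chain of anonymous members leading to // the named field (e.g. for 'struct S { struct { int x; }; };' an access to // 's.x' goes S -> <anonymous> -> x), so one member access is synthesized per // link of that chain below.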
Expr *result = baseObjectExpr; IndirectFieldDecl::chain_iterator FI = indirectField->chain_begin(), FEnd = indirectField->chain_end(); // Build the first member access in the chain with full information. if (!baseVariable) { FieldDecl *field = cast<FieldDecl>(*FI); // Make a nameInfo that properly uses the anonymous name. DeclarationNameInfo memberNameInfo(field->getDeclName(), loc); result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer, SourceLocation(), EmptySS, field, foundDecl, memberNameInfo).get(); if (!result) return ExprError(); // FIXME: check qualified member access } // In all cases, we should now skip the first declaration in the chain. ++FI; while (FI != FEnd) { FieldDecl *field = cast<FieldDecl>(*FI++); // FIXME: these are somewhat meaningless DeclarationNameInfo memberNameInfo(field->getDeclName(), loc); DeclAccessPair fakeFoundDecl = DeclAccessPair::make(field, field->getAccess()); result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false, SourceLocation(), (FI == FEnd ? SS : EmptySS), field, fakeFoundDecl, memberNameInfo).get(); } return result; } static ExprResult BuildMSPropertyRefExpr(Sema &S, Expr *BaseExpr, bool IsArrow, const CXXScopeSpec &SS, MSPropertyDecl *PD, const DeclarationNameInfo &NameInfo) { // Property names are always simple identifiers and therefore never // require any interesting additional storage. return new (S.Context) MSPropertyRefExpr(BaseExpr, PD, IsArrow, S.Context.PseudoObjectTy, VK_LValue, SS.getWithLocInContext(S.Context), NameInfo.getLoc()); } /// \brief Build a MemberExpr AST node. static MemberExpr *BuildMemberExpr( Sema &SemaRef, ASTContext &C, Expr *Base, bool isArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr) { assert((!isArrow || Base->isRValue()) && "-> base must be a pointer rvalue"); MemberExpr *E = MemberExpr::Create( C, Base, isArrow, OpLoc, SS.getWithLocInContext(C), TemplateKWLoc, Member, FoundDecl, MemberNameInfo, TemplateArgs, Ty, VK, OK); SemaRef.MarkMemberReferenced(E); return E; } ExprResult Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck, ActOnMemberAccessExtraArgs *ExtraArgs) { QualType BaseType = BaseExprType; if (IsArrow) { assert(BaseType->isPointerType()); BaseType = BaseType->castAs<PointerType>()->getPointeeType(); } R.setBaseObjectType(BaseType); LambdaScopeInfo *const CurLSI = getCurLambda(); // If this is an implicit member reference and the overloaded // name refers to both static and non-static member functions // (i.e. BaseExpr is null) and if we are currently processing a lambda, // check if we should/can capture 'this'... // Keep this example in mind: // struct X { // void f(int) { } // static void f(double) { } // // int g() { // auto L = [=](auto a) { // return [](int i) { // return [=](auto b) { // f(b); // //f(decltype(a){}); // }; // }; // }; // auto M = L(0.0); // auto N = M(3); // N(5.32); // OK, must not error. 
// return 0; // } // }; // if (!BaseExpr && CurLSI) { SourceLocation Loc = R.getNameLoc(); if (SS.getRange().isValid()) Loc = SS.getRange().getBegin(); DeclContext *EnclosingFunctionCtx = CurContext->getParent()->getParent(); // If the enclosing function is not dependent, then this lambda is // capture ready, so if we can capture this, do so. if (!EnclosingFunctionCtx->isDependentContext()) { // If the current lambda and all enclosing lambdas can capture 'this' - // then go ahead and capture 'this' (since our unresolved overload set // contains both static and non-static member functions). if (!CheckCXXThisCapture(Loc, /*Explcit*/false, /*Diagnose*/false)) CheckCXXThisCapture(Loc); } else if (CurContext->isDependentContext()) { // ... since this is an implicit member reference, that might potentially // involve a 'this' capture, mark 'this' for potential capture in // enclosing lambdas. if (CurLSI->ImpCaptureStyle != CurLSI->ImpCap_None) CurLSI->addPotentialThisCapture(Loc); } } const DeclarationNameInfo &MemberNameInfo = R.getLookupNameInfo(); DeclarationName MemberName = MemberNameInfo.getName(); SourceLocation MemberLoc = MemberNameInfo.getLoc(); if (R.isAmbiguous()) return ExprError(); if (R.empty()) { // Rederive where we looked up. DeclContext *DC = (SS.isSet() ? computeDeclContext(SS, false) : BaseType->getAs<RecordType>()->getDecl()); if (ExtraArgs) { ExprResult RetryExpr; if (!IsArrow && BaseExpr) { SFINAETrap Trap(*this, true); ParsedType ObjectType; bool MayBePseudoDestructor = false; RetryExpr = ActOnStartCXXMemberReference(getCurScope(), BaseExpr, OpLoc, tok::arrow, ObjectType, MayBePseudoDestructor); if (RetryExpr.isUsable() && !Trap.hasErrorOccurred()) { CXXScopeSpec TempSS(SS); RetryExpr = ActOnMemberAccessExpr( ExtraArgs->S, RetryExpr.get(), OpLoc, tok::arrow, TempSS, TemplateKWLoc, ExtraArgs->Id, ExtraArgs->ObjCImpDecl); } if (Trap.hasErrorOccurred()) RetryExpr = ExprError(); } if (RetryExpr.isUsable()) { Diag(OpLoc, diag::err_no_member_overloaded_arrow) << MemberName << DC << FixItHint::CreateReplacement(OpLoc, "->"); return RetryExpr; } } Diag(R.getNameLoc(), diag::err_no_member) << MemberName << DC << (BaseExpr ? BaseExpr->getSourceRange() : SourceRange()); return ExprError(); } // Diagnose lookups that find only declarations from a non-base // type. This is possible for either qualified lookups (which may // have been qualified with an unrelated type) or implicit member // expressions (which were found with unqualified lookup and thus // may have come from an enclosing scope). Note that it's okay for // lookup to find declarations from a non-base type as long as those // aren't the ones picked by overload resolution. if ((SS.isSet() || !BaseExpr || (isa<CXXThisExpr>(BaseExpr) && cast<CXXThisExpr>(BaseExpr)->isImplicit())) && !SuppressQualifierCheck && CheckQualifiedMemberReference(BaseExpr, BaseType, SS, R)) return ExprError(); // Construct an unresolved result if we in fact got an unresolved // result. if (R.isOverloadedResult() || R.isUnresolvableResult()) { // Suppress any lookup-related diagnostics; we'll do these when we // pick a member. 
R.suppressDiagnostics(); UnresolvedMemberExpr *MemExpr = UnresolvedMemberExpr::Create(Context, R.isUnresolvableResult(), BaseExpr, BaseExprType, IsArrow, OpLoc, SS.getWithLocInContext(Context), TemplateKWLoc, MemberNameInfo, TemplateArgs, R.begin(), R.end()); return MemExpr; } assert(R.isSingleResult()); DeclAccessPair FoundDecl = R.begin().getPair(); NamedDecl *MemberDecl = R.getFoundDecl(); // FIXME: diagnose the presence of template arguments now. // If the decl being referenced had an error, return an error for this // sub-expr without emitting another error, in order to avoid cascading // error cases. if (MemberDecl->isInvalidDecl()) return ExprError(); // Handle the implicit-member-access case. if (!BaseExpr) { // If this is not an instance member, convert to a non-member access. if (!MemberDecl->isCXXInstanceMember()) return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), MemberDecl); SourceLocation Loc = R.getNameLoc(); if (SS.getRange().isValid()) Loc = SS.getRange().getBegin(); CheckCXXThisCapture(Loc); // HLSL Change Starts - adjust this from T* to T&-like if (getLangOpts().HLSL) BaseExpr = genereateHLSLThis(Loc, BaseExprType, /*isImplicit=*/true); else BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true); // HLSL Change Ends } bool ShouldCheckUse = true; if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MemberDecl)) { // Don't diagnose the use of a virtual member function unless it's // explicitly qualified. if (MD->isVirtual() && !SS.isSet()) ShouldCheckUse = false; } // Check the use of this member. if (ShouldCheckUse && DiagnoseUseOfDecl(MemberDecl, MemberLoc)) return ExprError(); if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl)) return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow, OpLoc, SS, FD, FoundDecl, MemberNameInfo); if (MSPropertyDecl *PD = dyn_cast<MSPropertyDecl>(MemberDecl)) return BuildMSPropertyRefExpr(*this, BaseExpr, IsArrow, SS, PD, MemberNameInfo); if (IndirectFieldDecl *FD = dyn_cast<IndirectFieldDecl>(MemberDecl)) // We may have found a field within an anonymous union or struct // (C++ [class.union]). return BuildAnonymousStructUnionMemberReference(SS, MemberLoc, FD, FoundDecl, BaseExpr, OpLoc); if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) { return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS, TemplateKWLoc, Var, FoundDecl, MemberNameInfo, Var->getType().getNonReferenceType(), VK_LValue, OK_Ordinary); } if (CXXMethodDecl *MemberFn = dyn_cast<CXXMethodDecl>(MemberDecl)) { ExprValueKind valueKind; QualType type; if (MemberFn->isInstance()) { valueKind = VK_RValue; type = Context.BoundMemberTy; } else { valueKind = VK_LValue; type = MemberFn->getType(); } return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS, TemplateKWLoc, MemberFn, FoundDecl, MemberNameInfo, type, valueKind, OK_Ordinary); } assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?"); if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) { return BuildMemberExpr(*this, Context, BaseExpr, IsArrow, OpLoc, SS, TemplateKWLoc, Enum, FoundDecl, MemberNameInfo, Enum->getType(), VK_RValue, OK_Ordinary); } // We found something that we didn't expect. Complain. 
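// (For example, the name may have resolved to a nested type rather than a // data member; we diagnose the reference and point at the declaration that // was actually found.)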
if (isa<TypeDecl>(MemberDecl)) Diag(MemberLoc, diag::err_typecheck_member_reference_type) << MemberName << BaseType << int(IsArrow); else Diag(MemberLoc, diag::err_typecheck_member_reference_unknown) << MemberName << BaseType << int(IsArrow); Diag(MemberDecl->getLocation(), diag::note_member_declared_here) << MemberName; R.suppressDiagnostics(); return ExprError(); } /// Given that normal member access failed on the given expression, /// and given that the expression's type involves builtin-id or /// builtin-Class, decide whether substituting in the redefinition /// types would be profitable. The redefinition type is whatever /// this translation unit tried to typedef to id/Class; we store /// it to the side and then re-use it in places like this. static bool ShouldTryAgainWithRedefinitionType(Sema &S, ExprResult &base) { const ObjCObjectPointerType *opty = base.get()->getType()->getAs<ObjCObjectPointerType>(); if (!opty) return false; const ObjCObjectType *ty = opty->getObjectType(); QualType redef; if (ty->isObjCId()) { redef = S.Context.getObjCIdRedefinitionType(); } else if (ty->isObjCClass()) { redef = S.Context.getObjCClassRedefinitionType(); } else { return false; } // Do the substitution as long as the redefinition type isn't just a // possibly-qualified pointer to builtin-id or builtin-Class again. opty = redef->getAs<ObjCObjectPointerType>(); if (opty && !opty->getObjectType()->getInterface()) return false; base = S.ImpCastExprToType(base.get(), redef, CK_BitCast); return true; } static bool isRecordType(QualType T) { return T->isRecordType(); } static bool isPointerToRecordType(QualType T) { if (const PointerType *PT = T->getAs<PointerType>()) return PT->getPointeeType()->isRecordType(); return false; } /// Perform conversions on the LHS of a member access expression. ExprResult Sema::PerformMemberExprBaseConversion(Expr *Base, bool IsArrow) { if (IsArrow && !Base->getType()->isFunctionType()) return DefaultFunctionArrayLvalueConversion(Base); return CheckPlaceholderExpr(Base); } /// Look up the given member of the given non-type-dependent /// expression. This can return in one of two ways: /// * If it returns a sentinel null-but-valid result, the caller will /// assume that lookup was performed and the results written into /// the provided structure. It will take over from there. /// * Otherwise, the returned expression will be produced in place of /// an ordinary member expression. /// /// The ObjCImpDecl bit is a gross hack that will need to be properly /// fixed for ObjC++. static ExprResult LookupMemberExpr(Sema &S, LookupResult &R, ExprResult &BaseExpr, bool &IsArrow, SourceLocation OpLoc, CXXScopeSpec &SS, Decl *ObjCImpDecl, bool HasTemplateArgs) { assert(BaseExpr.get() && "no base expression"); // Perform default conversions. BaseExpr = S.PerformMemberExprBaseConversion(BaseExpr.get(), IsArrow); if (BaseExpr.isInvalid()) return ExprError(); QualType BaseType = BaseExpr.get()->getType(); assert(!BaseType->isDependentType()); DeclarationName MemberName = R.getLookupName(); SourceLocation MemberLoc = R.getNameLoc(); // For later type-checking purposes, turn arrow accesses into dot // accesses. The only access type we support that doesn't follow // the C equivalence "a->b === (*a).b" is ObjC property accesses, // and those never use arrows, so this is unaffected. 
if (IsArrow) { if (const PointerType *Ptr = BaseType->getAs<PointerType>()) BaseType = Ptr->getPointeeType(); else if (const ObjCObjectPointerType *Ptr = BaseType->getAs<ObjCObjectPointerType>()) BaseType = Ptr->getPointeeType(); else if (BaseType->isRecordType()) { // Recover from arrow accesses to records, e.g.: // struct MyRecord foo; // foo->bar // This is actually well-formed in C++ if MyRecord has an // overloaded operator->, but that should have been dealt with // by now--or a diagnostic message already issued if a problem // was encountered while looking for the overloaded operator->. if (!S.getLangOpts().CPlusPlus) { S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion) << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange() << FixItHint::CreateReplacement(OpLoc, "."); } IsArrow = false; } else if (BaseType->isFunctionType()) { goto fail; } else { S.Diag(MemberLoc, diag::err_typecheck_member_reference_arrow) << BaseType << BaseExpr.get()->getSourceRange(); return ExprError(); } } // HLSL Change Starts // Look up HLSL specialty records: Matrix, Vector, Array if (S.getLangOpts().HLSL) { ExprResult res; if (hlsl::LookupRecordMemberExprForHLSL(&S, *BaseExpr.get(), MemberName, IsArrow, OpLoc, MemberLoc, res)) return res; } // HLSL Change Ends // Handle field access to simple records. if (const RecordType *RTy = BaseType->getAs<RecordType>()) { TypoExpr *TE = nullptr; if (LookupMemberExprInRecord(S, R, BaseExpr.get(), RTy, OpLoc, IsArrow, SS, HasTemplateArgs, TE)) return ExprError(); // Returning valid-but-null is how we indicate to the caller that // the lookup result was filled in. If typo correction was attempted and // failed, the lookup result will have been cleared--that combined with the // valid-but-null ExprResult will trigger the appropriate diagnostics. return ExprResult(TE); } // Handle ivar access to Objective-C objects. if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>()) { if (!SS.isEmpty() && !SS.isInvalid()) { S.Diag(SS.getRange().getBegin(), diag::err_qualified_objc_access) << 1 << SS.getScopeRep() << FixItHint::CreateRemoval(SS.getRange()); SS.clear(); } IdentifierInfo *Member = MemberName.getAsIdentifierInfo(); // There are three cases for the base type: // - builtin id (qualified or unqualified) // - builtin Class (qualified or unqualified) // - an interface ObjCInterfaceDecl *IDecl = OTy->getInterface(); if (!IDecl) { if (S.getLangOpts().ObjCAutoRefCount && (OTy->isObjCId() || OTy->isObjCClass())) goto fail; // There's an implicit 'isa' ivar on all objects. // But we only actually find it this way on objects of type 'id', // apparently. if (OTy->isObjCId() && Member->isStr("isa")) return new (S.Context) ObjCIsaExpr(BaseExpr.get(), IsArrow, MemberLoc, OpLoc, S.Context.getObjCClassType()); if (ShouldTryAgainWithRedefinitionType(S, BaseExpr)) return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl, HasTemplateArgs); goto fail; } if (S.RequireCompleteType(OpLoc, BaseType, diag::err_typecheck_incomplete_tag, BaseExpr.get())) return ExprError(); ObjCInterfaceDecl *ClassDeclared = nullptr; ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared); if (!IV) { // Attempt to correct for typos in ivar names. 
auto Validator = llvm::make_unique<DeclFilterCCC<ObjCIvarDecl>>(); Validator->IsObjCIvarLookup = IsArrow; if (TypoCorrection Corrected = S.CorrectTypo( R.getLookupNameInfo(), Sema::LookupMemberName, nullptr, nullptr, std::move(Validator), Sema::CTK_ErrorRecovery, IDecl)) { IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>(); S.diagnoseTypo( Corrected, S.PDiag(diag::err_typecheck_member_reference_ivar_suggest) << IDecl->getDeclName() << MemberName); // Figure out the class that declares the ivar. assert(!ClassDeclared); Decl *D = cast<Decl>(IV->getDeclContext()); if (ObjCCategoryDecl *CAT = dyn_cast<ObjCCategoryDecl>(D)) D = CAT->getClassInterface(); ClassDeclared = cast<ObjCInterfaceDecl>(D); } else { if (IsArrow && IDecl->FindPropertyDeclaration(Member)) { S.Diag(MemberLoc, diag::err_property_found_suggest) << Member << BaseExpr.get()->getType() << FixItHint::CreateReplacement(OpLoc, "."); return ExprError(); } S.Diag(MemberLoc, diag::err_typecheck_member_reference_ivar) << IDecl->getDeclName() << MemberName << BaseExpr.get()->getSourceRange(); return ExprError(); } } assert(ClassDeclared); // If the decl being referenced had an error, return an error for this // sub-expr without emitting another error, in order to avoid cascading // error cases. if (IV->isInvalidDecl()) return ExprError(); // Check whether we can reference this field. if (S.DiagnoseUseOfDecl(IV, MemberLoc)) return ExprError(); if (IV->getAccessControl() != ObjCIvarDecl::Public && IV->getAccessControl() != ObjCIvarDecl::Package) { ObjCInterfaceDecl *ClassOfMethodDecl = nullptr; if (ObjCMethodDecl *MD = S.getCurMethodDecl()) ClassOfMethodDecl = MD->getClassInterface(); else if (ObjCImpDecl && S.getCurFunctionDecl()) { // Case of a c-function declared inside an objc implementation. // FIXME: For a c-style function nested inside an objc implementation // class, there is no implementation context available, so we pass // down the context as argument to this routine. Ideally, this context // need be passed down in the AST node and somehow calculated from the // AST for a function decl. 
if (ObjCImplementationDecl *IMPD = dyn_cast<ObjCImplementationDecl>(ObjCImpDecl)) ClassOfMethodDecl = IMPD->getClassInterface(); else if (ObjCCategoryImplDecl* CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl)) ClassOfMethodDecl = CatImplClass->getClassInterface(); } if (!S.getLangOpts().DebuggerSupport) { if (IV->getAccessControl() == ObjCIvarDecl::Private) { if (!declaresSameEntity(ClassDeclared, IDecl) || !declaresSameEntity(ClassOfMethodDecl, ClassDeclared)) S.Diag(MemberLoc, diag::error_private_ivar_access) << IV->getDeclName(); } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl)) // @protected S.Diag(MemberLoc, diag::error_protected_ivar_access) << IV->getDeclName(); } } bool warn = true; if (S.getLangOpts().ObjCAutoRefCount) { Expr *BaseExp = BaseExpr.get()->IgnoreParenImpCasts(); if (UnaryOperator *UO = dyn_cast<UnaryOperator>(BaseExp)) if (UO->getOpcode() == UO_Deref) BaseExp = UO->getSubExpr()->IgnoreParenCasts(); if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(BaseExp)) if (DE->getType().getObjCLifetime() == Qualifiers::OCL_Weak) { S.Diag(DE->getLocation(), diag::error_arc_weak_ivar_access); warn = false; } } if (warn) { if (ObjCMethodDecl *MD = S.getCurMethodDecl()) { ObjCMethodFamily MF = MD->getMethodFamily(); warn = (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize && !S.IvarBacksCurrentMethodAccessor(IDecl, MD, IV)); } if (warn) S.Diag(MemberLoc, diag::warn_direct_ivar_access) << IV->getDeclName(); } ObjCIvarRefExpr *Result = new (S.Context) ObjCIvarRefExpr( IV, IV->getUsageType(BaseType), MemberLoc, OpLoc, BaseExpr.get(), IsArrow); if (S.getLangOpts().ObjCAutoRefCount) { if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) { if (!S.Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, MemberLoc)) S.recordUseOfEvaluatedWeak(Result); } } return Result; } // Objective-C property access. const ObjCObjectPointerType *OPT; if (!IsArrow && (OPT = BaseType->getAs<ObjCObjectPointerType>())) { if (!SS.isEmpty() && !SS.isInvalid()) { S.Diag(SS.getRange().getBegin(), diag::err_qualified_objc_access) << 0 << SS.getScopeRep() << FixItHint::CreateRemoval(SS.getRange()); SS.clear(); } // This actually uses the base as an r-value. BaseExpr = S.DefaultLvalueConversion(BaseExpr.get()); if (BaseExpr.isInvalid()) return ExprError(); assert(S.Context.hasSameUnqualifiedType(BaseType, BaseExpr.get()->getType())); IdentifierInfo *Member = MemberName.getAsIdentifierInfo(); const ObjCObjectType *OT = OPT->getObjectType(); // id, with and without qualifiers. if (OT->isObjCId()) { // Check protocols on qualified interfaces. Selector Sel = S.PP.getSelectorTable().getNullarySelector(Member); if (Decl *PMDecl = FindGetterSetterNameDecl(OPT, Member, Sel, S.Context)) { if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) { // Check the use of this declaration if (S.DiagnoseUseOfDecl(PD, MemberLoc)) return ExprError(); return new (S.Context) ObjCPropertyRefExpr(PD, S.Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, BaseExpr.get()); } if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) { // Check the use of this method. 
if (S.DiagnoseUseOfDecl(OMD, MemberLoc)) return ExprError(); Selector SetterSel = SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(), S.PP.getSelectorTable(), Member); ObjCMethodDecl *SMD = nullptr; if (Decl *SDecl = FindGetterSetterNameDecl(OPT, /*Property id*/ nullptr, SetterSel, S.Context)) SMD = dyn_cast<ObjCMethodDecl>(SDecl); return new (S.Context) ObjCPropertyRefExpr(OMD, SMD, S.Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, BaseExpr.get()); } } // Use of id.member can only be for a property reference. Do not // use the 'id' redefinition in this case. if (IsArrow && ShouldTryAgainWithRedefinitionType(S, BaseExpr)) return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl, HasTemplateArgs); return ExprError(S.Diag(MemberLoc, diag::err_property_not_found) << MemberName << BaseType); } // 'Class', unqualified only. if (OT->isObjCClass()) { // Only works in a method declaration (??!). ObjCMethodDecl *MD = S.getCurMethodDecl(); if (!MD) { if (ShouldTryAgainWithRedefinitionType(S, BaseExpr)) return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl, HasTemplateArgs); goto fail; } // Also must look for a getter name which uses property syntax. Selector Sel = S.PP.getSelectorTable().getNullarySelector(Member); ObjCInterfaceDecl *IFace = MD->getClassInterface(); ObjCMethodDecl *Getter; if ((Getter = IFace->lookupClassMethod(Sel))) { // Check the use of this method. if (S.DiagnoseUseOfDecl(Getter, MemberLoc)) return ExprError(); } else Getter = IFace->lookupPrivateMethod(Sel, false); // If we found a getter then this may be a valid dot-reference, we // will look for the matching setter, in case it is needed. Selector SetterSel = SelectorTable::constructSetterSelector(S.PP.getIdentifierTable(), S.PP.getSelectorTable(), Member); ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel); if (!Setter) { // If this reference is in an @implementation, also check for 'private' // methods. Setter = IFace->lookupPrivateMethod(SetterSel, false); } if (Setter && S.DiagnoseUseOfDecl(Setter, MemberLoc)) return ExprError(); if (Getter || Setter) { return new (S.Context) ObjCPropertyRefExpr( Getter, Setter, S.Context.PseudoObjectTy, VK_LValue, OK_ObjCProperty, MemberLoc, BaseExpr.get()); } if (ShouldTryAgainWithRedefinitionType(S, BaseExpr)) return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl, HasTemplateArgs); return ExprError(S.Diag(MemberLoc, diag::err_property_not_found) << MemberName << BaseType); } // Normal property access. return S.HandleExprPropertyRefExpr(OPT, BaseExpr.get(), OpLoc, MemberName, MemberLoc, SourceLocation(), QualType(), false); } // Handle 'field access' to vectors, such as 'V.xx'. if (BaseType->isExtVectorType()) { // FIXME: this expr should store IsArrow. IdentifierInfo *Member = MemberName.getAsIdentifierInfo(); ExprValueKind VK; if (IsArrow) VK = VK_LValue; else { if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(BaseExpr.get())) VK = POE->getSyntacticForm()->getValueKind(); else VK = BaseExpr.get()->getValueKind(); } QualType ret = CheckExtVectorComponent(S, BaseType, VK, OpLoc, Member, MemberLoc); if (ret.isNull()) return ExprError(); return new (S.Context) ExtVectorElementExpr(ret, VK, BaseExpr.get(), *Member, MemberLoc); } // Adjust builtin-sel to the appropriate redefinition type if that's // not just a pointer to builtin-sel again. 
if (IsArrow && BaseType->isSpecificBuiltinType(BuiltinType::ObjCSel) && !S.Context.getObjCSelRedefinitionType()->isObjCSelType()) { BaseExpr = S.ImpCastExprToType( BaseExpr.get(), S.Context.getObjCSelRedefinitionType(), CK_BitCast); return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl, HasTemplateArgs); } // Failure cases. fail: // Recover from dot accesses to pointers, e.g.: // type *foo; // foo.bar // This is actually well-formed in two cases: // - 'type' is an Objective C type // - 'bar' is a pseudo-destructor name which happens to refer to // the appropriate pointer type if (const PointerType *Ptr = BaseType->getAs<PointerType>()) { if (!IsArrow && Ptr->getPointeeType()->isRecordType() && MemberName.getNameKind() != DeclarationName::CXXDestructorName) { S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion) << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange() << FixItHint::CreateReplacement(OpLoc, "->"); // Recurse as an -> access. IsArrow = true; return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl, HasTemplateArgs); } } // If the user is trying to apply -> or . to a function name, it's probably // because they forgot parentheses to call that function. if (S.tryToRecoverWithCall( BaseExpr, S.PDiag(diag::err_member_reference_needs_call), /*complain*/ false, IsArrow ? &isPointerToRecordType : &isRecordType)) { if (BaseExpr.isInvalid()) return ExprError(); BaseExpr = S.DefaultFunctionArrayConversion(BaseExpr.get()); return LookupMemberExpr(S, R, BaseExpr, IsArrow, OpLoc, SS, ObjCImpDecl, HasTemplateArgs); } S.Diag(OpLoc, diag::err_typecheck_member_reference_struct_union) << BaseType << BaseExpr.get()->getSourceRange() << MemberLoc; return ExprError(); } /// The main callback when the parser finds something like /// expression . [nested-name-specifier] identifier /// expression -> [nested-name-specifier] identifier /// where 'identifier' encompasses a fairly broad spectrum of /// possibilities, including destructor and operator references. /// /// \param OpKind either tok::arrow or tok::period /// \param ObjCImpDecl the current Objective-C \@implementation /// decl; this is an ugly hack around the fact that Objective-C /// \@implementations aren't properly put in the context chain ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, Decl *ObjCImpDecl) { if (SS.isSet() && SS.isInvalid()) return ExprError(); // Warn about the explicit constructor calls Microsoft extension. if (getLangOpts().MicrosoftExt && Id.getKind() == UnqualifiedId::IK_ConstructorName) Diag(Id.getSourceRange().getBegin(), diag::ext_ms_explicit_constructor_call); TemplateArgumentListInfo TemplateArgsBuffer; // Decompose the name into its component parts. DeclarationNameInfo NameInfo; const TemplateArgumentListInfo *TemplateArgs; DecomposeUnqualifiedId(Id, TemplateArgsBuffer, NameInfo, TemplateArgs); DeclarationName Name = NameInfo.getName(); bool IsArrow = (OpKind == tok::arrow); NamedDecl *FirstQualifierInScope = (!SS.isSet() ? nullptr : FindFirstQualifierInScope(S, SS.getScopeRep())); // This is a postfix expression, so get rid of ParenListExprs. 
ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); // HLSL Changes Start if (getLangOpts().HLSL) { Result = hlsl::MaybeConvertMemberAccess(this, Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); } // HLSL Changes End if (Base->getType()->isDependentType() || Name.isDependentName() || isDependentScopeSpecifier(SS)) { return ActOnDependentMemberExpr(Base, Base->getType(), IsArrow, OpLoc, SS, TemplateKWLoc, FirstQualifierInScope, NameInfo, TemplateArgs); } ActOnMemberAccessExtraArgs ExtraArgs = {S, Id, ObjCImpDecl}; return BuildMemberReferenceExpr(Base, Base->getType(), OpLoc, IsArrow, SS, TemplateKWLoc, FirstQualifierInScope, NameInfo, TemplateArgs, &ExtraArgs); } static ExprResult BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo) { // x.a is an l-value if 'a' has a reference type. Otherwise: // x.a is an l-value/x-value/pr-value if the base is (and note // that *x is always an l-value), except that if the base isn't // an ordinary object then we must have an rvalue. ExprValueKind VK = VK_LValue; ExprObjectKind OK = OK_Ordinary; if (!IsArrow) { if (BaseExpr->getObjectKind() == OK_Ordinary) VK = BaseExpr->getValueKind(); else VK = VK_RValue; } if (VK != VK_RValue && Field->isBitField()) OK = OK_BitField; // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref] QualType MemberType = Field->getType(); if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>()) { MemberType = Ref->getPointeeType(); VK = VK_LValue; } else { QualType BaseType = BaseExpr->getType(); if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType(); Qualifiers BaseQuals = BaseType.getQualifiers(); // GC attributes are never picked up by members. BaseQuals.removeObjCGCAttr(); // CVR attributes from the base are picked up by members, // except that 'mutable' members don't pick up 'const'. if (Field->isMutable()) BaseQuals.removeConst(); Qualifiers MemberQuals = S.Context.getCanonicalType(MemberType).getQualifiers(); assert(!MemberQuals.hasAddressSpace()); Qualifiers Combined = BaseQuals + MemberQuals; if (Combined != MemberQuals) MemberType = S.Context.getQualifiedType(MemberType, Combined); } S.UnusedPrivateFields.remove(Field); ExprResult Base = S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(), FoundDecl, Field); if (Base.isInvalid()) return ExprError(); return BuildMemberExpr(S, S.Context, Base.get(), IsArrow, OpLoc, SS, /*TemplateKWLoc=*/SourceLocation(), Field, FoundDecl, MemberNameInfo, MemberType, VK, OK); } /// Builds an implicit member access expression. The current context /// is known to be an instance method, and the given unqualified lookup /// set is known to contain only instance members, at least one of which /// is from an appropriate type. ExprResult Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsKnownInstance) { assert(!R.empty() && !R.isAmbiguous()); SourceLocation loc = R.getNameLoc(); // If this is known to be an instance access, go ahead and build an // implicit 'this' expression now.
QualType ThisTy = getCurrentThisType(); assert(!ThisTy.isNull() && "didn't correctly pre-flight capture of 'this'"); Expr *baseExpr = nullptr; // null signifies implicit access if (IsKnownInstance) { SourceLocation Loc = R.getNameLoc(); if (SS.getRange().isValid()) Loc = SS.getRange().getBegin(); CheckCXXThisCapture(Loc); if (getLangOpts().HLSL) { baseExpr = genereateHLSLThis(Loc, ThisTy, /*isImplicit=*/true); ThisTy = ThisTy->getPointeeType(); } else baseExpr = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/true); } return BuildMemberReferenceExpr(baseExpr, ThisTy, /*OpLoc*/ SourceLocation(), // HLSL Change - this is a reference /*IsArrow*/ !getLangOpts().HLSL, SS, TemplateKWLoc, /*FirstQualifierInScope*/ nullptr, R, TemplateArgs); }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaDXR.cpp
//===------ SemaDXR.cpp - Semantic Analysis for DXR shader -----*- C++ -*-===// /////////////////////////////////////////////////////////////////////////////// // // // SemaDXR.cpp // // Copyright (C) Nvidia Corporation. All rights reserved. // // This file is distributed under the University of Illinois Open Source // // License. See LICENSE.TXT for details. // // // // This file defines the semantic support for DXR. // // // /////////////////////////////////////////////////////////////////////////////// #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/Sema/SemaHLSL.h" #include "clang/Analysis/Analyses/Dominators.h" #include "clang/Analysis/Analyses/ReachableCode.h" #include "clang/Analysis/CFG.h" #include "llvm/ADT/BitVector.h" #include "dxc/DXIL/DxilConstants.h" #include "dxc/DXIL/DxilShaderModel.h" using namespace clang; using namespace sema; using namespace hlsl; namespace { struct PayloadUse { PayloadUse() = default; PayloadUse(const Stmt *S, const CFGBlock *Parent) : S(S), Parent(Parent), Member(nullptr) {} PayloadUse(const Stmt *S, const CFGBlock *Parent, const MemberExpr *Member) : S(S), Parent(Parent), Member(Member) {} bool operator<(const PayloadUse &Other) const { return S < Other.S; } const Stmt *S = nullptr; const CFGBlock *Parent = nullptr; const MemberExpr *Member = nullptr; }; struct TraceRayCall { TraceRayCall() = default; TraceRayCall(const CallExpr *Call, const CFGBlock *Parent) : Call(Call), Parent(Parent) {} const CallExpr *Call = nullptr; const CFGBlock *Parent = nullptr; }; struct PayloadAccessInfo { PayloadAccessInfo() = default; PayloadAccessInfo(const MemberExpr *Member, const CallExpr *Call, bool IsLValue) : Member(Member), Call(Call), IsLValue(IsLValue) {} const MemberExpr *Member = nullptr; const CallExpr *Call = nullptr; bool IsLValue = false; }; struct DxrShaderDiagnoseInfo { const FunctionDecl *funcDecl; const VarDecl *Payload; DXIL::PayloadAccessShaderStage Stage; std::vector<TraceRayCall> TraceCalls; std::map<const FieldDecl *, std::vector<PayloadUse>> WritesPerField; std::map<const FieldDecl *, std::vector<PayloadUse>> ReadsPerField; std::vector<PayloadUse> PayloadAsCallArg; }; std::vector<const FieldDecl *> DiagnosePayloadAccess(Sema &S, DxrShaderDiagnoseInfo &Info, const std::set<const FieldDecl *> &FieldsToIgnoreRead, const std::set<const FieldDecl *> &FieldsToIgnoreWrite, std::set<const FunctionDecl *> VisitedFunctions); const Stmt *IgnoreParensAndDecay(const Stmt *S); // Transform the shader stage to string to be used in diagnostics StringRef GetStringForShaderStage(DXIL::PayloadAccessShaderStage Stage) { StringRef StageNames[] = {"caller", "closesthit", "miss", "anyhit"}; if (Stage != DXIL::PayloadAccessShaderStage::Invalid) return StageNames[static_cast<unsigned>(Stage)]; return ""; } // Returns the Qualifier for a Field and a given shader stage. 
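// For reference, these annotations come from HLSL payload access qualifiers // on the payload type, e.g. (illustrative declaration): // struct [raypayload] MyPayload { // float4 color : read(caller) : write(closesthit, miss); // float dist : read(caller) : write(miss); // }; // A field is ReadWrite for a stage named in both lists and NoAccess for a // stage named in neither.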
DXIL::PayloadAccessQualifier GetPayloadQualifierForStage(FieldDecl *Field, DXIL::PayloadAccessShaderStage Stage) { bool hasRead = false; bool hasWrite = false; for (UnusualAnnotation *annotation : Field->getUnusualAnnotations()) { if (auto *payloadAnnotation = dyn_cast<hlsl::PayloadAccessAnnotation>(annotation)) { for (auto &ShaderStage : payloadAnnotation->ShaderStages) { if (ShaderStage != Stage) continue; hasRead |= payloadAnnotation->qualifier == DXIL::PayloadAccessQualifier::Read; hasWrite |= payloadAnnotation->qualifier == DXIL::PayloadAccessQualifier::Write; } } } if (hasRead && hasWrite) return DXIL::PayloadAccessQualifier::ReadWrite; if (hasRead) return DXIL::PayloadAccessQualifier::Read; if (hasWrite) return DXIL::PayloadAccessQualifier::Write; return DXIL::PayloadAccessQualifier::NoAccess; } // Returns the declaration of the payload used in a TraceRay call. const VarDecl *GetPayloadParameterForTraceCall(const CallExpr *Trace) { const Decl *callee = Trace->getCalleeDecl(); if (!callee) return nullptr; if (!isa<FunctionDecl>(callee)) return nullptr; const FunctionDecl *FD = cast<FunctionDecl>(callee); if (FD->isImplicit() && FD->getName() == "TraceRay") { const Stmt *Param = IgnoreParensAndDecay(Trace->getArg(7)); if (const DeclRefExpr *ParamRef = dyn_cast<DeclRefExpr>(Param)) { if (const VarDecl *Decl = dyn_cast<VarDecl>(ParamRef->getDecl())) return Decl; } } return nullptr; } // Recursively extracts accesses to a payload struct from a Stmt. void GetPayloadAccesses(const Stmt *S, const DxrShaderDiagnoseInfo &Info, std::vector<PayloadAccessInfo> &Accesses, bool IsLValue, const MemberExpr *Member, const CallExpr *Call) { for (auto C : S->children()) { if (!C) continue; if (const DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(C)) { if (Ref->getDecl() == Info.Payload) { Accesses.push_back(PayloadAccessInfo{Member, Call, IsLValue}); } } if (const ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(C)) { if (Cast->getCastKind() == CK_LValueToRValue) { IsLValue = false; } } GetPayloadAccesses(C, Info, Accesses, IsLValue, Member ? Member : dyn_cast<MemberExpr>(C), Call ? Call : dyn_cast<CallExpr>(C)); } } // Collects all reads, writes, and calls involving the payload. void CollectReadsWritesAndCallsForPayload(const Stmt *S, DxrShaderDiagnoseInfo &Info, const CFGBlock *Block) { std::vector<PayloadAccessInfo> PayloadAccesses; GetPayloadAccesses(S, Info, PayloadAccesses, true, dyn_cast<MemberExpr>(S), dyn_cast<CallExpr>(S)); for (auto &Access : PayloadAccesses) { // An access to a payload member was found. if (Access.Member) { FieldDecl *Field = cast<FieldDecl>(Access.Member->getMemberDecl()); if (Access.IsLValue) { Info.WritesPerField[Field].push_back( PayloadUse{S, Block, Access.Member}); } else { Info.ReadsPerField[Field].push_back( PayloadUse{S, Block, Access.Member}); } } else if (Access.Call) { Info.PayloadAsCallArg.push_back(PayloadUse{S, Block}); } } } // Collects all TraceRay calls. void CollectTraceRayCalls(const Stmt *S, DxrShaderDiagnoseInfo &Info, const CFGBlock *Block) { // TraceRay returns void, so it should never appear as anything other than // a plain CallExpr. if (const CallExpr *Call = dyn_cast<CallExpr>(S)) { const Decl *Callee = Call->getCalleeDecl(); if (!Callee || !isa<FunctionDecl>(Callee)) return; const FunctionDecl *CalledFunction = cast<FunctionDecl>(Callee); // Record calls to the implicit TraceRay builtin.
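// TraceRay is modeled as an implicit FunctionDecl named 'TraceRay'; see // GetPayloadParameterForTraceCall above, which identifies it the same way // and fetches the payload from argument index 7.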
if (CalledFunction->isImplicit() && CalledFunction->getName() == "TraceRay") { Info.TraceCalls.push_back({Call, Block}); } } } // Find the last write to the payload field in the given block. PayloadUse GetLastWriteInBlock(CFGBlock &Block, ArrayRef<PayloadUse> PayloadWrites) { PayloadUse LastWrite; for (auto &Element : Block) { // TODO: reverse iterate? if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { auto It = std::find_if( PayloadWrites.begin(), PayloadWrites.end(), [&](const PayloadUse &V) { return V.S == S->getStmt(); }); if (It != std::end(PayloadWrites)) { LastWrite = *It; LastWrite.Parent = &Block; } } } return LastWrite; } // Traverse the CFG until every path has reached a write or the ENTRY. void TraverseCFGUntilWrite(CFGBlock &Current, std::vector<PayloadUse> &Writes, ArrayRef<PayloadUse> PayloadWrites, std::set<const CFGBlock *> &Visited) { if (Visited.count(&Current)) return; Visited.insert(&Current); for (auto I = Current.pred_begin(), E = Current.pred_end(); I != E; ++I) { CFGBlock *Pred = *I; if (!Pred) continue; PayloadUse WriteInPred = GetLastWriteInBlock(*Pred, PayloadWrites); if (!WriteInPred.S) TraverseCFGUntilWrite(*Pred, Writes, PayloadWrites, Visited); else Writes.push_back(WriteInPred); } } // Traverse the CFG from the EXIT backwards and stop as soon as a block has a // write to the payload field. std::vector<PayloadUse> GetAllWritesReachingExit(CFG &ShaderCFG, ArrayRef<PayloadUse> PayloadWrites) { std::vector<PayloadUse> Writes; CFGBlock &Exit = ShaderCFG.getExit(); std::set<const CFGBlock *> Visited; TraverseCFGUntilWrite(Exit, Writes, PayloadWrites, Visited); return Writes; } // Find the first read of the payload field in the given block. PayloadUse GetFirstReadInBlock(CFGBlock &Block, ArrayRef<PayloadUse> PayloadReads) { PayloadUse FirstRead; for (auto &Element : Block) { if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { auto It = std::find_if( PayloadReads.begin(), PayloadReads.end(), [&](const PayloadUse &V) { return V.S == S->getStmt(); }); if (It != std::end(PayloadReads)) { FirstRead = *It; FirstRead.Parent = &Block; break; // We found the first read and are done with this block. } } } return FirstRead; } // Traverse the CFG until every path has reached a read or the EXIT. void TraverseCFGUntilRead(CFGBlock &Current, std::vector<PayloadUse> &Reads, ArrayRef<PayloadUse> PayloadReads, std::set<const CFGBlock *> &Visited) { if (Visited.count(&Current)) return; Visited.insert(&Current); for (auto I = Current.succ_begin(), E = Current.succ_end(); I != E; ++I) { CFGBlock *Succ = *I; if (!Succ) continue; PayloadUse ReadInSucc = GetFirstReadInBlock(*Succ, PayloadReads); if (!ReadInSucc.S) TraverseCFGUntilRead(*Succ, Reads, PayloadReads, Visited); else Reads.push_back(ReadInSucc); } } // Traverse the CFG from the ENTRY down and stop as soon as a block has a read // of the payload field. std::vector<PayloadUse> GetAllReadsReachedFromEntry(CFG &ShaderCFG, ArrayRef<PayloadUse> PayloadReads) { std::vector<PayloadUse> Reads; CFGBlock &Entry = ShaderCFG.getEntry(); std::set<const CFGBlock *> Visited; TraverseCFGUntilRead(Entry, Reads, PayloadReads, Visited); return Reads; } // Returns the record type of a payload declaration.
CXXRecordDecl *GetPayloadType(const VarDecl *Payload) { auto PayloadType = Payload->getType(); if (PayloadType->isStructureOrClassType()) { return PayloadType->getAsCXXRecordDecl(); } return nullptr; } std::vector<FieldDecl *> GetAllPayloadFields(RecordDecl *PayloadType) { std::vector<FieldDecl *> PayloadFields; for (FieldDecl *Field : PayloadType->fields()) { QualType FieldType = Field->getType(); if (RecordDecl *FieldRecordDecl = FieldType->getAsCXXRecordDecl()) { // Recurse into nested payload types instead of treating them as fields. if (FieldRecordDecl->hasAttr<HLSLRayPayloadAttr>()) { auto SubTypeFields = GetAllPayloadFields(FieldRecordDecl); PayloadFields.insert(PayloadFields.end(), SubTypeFields.begin(), SubTypeFields.end()); continue; } } PayloadFields.push_back(Field); } return PayloadFields; } // Returns true if the field is writeable in an earlier shader stage. bool IsFieldWriteableInEarlierStage(FieldDecl *Field, DXIL::PayloadAccessShaderStage ThisStage) { bool isWriteableInEarlierStage = false; switch (ThisStage) { case DXIL::PayloadAccessShaderStage::Anyhit: case DXIL::PayloadAccessShaderStage::Closesthit: case DXIL::PayloadAccessShaderStage::Miss: { auto Qualifier = GetPayloadQualifierForStage( Field, DXIL::PayloadAccessShaderStage::Caller); isWriteableInEarlierStage = Qualifier == DXIL::PayloadAccessQualifier::Write || Qualifier == DXIL::PayloadAccessQualifier::ReadWrite; Qualifier = GetPayloadQualifierForStage( Field, DXIL::PayloadAccessShaderStage::Anyhit); isWriteableInEarlierStage |= Qualifier == DXIL::PayloadAccessQualifier::Write || Qualifier == DXIL::PayloadAccessQualifier::ReadWrite; } break; default: break; } return isWriteableInEarlierStage; } // Emit warnings on payload writes. void DiagnosePayloadWrites(Sema &S, CFG &ShaderCFG, DominatorTree &DT, const DxrShaderDiagnoseInfo &Info, ArrayRef<FieldDecl *> NonWriteableFields, RecordDecl *PayloadType) { for (FieldDecl *Field : NonWriteableFields) { auto WritesToField = Info.WritesPerField.find(Field); if (WritesToField == Info.WritesPerField.end()) continue; const auto &WritesToDiagnose = GetAllWritesReachingExit(ShaderCFG, WritesToField->second); for (auto &Write : WritesToDiagnose) { FieldDecl *MemField = cast<FieldDecl>(Write.Member->getMemberDecl()); auto Qualifier = GetPayloadQualifierForStage(MemField, Info.Stage); if (Qualifier != DXIL::PayloadAccessQualifier::Write && Qualifier != DXIL::PayloadAccessQualifier::ReadWrite) { S.Diag(Write.Member->getExprLoc(), diag::warn_hlsl_payload_access_write_loss) << Field->getName() << GetStringForShaderStage(Info.Stage); } } } // Check if a field is not unconditionally written and a write from an earlier // stage will be lost. auto PayloadFields = GetAllPayloadFields(PayloadType); for (FieldDecl *Field : PayloadFields) { auto Qualifier = GetPayloadQualifierForStage(Field, Info.Stage); if (IsFieldWriteableInEarlierStage(Field, Info.Stage) && Qualifier == DXIL::PayloadAccessQualifier::Write) { // The field is writeable in an earlier stage and write-only in this // stage. Check if we find a write that dominates the exit of the // function.
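// A write whose block dominates the EXIT block executes on every path out of // the function, so the earlier stage's value is overwritten deliberately; // without such a write, some path may leave the field unwritten, and since // the field is write-only here the incoming value cannot be preserved.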
bool fieldHasDominatingWrite = false; auto It = Info.WritesPerField.find(Field); if (It != Info.WritesPerField.end()) { for (auto &Write : It->second) { fieldHasDominatingWrite = DT.dominates(Write.Parent, &ShaderCFG.getExit()); if (fieldHasDominatingWrite) break; } } if (!fieldHasDominatingWrite) { S.Diag(Info.Payload->getLocation(), diag::warn_hlsl_payload_access_data_loss) << Field->getName() << GetStringForShaderStage(Info.Stage); } } } } // Returns true if A appears before B in Parent; defaults to true if neither // statement is found. bool IsEarlierStatementAs(const Stmt *A, const Stmt *B, const CFGBlock &Parent) { for (auto Element : Parent) { if (auto S = Element.getAs<CFGStmt>()) { if (S->getStmt() == A) return true; if (S->getStmt() == B) return false; } } return true; } // Returns true if the write dominates the payload use. template <typename T> bool WriteDominatesUse(const PayloadUse &Write, const T &Use, DominatorTree &DT) { if (Use.Parent == Write.Parent) { // Use and write are in the same Block. return IsEarlierStatementAs(Write.S, Use.S, *Use.Parent); } return DT.dominates(Write.Parent, Use.Parent); } // Emit warnings for payload reads. void DiagnosePayloadReads(Sema &S, CFG &ShaderCFG, DominatorTree &DT, const DxrShaderDiagnoseInfo &Info, ArrayRef<FieldDecl *> NonReadableFields) { for (FieldDecl *Field : NonReadableFields) { auto ReadsFromField = Info.ReadsPerField.find(Field); if (ReadsFromField == Info.ReadsPerField.end()) continue; auto WritesToField = Info.WritesPerField.find(Field); bool FieldHasWrites = WritesToField != Info.WritesPerField.end(); const auto &ReadsToDiagnose = GetAllReadsReachedFromEntry(ShaderCFG, ReadsFromField->second); for (auto &Read : ReadsToDiagnose) { bool ReadIsDominatedByWrite = false; if (FieldHasWrites) { // We found a read of a field that needs a diagnostic. // Do not warn about reads that are dominated by a write: look for a // write that dominates the read and, if one is found, ignore the read. for (auto Write : WritesToField->second) { ReadIsDominatedByWrite = WriteDominatesUse(Write, Read, DT); if (ReadIsDominatedByWrite) break; } } if (ReadIsDominatedByWrite) continue; FieldDecl *MemField = cast<FieldDecl>(Read.Member->getMemberDecl()); auto Qualifier = GetPayloadQualifierForStage(MemField, Info.Stage); if (Qualifier != DXIL::PayloadAccessQualifier::Read && Qualifier != DXIL::PayloadAccessQualifier::ReadWrite) { S.Diag(Read.Member->getExprLoc(), diag::warn_hlsl_payload_access_undef_read) << Field->getName() << GetStringForShaderStage(Info.Stage); } } } } // Generic CFG traversal that performs PerElementAction on every Stmt in the // CFG. template <bool Backward, typename Action> void TraverseCFG(const CFGBlock &Block, Action PerElementAction, std::set<const CFGBlock *> &Visited) { if (Visited.count(&Block)) return; Visited.insert(&Block); for (const auto &Element : Block) { PerElementAction(Block, Element); } if (!Backward) { for (auto I = Block.succ_begin(), E = Block.succ_end(); I != E; ++I) { CFGBlock *Succ = *I; if (!Succ) continue; TraverseCFG</*Backward=*/false>(*Succ, PerElementAction, Visited); } } else { for (auto I = Block.pred_begin(), E = Block.pred_end(); I != E; ++I) { CFGBlock *Pred = *I; if (!Pred) continue; TraverseCFG<Backward>(*Pred, PerElementAction, Visited); } } } // Forward traverse the CFG and collect calls to TraceRay.
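// (A thin wrapper over the generic TraverseCFG above: Backward=false follows // successor edges, Backward=true follows predecessor edges.)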
void ForwardTraverseCFGAndCollectTraceCalls( const CFGBlock &Block, DxrShaderDiagnoseInfo &Info, std::set<const CFGBlock *> &Visited) { auto Action = [&Info](const CFGBlock &Block, const CFGElement &Element) { if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { CollectTraceRayCalls(S->getStmt(), Info, &Block); } }; TraverseCFG<false>(Block, Action, Visited); } // Forward traverse the CFG and collect all reads and writes to the payload. void ForwardTraverseCFGAndCollectReadsWrites( const CFGBlock &StartBlock, DxrShaderDiagnoseInfo &Info, std::set<const CFGBlock *> &Visited) { auto Action = [&Info](const CFGBlock &Block, const CFGElement &Element) { if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { CollectReadsWritesAndCallsForPayload(S->getStmt(), Info, &Block); } }; TraverseCFG<false>(StartBlock, Action, Visited); } // Backward traverse the CFG and collect all reads and writes to the payload. void BackwardTraverseCFGAndCollectReadsWrites( const CFGBlock &StartBlock, DxrShaderDiagnoseInfo &Info, std::set<const CFGBlock *> &Visited) { auto Action = [&](const CFGBlock &Block, const CFGElement &Element) { if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { CollectReadsWritesAndCallsForPayload(S->getStmt(), Info, &Block); } }; TraverseCFG<true>(StartBlock, Action, Visited); } // Returns true if the Stmt uses the Payload. bool IsPayloadArg(const Stmt *S, const Decl *Payload) { if (const DeclRefExpr *Ref = dyn_cast<DeclRefExpr>(S)) { const Decl *Decl = Ref->getDecl(); if (Decl == Payload) return true; } for (auto C : S->children()) { if (IsPayloadArg(C, Payload)) return true; } return false; } bool DiagnoseCallExprForExternal(Sema &S, const FunctionDecl *FD, const CallExpr *CE, const ParmVarDecl *Payload); // Collects all writes that dominate a PayloadUse in a CallExpr // and returns a set of the Fields accessed. std::set<const FieldDecl *> CollectDominatingWritesForCall(PayloadUse &Use, DxrShaderDiagnoseInfo &Info, DominatorTree &DT) { std::set<const FieldDecl *> FieldsToIgnore; for (auto P : Info.WritesPerField) { for (auto Write : P.second) { bool WriteDominatesCallSite = WriteDominatesUse(Write, Use, DT); if (WriteDominatesCallSite) { FieldsToIgnore.insert(P.first); break; } } } return FieldsToIgnore; } // Collects all writes that are reachable from a PayloadUse in a CallExpr // and returns a set of the Fields accessed. std::set<const FieldDecl *> CollectReachableWritesForCall(PayloadUse &Use, const DxrShaderDiagnoseInfo &Info) { std::set<const FieldDecl *> FieldsToIgnore; assert(Use.Parent); const CFGBlock *Current = Use.Parent; // Traverse the CFG beginning from the block of the call and collect all // fields written to after the call. These fields must not be diagnosed with // warnings about lost writes. DxrShaderDiagnoseInfo TempInfo; TempInfo.Payload = Info.Payload; bool foundCall = false; for (auto &Element : *Current) { // Search for the Call in the block if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { if (S->getStmt() == Use.S) { foundCall = true; continue; } if (foundCall) CollectReadsWritesAndCallsForPayload(S->getStmt(), TempInfo, Current); } } for (auto I = Current->succ_begin(); I != Current->succ_end(); ++I) { CFGBlock *Succ = *I; if (!Succ) continue; std::set<const CFGBlock *> Visited; ForwardTraverseCFGAndCollectReadsWrites(*Succ, TempInfo, Visited); } for (auto &p : TempInfo.WritesPerField) FieldsToIgnore.insert(p.first); return FieldsToIgnore; } // Emit diagnostics when the payload is used as an argument // in a function call.
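// The FieldsToIgnore sets carry interprocedural context into the callee: // fields already written on every path before the call site need no // undefined-read warning inside the callee, and fields rewritten after the // call need no lost-write warning.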
std::map<PayloadUse, std::vector<const FieldDecl *>> DiagnosePayloadAsFunctionArg( Sema &S, DxrShaderDiagnoseInfo &Info, DominatorTree &DT, const std::set<const FieldDecl *> &ParentFieldsToIgnoreRead, const std::set<const FieldDecl *> &ParentFieldsToIgnoreWrite, std::set<const FunctionDecl *> VisitedFunctions) { std::map<PayloadUse, std::vector<const FieldDecl *>> WrittenFieldsInCalls; for (PayloadUse &Use : Info.PayloadAsCallArg) { if (const CallExpr *Call = dyn_cast<CallExpr>(Use.S)) { const Decl *Callee = Call->getCalleeDecl(); if (!Callee || !isa<FunctionDecl>(Callee)) continue; const FunctionDecl *CalledFunction = cast<FunctionDecl>(Callee); // Ignore trace calls here. if (CalledFunction->isImplicit() && CalledFunction->getName() == "TraceRay") { Info.TraceCalls.push_back(TraceRayCall{Call, Use.Parent}); continue; } // Handle external function calls if (!CalledFunction->hasBody()) { assert(isa<ParmVarDecl>(Info.Payload)); DiagnoseCallExprForExternal(S, CalledFunction, Call, cast<ParmVarDecl>(Info.Payload)); continue; } if (VisitedFunctions.count(CalledFunction)) return WrittenFieldsInCalls; VisitedFunctions.insert(CalledFunction); DxrShaderDiagnoseInfo CalleeInfo; for (unsigned i = 0; i < Call->getNumArgs(); ++i) { const Expr *Arg = Call->getArg(i); if (IsPayloadArg(Arg, Info.Payload)) { CalleeInfo.Payload = CalledFunction->getParamDecl(i); break; } } if (CalleeInfo.Payload) { CalleeInfo.funcDecl = CalledFunction; CalleeInfo.Stage = Info.Stage; auto FieldsToIgnoreRead = CollectDominatingWritesForCall(Use, Info, DT); auto FieldsToIgnoreWrite = CollectReachableWritesForCall(Use, Info); FieldsToIgnoreRead.insert(ParentFieldsToIgnoreRead.begin(), ParentFieldsToIgnoreRead.end()); FieldsToIgnoreWrite.insert(ParentFieldsToIgnoreWrite.begin(), ParentFieldsToIgnoreWrite.end()); WrittenFieldsInCalls[Use] = DiagnosePayloadAccess(S, CalleeInfo, FieldsToIgnoreRead, FieldsToIgnoreWrite, VisitedFunctions); } } } return WrittenFieldsInCalls; } // Collect all fields that cannot be accessed for the given shader stage. // This function recurses into nested payload types. void CollectNonAccessableFields( RecordDecl *PayloadType, DXIL::PayloadAccessShaderStage Stage, const std::set<const FieldDecl *> &FieldsToIgnoreRead, const std::set<const FieldDecl *> &FieldsToIgnoreWrite, std::vector<FieldDecl *> &NonWriteableFields, std::vector<FieldDecl *> &NonReadableFields) { for (FieldDecl *Field : PayloadType->fields()) { QualType FieldType = Field->getType(); if (RecordDecl *FieldRecordDecl = FieldType->getAsCXXRecordDecl()) { if (FieldRecordDecl->hasAttr<HLSLRayPayloadAttr>()) { CollectNonAccessableFields(FieldRecordDecl, Stage, FieldsToIgnoreRead, FieldsToIgnoreWrite, NonWriteableFields, NonReadableFields); continue; } } auto Qualifier = GetPayloadQualifierForStage(Field, Stage); // Diagnose writes only if they are not written higher in the call-graph. if (!FieldsToIgnoreWrite.count(Field)) { if (Qualifier != DXIL::PayloadAccessQualifier::Write && Qualifier != DXIL::PayloadAccessQualifier::ReadWrite) NonWriteableFields.push_back(Field); } // Diagnose reads only if they have no write higher in the call-graph.
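// (A write higher in the call-graph means the field is defined on entry to
// this function, so a read here cannot be an undef read.)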
if (!FieldsToIgnoreRead.count(Field)) { if (Qualifier != DXIL::PayloadAccessQualifier::Read && Qualifier != DXIL::PayloadAccessQualifier::ReadWrite) NonReadableFields.push_back(Field); } } } void CollectAccessableFields(RecordDecl *PayloadType, const std::vector<FieldDecl *> &NonWriteableFields, const std::vector<FieldDecl *> &NonReadableFields, std::vector<FieldDecl *> &WriteableFields, std::vector<FieldDecl *> &ReadableFields) { for (FieldDecl *Field : PayloadType->fields()) { QualType FieldType = Field->getType(); if (RecordDecl *FieldRecordDecl = FieldType->getAsCXXRecordDecl()) { // Skip nested payload types. if (FieldRecordDecl->hasAttr<HLSLRayPayloadAttr>()) { CollectAccessableFields(FieldRecordDecl, NonWriteableFields, NonReadableFields, WriteableFields, ReadableFields); continue; } } if (std::find(NonWriteableFields.begin(), NonWriteableFields.end(), Field) == NonWriteableFields.end()) WriteableFields.push_back(Field); if (std::find(NonReadableFields.begin(), NonReadableFields.end(), Field) == NonReadableFields.end()) ReadableFields.push_back(Field); } } void HandlePayloadInitializer(DxrShaderDiagnoseInfo &Info) { const VarDecl *Payload = Info.Payload; const Expr *Init = Payload->getInit(); if (Init) { // If the payload has an initializer, treat all fields as written. // Sema will check that the initializer itself is correct. RecordDecl *PayloadType = GetPayloadType(Info.Payload); for (FieldDecl *Field : PayloadType->fields()) { Info.WritesPerField[Field].push_back(PayloadUse{Init, nullptr, nullptr}); } } } // Emit diagnostics for a TraceRay call. void DiagnoseTraceCall(Sema &S, const VarDecl *Payload, const TraceRayCall &Trace, DominatorTree &DT) { // For each TraceRay call, check if write(caller) fields are written. const DXIL::PayloadAccessShaderStage CallerStage = DXIL::PayloadAccessShaderStage::Caller; std::vector<FieldDecl *> WriteableFields; std::vector<FieldDecl *> NonWriteableFields; std::vector<FieldDecl *> ReadableFields; std::vector<FieldDecl *> NonReadableFields; RecordDecl *PayloadType = GetPayloadType(Payload); // Check that the type used for this trace call is an annotated payload type. if (!PayloadType->hasAttr<HLSLRayPayloadAttr>()) { S.Diag(Payload->getLocation(), diag::err_payload_requires_attribute) << PayloadType->getName(); return; } CollectNonAccessableFields(PayloadType, CallerStage, {}, {}, NonWriteableFields, NonReadableFields); CollectAccessableFields(PayloadType, NonWriteableFields, NonReadableFields, WriteableFields, ReadableFields); // Find all writes to Payload that reach the Trace. DxrShaderDiagnoseInfo TraceInfo; TraceInfo.Payload = Payload; // Handle initializers for the payload struct if any is present. HandlePayloadInitializer(TraceInfo); std::set<const CFGBlock *> Visited; const CFGBlock *Parent = Trace.Parent; Visited.insert(Parent); // Collect payload accesses in the same block until we reach the TraceRay // call. for (auto Element : *Parent) { if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { if (S->getStmt() == Trace.Call) break; CollectReadsWritesAndCallsForPayload(S->getStmt(), TraceInfo, Parent); } } for (auto I = Parent->pred_begin(); I != Parent->pred_end(); ++I) { CFGBlock *Pred = *I; if (!Pred) continue; BackwardTraverseCFGAndCollectReadsWrites(*Pred, TraceInfo, Visited); } // Warn if a writeable field has not been written.
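// For example (illustrative; the qualifiers and names are hypothetical):
//
//   struct [raypayload] Payload {
//     float dist : write(caller, closesthit) : read(caller);
//   };
//   ...
//   Payload p;        // no initializer and no write before the trace
//   TraceRay(..., p); // warns: 'dist' is writeable by the caller but was
//                     // never written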
for (const FieldDecl *Field : WriteableFields) { if (!TraceInfo.WritesPerField.count(Field)) { S.Diag(Trace.Call->getArg(7)->getExprLoc(), diag::warn_hlsl_payload_access_no_write_for_trace_payload) << Field->getName(); } } // Warn if a written field is not write(caller) for (const FieldDecl *Field : NonWriteableFields) { if (TraceInfo.WritesPerField.count(Field)) { S.Diag( Trace.Call->getArg(7)->getExprLoc(), diag::warn_hlsl_payload_access_write_but_no_write_for_trace_payload) << Field->getName(); } } // After a trace call, collect all reads that are not dominated by another // write. Warn if a field is not read(caller) but its value is read (an // undef read). // Discard reads/writes from backward traversal. TraceInfo.ReadsPerField.clear(); TraceInfo.WritesPerField.clear(); bool CallFound = false; for (auto Element : *Parent) { // TODO: reverse iterate? if (Optional<CFGStmt> S = Element.getAs<CFGStmt>()) { if (S->getStmt() == Trace.Call) { CallFound = true; continue; } if (CallFound) CollectReadsWritesAndCallsForPayload(S->getStmt(), TraceInfo, Parent); } } for (auto I = Parent->succ_begin(); I != Parent->succ_end(); ++I) { CFGBlock *Succ = *I; if (!Succ) continue; ForwardTraverseCFGAndCollectReadsWrites(*Succ, TraceInfo, Visited); } for (const FieldDecl *Field : ReadableFields) { if (!TraceInfo.ReadsPerField.count(Field)) { S.Diag(Trace.Call->getArg(7)->getExprLoc(), diag::warn_hlsl_payload_access_read_but_no_read_after_trace) << Field->getName(); } } for (const FieldDecl *Field : NonReadableFields) { auto WritesToField = TraceInfo.WritesPerField.find(Field); bool FieldHasWrites = WritesToField != TraceInfo.WritesPerField.end(); for (auto &Read : TraceInfo.ReadsPerField[Field]) { bool ReadIsDominatedByWrite = false; if (FieldHasWrites) { // We found a read of a field that needs to be diagnosed. // We do not want to warn about reads that are dominated by a write. // Find a write that dominates the read; if we find one, ignore the // read. for (auto Write : WritesToField->second) { ReadIsDominatedByWrite = WriteDominatesUse(Write, Read, DT); if (ReadIsDominatedByWrite) break; } } if (ReadIsDominatedByWrite) continue; S.Diag(Read.Member->getExprLoc(), diag::warn_hlsl_payload_access_read_of_undef_after_trace) << Field->getName(); } } } // Emit diagnostics for all TraceRay calls. void DiagnoseTraceCalls(Sema &S, CFG &ShaderCFG, DominatorTree &DT, DxrShaderDiagnoseInfo &Info) { // Collect TraceRay calls in the shader. std::set<const CFGBlock *> Visited; ForwardTraverseCFGAndCollectTraceCalls(ShaderCFG.getEntry(), Info, Visited); std::set<const CallExpr *> Diagnosed; for (const TraceRayCall &TraceCall : Info.TraceCalls) { if (Diagnosed.count(TraceCall.Call)) continue; Diagnosed.insert(TraceCall.Call); const VarDecl *Payload = GetPayloadParameterForTraceCall(TraceCall.Call); DiagnoseTraceCall(S, Payload, TraceCall, DT); } } // Emit diagnostics for all accesses to the payload of a shader, // and to the input of TraceRay calls. std::vector<const FieldDecl *> DiagnosePayloadAccess(Sema &S, DxrShaderDiagnoseInfo &Info, const std::set<const FieldDecl *> &FieldsToIgnoreRead, const std::set<const FieldDecl *> &FieldsToIgnoreWrite, std::set<const FunctionDecl *> VisitedFunctions) { clang::DominatorTree DT; AnalysisDeclContextManager AnalysisManager; AnalysisDeclContext *AnalysisContext = AnalysisManager.getContext(Info.funcDecl); CFG &TheCFG = *AnalysisContext->getCFG(); DT.buildDominatorTree(*AnalysisContext); // Collect all fields that get written, and return them back up through the // recursion.
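// (The caller records such a call as a write at the call site, so reads
// that the call dominates are not flagged as undef reads.)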
std::vector<const FieldDecl *> WrittenFields; // Skip if we are in a RayGeneration shader without payload. if (Info.Payload) { std::vector<FieldDecl *> NonWriteableFields; std::vector<FieldDecl *> NonReadableFields; RecordDecl *PayloadType = GetPayloadType(Info.Payload); if (!PayloadType) return WrittenFields; CollectNonAccessableFields(PayloadType, Info.Stage, FieldsToIgnoreRead, FieldsToIgnoreWrite, NonWriteableFields, NonReadableFields); std::set<const CFGBlock *> Visited; ForwardTraverseCFGAndCollectReadsWrites(TheCFG.getEntry(), Info, Visited); if (Info.Payload->hasAttr<HLSLOutAttr>() || Info.Payload->hasAttr<HLSLInOutAttr>()) { // If there is copy-out semantics on the payload parameter, // save the written fields and return them to the caller for // better diagnostics at higher recursion levels. for (auto &p : Info.WritesPerField) { WrittenFields.push_back(p.first); } DiagnosePayloadWrites(S, TheCFG, DT, Info, NonWriteableFields, PayloadType); } auto WrittenFieldsInCalls = DiagnosePayloadAsFunctionArg( S, Info, DT, FieldsToIgnoreRead, FieldsToIgnoreWrite, VisitedFunctions); // Add calls that write fields as writes to allow the diagnostics on reads // to check if a call that writes the field dominates the read. for (auto &P : WrittenFieldsInCalls) { for (const FieldDecl *Field : P.second) { Info.WritesPerField[Field].push_back(P.first); } } if (Info.Payload->hasAttr<HLSLInAttr>() || Info.Payload->hasAttr<HLSLInOutAttr>()) DiagnosePayloadReads(S, TheCFG, DT, Info, NonReadableFields); } DiagnoseTraceCalls(S, TheCFG, DT, Info); return WrittenFields; } const Stmt *IgnoreParensAndDecay(const Stmt *S) { for (;;) { switch (S->getStmtClass()) { case Stmt::ParenExprClass: S = cast<ParenExpr>(S)->getSubExpr(); break; case Stmt::ImplicitCastExprClass: { const ImplicitCastExpr *castExpr = cast<ImplicitCastExpr>(S); if (castExpr->getCastKind() != CK_ArrayToPointerDecay && castExpr->getCastKind() != CK_NoOp && castExpr->getCastKind() != CK_LValueToRValue) { return S; } S = castExpr->getSubExpr(); } break; default: return S; } } } // Emit warnings for calls that pass the payload to extern functions. bool DiagnoseCallExprForExternal(Sema &S, const FunctionDecl *FD, const CallExpr *CE, const ParmVarDecl *Payload) { // Check if we are passing the entire payload struct to an extern function. // Beyond that point we cannot analyze the payload accesses, so just issue // a warning. if (!FD->hasBody()) { const DeclRefExpr *DRef = nullptr; const ParmVarDecl *PDecl = nullptr; for (unsigned i = 0; i < CE->getNumArgs(); ++i) { const Stmt *arg = IgnoreParensAndDecay(CE->getArg(i)); if (const DeclRefExpr *ArgRef = dyn_cast<DeclRefExpr>(arg)) { if (ArgRef->getDecl() == Payload) { DRef = ArgRef; PDecl = FD->getParamDecl(i); break; } } } if (DRef) { S.Diag(CE->getExprLoc(), diag::warn_qualified_payload_passed_to_extern_function); return true; } } return false; } // Emits diagnostics for the Payload parameter of a DXR shader stage.
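// For example (illustrative): the parameter must be 'inout' and its type a
// user-defined type carrying the [raypayload] attribute:
//
//   struct [raypayload] Payload { /* qualified fields */ };
//   [shader("anyhit")]
//   void AH(inout Payload p, in BuiltInTriangleIntersectionAttributes a) {}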
bool DiagnosePayloadParameter(Sema &S, ParmVarDecl *Payload, FunctionDecl *FD, DXIL::PayloadAccessShaderStage stage) { if (!Payload) { // caught already during codegen of the function return false; } if (!Payload->getAttr<HLSLInOutAttr>()) { // error: payload must be inout qualified return false; } CXXRecordDecl *Decl = Payload->getType()->getAsCXXRecordDecl(); if (!Decl || Decl->isImplicit()) { // error: not a user defined type decl return false; } if (!Decl->hasAttr<HLSLRayPayloadAttr>()) { S.Diag(Payload->getLocation(), diag::err_payload_requires_attribute) << Decl->getName(); return false; } return true; } class DXRShaderVisitor : public RecursiveASTVisitor<DXRShaderVisitor> { public: DXRShaderVisitor(Sema &S) : S(S) {} void diagnose(TranslationUnitDecl *TU) { TraverseTranslationUnitDecl(TU); } bool VisitFunctionDecl(FunctionDecl *Decl) { auto attr = Decl->getAttr<HLSLShaderAttr>(); if (!attr) return true; StringRef shaderStage = attr->getStage(); if (StringRef("miss,closesthit,anyhit,raygeneration").count(shaderStage)) { ParmVarDecl *Payload = nullptr; if (shaderStage != "raygeneration") Payload = Decl->getParamDecl(0); DXIL::PayloadAccessShaderStage Stage = DXIL::PayloadAccessShaderStage::Invalid; if (shaderStage == "closesthit") { Stage = DXIL::PayloadAccessShaderStage::Closesthit; } else if (shaderStage == "miss") { Stage = DXIL::PayloadAccessShaderStage::Miss; } else if (shaderStage == "anyhit") { Stage = DXIL::PayloadAccessShaderStage::Anyhit; } // Diagnose the payload parameter. if (Payload) { DiagnosePayloadParameter(S, Payload, Decl, Stage); } DxrShaderDiagnoseInfo Info; Info.funcDecl = Decl; Info.Payload = Payload; Info.Stage = Stage; std::set<const FunctionDecl *> VisitedFunctions; DiagnosePayloadAccess(S, Info, {}, {}, VisitedFunctions); } return true; } private: Sema &S; }; } // namespace namespace hlsl { void DiagnoseRaytracingPayloadAccess(clang::Sema &S, clang::TranslationUnitDecl *TU) { DXRShaderVisitor visitor(S); visitor.diagnose(TU); } void DiagnoseCallableEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName) { if (!FD->getReturnType()->isVoidType()) S.Diag(FD->getLocation(), diag::err_shader_must_return_void) << StageName; if (FD->getNumParams() != 1) S.Diag(FD->getLocation(), diag::err_raytracing_entry_param_count) << StageName << FD->getNumParams() << /*Special message for callable.*/ 3; else { ParmVarDecl *Param = FD->getParamDecl(0); if (!(Param->getAttr<HLSLInOutAttr>() || (Param->getAttr<HLSLOutAttr>() && Param->getAttr<HLSLInAttr>()))) S.Diag(Param->getLocation(), diag::err_payload_requires_inout) << /*payload|callable*/ 1 << Param; QualType Ty = Param->getType().getNonReferenceType(); if (!(hlsl::IsHLSLCopyableAnnotatableRecord(Ty))) S.Diag(Param->getLocation(), diag::err_payload_attrs_must_be_udt) << /*payload|attributes|callable*/ 2 << Param; } return; } void DiagnoseMissOrAnyHitEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName, DXIL::ShaderKind Stage) { if (!FD->getReturnType()->isVoidType()) S.Diag(FD->getLocation(), diag::err_shader_must_return_void) << StageName; unsigned ExpectedParams = Stage == DXIL::ShaderKind::Miss ?
1 : 2; if (ExpectedParams != FD->getNumParams()) { S.Diag(FD->getLocation(), diag::err_raytracing_entry_param_count) << StageName << FD->getNumParams() << ExpectedParams; return; } ParmVarDecl *Param = FD->getParamDecl(0); if (!(Param->getAttr<HLSLInOutAttr>() || (Param->getAttr<HLSLOutAttr>() && Param->getAttr<HLSLInAttr>()))) { S.Diag(Param->getLocation(), diag::err_payload_requires_inout) << /*payload|callable*/ 0 << Param; return; } if (FD->getNumParams() > 1) { Param = FD->getParamDecl(1); if (Param->getAttr<HLSLInOutAttr>() || Param->getAttr<HLSLOutAttr>()) { S.Diag(Param->getLocation(), diag::err_attributes_requiers_in) << Param; return; } } for (unsigned Idx = 0; Idx < ExpectedParams && Idx < FD->getNumParams(); ++Idx) { Param = FD->getParamDecl(Idx); QualType Ty = Param->getType().getNonReferenceType(); if (!(hlsl::IsHLSLCopyableAnnotatableRecord(Ty))) { S.Diag(Param->getLocation(), diag::err_payload_attrs_must_be_udt) << /*payload|attributes|callable*/ Idx << Param; } } return; } void DiagnoseRayGenerationOrIntersectionEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName) { if (!FD->getReturnType()->isVoidType()) S.Diag(FD->getLocation(), diag::err_shader_must_return_void) << StageName; unsigned ExpectedParams = 0; if (ExpectedParams != FD->getNumParams()) S.Diag(FD->getLocation(), diag::err_raytracing_entry_param_count) << StageName << FD->getNumParams() << ExpectedParams; return; } void DiagnoseClosestHitEntry(Sema &S, FunctionDecl *FD, llvm::StringRef StageName) { if (!FD->getReturnType()->isVoidType()) S.Diag(FD->getLocation(), diag::err_shader_must_return_void) << StageName; unsigned ExpectedParams = 2; if (ExpectedParams != FD->getNumParams()) { S.Diag(FD->getLocation(), diag::err_raytracing_entry_param_count) << StageName << FD->getNumParams() << ExpectedParams; } if (FD->getNumParams() == 0) return; ParmVarDecl *Param = FD->getParamDecl(0); if (!(Param->getAttr<HLSLInOutAttr>() || (Param->getAttr<HLSLOutAttr>() && Param->getAttr<HLSLInAttr>()))) { S.Diag(Param->getLocation(), diag::err_payload_requires_inout) << /*payload|callable*/ 0 << Param; } if (FD->getNumParams() > 1) { Param = FD->getParamDecl(1); if (Param->getAttr<HLSLInOutAttr>() || Param->getAttr<HLSLOutAttr>()) { S.Diag(Param->getLocation(), diag::err_attributes_requiers_in) << Param; } } for (unsigned Idx = 0; Idx < ExpectedParams && Idx < FD->getNumParams(); ++Idx) { Param = FD->getParamDecl(Idx); QualType Ty = Param->getType().getNonReferenceType(); if (!(hlsl::IsHLSLCopyableAnnotatableRecord(Ty))) { S.Diag(Param->getLocation(), diag::err_payload_attrs_must_be_udt) << /*payload|attributes|callable*/ Idx << Param; } } return; } } // namespace hlsl
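// Usage notes (illustrative, not part of the original source): the entry
// diagnostics above accept signatures such as
//
//   [shader("callable")]      void C(inout Params p);               // 1 param
//   [shader("miss")]          void M(inout Payload p);              // 1 param
//   [shader("closesthit")]    void CH(inout Payload p, in Attrs a); // 2 params
//   [shader("raygeneration")] void RG();                            // 0 params
//
// where Params, Payload, and Attrs stand for hypothetical user-defined types.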
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
//===------- SemaTemplateVariadic.cpp - C++ Variadic Templates ------------===/ // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. //===----------------------------------------------------------------------===/ // // This file implements semantic analysis for C++0x variadic templates. //===----------------------------------------------------------------------===/ #include "clang/Sema/Sema.h" #include "TypeLocBuilder.h" #include "clang/AST/Expr.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/Template.h" using namespace clang; //---------------------------------------------------------------------------- // Visitor that collects unexpanded parameter packs //---------------------------------------------------------------------------- namespace { /// \brief A class that collects unexpanded parameter packs. class CollectUnexpandedParameterPacksVisitor : public RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor> { typedef RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor> inherited; SmallVectorImpl<UnexpandedParameterPack> &Unexpanded; bool InLambda; public: explicit CollectUnexpandedParameterPacksVisitor( SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) : Unexpanded(Unexpanded), InLambda(false) { } bool shouldWalkTypesOfTypeLocs() const { return false; } //------------------------------------------------------------------------ // Recording occurrences of (unexpanded) parameter packs. //------------------------------------------------------------------------ /// \brief Record occurrences of template type parameter packs. bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) { if (TL.getTypePtr()->isParameterPack()) Unexpanded.push_back(std::make_pair(TL.getTypePtr(), TL.getNameLoc())); return true; } /// \brief Record occurrences of template type parameter packs /// when we don't have proper source-location information for /// them. /// /// Ideally, this routine would never be used. bool VisitTemplateTypeParmType(TemplateTypeParmType *T) { if (T->isParameterPack()) Unexpanded.push_back(std::make_pair(T, SourceLocation())); return true; } /// \brief Record occurrences of function and non-type template /// parameter packs in an expression. bool VisitDeclRefExpr(DeclRefExpr *E) { if (E->getDecl()->isParameterPack()) Unexpanded.push_back(std::make_pair(E->getDecl(), E->getLocation())); return true; } /// \brief Record occurrences of template template parameter packs. bool TraverseTemplateName(TemplateName Template) { if (TemplateTemplateParmDecl *TTP = dyn_cast_or_null<TemplateTemplateParmDecl>( Template.getAsTemplateDecl())) if (TTP->isParameterPack()) Unexpanded.push_back(std::make_pair(TTP, SourceLocation())); return inherited::TraverseTemplateName(Template); } /// \brief Suppress traversal into Objective-C container literal /// elements that are pack expansions. 
bool TraverseObjCDictionaryLiteral(ObjCDictionaryLiteral *E) { if (!E->containsUnexpandedParameterPack()) return true; for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) { ObjCDictionaryElement Element = E->getKeyValueElement(I); if (Element.isPackExpansion()) continue; TraverseStmt(Element.Key); TraverseStmt(Element.Value); } return true; } //------------------------------------------------------------------------ // Pruning the search for unexpanded parameter packs. //------------------------------------------------------------------------ /// \brief Suppress traversal into statements and expressions that /// do not contain unexpanded parameter packs. bool TraverseStmt(Stmt *S) { Expr *E = dyn_cast_or_null<Expr>(S); if ((E && E->containsUnexpandedParameterPack()) || InLambda) return inherited::TraverseStmt(S); return true; } /// \brief Suppress traversal into types that do not contain /// unexpanded parameter packs. bool TraverseType(QualType T) { if ((!T.isNull() && T->containsUnexpandedParameterPack()) || InLambda) return inherited::TraverseType(T); return true; } /// \brief Suppress traversal into types with location information /// that do not contain unexpanded parameter packs. bool TraverseTypeLoc(TypeLoc TL) { if ((!TL.getType().isNull() && TL.getType()->containsUnexpandedParameterPack()) || InLambda) return inherited::TraverseTypeLoc(TL); return true; } /// \brief Suppress traversal of non-parameter declarations, since /// they cannot contain unexpanded parameter packs. bool TraverseDecl(Decl *D) { if ((D && isa<ParmVarDecl>(D)) || InLambda) return inherited::TraverseDecl(D); return true; } /// \brief Suppress traversal of template argument pack expansions. bool TraverseTemplateArgument(const TemplateArgument &Arg) { if (Arg.isPackExpansion()) return true; return inherited::TraverseTemplateArgument(Arg); } /// \brief Suppress traversal of template argument pack expansions. bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) { if (ArgLoc.getArgument().isPackExpansion()) return true; return inherited::TraverseTemplateArgumentLoc(ArgLoc); } /// \brief Note whether we're traversing a lambda containing an unexpanded /// parameter pack. In this case, the unexpanded pack can occur anywhere, /// including all the places where we normally wouldn't look. Within a /// lambda, we don't propagate the 'contains unexpanded parameter pack' bit /// outside an expression. bool TraverseLambdaExpr(LambdaExpr *Lambda) { // The ContainsUnexpandedParameterPack bit on a lambda is always correct, // even if it's contained within another lambda. if (!Lambda->containsUnexpandedParameterPack()) return true; bool WasInLambda = InLambda; InLambda = true; // If any capture names a function parameter pack, that pack is expanded // when the lambda is expanded. for (LambdaExpr::capture_iterator I = Lambda->capture_begin(), E = Lambda->capture_end(); I != E; ++I) { if (I->capturesVariable()) { VarDecl *VD = I->getCapturedVar(); if (VD->isParameterPack()) Unexpanded.push_back(std::make_pair(VD, I->getLocation())); } } inherited::TraverseLambdaExpr(Lambda); InLambda = WasInLambda; return true; } }; } /// \brief Determine whether it's possible for an unexpanded parameter pack to /// be valid in this location. This only happens when we're in a declaration /// that is nested within an expression that could be expanded, such as a /// lambda-expression within a function call. /// /// This is conservatively correct, but may claim that some unexpanded packs are /// permitted when they are not.
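/// For example (illustrative):
/// \code
///   template<typename ...Ts> void f(Ts ...ts) {
///     g([&]{ return use(ts); }() ...); // 'ts' is unexpanded inside the
///                                      // lambda; the enclosing call
///                                      // argument is the pack expansion
///   }
/// \endcode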
bool Sema::isUnexpandedParameterPackPermitted() { for (auto *SI : FunctionScopes) if (isa<sema::LambdaScopeInfo>(SI)) return true; return false; } /// \brief Diagnose all of the unexpanded parameter packs in the given /// vector. bool Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded) { if (Unexpanded.empty()) return false; // If we are within a lambda expression, that lambda contains an unexpanded // parameter pack, and we are done. // FIXME: Store 'Unexpanded' on the lambda so we don't need to recompute it // later. for (unsigned N = FunctionScopes.size(); N; --N) { if (sema::LambdaScopeInfo *LSI = dyn_cast<sema::LambdaScopeInfo>(FunctionScopes[N-1])) { LSI->ContainsUnexpandedParameterPack = true; return false; } } SmallVector<SourceLocation, 4> Locations; SmallVector<IdentifierInfo *, 4> Names; llvm::SmallPtrSet<IdentifierInfo *, 4> NamesKnown; for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) { IdentifierInfo *Name = nullptr; if (const TemplateTypeParmType *TTP = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) Name = TTP->getIdentifier(); else Name = Unexpanded[I].first.get<NamedDecl *>()->getIdentifier(); if (Name && NamesKnown.insert(Name).second) Names.push_back(Name); if (Unexpanded[I].second.isValid()) Locations.push_back(Unexpanded[I].second); } DiagnosticBuilder DB = Diag(Loc, diag::err_unexpanded_parameter_pack) << (int)UPPC << (int)Names.size(); for (size_t I = 0, E = std::min(Names.size(), (size_t)2); I != E; ++I) DB << Names[I]; for (unsigned I = 0, N = Locations.size(); I != N; ++I) DB << SourceRange(Locations[I]); return true; } bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. if (!T->getType()->containsUnexpandedParameterPack()) return false; SmallVector<UnexpandedParameterPack, 2> Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc( T->getTypeLoc()); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. if (!E->containsUnexpandedParameterPack()) return false; SmallVector<UnexpandedParameterPack, 2> Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(E->getLocStart(), UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. 
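//
// For example (illustrative):
//   template<typename ...Ts> void f(Ts ...ts);
//   template<typename ...Us> void g(Us ...us) {
//     f(us);    // error: parameter pack 'us' is not expanded
//     f(us...); // OK
//   }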
if (!SS.getScopeRep() || !SS.getScopeRep()->containsUnexpandedParameterPack()) return false; SmallVector<UnexpandedParameterPack, 2> Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseNestedNameSpecifier(SS.getScopeRep()); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(SS.getRange().getBegin(), UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC) { // C++0x [temp.variadic]p5: // An appearance of a name of a parameter pack that is not expanded is // ill-formed. switch (NameInfo.getName().getNameKind()) { case DeclarationName::Identifier: case DeclarationName::ObjCZeroArgSelector: case DeclarationName::ObjCOneArgSelector: case DeclarationName::ObjCMultiArgSelector: case DeclarationName::CXXOperatorName: case DeclarationName::CXXLiteralOperatorName: case DeclarationName::CXXUsingDirective: return false; case DeclarationName::CXXConstructorName: case DeclarationName::CXXDestructorName: case DeclarationName::CXXConversionFunctionName: // FIXME: We shouldn't need this null check! if (TypeSourceInfo *TSInfo = NameInfo.getNamedTypeInfo()) return DiagnoseUnexpandedParameterPack(NameInfo.getLoc(), TSInfo, UPPC); if (!NameInfo.getName().getCXXNameType()->containsUnexpandedParameterPack()) return false; break; } SmallVector<UnexpandedParameterPack, 2> Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseType(NameInfo.getName().getCXXNameType()); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(NameInfo.getLoc(), UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC) { if (Template.isNull() || !Template.containsUnexpandedParameterPack()) return false; SmallVector<UnexpandedParameterPack, 2> Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateName(Template); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded); } bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC) { if (Arg.getArgument().isNull() || !Arg.getArgument().containsUnexpandedParameterPack()) return false; SmallVector<UnexpandedParameterPack, 2> Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateArgumentLoc(Arg); assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs"); return DiagnoseUnexpandedParameterPacks(Arg.getLocation(), UPPC, Unexpanded); } void Sema::collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateArgument(Arg); } void Sema::collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseTemplateArgumentLoc(Arg); } void Sema::collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(T); } void Sema::collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(TL); } void Sema::collectUnexpandedParameterPacks(CXXScopeSpec &SS, 
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) { NestedNameSpecifier *Qualifier = SS.getScopeRep(); if (!Qualifier) return; NestedNameSpecifierLoc QualifierLoc(Qualifier, SS.location_data()); CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseNestedNameSpecifierLoc(QualifierLoc); } void Sema::collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) { CollectUnexpandedParameterPacksVisitor(Unexpanded) .TraverseDeclarationNameInfo(NameInfo); } ParsedTemplateArgument Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc) { if (Arg.isInvalid()) return Arg; switch (Arg.getKind()) { case ParsedTemplateArgument::Type: { TypeResult Result = ActOnPackExpansion(Arg.getAsType(), EllipsisLoc); if (Result.isInvalid()) return ParsedTemplateArgument(); return ParsedTemplateArgument(Arg.getKind(), Result.get().getAsOpaquePtr(), Arg.getLocation()); } case ParsedTemplateArgument::NonType: { ExprResult Result = ActOnPackExpansion(Arg.getAsExpr(), EllipsisLoc); if (Result.isInvalid()) return ParsedTemplateArgument(); return ParsedTemplateArgument(Arg.getKind(), Result.get(), Arg.getLocation()); } case ParsedTemplateArgument::Template: if (!Arg.getAsTemplate().get().containsUnexpandedParameterPack()) { SourceRange R(Arg.getLocation()); if (Arg.getScopeSpec().isValid()) R.setBegin(Arg.getScopeSpec().getBeginLoc()); Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << R; return ParsedTemplateArgument(); } return Arg.getTemplatePackExpansion(EllipsisLoc); } llvm_unreachable("Unhandled template argument kind?"); } TypeResult Sema::ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc) { TypeSourceInfo *TSInfo; GetTypeFromParser(Type, &TSInfo); if (!TSInfo) return true; TypeSourceInfo *TSResult = CheckPackExpansion(TSInfo, EllipsisLoc, None); if (!TSResult) return true; return CreateParsedType(TSResult->getType(), TSResult); } TypeSourceInfo * Sema::CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions) { // Create the pack expansion type and source-location information. QualType Result = CheckPackExpansion(Pattern->getType(), Pattern->getTypeLoc().getSourceRange(), EllipsisLoc, NumExpansions); if (Result.isNull()) return nullptr; TypeLocBuilder TLB; TLB.pushFullCopy(Pattern->getTypeLoc()); PackExpansionTypeLoc TL = TLB.push<PackExpansionTypeLoc>(Result); TL.setEllipsisLoc(EllipsisLoc); return TLB.getTypeSourceInfo(Context, Result); } QualType Sema::CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions) { // C++0x [temp.variadic]p5: // The pattern of a pack expansion shall name one or more // parameter packs that are not expanded by a nested pack // expansion. if (!Pattern->containsUnexpandedParameterPack()) { Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << PatternRange; return QualType(); } return Context.getPackExpansionType(Pattern, NumExpansions); } ExprResult Sema::ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc) { return CheckPackExpansion(Pattern, EllipsisLoc, None); } ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions) { if (!Pattern) return ExprError(); // C++0x [temp.variadic]p5: // The pattern of a pack expansion shall name one or more // parameter packs that are not expanded by a nested pack // expansion. 
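//
// For example (illustrative): in 'f(g(ts...) ...)' the nested 'ts...'
// already expands the only pack, so the outer pattern 'g(ts...)' names no
// unexpanded pack and is diagnosed below.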
if (!Pattern->containsUnexpandedParameterPack()) { Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << Pattern->getSourceRange(); return ExprError(); } // Create the pack expansion expression and source-location information. return new (Context) PackExpansionExpr(Context.DependentTy, Pattern, EllipsisLoc, NumExpansions); } /// \brief Retrieve the depth and index of a parameter pack. static std::pair<unsigned, unsigned> getDepthAndIndex(NamedDecl *ND) { if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND)) return std::make_pair(TTP->getDepth(), TTP->getIndex()); if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND)) return std::make_pair(NTTP->getDepth(), NTTP->getIndex()); TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND); return std::make_pair(TTP->getDepth(), TTP->getIndex()); } bool Sema::CheckParameterPacksForExpansion( SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions) { ShouldExpand = true; RetainExpansion = false; std::pair<IdentifierInfo *, SourceLocation> FirstPack; bool HaveFirstPack = false; if (getLangOpts().HLSL) { Diag(EllipsisLoc, diag::err_hlsl_variadic_templates); return true; } for (ArrayRef<UnexpandedParameterPack>::iterator i = Unexpanded.begin(), end = Unexpanded.end(); i != end; ++i) { // Compute the depth and index for this parameter pack. unsigned Depth = 0, Index = 0; IdentifierInfo *Name; bool IsFunctionParameterPack = false; if (const TemplateTypeParmType *TTP = i->first.dyn_cast<const TemplateTypeParmType *>()) { Depth = TTP->getDepth(); Index = TTP->getIndex(); Name = TTP->getIdentifier(); } else { NamedDecl *ND = i->first.get<NamedDecl *>(); if (isa<ParmVarDecl>(ND)) IsFunctionParameterPack = true; else std::tie(Depth, Index) = getDepthAndIndex(ND); Name = ND->getIdentifier(); } // Determine the size of this argument pack. unsigned NewPackSize; if (IsFunctionParameterPack) { // Figure out whether we're instantiating to an argument pack or not. typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack; llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation = CurrentInstantiationScope->findInstantiationOf( i->first.get<NamedDecl *>()); if (Instantiation->is<DeclArgumentPack *>()) { // We could expand this function parameter pack. NewPackSize = Instantiation->get<DeclArgumentPack *>()->size(); } else { // We can't expand this function parameter pack, so we can't expand // the pack expansion. ShouldExpand = false; continue; } } else { // If we don't have a template argument at this depth/index, then we // cannot expand the pack expansion. Make a note of this, but we still // want to check any parameter packs we *do* have arguments for. if (Depth >= TemplateArgs.getNumLevels() || !TemplateArgs.hasTemplateArgument(Depth, Index)) { ShouldExpand = false; continue; } // Determine the size of the argument pack. NewPackSize = TemplateArgs(Depth, Index).pack_size(); } // C++0x [temp.arg.explicit]p9: // Template argument deduction can extend the sequence of template // arguments corresponding to a template parameter pack, even when the // sequence contains explicitly specified template arguments. 
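//
// For example (illustrative): given 'template<typename ...Ts> void f(Ts...);'
// the call 'f<int>(0, 1)' starts from the explicitly specified <int>, and
// deduction extends Ts to <int, int>; the partially-substituted pack below
// retains the expansion for exactly that case.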
if (!IsFunctionParameterPack) { if (NamedDecl *PartialPack = CurrentInstantiationScope->getPartiallySubstitutedPack()){ unsigned PartialDepth, PartialIndex; std::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack); if (PartialDepth == Depth && PartialIndex == Index) RetainExpansion = true; } } if (!NumExpansions) { // This is the first pack we've seen for which we have an argument. // Record it. NumExpansions = NewPackSize; FirstPack.first = Name; FirstPack.second = i->second; HaveFirstPack = true; continue; } if (NewPackSize != *NumExpansions) { // C++0x [temp.variadic]p5: // All of the parameter packs expanded by a pack expansion shall have // the same number of arguments specified. if (HaveFirstPack) Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict) << FirstPack.first << Name << *NumExpansions << NewPackSize << SourceRange(FirstPack.second) << SourceRange(i->second); else Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel) << Name << *NumExpansions << NewPackSize << SourceRange(i->second); return true; } } return false; } Optional<unsigned> Sema::getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs) { QualType Pattern = cast<PackExpansionType>(T)->getPattern(); SmallVector<UnexpandedParameterPack, 2> Unexpanded; CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern); Optional<unsigned> Result; for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) { // Compute the depth and index for this parameter pack. unsigned Depth; unsigned Index; if (const TemplateTypeParmType *TTP = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) { Depth = TTP->getDepth(); Index = TTP->getIndex(); } else { NamedDecl *ND = Unexpanded[I].first.get<NamedDecl *>(); if (isa<ParmVarDecl>(ND)) { // Function parameter pack. typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack; llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation = CurrentInstantiationScope->findInstantiationOf( Unexpanded[I].first.get<NamedDecl *>()); if (Instantiation->is<Decl*>()) // The pattern refers to an unexpanded pack. We're not ready to expand // this pack yet. return None; unsigned Size = Instantiation->get<DeclArgumentPack *>()->size(); assert((!Result || *Result == Size) && "inconsistent pack sizes"); Result = Size; continue; } std::tie(Depth, Index) = getDepthAndIndex(ND); } if (Depth >= TemplateArgs.getNumLevels() || !TemplateArgs.hasTemplateArgument(Depth, Index)) // The pattern refers to an unknown template argument. We're not ready to // expand this pack yet. return None; // Determine the size of the argument pack.
unsigned Size = TemplateArgs(Depth, Index).pack_size(); assert((!Result || *Result == Size) && "inconsistent pack sizes"); Result = Size; } return Result; } bool Sema::containsUnexpandedParameterPacks(Declarator &D) { const DeclSpec &DS = D.getDeclSpec(); switch (DS.getTypeSpecType()) { case TST_typename: case TST_typeofType: case TST_underlyingType: case TST_atomic: { QualType T = DS.getRepAsType().get(); if (!T.isNull() && T->containsUnexpandedParameterPack()) return true; break; } case TST_typeofExpr: case TST_decltype: if (DS.getRepAsExpr() && DS.getRepAsExpr()->containsUnexpandedParameterPack()) return true; break; case TST_unspecified: case TST_void: case TST_char: case TST_wchar: case TST_char16: case TST_char32: case TST_int: case TST_int128: case TST_half: case TST_halffloat: // HLSL Change case TST_float: case TST_double: case TST_bool: case TST_decimal32: case TST_decimal64: case TST_decimal128: case TST_enum: case TST_union: case TST_struct: case TST_interface: case TST_class: case TST_auto: case TST_decltype_auto: case TST_unknown_anytype: case TST_error: // HLSL Change Start case TST_min16float: case TST_min16int: case TST_min16uint: case TST_min10float: case TST_min12int: case TST_int8_4packed: case TST_uint8_4packed: // HLSL Change End break; } for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) { const DeclaratorChunk &Chunk = D.getTypeObject(I); switch (Chunk.Kind) { case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::Paren: case DeclaratorChunk::BlockPointer: // These declarator chunks cannot contain any parameter packs. break; case DeclaratorChunk::Array: if (Chunk.Arr.NumElts && Chunk.Arr.NumElts->containsUnexpandedParameterPack()) return true; break; case DeclaratorChunk::Function: for (unsigned i = 0, e = Chunk.Fun.NumParams; i != e; ++i) { ParmVarDecl *Param = cast<ParmVarDecl>(Chunk.Fun.Params[i].Param); QualType ParamTy = Param->getType(); assert(!ParamTy.isNull() && "Couldn't parse type?"); if (ParamTy->containsUnexpandedParameterPack()) return true; } if (Chunk.Fun.getExceptionSpecType() == EST_Dynamic) { for (unsigned i = 0; i != Chunk.Fun.NumExceptions; ++i) { if (Chunk.Fun.Exceptions[i] .Ty.get() ->containsUnexpandedParameterPack()) return true; } } else if (Chunk.Fun.getExceptionSpecType() == EST_ComputedNoexcept && Chunk.Fun.NoexceptExpr->containsUnexpandedParameterPack()) return true; if (Chunk.Fun.hasTrailingReturnType()) { QualType T = Chunk.Fun.getTrailingReturnType().get(); if (!T.isNull() && T->containsUnexpandedParameterPack()) return true; } break; case DeclaratorChunk::MemberPointer: if (Chunk.Mem.Scope().getScopeRep() && Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack()) return true; break; } } return false; } namespace { // Callback to only accept typo corrections that refer to parameter packs. class ParameterPackValidatorCCC : public CorrectionCandidateCallback { public: bool ValidateCandidate(const TypoCorrection &candidate) override { NamedDecl *ND = candidate.getCorrectionDecl(); return ND && ND->isParameterPack(); } }; } /// \brief Called when an expression computing the size of a parameter pack /// is parsed. /// /// \code /// template<typename ...Types> struct count { /// static const unsigned value = sizeof...(Types); /// }; /// \endcode /// // /// \param OpLoc The location of the "sizeof" keyword. /// \param Name The name of the parameter pack whose size will be determined. /// \param NameLoc The source location of the name of the parameter pack. 
/// \param RParenLoc The location of the closing parentheses. ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc) { // C++0x [expr.sizeof]p5: // The identifier in a sizeof... expression shall name a parameter pack. LookupResult R(*this, &Name, NameLoc, LookupOrdinaryName); LookupName(R, S); NamedDecl *ParameterPack = nullptr; switch (R.getResultKind()) { case LookupResult::Found: ParameterPack = R.getFoundDecl(); break; case LookupResult::NotFound: case LookupResult::NotFoundInCurrentInstantiation: if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, nullptr, llvm::make_unique<ParameterPackValidatorCCC>(), CTK_ErrorRecovery)) { diagnoseTypo(Corrected, PDiag(diag::err_sizeof_pack_no_pack_name_suggest) << &Name, PDiag(diag::note_parameter_pack_here)); ParameterPack = Corrected.getCorrectionDecl(); } break; case LookupResult::FoundOverloaded: case LookupResult::FoundUnresolvedValue: break; case LookupResult::Ambiguous: DiagnoseAmbiguousLookup(R); return ExprError(); } if (!ParameterPack || !ParameterPack->isParameterPack()) { Diag(NameLoc, diag::err_sizeof_pack_no_pack_name) << &Name; return ExprError(); } MarkAnyDeclReferenced(OpLoc, ParameterPack, true); return new (Context) SizeOfPackExpr(Context.getSizeType(), OpLoc, ParameterPack, NameLoc, RParenLoc); } TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const { const TemplateArgument &Argument = OrigLoc.getArgument(); assert(Argument.isPackExpansion()); switch (Argument.getKind()) { case TemplateArgument::Type: { // FIXME: We shouldn't ever have to worry about missing // type-source info! TypeSourceInfo *ExpansionTSInfo = OrigLoc.getTypeSourceInfo(); if (!ExpansionTSInfo) ExpansionTSInfo = Context.getTrivialTypeSourceInfo(Argument.getAsType(), Ellipsis); PackExpansionTypeLoc Expansion = ExpansionTSInfo->getTypeLoc().castAs<PackExpansionTypeLoc>(); Ellipsis = Expansion.getEllipsisLoc(); TypeLoc Pattern = Expansion.getPatternLoc(); NumExpansions = Expansion.getTypePtr()->getNumExpansions(); // We need to copy the TypeLoc because TemplateArgumentLocs store a // TypeSourceInfo. // FIXME: Find some way to avoid the copy? 
TypeLocBuilder TLB; TLB.pushFullCopy(Pattern); TypeSourceInfo *PatternTSInfo = TLB.getTypeSourceInfo(Context, Pattern.getType()); return TemplateArgumentLoc(TemplateArgument(Pattern.getType()), PatternTSInfo); } case TemplateArgument::Expression: { PackExpansionExpr *Expansion = cast<PackExpansionExpr>(Argument.getAsExpr()); Expr *Pattern = Expansion->getPattern(); Ellipsis = Expansion->getEllipsisLoc(); NumExpansions = Expansion->getNumExpansions(); return TemplateArgumentLoc(Pattern, Pattern); } case TemplateArgument::TemplateExpansion: Ellipsis = OrigLoc.getTemplateEllipsisLoc(); NumExpansions = Argument.getNumTemplateExpansions(); return TemplateArgumentLoc(Argument.getPackExpansionPattern(), OrigLoc.getTemplateQualifierLoc(), OrigLoc.getTemplateNameLoc()); case TemplateArgument::Declaration: case TemplateArgument::NullPtr: case TemplateArgument::Template: case TemplateArgument::Integral: case TemplateArgument::Pack: case TemplateArgument::Null: return TemplateArgumentLoc(); } llvm_unreachable("Invalid TemplateArgument Kind!"); } static void CheckFoldOperand(Sema &S, Expr *E) { if (!E) return; E = E->IgnoreImpCasts(); if (isa<BinaryOperator>(E) || isa<AbstractConditionalOperator>(E)) { S.Diag(E->getExprLoc(), diag::err_fold_expression_bad_operand) << E->getSourceRange() << FixItHint::CreateInsertion(E->getLocStart(), "(") << FixItHint::CreateInsertion(E->getLocEnd(), ")"); } } ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc) { // LHS and RHS must be cast-expressions. We allow an arbitrary expression // in the parser and reduce down to just cast-expressions here. CheckFoldOperand(*this, LHS); CheckFoldOperand(*this, RHS); // [expr.prim.fold]p3: // In a binary fold, op1 and op2 shall be the same fold-operator, and // either e1 shall contain an unexpanded parameter pack or e2 shall contain // an unexpanded parameter pack, but not both. if (LHS && RHS && LHS->containsUnexpandedParameterPack() == RHS->containsUnexpandedParameterPack()) { return Diag(EllipsisLoc, LHS->containsUnexpandedParameterPack() ? diag::err_fold_expression_packs_both_sides : diag::err_pack_expansion_without_parameter_packs) << LHS->getSourceRange() << RHS->getSourceRange(); } // [expr.prim.fold]p2: // In a unary fold, the cast-expression shall contain an unexpanded // parameter pack. if (!LHS || !RHS) { Expr *Pack = LHS ? LHS : RHS; assert(Pack && "fold expression with neither LHS nor RHS"); if (!Pack->containsUnexpandedParameterPack()) return Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs) << Pack->getSourceRange(); } BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Operator); return BuildCXXFoldExpr(LParenLoc, LHS, Opc, EllipsisLoc, RHS, RParenLoc); } ExprResult Sema::BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc) { return new (Context) CXXFoldExpr(Context.DependentTy, LParenLoc, LHS, Operator, EllipsisLoc, RHS, RParenLoc); } ExprResult Sema::BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator) { // [temp.variadic]p9: // If N is zero for a unary fold-expression, the value of the expression is // * -> 1 // + -> int() // & -> -1 // | -> int() // && -> true // || -> false // , -> void() // if the operator is not listed [above], the instantiation is ill-formed. 
// // Note that we need to use something like int() here, not merely 0, to // prevent the result from being a null pointer constant. QualType ScalarType; switch (Operator) { case BO_Add: ScalarType = Context.IntTy; break; case BO_Mul: return ActOnIntegerConstant(EllipsisLoc, 1); case BO_Or: ScalarType = Context.IntTy; break; case BO_And: return CreateBuiltinUnaryOp(EllipsisLoc, UO_Minus, ActOnIntegerConstant(EllipsisLoc, 1).get()); case BO_LOr: return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_false); case BO_LAnd: return ActOnCXXBoolLiteral(EllipsisLoc, tok::kw_true); case BO_Comma: ScalarType = Context.VoidTy; break; default: return Diag(EllipsisLoc, diag::err_fold_expression_empty) << BinaryOperator::getOpcodeStr(Operator); } return new (Context) CXXScalarValueInitExpr( ScalarType, Context.getTrivialTypeSourceInfo(ScalarType, EllipsisLoc), EllipsisLoc); }
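// Usage note (illustrative, not part of the original source): with the
// empty-pack values above, 'template<typename ...Ts> bool all(Ts ...ts)
// { return (... && ts); }' yields 'true' for 'all()', an empty '(ts + ...)'
// yields 'int()', and an operator without a listed value, e.g. '(ts - ...)',
// is diagnosed with err_fold_expression_empty.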
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaExpr.cpp
//===--- SemaExpr.cpp - Semantic Analysis for Expressions -----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for expressions. // //===----------------------------------------------------------------------===// #include "clang/AST/OperationKinds.h" #include "clang/Sema/SemaInternal.h" #include "TreeTransform.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/LiteralSupport.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/DelayedDiagnostic.h" #include "clang/Sema/Designator.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaFixItUtils.h" #include "clang/Sema/Template.h" #include "llvm/Support/ConvertUTF.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change using namespace clang; using namespace sema; /// \brief Determine whether the use of this declaration is valid, without /// emitting diagnostics. bool Sema::CanUseDecl(NamedDecl *D) { // See if this is an auto-typed variable whose initializer we are parsing. if (ParsingInitForAutoVars.count(D)) return false; // See if this is a deleted function. if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { if (FD->isDeleted()) return false; // If the function has a deduced return type, and we can't deduce it, // then we can't use it either. if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() && DeduceReturnType(FD, SourceLocation(), /*Diagnose*/ false)) return false; } // See if this function is unavailable. if (D->getAvailability() == AR_Unavailable && cast<Decl>(CurContext)->getAvailability() != AR_Unavailable) return false; return true; } static void DiagnoseUnusedOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc) { // Warn if this is used but marked unused. 
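// (Unless the use itself appears inside a declaration that is also marked
// 'unused'; the lexical-context check below covers that case.)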
if (D->hasAttr<UnusedAttr>()) { const Decl *DC = cast_or_null<Decl>(S.getCurObjCLexicalContext()); if (DC && !DC->hasAttr<UnusedAttr>()) S.Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName(); } } static bool HasRedeclarationWithoutAvailabilityInCategory(const Decl *D) { const auto *OMD = dyn_cast<ObjCMethodDecl>(D); if (!OMD) return false; const ObjCInterfaceDecl *OID = OMD->getClassInterface(); if (!OID) return false; for (const ObjCCategoryDecl *Cat : OID->visible_categories()) if (ObjCMethodDecl *CatMeth = Cat->getMethod(OMD->getSelector(), OMD->isInstanceMethod())) if (!CatMeth->hasAttr<AvailabilityAttr>()) return true; return false; } static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess) { // See if this declaration is unavailable or deprecated. std::string Message; AvailabilityResult Result = D->getAvailability(&Message); // For typedefs, if the typedef declaration appears available look // to the underlying type to see if it is more restrictive. while (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) { if (Result == AR_Available) { if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) { D = TT->getDecl(); Result = D->getAvailability(&Message); continue; } } break; } // Forward class declarations get their attributes from their definition. if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) { if (IDecl->getDefinition()) { D = IDecl->getDefinition(); Result = D->getAvailability(&Message); } } if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) if (Result == AR_Available) { const DeclContext *DC = ECD->getDeclContext(); if (const EnumDecl *TheEnumDecl = dyn_cast<EnumDecl>(DC)) Result = TheEnumDecl->getAvailability(&Message); } const ObjCPropertyDecl *ObjCPDecl = nullptr; if (Result == AR_Deprecated || Result == AR_Unavailable || Result == AR_NotYetIntroduced) { if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) { AvailabilityResult PDeclResult = PD->getAvailability(nullptr); if (PDeclResult == Result) ObjCPDecl = PD; } } } switch (Result) { case AR_Available: break; case AR_Deprecated: if (S.getCurContextAvailability() != AR_Deprecated) S.EmitAvailabilityWarning(Sema::AD_Deprecation, D, Message, Loc, UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess); break; case AR_NotYetIntroduced: { // Don't do this for enums, they can't be redeclared. if (isa<EnumConstantDecl>(D) || isa<EnumDecl>(D)) break; bool Warn = !D->getAttr<AvailabilityAttr>()->isInherited(); // Objective-C method declarations in categories are not modelled as // redeclarations, so manually look for a redeclaration in a category // if necessary. if (Warn && HasRedeclarationWithoutAvailabilityInCategory(D)) Warn = false; // In general, D will point to the most recent redeclaration. However, // for `@class A;` decls, this isn't true -- manually go through the // redecl chain in that case.
if (Warn && isa<ObjCInterfaceDecl>(D)) for (Decl *Redecl = D->getMostRecentDecl(); Redecl && Warn; Redecl = Redecl->getPreviousDecl()) if (!Redecl->hasAttr<AvailabilityAttr>() || Redecl->getAttr<AvailabilityAttr>()->isInherited()) Warn = false; if (Warn) S.EmitAvailabilityWarning(Sema::AD_Partial, D, Message, Loc, UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess); break; } case AR_Unavailable: if (S.getCurContextAvailability() != AR_Unavailable) S.EmitAvailabilityWarning(Sema::AD_Unavailable, D, Message, Loc, UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess); break; } return Result; } /// \brief Emit a note explaining that this function is deleted. void Sema::NoteDeletedFunction(FunctionDecl *Decl) { assert(Decl->isDeleted()); CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Decl); if (Method && Method->isDeleted() && Method->isDefaulted()) { // If the method was explicitly defaulted, point at that declaration. if (!Method->isImplicit()) Diag(Decl->getLocation(), diag::note_implicitly_deleted); // Try to diagnose why this special member function was implicitly // deleted. This might fail, if that reason no longer applies. CXXSpecialMember CSM = getSpecialMember(Method); if (CSM != CXXInvalid) ShouldDeleteSpecialMember(Method, CSM, /*Diagnose=*/true); return; } if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Decl)) { if (CXXConstructorDecl *BaseCD = const_cast<CXXConstructorDecl*>(CD->getInheritedConstructor())) { Diag(Decl->getLocation(), diag::note_inherited_deleted_here); if (BaseCD->isDeleted()) { NoteDeletedFunction(BaseCD); } else { // FIXME: An explanation of why exactly it can't be inherited // would be nice. Diag(BaseCD->getLocation(), diag::note_cannot_inherit); } return; } } Diag(Decl->getLocation(), diag::note_availability_specified_here) << Decl << true; } /// \brief Determine whether a FunctionDecl was ever declared with an /// explicit storage class. static bool hasAnyExplicitStorageClass(const FunctionDecl *D) { for (auto I : D->redecls()) { if (I->getStorageClass() != SC_None) return true; } return false; } /// \brief Check whether we're in an extern inline function and referring to a /// variable or function with internal linkage (C11 6.7.4p3). /// /// This is only a warning because we used to silently accept this code, but /// in many cases it will not behave correctly. This is not enabled in C++ mode /// because the restriction language is a bit weaker (C++11 [basic.def.odr]p6) /// and so while there may still be user mistakes, most of the time we can't /// prove that there are errors. static void diagnoseUseOfInternalDeclInInlineFunction(Sema &S, const NamedDecl *D, SourceLocation Loc) { // This is disabled under C++; there are too many ways for this to fire in // contexts where the warning is a false positive, or where it is technically // correct but benign. if (S.getLangOpts().CPlusPlus) return; // Check if this is an inlined function or method. FunctionDecl *Current = S.getCurFunctionDecl(); if (!Current) return; if (!Current->isInlined()) return; if (!Current->isExternallyVisible()) return; // Check if the decl has internal linkage. if (D->getFormalLinkage() != InternalLinkage) return; // Downgrade from ExtWarn to Extension if // (1) the supposedly external inline function is in the main file, // and probably won't be included anywhere else. // (2) the thing we're referencing is a pure function. // (3) the thing we're referencing is another inline function. 
// This last can give us false negatives, but it's better than warning on // wrappers for simple C library functions. const FunctionDecl *UsedFn = dyn_cast<FunctionDecl>(D); bool DowngradeWarning = S.getSourceManager().isInMainFile(Loc); if (!DowngradeWarning && UsedFn) DowngradeWarning = UsedFn->isInlined() || UsedFn->hasAttr<ConstAttr>(); S.Diag(Loc, DowngradeWarning ? diag::ext_internal_in_extern_inline_quiet : diag::ext_internal_in_extern_inline) << /*IsVar=*/!UsedFn << D; S.MaybeSuggestAddingStaticToDecl(Current); S.Diag(D->getCanonicalDecl()->getLocation(), diag::note_entity_declared_at) << D; } void Sema::MaybeSuggestAddingStaticToDecl(const FunctionDecl *Cur) { const FunctionDecl *First = Cur->getFirstDecl(); // Suggest "static" on the function, if possible. if (!hasAnyExplicitStorageClass(First)) { SourceLocation DeclBegin = First->getSourceRange().getBegin(); Diag(DeclBegin, diag::note_convert_inline_to_static) << Cur << FixItHint::CreateInsertion(DeclBegin, "static "); } } /// \brief Determine whether the use of this declaration is valid, and /// emit any corresponding diagnostics. /// /// This routine diagnoses various problems with referencing /// declarations that can occur when using a declaration. For example, /// it might warn if a deprecated or unavailable declaration is being /// used, or produce an error (and return true) if a C++0x deleted /// function is being used. /// /// \returns true if there was an error (this declaration cannot be /// referenced), false otherwise. /// bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess) { if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) { // If there were any diagnostics suppressed by template argument deduction, // emit them now. SuppressedDiagnosticsMap::iterator Pos = SuppressedDiagnostics.find(D->getCanonicalDecl()); if (Pos != SuppressedDiagnostics.end()) { SmallVectorImpl<PartialDiagnosticAt> &Suppressed = Pos->second; for (unsigned I = 0, N = Suppressed.size(); I != N; ++I) Diag(Suppressed[I].first, Suppressed[I].second); // Clear out the list of suppressed diagnostics, so that we don't emit // them again for this specialization. However, we don't obsolete this // entry from the table, because we want to avoid ever emitting these // diagnostics again. Suppressed.clear(); } // C++ [basic.start.main]p3: // The function 'main' shall not be used within a program. if (cast<FunctionDecl>(D)->isMain()) Diag(Loc, diag::ext_main_used); } // See if this is an auto-typed variable whose initializer we are parsing. if (ParsingInitForAutoVars.count(D)) { Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer) << D->getDeclName(); return true; } // See if this is a deleted function. if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { if (FD->isDeleted()) { Diag(Loc, diag::err_deleted_function_use); NoteDeletedFunction(FD); return true; } // If the function has a deduced return type, and we can't deduce it, // then we can't use it either. if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() && DeduceReturnType(FD, Loc)) return true; } DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass, ObjCPropertyAccess); DiagnoseUnusedOfDecl(*this, D, Loc); diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc); return false; } /// \brief Retrieve the message suffix that should be added to a /// diagnostic complaining about the given function being deleted or /// unavailable. 
std::string Sema::getDeletedOrUnavailableSuffix(const FunctionDecl *FD) { std::string Message; if (FD->getAvailability(&Message)) return ": " + Message; return std::string(); } /// DiagnoseSentinelCalls - This routine checks whether a call or /// message-send is to a declaration with the sentinel attribute, and /// if so, it checks that the requirements of the sentinel are /// satisfied. void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args) { const SentinelAttr *attr = D->getAttr<SentinelAttr>(); if (!attr) return; // The number of formal parameters of the declaration. unsigned numFormalParams; // The kind of declaration. This is also an index into a %select in // the diagnostic. enum CalleeType { CT_Function, CT_Method, CT_Block } calleeType; if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { numFormalParams = MD->param_size(); calleeType = CT_Method; } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { numFormalParams = FD->param_size(); calleeType = CT_Function; } else if (isa<VarDecl>(D)) { QualType type = cast<ValueDecl>(D)->getType(); const FunctionType *fn = nullptr; if (const PointerType *ptr = type->getAs<PointerType>()) { fn = ptr->getPointeeType()->getAs<FunctionType>(); if (!fn) return; calleeType = CT_Function; } else if (const BlockPointerType *ptr = type->getAs<BlockPointerType>()) { fn = ptr->getPointeeType()->castAs<FunctionType>(); calleeType = CT_Block; } else { return; } if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fn)) { numFormalParams = proto->getNumParams(); } else { numFormalParams = 0; } } else { return; } // "nullPos" is the number of formal parameters at the end which // effectively count as part of the variadic arguments. This is // useful if you would prefer to not have *any* formal parameters, // but the language forces you to have at least one. unsigned nullPos = attr->getNullPos(); assert((nullPos == 0 || nullPos == 1) && "invalid null position on sentinel"); numFormalParams = (nullPos > numFormalParams ? 0 : numFormalParams - nullPos); // The number of arguments which should follow the sentinel. unsigned numArgsAfterSentinel = attr->getSentinel(); // If there aren't enough arguments for all the formal parameters, // the sentinel, and the args after the sentinel, complain. if (Args.size() < numFormalParams + numArgsAfterSentinel + 1) { Diag(Loc, diag::warn_not_enough_argument) << D->getDeclName(); Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType); return; } // Otherwise, find the sentinel expression. Expr *sentinelExpr = Args[Args.size() - numArgsAfterSentinel - 1]; if (!sentinelExpr) return; if (sentinelExpr->isValueDependent()) return; if (Context.isSentinelNullExpr(sentinelExpr)) return; // Pick a reasonable string to insert. Optimistically use 'nil', 'nullptr', // or 'NULL' if those are actually defined in the context. Only use // 'nil' for ObjC methods, where it's much more likely that the // variadic arguments form a list of object pointers. 
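  // Illustrative example (not from the original source): for
  //   void log_all(const char *first, ...) __attribute__((sentinel));
  //   log_all("a", "b");   // warn_missing_sentinel
  // the fix-it below suggests appending ", NULL" (or ", nil" / ", nullptr").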
SourceLocation MissingNilLoc = PP.getLocForEndOfToken(sentinelExpr->getLocEnd()); std::string NullValue; if (calleeType == CT_Method && PP.isMacroDefined("nil")) NullValue = "nil"; else if (getLangOpts().CPlusPlus11) NullValue = "nullptr"; else if (PP.isMacroDefined("NULL")) NullValue = "NULL"; else NullValue = "(void*) 0"; if (MissingNilLoc.isInvalid()) Diag(Loc, diag::warn_missing_sentinel) << int(calleeType); else Diag(MissingNilLoc, diag::warn_missing_sentinel) << int(calleeType) << FixItHint::CreateInsertion(MissingNilLoc, ", " + NullValue); Diag(D->getLocation(), diag::note_sentinel_here) << int(calleeType); } SourceRange Sema::getExprRange(Expr *E) const { return E ? E->getSourceRange() : SourceRange(); } //===----------------------------------------------------------------------===// // Standard Promotions and Conversions //===----------------------------------------------------------------------===// /// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4). ExprResult Sema::DefaultFunctionArrayConversion(Expr *E) { // Handle any placeholder expressions which made it here. if (E->getType()->isPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return ExprError(); E = result.get(); } QualType Ty = E->getType(); assert(!Ty.isNull() && "DefaultFunctionArrayConversion - missing type"); if (Ty->isFunctionType()) { // If we are here, we are not calling a function but taking // its address (which is not allowed in OpenCL v1.0 s6.8.a.3). if (getLangOpts().OpenCL) { Diag(E->getExprLoc(), diag::err_opencl_taking_function_address); return ExprError(); } E = ImpCastExprToType(E, Context.getPointerType(Ty), CK_FunctionToPointerDecay).get(); } else if (Ty->isArrayType() && !getLangOpts().HLSL) { // HLSL Change - HLSL does not have pointers; do not decay arrays // In C90 mode, arrays only promote to pointers if the array expression is // an lvalue. The relevant legalese is C90 6.2.2.1p3: "an lvalue that has // type 'array of type' is converted to an expression that has type 'pointer // to type'...". In C99 this was changed to: C99 6.3.2.1p3: "an expression // that has type 'array of type' ...". The relevant change is "an lvalue" // (C90) to "an expression" (C99). // // C++ 4.2p1: // An lvalue or rvalue of type "array of N T" or "array of unknown bound of // T" can be converted to an rvalue of type "pointer to T". // if (getLangOpts().C99 || getLangOpts().CPlusPlus || E->isLValue()) E = ImpCastExprToType(E, Context.getArrayDecayedType(Ty), CK_ArrayToPointerDecay).get(); } return E; } static void CheckForNullPointerDereference(Sema &S, Expr *E) { // Check to see if we are dereferencing a null pointer. If so, // and if not volatile-qualified, this is undefined behavior that the // optimizer will delete, so warn about it. People sometimes try to use this // to get a deterministic trap and are surprised by clang's behavior. This // only handles the pattern "*null", which is a very syntactic check. 
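  // Illustrative example (not from the original source):
  //   *(int *)0 = 1;           // warn_indirection_through_null
  //   *(volatile int *)0 = 1;  // no warning: volatile-qualified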
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParenCasts())) if (UO->getOpcode() == UO_Deref && UO->getSubExpr()->IgnoreParenCasts()-> isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull) && !UO->getType().isVolatileQualified()) { S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO, S.PDiag(diag::warn_indirection_through_null) << UO->getSubExpr()->getSourceRange()); S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO, S.PDiag(diag::note_indirection_through_null)); } } static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE, SourceLocation AssignLoc, const Expr* RHS) { const ObjCIvarDecl *IV = OIRE->getDecl(); if (!IV) return; DeclarationName MemberName = IV->getDeclName(); IdentifierInfo *Member = MemberName.getAsIdentifierInfo(); if (!Member || !Member->isStr("isa")) return; const Expr *Base = OIRE->getBase(); QualType BaseType = Base->getType(); if (OIRE->isArrow()) BaseType = BaseType->getPointeeType(); if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>()) if (ObjCInterfaceDecl *IDecl = OTy->getInterface()) { ObjCInterfaceDecl *ClassDeclared = nullptr; ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared); if (!ClassDeclared->getSuperClass() && (*ClassDeclared->ivar_begin()) == IV) { if (RHS) { NamedDecl *ObjectSetClass = S.LookupSingleName(S.TUScope, &S.Context.Idents.get("object_setClass"), SourceLocation(), S.LookupOrdinaryName); if (ObjectSetClass) { SourceLocation RHSLocEnd = S.PP.getLocForEndOfToken(RHS->getLocEnd()); S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_assign) << FixItHint::CreateInsertion(OIRE->getLocStart(), "object_setClass(") << FixItHint::CreateReplacement(SourceRange(OIRE->getOpLoc(), AssignLoc), ",") << FixItHint::CreateInsertion(RHSLocEnd, ")"); } else S.Diag(OIRE->getLocation(), diag::warn_objc_isa_assign); } else { NamedDecl *ObjectGetClass = S.LookupSingleName(S.TUScope, &S.Context.Idents.get("object_getClass"), SourceLocation(), S.LookupOrdinaryName); if (ObjectGetClass) S.Diag(OIRE->getExprLoc(), diag::warn_objc_isa_use) << FixItHint::CreateInsertion(OIRE->getLocStart(), "object_getClass(") << FixItHint::CreateReplacement( SourceRange(OIRE->getOpLoc(), OIRE->getLocEnd()), ")"); else S.Diag(OIRE->getLocation(), diag::warn_objc_isa_use); } S.Diag(IV->getLocation(), diag::note_ivar_decl); } } } static bool IsExprAccessingMeshOutArray(Expr* BaseExpr) { switch (BaseExpr->getStmtClass()) { case Stmt::ArraySubscriptExprClass: { ArraySubscriptExpr* ase = cast<ArraySubscriptExpr>(BaseExpr); return IsExprAccessingMeshOutArray(ase->getBase()); } case Stmt::ImplicitCastExprClass: { ImplicitCastExpr* ice = cast<ImplicitCastExpr>(BaseExpr); return IsExprAccessingMeshOutArray(ice->getSubExpr()); } case Stmt::DeclRefExprClass: { DeclRefExpr* dre = cast<DeclRefExpr>(BaseExpr); ValueDecl* vd = dre->getDecl(); if (vd->getAttr<HLSLOutAttr>() && (vd->getAttr<HLSLIndicesAttr>() || vd->getAttr<HLSLVerticesAttr>() || vd->getAttr<HLSLPrimitivesAttr>())) { return true; } return false; } default: return false; } } ExprResult Sema::DefaultLvalueConversion(Expr *E) { // Handle any placeholder expressions which made it here. if (E->getType()->isPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return ExprError(); E = result.get(); } // C++ [conv.lval]p1: // A glvalue of a non-function, non-array type T can be // converted to a prvalue. 
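  // Illustrative example (not from the original source): given
  //   const int ci = 0; ... ci + 1 ...
  // the glvalue 'ci' of type 'const int' is converted below to a prvalue
  // of the cv-unqualified type 'int'.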
  if (!E->isGLValue()) return E;

  QualType T = E->getType();
  assert(!T.isNull() && "r-value conversion on typeless expression?");

  // We don't want to throw lvalue-to-rvalue casts on top of
  // expressions of certain types in C++.
  if (getLangOpts().CPlusPlus && !getLangOpts().HLSL && // HLSL Change - matrices and object types should turn into lvalues
      (E->getType() == Context.OverloadTy ||
       T->isDependentType() ||
       T->isRecordType()))
    return E;

  // The C standard is actually really unclear on this point, and
  // DR106 tells us what the result should be but not why. It's
  // generally best to say that void types just don't undergo
  // lvalue-to-rvalue at all. Note that expressions of unqualified
  // 'void' type are never l-values, but qualified void can be.
  if (T->isVoidType())
    return E;

  // OpenCL usually rejects direct accesses to values of 'half' type.
  if (getLangOpts().OpenCL && !getOpenCLOptions().cl_khr_fp16 &&
      T->isHalfType()) {
    Diag(E->getExprLoc(), diag::err_opencl_half_load_store) << 0 << T;
    return ExprError();
  }

  CheckForNullPointerDereference(*this, E);
  if (const ObjCIsaExpr *OISA = dyn_cast<ObjCIsaExpr>(E->IgnoreParenCasts())) {
    NamedDecl *ObjectGetClass = LookupSingleName(TUScope,
                                                 &Context.Idents.get("object_getClass"),
                                                 SourceLocation(),
                                                 LookupOrdinaryName);
    if (ObjectGetClass)
      Diag(E->getExprLoc(), diag::warn_objc_isa_use)
        << FixItHint::CreateInsertion(OISA->getLocStart(), "object_getClass(")
        << FixItHint::CreateReplacement(
               SourceRange(OISA->getOpLoc(), OISA->getIsaMemberLoc()), ")");
    else
      Diag(E->getExprLoc(), diag::warn_objc_isa_use);
  } else if (const ObjCIvarRefExpr *OIRE =
               dyn_cast<ObjCIvarRefExpr>(E->IgnoreParenCasts()))
    DiagnoseDirectIsaAccess(*this, OIRE, SourceLocation(), /* Expr*/ nullptr);

  // Check accesses to mesh shader output arrays.
  if (isa<ArraySubscriptExpr>(E) && IsExprAccessingMeshOutArray(E)) {
    Diag(E->getExprLoc(), diag::err_hlsl_load_from_mesh_out_arrays);
    return ExprError();
  }

  // HLSL Change Begin
  // For HLSL we should not strip qualifiers for array types.
  bool StripQualifiers = getLangOpts().HLSL ? !T->isArrayType() : true;
  // HLSL Change End

  // C++ [conv.lval]p1:
  //   [...] If T is a non-class type, the type of the prvalue is the
  //   cv-unqualified version of T. Otherwise, the type of the
  //   rvalue is T.
  //
  // C99 6.3.2.1p2:
  //   If the lvalue has qualified type, the value has the unqualified
  //   version of the type of the lvalue; otherwise, the value has the
  //   type of the lvalue.
  if (T.hasQualifiers() && StripQualifiers) // HLSL Change - don't unqualify
    T = T.getUnqualifiedType();

  UpdateMarkingForLValueToRValue(E);

  // Loading a __weak object implicitly retains the value, so we need a cleanup to
  // balance that.
  if (getLangOpts().ObjCAutoRefCount &&
      E->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
    ExprNeedsCleanups = true;

  ExprResult Res = ImplicitCastExpr::Create(Context, T, CK_LValueToRValue, E,
                                            nullptr, VK_RValue);

  // C11 6.3.2.1p2:
  //   ... if the lvalue has atomic type, the value has the non-atomic version
  //   of the type of the lvalue ...
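  // e.g. (illustrative, not from the original source): a load from
  // '_Atomic int a;' produces a value of plain 'int' via the
  // CK_AtomicToNonAtomic cast created below.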
  if (const AtomicType *Atomic = T->getAs<AtomicType>()) {
    T = Atomic->getValueType().getUnqualifiedType();
    Res = ImplicitCastExpr::Create(Context, T, CK_AtomicToNonAtomic, Res.get(),
                                   nullptr, VK_RValue);
  }

  return Res;
}

ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E) {
  ExprResult Res = DefaultFunctionArrayConversion(E);
  if (Res.isInvalid())
    return ExprError();
  Res = DefaultLvalueConversion(Res.get());
  if (Res.isInvalid())
    return ExprError();
  return Res;
}

/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult Sema::CallExprUnaryConversions(Expr *E) {
  QualType Ty = E->getType();
  ExprResult Res = E;
  // Only do implicit cast for a function type, but not for a pointer
  // to function type.
  if (Ty->isFunctionType()) {
    Res = ImpCastExprToType(E, Context.getPointerType(Ty),
                            CK_FunctionToPointerDecay).get();
    if (Res.isInvalid())
      return ExprError();
  }
  Res = DefaultLvalueConversion(Res.get());
  if (Res.isInvalid())
    return ExprError();
  return Res.get();
}

/// UsualUnaryConversions - Performs various conversions that are common to most
/// operators (C99 6.3). The conversions of array and function types are
/// sometimes suppressed. For example, the array->pointer conversion doesn't
/// apply if the array is an argument to the sizeof or address (&) operators.
/// In these instances, this routine should *not* be called.
ExprResult Sema::UsualUnaryConversions(Expr *E) {
  // First, convert to an r-value.
  ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
  if (Res.isInvalid())
    return ExprError();
  E = Res.get();

  QualType Ty = E->getType();
  assert(!Ty.isNull() && "UsualUnaryConversions - missing type");

  // Half FP has to be promoted to float unless it is natively supported.
  if (Ty->isHalfType() && !getLangOpts().NativeHalfType)
    return ImpCastExprToType(Res.get(), Context.FloatTy, CK_FloatingCast);

  // Try to perform integral promotions if the object has a theoretically
  // promotable type.
  if (Ty->isIntegralOrUnscopedEnumerationType()) {
    // C99 6.3.1.1p2:
    //
    //   The following may be used in an expression wherever an int or
    //   unsigned int may be used:
    //     - an object or expression with an integer type whose integer
    //       conversion rank is less than or equal to the rank of int
    //       and unsigned int.
    //     - A bit-field of type _Bool, int, signed int, or unsigned int.
    //
    //   If an int can represent all values of the original type, the
    //   value is converted to an int; otherwise, it is converted to an
    //   unsigned int. These are called the integer promotions. All
    //   other types are unchanged by the integer promotions.

    QualType PTy = Context.isPromotableBitField(E);
    if (!PTy.isNull()) {
      E = ImpCastExprToType(E, PTy, CK_IntegralCast).get();
      return E;
    }
    if (Ty->isPromotableIntegerType() && !getLangOpts().HLSL) { // HLSL Change: leave low-precision integrals as such for intermediate operations
      QualType PT = Context.getPromotedIntegerType(Ty);
      E = ImpCastExprToType(E, PT, CK_IntegralCast).get();
      return E;
    }
  }
  return E;
}

/// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
/// do not have a prototype. Arguments that have type float or __fp16
/// are promoted to double. All other argument types are converted by
/// UsualUnaryConversions().
ExprResult Sema::DefaultArgumentPromotion(Expr *E) { QualType Ty = E->getType(); assert(!Ty.isNull() && "DefaultArgumentPromotion - missing type"); ExprResult Res = UsualUnaryConversions(E); if (Res.isInvalid()) return ExprError(); E = Res.get(); // If this is a 'float' or '__fp16' (CVR qualified or typedef) promote to // double. const BuiltinType *BTy = Ty->getAs<BuiltinType>(); if (BTy && (BTy->getKind() == BuiltinType::Half || BTy->getKind() == BuiltinType::Float)) E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get(); // C++ performs lvalue-to-rvalue conversion as a default argument // promotion, even on class types, but note: // C++11 [conv.lval]p2: // When an lvalue-to-rvalue conversion occurs in an unevaluated // operand or a subexpression thereof the value contained in the // referenced object is not accessed. Otherwise, if the glvalue // has a class type, the conversion copy-initializes a temporary // of type T from the glvalue and the result of the conversion // is a prvalue for the temporary. // FIXME: add some way to gate this entire thing for correctness in // potentially potentially evaluated contexts. if (getLangOpts().CPlusPlus && E->isGLValue() && !isUnevaluatedContext()) { ExprResult Temp = PerformCopyInitialization( InitializedEntity::InitializeTemporary(E->getType()), E->getExprLoc(), E); if (Temp.isInvalid()) return ExprError(); E = Temp.get(); } return E; } /// Determine the degree of POD-ness for an expression. /// Incomplete types are considered POD, since this check can be performed /// when we're in an unevaluated context. Sema::VarArgKind Sema::isValidVarArgType(const QualType &Ty) { if (Ty->isIncompleteType()) { // C++11 [expr.call]p7: // After these conversions, if the argument does not have arithmetic, // enumeration, pointer, pointer to member, or class type, the program // is ill-formed. // // Since we've already performed array-to-pointer and function-to-pointer // decay, the only such type in C++ is cv void. This also handles // initializer lists as variadic arguments. if (Ty->isVoidType()) return VAK_Invalid; if (Ty->isObjCObjectType()) return VAK_Invalid; return VAK_Valid; } if (Ty.isCXX98PODType(Context)) return VAK_Valid; // C++11 [expr.call]p7: // Passing a potentially-evaluated argument of class type (Clause 9) // having a non-trivial copy constructor, a non-trivial move constructor, // or a non-trivial destructor, with no corresponding parameter, // is conditionally-supported with implementation-defined semantics. if (getLangOpts().CPlusPlus11 && !Ty->isDependentType()) if (CXXRecordDecl *Record = Ty->getAsCXXRecordDecl()) if (!Record->hasNonTrivialCopyConstructor() && !Record->hasNonTrivialMoveConstructor() && !Record->hasNonTrivialDestructor()) return VAK_ValidInCXX11; if (getLangOpts().ObjCAutoRefCount && Ty->isObjCLifetimeType()) return VAK_Valid; if (Ty->isObjCObjectType()) return VAK_Invalid; if (getLangOpts().MSVCCompat) return VAK_MSVCUndefined; // FIXME: In C++11, these cases are conditionally-supported, meaning we're // permitted to reject them. We should consider doing so. return VAK_Undefined; } void Sema::checkVariadicArgument(const Expr *E, VariadicCallType CT) { // Don't allow one to pass an Objective-C interface to a vararg. const QualType &Ty = E->getType(); VarArgKind VAK = isValidVarArgType(Ty); // Complain about passing non-POD types through varargs. 
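  // Illustrative example (not from the original source): in C++11,
  //   printf("%s", std::string("hi"));   // class type through '...'
  // classifies as VAK_Undefined below (non-trivial copy ctor/dtor) and is
  // diagnosed accordingly.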
switch (VAK) { case VAK_ValidInCXX11: DiagRuntimeBehavior( E->getLocStart(), nullptr, PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg) << Ty << CT); LLVM_FALLTHROUGH; // HLSL Change case VAK_Valid: if (Ty->isRecordType()) { // This is unlikely to be what the user intended. If the class has a // 'c_str' member function, the user probably meant to call that. DiagRuntimeBehavior(E->getLocStart(), nullptr, PDiag(diag::warn_pass_class_arg_to_vararg) << Ty << CT << hasCStrMethod(E) << ".c_str()"); } break; case VAK_Undefined: case VAK_MSVCUndefined: DiagRuntimeBehavior( E->getLocStart(), nullptr, PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg) << getLangOpts().CPlusPlus11 << Ty << CT); break; case VAK_Invalid: if (Ty->isObjCObjectType()) DiagRuntimeBehavior( E->getLocStart(), nullptr, PDiag(diag::err_cannot_pass_objc_interface_to_vararg) << Ty << CT); else Diag(E->getLocStart(), diag::err_cannot_pass_to_vararg) << isa<InitListExpr>(E) << Ty << CT; break; } } /// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but /// will create a trap if the resulting type is not a POD type. ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl) { if (const BuiltinType *PlaceholderTy = E->getType()->getAsPlaceholderType()) { // Strip the unbridged-cast placeholder expression off, if applicable. if (PlaceholderTy->getKind() == BuiltinType::ARCUnbridgedCast && (CT == VariadicMethod || (FDecl && FDecl->hasAttr<CFAuditedTransferAttr>()))) { E = stripARCUnbridgedCast(E); // Otherwise, do normal placeholder checking. } else { ExprResult ExprRes = CheckPlaceholderExpr(E); if (ExprRes.isInvalid()) return ExprError(); E = ExprRes.get(); } } ExprResult ExprRes = DefaultArgumentPromotion(E); if (ExprRes.isInvalid()) return ExprError(); E = ExprRes.get(); // Diagnostics regarding non-POD argument types are // emitted along with format string checking in Sema::CheckFunctionCall(). if (isValidVarArgType(E->getType()) == VAK_Undefined) { // Turn this into a trap. CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId Name; Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"), E->getLocStart()); ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc, Name, true, false); if (TrapFn.isInvalid()) return ExprError(); ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(), E->getLocStart(), None, E->getLocEnd()); if (Call.isInvalid()) return ExprError(); ExprResult Comma = ActOnBinOp(TUScope, E->getLocStart(), tok::comma, Call.get(), E); if (Comma.isInvalid()) return ExprError(); return Comma.get(); } if (!getLangOpts().CPlusPlus && RequireCompleteType(E->getExprLoc(), E->getType(), diag::err_call_incomplete_argument)) return ExprError(); return E; } /// \brief Converts an integer to complex float type. Helper function of /// UsualArithmeticConversions() /// /// \return false if the integer expression is an integer type and is /// successfully converted to the complex type. 
static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
                                                  ExprResult &ComplexExpr,
                                                  QualType IntTy,
                                                  QualType ComplexTy,
                                                  bool SkipCast) {
  if (IntTy->isComplexType() || IntTy->isRealFloatingType()) return true;
  if (SkipCast) return false;
  if (IntTy->isIntegerType()) {
    QualType fpTy = cast<ComplexType>(ComplexTy)->getElementType();
    IntExpr = S.ImpCastExprToType(IntExpr.get(), fpTy, CK_IntegralToFloating);
    IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy,
                                  CK_FloatingRealToComplex);
  } else {
    assert(IntTy->isComplexIntegerType());
    IntExpr = S.ImpCastExprToType(IntExpr.get(), ComplexTy,
                                  CK_IntegralComplexToFloatingComplex);
  }
  return false;
}

/// \brief Handle arithmetic conversion with complex types.  Helper function
/// of UsualArithmeticConversions()
static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
                                             ExprResult &RHS, QualType LHSType,
                                             QualType RHSType,
                                             bool IsCompAssign) {
  // If we have an integer operand, the result is the complex type.
  if (!handleIntegerToComplexFloatConversion(S, RHS, LHS, RHSType, LHSType,
                                             /*skipCast*/false))
    return LHSType;
  if (!handleIntegerToComplexFloatConversion(S, LHS, RHS, LHSType, RHSType,
                                             /*skipCast*/IsCompAssign))
    return RHSType;

  // This handles complex/complex, complex/float, or float/complex.
  // When both operands are complex, the shorter operand is converted to the
  // type of the longer, and that is the type of the result. This corresponds
  // to what is done when combining two real floating-point operands.
  // The fun begins when size promotion occurs across type domains.
  // From H&S 6.3.4: When one operand is complex and the other is a real
  // floating-point type, the less precise type is converted, within its
  // real or complex domain, to the precision of the other type. For example,
  // when combining a "long double" with a "double _Complex", the
  // "double _Complex" is promoted to "long double _Complex".

  // Compute the rank of the two types, regardless of whether they are complex.
  int Order = S.Context.getFloatingTypeOrder(LHSType, RHSType);

  auto *LHSComplexType = dyn_cast<ComplexType>(LHSType);
  auto *RHSComplexType = dyn_cast<ComplexType>(RHSType);
  QualType LHSElementType =
      LHSComplexType ? LHSComplexType->getElementType() : LHSType;
  QualType RHSElementType =
      RHSComplexType ? RHSComplexType->getElementType() : RHSType;

  QualType ResultType = S.Context.getComplexType(LHSElementType);
  if (Order < 0) {
    // Promote the precision of the LHS if not an assignment.
    ResultType = S.Context.getComplexType(RHSElementType);
    if (!IsCompAssign) {
      if (LHSComplexType)
        LHS = S.ImpCastExprToType(LHS.get(), ResultType,
                                  CK_FloatingComplexCast);
      else
        LHS = S.ImpCastExprToType(LHS.get(), RHSElementType, CK_FloatingCast);
    }
  } else if (Order > 0) {
    // Promote the precision of the RHS.
    if (RHSComplexType)
      RHS = S.ImpCastExprToType(RHS.get(), ResultType, CK_FloatingComplexCast);
    else
      RHS = S.ImpCastExprToType(RHS.get(), LHSElementType, CK_FloatingCast);
  }
  return ResultType;
}

/// \brief Handle arithmetic conversion from integer to float.  Helper
/// function of UsualArithmeticConversions()
static QualType handleIntToFloatConversion(Sema &S, ExprResult &FloatExpr,
                                           ExprResult &IntExpr,
                                           QualType FloatTy, QualType IntTy,
                                           bool ConvertFloat, bool ConvertInt) {
  if (IntTy->isIntegerType()) {
    if (ConvertInt)
      // Convert intExpr to the lhs floating point type.
      IntExpr = S.ImpCastExprToType(IntExpr.get(), FloatTy,
                                    CK_IntegralToFloating);
    return FloatTy;
  }

  // Convert both sides to the appropriate complex float.
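  // e.g. (illustrative, not from the original source): combining
  // '_Complex int' with 'double' converts the complex operand to
  // '_Complex double' and wraps the real operand into '_Complex double'.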
  assert(IntTy->isComplexIntegerType());
  QualType result = S.Context.getComplexType(FloatTy);

  // _Complex int -> _Complex float
  if (ConvertInt)
    IntExpr = S.ImpCastExprToType(IntExpr.get(), result,
                                  CK_IntegralComplexToFloatingComplex);

  // float -> _Complex float
  if (ConvertFloat)
    FloatExpr = S.ImpCastExprToType(FloatExpr.get(), result,
                                    CK_FloatingRealToComplex);

  return result;
}

/// \brief Handle arithmetic conversion with floating point types.  Helper
/// function of UsualArithmeticConversions()
static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
                                      ExprResult &RHS, QualType LHSType,
                                      QualType RHSType, bool IsCompAssign) {
  bool LHSFloat = LHSType->isRealFloatingType();
  bool RHSFloat = RHSType->isRealFloatingType();

  // If we have two real floating types, convert the smaller operand
  // to the bigger result.
  if (LHSFloat && RHSFloat) {
    int order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
    if (order > 0) {
      RHS = S.ImpCastExprToType(RHS.get(), LHSType, CK_FloatingCast);
      return LHSType;
    }

    assert(order < 0 && "illegal float comparison");
    if (!IsCompAssign)
      LHS = S.ImpCastExprToType(LHS.get(), RHSType, CK_FloatingCast);
    return RHSType;
  }

  if (LHSFloat) {
    // Half FP has to be promoted to float unless it is natively supported.
    if (LHSType->isHalfType() && !S.getLangOpts().NativeHalfType)
      LHSType = S.Context.FloatTy;

    return handleIntToFloatConversion(S, LHS, RHS, LHSType, RHSType,
                                      /*convertFloat=*/!IsCompAssign,
                                      /*convertInt=*/ true);
  }

  assert(RHSFloat);
  return handleIntToFloatConversion(S, RHS, LHS, RHSType, LHSType,
                                    /*convertInt=*/ true,
                                    /*convertFloat=*/!IsCompAssign);
}

typedef ExprResult PerformCastFn(Sema &S, Expr *operand, QualType toType);

namespace {
/// These helper callbacks are placed in an anonymous namespace to
/// permit their use as function template parameters.
ExprResult doIntegralCast(Sema &S, Expr *op, QualType toType) {
  return S.ImpCastExprToType(op, toType, CK_IntegralCast);
}

ExprResult doComplexIntegralCast(Sema &S, Expr *op, QualType toType) {
  return S.ImpCastExprToType(op, S.Context.getComplexType(toType),
                             CK_IntegralComplexCast);
}
}

/// \brief Handle integer arithmetic conversions.  Helper function of
/// UsualArithmeticConversions()
template <PerformCastFn doLHSCast, PerformCastFn doRHSCast>
static QualType handleIntegerConversion(Sema &S, ExprResult &LHS,
                                        ExprResult &RHS, QualType LHSType,
                                        QualType RHSType, bool IsCompAssign) {
  // The rules for this case are in C99 6.3.1.8
  int order = S.Context.getIntegerTypeOrder(LHSType, RHSType);
  bool LHSSigned = LHSType->hasSignedIntegerRepresentation();
  bool RHSSigned = RHSType->hasSignedIntegerRepresentation();
  if (LHSSigned == RHSSigned) {
    // Same signedness; use the higher-ranked type.
    if (order >= 0) {
      RHS = (*doRHSCast)(S, RHS.get(), LHSType);
      return LHSType;
    } else if (!IsCompAssign)
      LHS = (*doLHSCast)(S, LHS.get(), RHSType);
    return RHSType;
  } else if (order != (LHSSigned ? 1 : -1)) {
    // The unsigned type has greater than or equal rank to the
    // signed type, so use the unsigned type.
    if (RHSSigned) {
      RHS = (*doRHSCast)(S, RHS.get(), LHSType);
      return LHSType;
    } else if (!IsCompAssign)
      LHS = (*doLHSCast)(S, LHS.get(), RHSType);
    return RHSType;
  } else if (S.Context.getIntWidth(LHSType) !=
             S.Context.getIntWidth(RHSType)) {
    // The two types are different widths; if we are here, that
    // means the signed type is larger than the unsigned type, so
    // use the signed type.
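    // e.g. (illustrative, not from the original source): 'long long +
    // unsigned int' on a typical 64-bit target reaches this branch and
    // converts the unsigned operand to 'long long'.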
if (LHSSigned) { RHS = (*doRHSCast)(S, RHS.get(), LHSType); return LHSType; } else if (!IsCompAssign) LHS = (*doLHSCast)(S, LHS.get(), RHSType); return RHSType; } else { // The signed type is higher-ranked than the unsigned type, // but isn't actually any bigger (like unsigned int and long // on most 32-bit systems). Use the unsigned type corresponding // to the signed type. QualType result = S.Context.getCorrespondingUnsignedType(LHSSigned ? LHSType : RHSType); RHS = (*doRHSCast)(S, RHS.get(), result); if (!IsCompAssign) LHS = (*doLHSCast)(S, LHS.get(), result); return result; } } /// \brief Handle conversions with GCC complex int extension. Helper function /// of UsualArithmeticConversions() static QualType handleComplexIntConversion(Sema &S, ExprResult &LHS, ExprResult &RHS, QualType LHSType, QualType RHSType, bool IsCompAssign) { const ComplexType *LHSComplexInt = LHSType->getAsComplexIntegerType(); const ComplexType *RHSComplexInt = RHSType->getAsComplexIntegerType(); if (LHSComplexInt && RHSComplexInt) { QualType LHSEltType = LHSComplexInt->getElementType(); QualType RHSEltType = RHSComplexInt->getElementType(); QualType ScalarType = handleIntegerConversion<doComplexIntegralCast, doComplexIntegralCast> (S, LHS, RHS, LHSEltType, RHSEltType, IsCompAssign); return S.Context.getComplexType(ScalarType); } if (LHSComplexInt) { QualType LHSEltType = LHSComplexInt->getElementType(); QualType ScalarType = handleIntegerConversion<doComplexIntegralCast, doIntegralCast> (S, LHS, RHS, LHSEltType, RHSType, IsCompAssign); QualType ComplexType = S.Context.getComplexType(ScalarType); RHS = S.ImpCastExprToType(RHS.get(), ComplexType, CK_IntegralRealToComplex); return ComplexType; } assert(RHSComplexInt); QualType RHSEltType = RHSComplexInt->getElementType(); QualType ScalarType = handleIntegerConversion<doIntegralCast, doComplexIntegralCast> (S, LHS, RHS, LHSType, RHSEltType, IsCompAssign); QualType ComplexType = S.Context.getComplexType(ScalarType); if (!IsCompAssign) LHS = S.ImpCastExprToType(LHS.get(), ComplexType, CK_IntegralRealToComplex); return ComplexType; } /// UsualArithmeticConversions - Performs various conversions that are common to /// binary operators (C99 6.3.1.8). If both operands aren't arithmetic, this /// routine returns the first non-arithmetic type found. The client is /// responsible for emitting appropriate error diagnostics. QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign) { if (!IsCompAssign) { LHS = UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); // For conversion purposes, we ignore any qualifiers. // For example, "const float" and "float" are equivalent. QualType LHSType = Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType(); QualType RHSType = Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType(); // For conversion purposes, we ignore any atomic qualifier on the LHS. if (const AtomicType *AtomicLHS = LHSType->getAs<AtomicType>()) LHSType = AtomicLHS->getValueType(); // If both types are identical, no conversion is needed. if (LHSType == RHSType) return LHSType; // If either side is a non-arithmetic type (e.g. a pointer), we are done. // The caller can deal with this (e.g. pointer + int). if (!LHSType->isArithmeticType() || !RHSType->isArithmeticType()) return QualType(); // Apply unary and bitfield promotions to the LHS's type. 
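  // e.g. (illustrative, not from the original source): for 's += i' with
  // 'short s', the LHS type is promoted to 'int' here; note that no cast
  // is inserted on the LHS of a compound assignment.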
QualType LHSUnpromotedType = LHSType; if (LHSType->isPromotableIntegerType()) LHSType = Context.getPromotedIntegerType(LHSType); QualType LHSBitfieldPromoteTy = Context.isPromotableBitField(LHS.get()); if (!LHSBitfieldPromoteTy.isNull()) LHSType = LHSBitfieldPromoteTy; if (LHSType != LHSUnpromotedType && !IsCompAssign) LHS = ImpCastExprToType(LHS.get(), LHSType, CK_IntegralCast); // If both types are identical, no conversion is needed. if (LHSType == RHSType) return LHSType; // At this point, we have two different arithmetic types. // Handle complex types first (C99 6.3.1.8p1). if (LHSType->isComplexType() || RHSType->isComplexType()) return handleComplexFloatConversion(*this, LHS, RHS, LHSType, RHSType, IsCompAssign); // Now handle "real" floating types (i.e. float, double, long double). if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType()) return handleFloatConversion(*this, LHS, RHS, LHSType, RHSType, IsCompAssign); // Handle GCC complex int extension. if (LHSType->isComplexIntegerType() || RHSType->isComplexIntegerType()) return handleComplexIntConversion(*this, LHS, RHS, LHSType, RHSType, IsCompAssign); // Finally, we have two differing integer types. return handleIntegerConversion<doIntegralCast, doIntegralCast> (*this, LHS, RHS, LHSType, RHSType, IsCompAssign); } //===----------------------------------------------------------------------===// // Semantic Analysis for various Expression Types //===----------------------------------------------------------------------===// ExprResult Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs) { unsigned NumAssocs = ArgTypes.size(); assert(NumAssocs == ArgExprs.size()); TypeSourceInfo **Types = new TypeSourceInfo*[NumAssocs]; for (unsigned i = 0; i < NumAssocs; ++i) { if (ArgTypes[i]) (void) GetTypeFromParser(ArgTypes[i], &Types[i]); else Types[i] = nullptr; } ExprResult ER = CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc, ControllingExpr, llvm::makeArrayRef(Types, NumAssocs), ArgExprs); delete [] Types; return ER; } ExprResult Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs) { unsigned NumAssocs = Types.size(); assert(NumAssocs == Exprs.size()); if (ControllingExpr->getType()->isPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(ControllingExpr); if (result.isInvalid()) return ExprError(); ControllingExpr = result.get(); } // The controlling expression is an unevaluated operand, so side effects are // likely unintended. if (ActiveTemplateInstantiations.empty() && ControllingExpr->HasSideEffects(Context, false)) Diag(ControllingExpr->getExprLoc(), diag::warn_side_effects_unevaluated_context); bool TypeErrorFound = false, IsResultDependent = ControllingExpr->isTypeDependent(), ContainsUnexpandedParameterPack = ControllingExpr->containsUnexpandedParameterPack(); for (unsigned i = 0; i < NumAssocs; ++i) { if (Exprs[i]->containsUnexpandedParameterPack()) ContainsUnexpandedParameterPack = true; if (Types[i]) { if (Types[i]->getType()->containsUnexpandedParameterPack()) ContainsUnexpandedParameterPack = true; if (Types[i]->getType()->isDependentType()) { IsResultDependent = true; } else { // C11 6.5.1.1p2 "The type name in a generic association shall specify a // complete object type other than a variably modified type." 
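          // e.g. (illustrative, not from the original source):
          //   _Generic(x, void: 0, default: 1)
          // is rejected below with err_assoc_type_incomplete.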
unsigned D = 0; if (Types[i]->getType()->isIncompleteType()) D = diag::err_assoc_type_incomplete; else if (!Types[i]->getType()->isObjectType()) D = diag::err_assoc_type_nonobject; else if (Types[i]->getType()->isVariablyModifiedType()) D = diag::err_assoc_type_variably_modified; if (D != 0) { Diag(Types[i]->getTypeLoc().getBeginLoc(), D) << Types[i]->getTypeLoc().getSourceRange() << Types[i]->getType(); TypeErrorFound = true; } // C11 6.5.1.1p2 "No two generic associations in the same generic // selection shall specify compatible types." for (unsigned j = i+1; j < NumAssocs; ++j) if (Types[j] && !Types[j]->getType()->isDependentType() && Context.typesAreCompatible(Types[i]->getType(), Types[j]->getType())) { Diag(Types[j]->getTypeLoc().getBeginLoc(), diag::err_assoc_compatible_types) << Types[j]->getTypeLoc().getSourceRange() << Types[j]->getType() << Types[i]->getType(); Diag(Types[i]->getTypeLoc().getBeginLoc(), diag::note_compat_assoc) << Types[i]->getTypeLoc().getSourceRange() << Types[i]->getType(); TypeErrorFound = true; } } } } if (TypeErrorFound) return ExprError(); // If we determined that the generic selection is result-dependent, don't // try to compute the result expression. if (IsResultDependent) return new (Context) GenericSelectionExpr( Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack); SmallVector<unsigned, 1> CompatIndices; unsigned DefaultIndex = -1U; for (unsigned i = 0; i < NumAssocs; ++i) { if (!Types[i]) DefaultIndex = i; else if (Context.typesAreCompatible(ControllingExpr->getType(), Types[i]->getType())) CompatIndices.push_back(i); } // C11 6.5.1.1p2 "The controlling expression of a generic selection shall have // type compatible with at most one of the types named in its generic // association list." if (CompatIndices.size() > 1) { // We strip parens here because the controlling expression is typically // parenthesized in macro definitions. ControllingExpr = ControllingExpr->IgnoreParens(); Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_multi_match) << ControllingExpr->getSourceRange() << ControllingExpr->getType() << (unsigned) CompatIndices.size(); for (SmallVectorImpl<unsigned>::iterator I = CompatIndices.begin(), E = CompatIndices.end(); I != E; ++I) { Diag(Types[*I]->getTypeLoc().getBeginLoc(), diag::note_compat_assoc) << Types[*I]->getTypeLoc().getSourceRange() << Types[*I]->getType(); } return ExprError(); } // C11 6.5.1.1p2 "If a generic selection has no default generic association, // its controlling expression shall have type compatible with exactly one of // the types named in its generic association list." if (DefaultIndex == -1U && CompatIndices.size() == 0) { // We strip parens here because the controlling expression is typically // parenthesized in macro definitions. ControllingExpr = ControllingExpr->IgnoreParens(); Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_no_match) << ControllingExpr->getSourceRange() << ControllingExpr->getType(); return ExprError(); } // C11 6.5.1.1p3 "If a generic selection has a generic association with a // type name that is compatible with the type of the controlling expression, // then the result expression of the generic selection is the expression // in that generic association. Otherwise, the result expression of the // generic selection is the expression in the default generic association." unsigned ResultIndex = CompatIndices.size() ? 
CompatIndices[0] : DefaultIndex; return new (Context) GenericSelectionExpr( Context, KeyLoc, ControllingExpr, Types, Exprs, DefaultLoc, RParenLoc, ContainsUnexpandedParameterPack, ResultIndex); } /// getUDSuffixLoc - Create a SourceLocation for a ud-suffix, given the /// location of the token and the offset of the ud-suffix within it. static SourceLocation getUDSuffixLoc(Sema &S, SourceLocation TokLoc, unsigned Offset) { return Lexer::AdvanceToTokenCharacter(TokLoc, Offset, S.getSourceManager(), S.getLangOpts()); } /// BuildCookedLiteralOperatorCall - A user-defined literal was found. Look up /// the corresponding cooked (non-raw) literal operator, and build a call to it. static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope, IdentifierInfo *UDSuffix, SourceLocation UDSuffixLoc, ArrayRef<Expr*> Args, SourceLocation LitEndLoc) { assert(Args.size() <= 2 && "too many arguments for literal operator"); QualType ArgTy[2]; for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) { ArgTy[ArgIdx] = Args[ArgIdx]->getType(); if (ArgTy[ArgIdx]->isArrayType()) ArgTy[ArgIdx] = S.Context.getArrayDecayedType(ArgTy[ArgIdx]); } DeclarationName OpName = S.Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix); DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc); OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc); LookupResult R(S, OpName, UDSuffixLoc, Sema::LookupOrdinaryName); if (S.LookupLiteralOperator(Scope, R, llvm::makeArrayRef(ArgTy, Args.size()), /*AllowRaw*/false, /*AllowTemplate*/false, /*AllowStringTemplate*/false) == Sema::LOLR_Error) return ExprError(); return S.BuildLiteralOperatorCall(R, OpNameInfo, Args, LitEndLoc); } /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string /// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from /// multiple tokens. However, the common case is that StringToks points to one /// string. 
/// ExprResult Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) { assert(!StringToks.empty() && "Must have at least one string!"); StringLiteralParser Literal(StringToks, PP); if (Literal.hadError) return ExprError(); SmallVector<SourceLocation, 4> StringTokLocs; for (unsigned i = 0; i != StringToks.size(); ++i) StringTokLocs.push_back(StringToks[i].getLocation()); // HLSL Change Starts if (getLangOpts().HLSL) { assert(!Literal.isWide() && !Literal.isUTF16() && !Literal.isUTF32() && !Literal.isPascal() && "HLSL parser always produces simple string literals"); QualType CharTyConst = Context.CharTy; CharTyConst.addConst(); QualType StrTy = Context.getConstantArrayType(CharTyConst, llvm::APInt(32, Literal.GetNumStringChars()+1), ArrayType::Normal, 0); StringLiteral *Result = StringLiteral::Create(Context, Literal.GetString(), StringLiteral::StringKind::Ascii, false, StrTy, &StringTokLocs[0], StringTokLocs.size()); return Result; } // HLSL Change Ends QualType CharTy = Context.CharTy; StringLiteral::StringKind Kind = StringLiteral::Ascii; if (Literal.isWide()) { CharTy = Context.getWideCharType(); Kind = StringLiteral::Wide; } else if (Literal.isUTF8()) { Kind = StringLiteral::UTF8; } else if (Literal.isUTF16()) { CharTy = Context.Char16Ty; Kind = StringLiteral::UTF16; } else if (Literal.isUTF32()) { CharTy = Context.Char32Ty; Kind = StringLiteral::UTF32; } else if (Literal.isPascal()) { CharTy = Context.UnsignedCharTy; } QualType CharTyConst = CharTy; // A C++ string literal has a const-qualified element type (C++ 2.13.4p1). if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings) CharTyConst.addConst(); // Get an array type for the string, according to C99 6.4.5. This includes // the nul terminator character as well as the string length for pascal // strings. QualType StrTy = Context.getConstantArrayType(CharTyConst, llvm::APInt(32, Literal.GetNumStringChars()+1), ArrayType::Normal, 0); // OpenCL v1.1 s6.5.3: a string literal is in the constant address space. if (getLangOpts().OpenCL) { StrTy = Context.getAddrSpaceQualType(StrTy, LangAS::opencl_constant); } // Pass &StringTokLocs[0], StringTokLocs.size() to factory! StringLiteral *Lit = StringLiteral::Create(Context, Literal.GetString(), Kind, Literal.Pascal, StrTy, &StringTokLocs[0], StringTokLocs.size()); if (Literal.getUDSuffix().empty()) return Lit; // We're building a user-defined literal. IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix()); SourceLocation UDSuffixLoc = getUDSuffixLoc(*this, StringTokLocs[Literal.getUDSuffixToken()], Literal.getUDSuffixOffset()); // Make sure we're allowed user-defined literals here. 
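  // Illustrative example (not from the original source): "abc"_print is
  // rejected here when no scope is available to look up operator""_print.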
if (!UDLScope) return ExprError(Diag(UDSuffixLoc, diag::err_invalid_string_udl)); // C++11 [lex.ext]p5: The literal L is treated as a call of the form // operator "" X (str, len) QualType SizeType = Context.getSizeType(); DeclarationName OpName = Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix); DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc); OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc); QualType ArgTy[] = { Context.getArrayDecayedType(StrTy), SizeType }; LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName); switch (LookupLiteralOperator(UDLScope, R, ArgTy, /*AllowRaw*/false, /*AllowTemplate*/false, /*AllowStringTemplate*/true)) { case LOLR_Cooked: { llvm::APInt Len(Context.getIntWidth(SizeType), Literal.GetNumStringChars()); IntegerLiteral *LenArg = IntegerLiteral::Create(Context, Len, SizeType, StringTokLocs[0]); Expr *Args[] = { Lit, LenArg }; return BuildLiteralOperatorCall(R, OpNameInfo, Args, StringTokLocs.back()); } case LOLR_StringTemplate: { TemplateArgumentListInfo ExplicitArgs; unsigned CharBits = Context.getIntWidth(CharTy); bool CharIsUnsigned = CharTy->isUnsignedIntegerType(); llvm::APSInt Value(CharBits, CharIsUnsigned); TemplateArgument TypeArg(CharTy); TemplateArgumentLocInfo TypeArgInfo(Context.getTrivialTypeSourceInfo(CharTy)); ExplicitArgs.addArgument(TemplateArgumentLoc(TypeArg, TypeArgInfo)); for (unsigned I = 0, N = Lit->getLength(); I != N; ++I) { Value = Lit->getCodeUnit(I); TemplateArgument Arg(Context, Value, CharTy); TemplateArgumentLocInfo ArgInfo; ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo)); } return BuildLiteralOperatorCall(R, OpNameInfo, None, StringTokLocs.back(), &ExplicitArgs); } case LOLR_Raw: case LOLR_Template: llvm_unreachable("unexpected literal operator lookup result"); case LOLR_Error: return ExprError(); } llvm_unreachable("unexpected literal operator lookup result"); } ExprResult Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS) { DeclarationNameInfo NameInfo(D->getDeclName(), Loc); return BuildDeclRefExpr(D, Ty, VK, NameInfo, SS); } /// BuildDeclRefExpr - Build an expression that references a /// declaration that does not require a closure capture. ExprResult Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS, NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs) { if (getLangOpts().CUDA) if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext)) if (const FunctionDecl *Callee = dyn_cast<FunctionDecl>(D)) { if (CheckCUDATarget(Caller, Callee)) { Diag(NameInfo.getLoc(), diag::err_ref_bad_target) << IdentifyCUDATarget(Callee) << D->getIdentifier() << IdentifyCUDATarget(Caller); Diag(D->getLocation(), diag::note_previous_decl) << D->getIdentifier(); return ExprError(); } } bool RefersToCapturedVariable = isa<VarDecl>(D) && NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc()); DeclRefExpr *E; if (isa<VarTemplateSpecializationDecl>(D)) { VarTemplateSpecializationDecl *VarSpec = cast<VarTemplateSpecializationDecl>(D); E = DeclRefExpr::Create(Context, SS ? SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(), VarSpec->getTemplateKeywordLoc(), D, RefersToCapturedVariable, NameInfo.getLoc(), Ty, VK, FoundD, TemplateArgs); } else { assert(!TemplateArgs && "No template arguments for non-variable" " template specialization references"); E = DeclRefExpr::Create(Context, SS ? 
SS->getWithLocInContext(Context) : NestedNameSpecifierLoc(), SourceLocation(), D, RefersToCapturedVariable, NameInfo, Ty, VK, FoundD); } MarkDeclRefReferenced(E); if (getLangOpts().ObjCARCWeak && isa<VarDecl>(D) && Ty.getObjCLifetime() == Qualifiers::OCL_Weak && !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, E->getLocStart())) recordUseOfEvaluatedWeak(E); // Just in case we're building an illegal pointer-to-member. FieldDecl *FD = dyn_cast<FieldDecl>(D); if (FD && FD->isBitField()) E->setObjectKind(OK_BitField); return E; } /// Decomposes the given name into a DeclarationNameInfo, its location, and /// possibly a list of template arguments. /// /// If this produces template arguments, it is permitted to call /// DecomposeTemplateName. /// /// This actually loses a lot of source location information for /// non-standard name kinds; we should consider preserving that in /// some way. void Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs) { if (Id.getKind() == UnqualifiedId::IK_TemplateId) { Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc); Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc); ASTTemplateArgsPtr TemplateArgsPtr(Id.TemplateId->getTemplateArgs(), Id.TemplateId->NumArgs); translateTemplateArguments(TemplateArgsPtr, Buffer); TemplateName TName = Id.TemplateId->Template.get(); SourceLocation TNameLoc = Id.TemplateId->TemplateNameLoc; NameInfo = Context.getNameForTemplate(TName, TNameLoc); TemplateArgs = &Buffer; } else { NameInfo = GetNameFromUnqualifiedId(Id); TemplateArgs = nullptr; } } static void emitEmptyLookupTypoDiagnostic( const TypoCorrection &TC, Sema &SemaRef, const CXXScopeSpec &SS, DeclarationName Typo, SourceLocation TypoLoc, ArrayRef<Expr *> Args, unsigned DiagnosticID, unsigned DiagnosticSuggestID) { DeclContext *Ctx = SS.isEmpty() ? nullptr : SemaRef.computeDeclContext(SS, false); if (!TC) { // Emit a special diagnostic for failed member lookups. // FIXME: computing the declaration context might fail here (?) if (Ctx) SemaRef.Diag(TypoLoc, diag::err_no_member) << Typo << Ctx << SS.getRange(); else SemaRef.Diag(TypoLoc, DiagnosticID) << Typo; return; } std::string CorrectedStr = TC.getAsString(SemaRef.getLangOpts()); bool DroppedSpecifier = TC.WillReplaceSpecifier() && Typo.getAsString() == CorrectedStr; unsigned NoteID = (TC.getCorrectionDecl() && isa<ImplicitParamDecl>(TC.getCorrectionDecl())) ? diag::note_implicit_param_decl : diag::note_previous_decl; if (!Ctx) SemaRef.diagnoseTypo(TC, SemaRef.PDiag(DiagnosticSuggestID) << Typo, SemaRef.PDiag(NoteID)); else SemaRef.diagnoseTypo(TC, SemaRef.PDiag(diag::err_no_member_suggest) << Typo << Ctx << DroppedSpecifier << SS.getRange(), SemaRef.PDiag(NoteID)); } /// Diagnose an empty lookup. 
/// /// \return false if new lookup candidates were found bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, TypoExpr **Out) { DeclarationName Name = R.getLookupName(); unsigned diagnostic = diag::err_undeclared_var_use; unsigned diagnostic_suggest = diag::err_undeclared_var_use_suggest; if (Name.getNameKind() == DeclarationName::CXXOperatorName || Name.getNameKind() == DeclarationName::CXXLiteralOperatorName || Name.getNameKind() == DeclarationName::CXXConversionFunctionName) { diagnostic = diag::err_undeclared_use; diagnostic_suggest = diag::err_undeclared_use_suggest; } // If the original lookup was an unqualified lookup, fake an // unqualified lookup. This is useful when (for example) the // original lookup would not have found something because it was a // dependent name. DeclContext *DC = (SS.isEmpty() && !CallsUndergoingInstantiation.empty()) ? CurContext : nullptr; while (DC) { if (isa<CXXRecordDecl>(DC)) { LookupQualifiedName(R, DC); if (!R.empty()) { // Don't give errors about ambiguities in this lookup. R.suppressDiagnostics(); // During a default argument instantiation the CurContext points // to a CXXMethodDecl; but we can't apply a this-> fixit inside a // function parameter list, hence add an explicit check. bool isDefaultArgument = !ActiveTemplateInstantiations.empty() && ActiveTemplateInstantiations.back().Kind == ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation; CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext); bool isInstance = CurMethod && CurMethod->isInstance() && DC == CurMethod->getParent() && !isDefaultArgument; // Give a code modification hint to insert 'this->'. // TODO: fixit for inserting 'Base<T>::' in the other cases. // Actually quite difficult! if (getLangOpts().MSVCCompat) diagnostic = diag::ext_found_via_dependent_bases_lookup; if (isInstance) { Diag(R.getNameLoc(), diagnostic) << Name << FixItHint::CreateInsertion(R.getNameLoc(), "this->"); UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>( CallsUndergoingInstantiation.back()->getCallee()); CXXMethodDecl *DepMethod; if (CurMethod->isDependentContext()) DepMethod = CurMethod; else if (CurMethod->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization) DepMethod = cast<CXXMethodDecl>(CurMethod->getPrimaryTemplate()-> getInstantiatedFromMemberTemplate()->getTemplatedDecl()); else DepMethod = cast<CXXMethodDecl>( CurMethod->getInstantiatedFromMemberFunction()); assert(DepMethod && "No template pattern found"); QualType DepThisType = DepMethod->getThisType(Context); CheckCXXThisCapture(R.getNameLoc()); CXXThisExpr *DepThis = new (Context) CXXThisExpr( R.getNameLoc(), DepThisType, false); TemplateArgumentListInfo TList; if (ULE->hasExplicitTemplateArgs()) ULE->copyTemplateArgumentsInto(TList); CXXScopeSpec SS; SS.Adopt(ULE->getQualifierLoc()); // HLSL Change Begin - This is a reference. CXXDependentScopeMemberExpr *DepExpr = CXXDependentScopeMemberExpr::Create( Context, DepThis, DepThisType, /*IsArrow*/ !getLangOpts().HLSL, SourceLocation(), SS.getWithLocInContext(Context), ULE->getTemplateKeywordLoc(), nullptr, R.getLookupNameInfo(), ULE->hasExplicitTemplateArgs() ? &TList : nullptr); // HLSL Change End - This is a reference. CallsUndergoingInstantiation.back()->setCallee(DepExpr); } else { Diag(R.getNameLoc(), diagnostic) << Name; } // Do we really want to note all of these? 
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) Diag((*I)->getLocation(), diag::note_dependent_var_use); // Return true if we are inside a default argument instantiation // and the found name refers to an instance member function, otherwise // the function calling DiagnoseEmptyLookup will try to create an // implicit member call and this is wrong for default argument. if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) { Diag(R.getNameLoc(), diag::err_member_call_without_object); return true; } // Tell the callee to try to recover. return false; } R.clear(); } // In Microsoft mode, if we are performing lookup from within a friend // function definition declared at class scope then we must set // DC to the lexical parent to be able to search into the parent // class. if (getLangOpts().MSVCCompat && isa<FunctionDecl>(DC) && cast<FunctionDecl>(DC)->getFriendObjectKind() && DC->getLexicalParent()->isRecord()) DC = DC->getLexicalParent(); else DC = DC->getParent(); } // We didn't find anything, so try to correct for a typo. TypoCorrection Corrected; if (S && Out) { SourceLocation TypoLoc = R.getNameLoc(); assert(!ExplicitTemplateArgs && "Diagnosing an empty lookup with explicit template args!"); *Out = CorrectTypoDelayed( R.getLookupNameInfo(), R.getLookupKind(), S, &SS, std::move(CCC), [this, SS, Name, TypoLoc, Args, diagnostic, diagnostic_suggest](const TypoCorrection &TC) { emitEmptyLookupTypoDiagnostic(TC, *this, SS, Name, TypoLoc, Args, diagnostic, diagnostic_suggest); }, nullptr, CTK_ErrorRecovery); if (*Out) return true; } else if (S && (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS, std::move(CCC), CTK_ErrorRecovery))) { std::string CorrectedStr(Corrected.getAsString(getLangOpts())); bool DroppedSpecifier = Corrected.WillReplaceSpecifier() && Name.getAsString() == CorrectedStr; R.setLookupName(Corrected.getCorrection()); bool AcceptableWithRecovery = false; bool AcceptableWithoutRecovery = false; NamedDecl *ND = Corrected.getCorrectionDecl(); if (ND) { if (Corrected.isOverloaded()) { OverloadCandidateSet OCS(R.getNameLoc(), OverloadCandidateSet::CSK_Normal); OverloadCandidateSet::iterator Best; for (TypoCorrection::decl_iterator CD = Corrected.begin(), CDEnd = Corrected.end(); CD != CDEnd; ++CD) { if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(*CD)) AddTemplateOverloadCandidate( FTD, DeclAccessPair::make(FTD, AS_none), ExplicitTemplateArgs, Args, OCS); else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*CD)) if (!ExplicitTemplateArgs || ExplicitTemplateArgs->size() == 0) AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none), Args, OCS); } switch (OCS.BestViableFunction(*this, R.getNameLoc(), Best)) { case OR_Success: ND = Best->Function; Corrected.setCorrectionDecl(ND); break; default: // FIXME: Arbitrarily pick the first declaration for the note. Corrected.setCorrectionDecl(ND); break; } } R.addDecl(ND); if (getLangOpts().CPlusPlus && ND->isCXXClassMember()) { CXXRecordDecl *Record = nullptr; if (Corrected.getCorrectionSpecifier()) { const Type *Ty = Corrected.getCorrectionSpecifier()->getAsType(); Record = Ty->getAsCXXRecordDecl(); } if (!Record) Record = cast<CXXRecordDecl>( ND->getDeclContext()->getRedeclContext()); R.setNamingClass(Record); } AcceptableWithRecovery = isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND); // FIXME: If we ended up with a typo for a type name or // Objective-C class name, we're in trouble because the parser // is in the wrong place to recover. 
Suggest the typo // correction, but don't make it a fix-it since we're not going // to recover well anyway. AcceptableWithoutRecovery = isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND); } else { // FIXME: We found a keyword. Suggest it, but don't provide a fix-it // because we aren't able to recover. AcceptableWithoutRecovery = true; } if (AcceptableWithRecovery || AcceptableWithoutRecovery) { unsigned NoteID = (Corrected.getCorrectionDecl() && isa<ImplicitParamDecl>(Corrected.getCorrectionDecl())) ? diag::note_implicit_param_decl : diag::note_previous_decl; if (SS.isEmpty()) diagnoseTypo(Corrected, PDiag(diagnostic_suggest) << Name, PDiag(NoteID), AcceptableWithRecovery); else diagnoseTypo(Corrected, PDiag(diag::err_no_member_suggest) << Name << computeDeclContext(SS, false) << DroppedSpecifier << SS.getRange(), PDiag(NoteID), AcceptableWithRecovery); // Tell the callee whether to try to recover. return !AcceptableWithRecovery; } } R.clear(); // Emit a special diagnostic for failed member lookups. // FIXME: computing the declaration context might fail here (?) if (!SS.isEmpty()) { Diag(R.getNameLoc(), diag::err_no_member) << Name << computeDeclContext(SS, false) << SS.getRange(); return true; } // Give up, we can't recover. Diag(R.getNameLoc(), diagnostic) << Name; return true; } /// In Microsoft mode, if we are inside a template class whose parent class has /// dependent base classes, and we can't resolve an unqualified identifier, then /// assume the identifier is a member of a dependent base class. We can only /// recover successfully in static methods, instance methods, and other contexts /// where 'this' is available. This doesn't precisely match MSVC's /// instantiation model, but it's close enough. static Expr * recoverFromMSUnqualifiedLookup(Sema &S, ASTContext &Context, DeclarationNameInfo &NameInfo, SourceLocation TemplateKWLoc, const TemplateArgumentListInfo *TemplateArgs) { // Only try to recover from lookup into dependent bases in static methods or // contexts where 'this' is available. QualType ThisType = S.getCurrentThisType(); const CXXRecordDecl *RD = nullptr; if (!ThisType.isNull()) RD = ThisType->getPointeeType()->getAsCXXRecordDecl(); else if (auto *MD = dyn_cast<CXXMethodDecl>(S.CurContext)) RD = MD->getParent(); if (!RD || !RD->hasAnyDependentBases()) return nullptr; // Diagnose this as unqualified lookup into a dependent base class. If 'this' // is available, suggest inserting 'this->' as a fixit. SourceLocation Loc = NameInfo.getLoc(); auto DB = S.Diag(Loc, diag::ext_undeclared_unqual_id_with_dependent_base); DB << NameInfo.getName() << RD; if (!ThisType.isNull()) { // HLSL Change Begin - This code is broken because `this` is a reference in // HLSL, but this code should also be unreachable. assert(!S.getLangOpts().HLSL && "This should be unreachable in DXC because we don't enable the " "MSCompat language feature."); // HLSL Change End DB << FixItHint::CreateInsertion(Loc, "this->"); return CXXDependentScopeMemberExpr::Create( Context, /*This=*/nullptr, ThisType, /*IsArrow=*/true, /*Op=*/SourceLocation(), NestedNameSpecifierLoc(), TemplateKWLoc, /*FirstQualifierInScope=*/nullptr, NameInfo, TemplateArgs); } // Synthesize a fake NNS that points to the derived class. This will // perform name lookup during template instantiation. 
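// For illustration (hypothetical MSVC-style code, not exercised by HLSL):
//   template <typename T> struct S : T { int f() { return n; } };
// A bare 'n' can only be found in the dependent base T, so the reference
// is rebuilt against the synthesized 'S<T>::' specifier (or 'this->n'
// above) and resolved when S<T> is instantiated.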
CXXScopeSpec SS; auto *NNS = NestedNameSpecifier::Create(Context, nullptr, true, RD->getTypeForDecl()); SS.MakeTrivial(Context, NNS, SourceRange(Loc, Loc)); return DependentScopeDeclRefExpr::Create( Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo, TemplateArgs); } ExprResult Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC, bool IsInlineAsmIdentifier, Token *KeywordReplacement) { assert(!(IsAddressOfOperand && HasTrailingLParen) && "cannot be direct & operand and have a trailing lparen"); if (SS.isInvalid()) return ExprError(); TemplateArgumentListInfo TemplateArgsBuffer; // Decompose the UnqualifiedId into the following data. DeclarationNameInfo NameInfo; const TemplateArgumentListInfo *TemplateArgs; DecomposeUnqualifiedId(Id, TemplateArgsBuffer, NameInfo, TemplateArgs); DeclarationName Name = NameInfo.getName(); IdentifierInfo *II = Name.getAsIdentifierInfo(); SourceLocation NameLoc = NameInfo.getLoc(); // C++ [temp.dep.expr]p3: // An id-expression is type-dependent if it contains: // -- an identifier that was declared with a dependent type, // (note: handled after lookup) // -- a template-id that is dependent, // (note: handled in BuildTemplateIdExpr) // -- a conversion-function-id that specifies a dependent type, // -- a nested-name-specifier that contains a class-name that // names a dependent type. // Determine whether this is a member of an unknown specialization; // we need to handle these differently. bool DependentID = false; if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName && Name.getCXXNameType()->isDependentType()) { DependentID = true; } else if (SS.isSet()) { if (DeclContext *DC = computeDeclContext(SS, false)) { if (RequireCompleteDeclContext(SS, DC)) return ExprError(); } else { DependentID = true; } } if (DependentID) return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo, IsAddressOfOperand, TemplateArgs); // Perform the required lookup. LookupResult R(*this, NameInfo, (Id.getKind() == UnqualifiedId::IK_ImplicitSelfParam) ? LookupObjCImplicitSelfParam : LookupOrdinaryName); if (TemplateArgs) { // Lookup the template name again to correctly establish the context in // which it was found. This is really unfortunate as we already did the // lookup to determine that it was a template name in the first place. If // this becomes a performance hit, we can work harder to preserve those // results until we get here but it's likely not worth it. bool MemberOfUnknownSpecialization; LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false, MemberOfUnknownSpecialization); if (MemberOfUnknownSpecialization || (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)) return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo, IsAddressOfOperand, TemplateArgs); } else { bool IvarLookupFollowUp = II && !SS.isSet() && getCurMethodDecl(); LookupParsedName(R, S, &SS, !IvarLookupFollowUp); // If the result might be in a dependent base class, this is a dependent // id-expression. if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation) return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo, IsAddressOfOperand, TemplateArgs); // If this reference is in an Objective-C method, then we need to do // some special Objective-C lookup, too. 
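// (For illustration: inside an instance method of a class declaring a
// hypothetical ivar '_count', a bare reference to '_count' is rebuilt
// below roughly as if the programmer had written 'self->_count'.)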
if (IvarLookupFollowUp) { ExprResult E(LookupInObjCMethod(R, S, II, true)); if (E.isInvalid()) return ExprError(); if (Expr *Ex = E.getAs<Expr>()) return Ex; } } if (R.isAmbiguous()) return ExprError(); // This could be an implicitly declared function reference (legal in C90, // extension in C99, forbidden in C++). if (R.empty() && HasTrailingLParen && II && !getLangOpts().CPlusPlus) { NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *II, S); if (D) R.addDecl(D); } // Determine whether this name might be a candidate for // argument-dependent lookup. bool ADL = UseArgumentDependentLookup(SS, R, HasTrailingLParen); if (R.empty() && !ADL) { if (SS.isEmpty() && getLangOpts().MSVCCompat) { if (Expr *E = recoverFromMSUnqualifiedLookup(*this, Context, NameInfo, TemplateKWLoc, TemplateArgs)) return E; } // Don't diagnose an empty lookup for inline assembly. if (IsInlineAsmIdentifier) return ExprError(); // If this name wasn't predeclared and if this is not a function // call, diagnose the problem. TypoExpr *TE = nullptr; auto DefaultValidator = llvm::make_unique<CorrectionCandidateCallback>( II, SS.isValid() ? SS.getScopeRep() : nullptr); DefaultValidator->IsAddressOfOperand = IsAddressOfOperand; assert((!CCC || CCC->IsAddressOfOperand == IsAddressOfOperand) && "Typo correction callback misconfigured"); if (CCC) { // Make sure the callback knows what the typo being diagnosed is. CCC->setTypoName(II); if (SS.isValid()) CCC->setTypoNNS(SS.getScopeRep()); } if (DiagnoseEmptyLookup(S, SS, R, CCC ? std::move(CCC) : std::move(DefaultValidator), nullptr, None, &TE)) { if (TE && KeywordReplacement) { auto &State = getTypoExprState(TE); auto BestTC = State.Consumer->getNextCorrection(); if (BestTC.isKeyword()) { auto *II = BestTC.getCorrectionAsIdentifierInfo(); if (State.DiagHandler) State.DiagHandler(BestTC); KeywordReplacement->startToken(); KeywordReplacement->setKind(II->getTokenID()); KeywordReplacement->setIdentifierInfo(II); KeywordReplacement->setLocation(BestTC.getCorrectionRange().getBegin()); // Clean up the state associated with the TypoExpr, since it has // now been diagnosed (without a call to CorrectDelayedTyposInExpr). clearDelayedTypo(TE); // Signal that a correction to a keyword was performed by returning a // valid-but-null ExprResult. return (Expr*)nullptr; } State.Consumer->resetCorrectionStream(); } return TE ? TE : ExprError(); } assert(!R.empty() && "DiagnoseEmptyLookup returned false but added no results"); // If we found an Objective-C instance variable, let // LookupInObjCMethod build the appropriate expression to // reference the ivar. if (ObjCIvarDecl *Ivar = R.getAsSingle<ObjCIvarDecl>()) { R.clear(); ExprResult E(LookupInObjCMethod(R, S, Ivar->getIdentifier())); // In hopelessly buggy code, Objective-C instance variable // lookup fails and no expression will be built to reference it. if (!E.isInvalid() && !E.get()) return ExprError(); return E; } } // This is guaranteed from this point on. assert(!R.empty() || ADL); // Check whether this might be a C++ implicit instance member access. // C++ [class.mfct.non-static]p3: // When an id-expression that is not part of a class member access // syntax and not used to form a pointer to member is used in the // body of a non-static member function of class X, if name lookup // resolves the name in the id-expression to a non-static non-type // member of some class C, the id-expression is transformed into a // class member access expression using (*this) as the // postfix-expression to the left of the . operator. 
// // But we don't actually need to do this for '&' operands if R // resolved to a function or overloaded function set, because the // expression is ill-formed if it actually works out to be a // non-static member function: // // C++ [expr.ref]p4: // Otherwise, if E1.E2 refers to a non-static member function. . . // [t]he expression can be used only as the left-hand operand of a // member function call. // // There are other safeguards against such uses, but it's important // to get this right here so that we don't end up making a // spuriously dependent expression if we're inside a dependent // instance method. if (!R.empty() && (*R.begin())->isCXXClassMember()) { bool MightBeImplicitMember; if (!IsAddressOfOperand) MightBeImplicitMember = true; else if (!SS.isEmpty()) MightBeImplicitMember = false; else if (R.isOverloadedResult()) MightBeImplicitMember = false; else if (R.isUnresolvableResult()) MightBeImplicitMember = true; else MightBeImplicitMember = isa<FieldDecl>(R.getFoundDecl()) || isa<IndirectFieldDecl>(R.getFoundDecl()) || isa<MSPropertyDecl>(R.getFoundDecl()); if (MightBeImplicitMember) return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs); } // HLSL Change Begin: Allow templates without empty argument list if default // arguments are provided. if (getLangOpts().HLSL && R.isSingleResult()) { if (TemplateDecl *Template = dyn_cast<TemplateDecl>(R.getFoundDecl())) { if (Template->getTemplateParameters()->getMinRequiredArguments() == 0) { TemplateArgsBuffer.setLAngleLoc(NameLoc); TemplateArgsBuffer.setRAngleLoc(NameLoc); TemplateArgs = &TemplateArgsBuffer; } } } // HLSL Change End if (TemplateArgs || TemplateKWLoc.isValid()) { // In C++1y, if this is a variable template id, then check it // in BuildTemplateIdExpr(). // The single lookup result must be a variable template declaration. if (Id.getKind() == UnqualifiedId::IK_TemplateId && Id.TemplateId && Id.TemplateId->Kind == TNK_Var_template) { assert(R.getAsSingle<VarTemplateDecl>() && "There should only be one declaration found."); } return BuildTemplateIdExpr(SS, TemplateKWLoc, R, ADL, TemplateArgs); } return BuildDeclarationNameExpr(SS, R, ADL); } /// BuildQualifiedDeclarationNameExpr - Build a C++ qualified /// declaration name, generally during template instantiation. /// There's a large number of things which don't need to be done along /// this path. ExprResult Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI) { DeclContext *DC = computeDeclContext(SS, false); if (!DC) return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo, /*TemplateArgs=*/nullptr); if (RequireCompleteDeclContext(SS, DC)) return ExprError(); LookupResult R(*this, NameInfo, LookupOrdinaryName); LookupQualifiedName(R, DC); if (R.isAmbiguous()) return ExprError(); if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation) return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(), NameInfo, /*TemplateArgs=*/nullptr); if (R.empty()) { Diag(NameInfo.getLoc(), diag::err_no_member) << NameInfo.getName() << DC << SS.getRange(); return ExprError(); } if (const TypeDecl *TD = R.getAsSingle<TypeDecl>()) { // Diagnose a missing typename if this resolved unambiguously to a type in // a dependent context. If we can recover with a type, downgrade this to // a warning in Microsoft compatibility mode. 
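// For illustration (hypothetical code): given
//   template <typename T> void f() { T::type v; }
// this path reports the missing 'typename' and, when the caller asked for
// recovery, proceeds as if 'typename T::type v;' had been written.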
unsigned DiagID = diag::err_typename_missing; if (RecoveryTSI && getLangOpts().MSVCCompat) DiagID = diag::ext_typename_missing; SourceLocation Loc = SS.getBeginLoc(); auto D = Diag(Loc, DiagID); D << SS.getScopeRep() << NameInfo.getName().getAsString() << SourceRange(Loc, NameInfo.getEndLoc()); // Don't recover if the caller isn't expecting us to or if we're in a SFINAE // context. if (!RecoveryTSI) return ExprError(); // Only issue the fixit if we're prepared to recover. D << FixItHint::CreateInsertion(Loc, "typename "); // Recover by pretending this was an elaborated type. QualType Ty = Context.getTypeDeclType(TD); TypeLocBuilder TLB; TLB.pushTypeSpec(Ty).setNameLoc(NameInfo.getLoc()); QualType ET = getElaboratedType(ETK_None, SS, Ty); ElaboratedTypeLoc QTL = TLB.push<ElaboratedTypeLoc>(ET); QTL.setElaboratedKeywordLoc(SourceLocation()); QTL.setQualifierLoc(SS.getWithLocInContext(Context)); *RecoveryTSI = TLB.getTypeSourceInfo(Context, ET); return ExprEmpty(); } // Defend against this resolving to an implicit member access. We usually // won't get here if this might be a legitimate class member (we end up in // BuildMemberReferenceExpr instead), but this can be valid if we're forming // a pointer-to-member or in an unevaluated context in C++11. if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand) return BuildPossibleImplicitMemberExpr(SS, /*TemplateKWLoc=*/SourceLocation(), R, /*TemplateArgs=*/nullptr); return BuildDeclarationNameExpr(SS, R, /* ADL */ false); } /// LookupInObjCMethod - The parser has read a name in, and Sema has /// detected that we're currently inside an ObjC method. Perform some /// additional lookup. /// /// Ideally, most of this would be done by lookup, but there's /// actually quite a lot of extra work involved. /// /// Returns a null sentinel to indicate trivial success. ExprResult Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation) { SourceLocation Loc = Lookup.getNameLoc(); ObjCMethodDecl *CurMethod = getCurMethodDecl(); // Check for error condition which is already reported. if (!CurMethod) return ExprError(); // There are two cases to handle here. 1) scoped lookup could have failed, // in which case we should look for an ivar. 2) scoped lookup could have // found a decl, but that decl is outside the current instance method (i.e. // a global variable). In these two cases, we do a lookup for an ivar with // this name; if the lookup succeeds, we replace it with our current decl. // If we're in a class method, we don't normally want to look for // ivars. But if we don't find anything else, and there's an // ivar, that's an error. bool IsClassMethod = CurMethod->isClassMethod(); bool LookForIvars; if (Lookup.empty()) LookForIvars = true; else if (IsClassMethod) LookForIvars = false; else LookForIvars = (Lookup.isSingleResult() && Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()); ObjCInterfaceDecl *IFace = nullptr; if (LookForIvars) { IFace = CurMethod->getClassInterface(); ObjCInterfaceDecl *ClassDeclared; ObjCIvarDecl *IV = nullptr; if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) { // Diagnose using an ivar in a class method. if (IsClassMethod) return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method) << IV->getDeclName()); // If we're referencing an invalid decl, just return this as a silent // error node. The error diagnostic was already emitted on the decl. 
if (IV->isInvalidDecl()) return ExprError(); // Check if referencing a field with __attribute__((deprecated)). if (DiagnoseUseOfDecl(IV, Loc)) return ExprError(); // Diagnose the use of an ivar outside of the declaring class. if (IV->getAccessControl() == ObjCIvarDecl::Private && !declaresSameEntity(ClassDeclared, IFace) && !getLangOpts().DebuggerSupport) Diag(Loc, diag::error_private_ivar_access) << IV->getDeclName(); // FIXME: This should use a new expr for a direct reference, don't // turn this into Self->ivar, just return a BareIVarExpr or something. IdentifierInfo &II = Context.Idents.get("self"); UnqualifiedId SelfName; SelfName.setIdentifier(&II, SourceLocation()); SelfName.setKind(UnqualifiedId::IK_ImplicitSelfParam); CXXScopeSpec SelfScopeSpec; SourceLocation TemplateKWLoc; ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc, SelfName, false, false); if (SelfExpr.isInvalid()) return ExprError(); SelfExpr = DefaultLvalueConversion(SelfExpr.get()); if (SelfExpr.isInvalid()) return ExprError(); MarkAnyDeclReferenced(Loc, IV, true); ObjCMethodFamily MF = CurMethod->getMethodFamily(); if (MF != OMF_init && MF != OMF_dealloc && MF != OMF_finalize && !IvarBacksCurrentMethodAccessor(IFace, CurMethod, IV)) Diag(Loc, diag::warn_direct_ivar_access) << IV->getDeclName(); ObjCIvarRefExpr *Result = new (Context) ObjCIvarRefExpr(IV, IV->getUsageType(SelfExpr.get()->getType()), Loc, IV->getLocation(), SelfExpr.get(), true, true); if (getLangOpts().ObjCAutoRefCount) { if (IV->getType().getObjCLifetime() == Qualifiers::OCL_Weak) { if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, Loc)) recordUseOfEvaluatedWeak(Result); } if (CurContext->isClosure()) Diag(Loc, diag::warn_implicitly_retains_self) << FixItHint::CreateInsertion(Loc, "self->"); } return Result; } } else if (CurMethod->isInstanceMethod()) { // We should warn if a local variable hides an ivar. if (ObjCInterfaceDecl *IFace = CurMethod->getClassInterface()) { ObjCInterfaceDecl *ClassDeclared; if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) { if (IV->getAccessControl() != ObjCIvarDecl::Private || declaresSameEntity(IFace, ClassDeclared)) Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName(); } } } else if (Lookup.isSingleResult() && Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) { // If accessing a stand-alone ivar in a class method, this is an error. if (const ObjCIvarDecl *IV = dyn_cast<ObjCIvarDecl>(Lookup.getFoundDecl())) return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method) << IV->getDeclName()); } if (Lookup.empty() && II && AllowBuiltinCreation) { // FIXME. Consolidate this with similar code in LookupName. if (unsigned BuiltinID = II->getBuiltinID()) { if (!(getLangOpts().CPlusPlus && Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))) { NamedDecl *D = LazilyCreateBuiltin((IdentifierInfo *)II, BuiltinID, S, Lookup.isForRedeclaration(), Lookup.getNameLoc()); if (D) Lookup.addDecl(D); } } } // Sentinel value saying that we didn't do anything special. return ExprResult((Expr *)nullptr); } /// \brief Cast a base object to a member's actual type. /// /// Logically this happens in three phases: /// /// * First we cast from the base type to the naming class. /// The naming class is the class into which we were looking /// when we found the member; it's the qualifier type if a /// qualifier was provided, and otherwise it's the base type. /// /// * Next we cast from the naming class to the declaring class. 
/// If the member we found was brought into a class's scope by /// a using declaration, this is that class; otherwise it's /// the class declaring the member. /// /// * Finally we cast from the declaring class to the "true" /// declaring class of the member. This conversion does not /// obey access control. ExprResult Sema::PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member) { CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext()); if (!RD) return From; QualType DestRecordType; QualType DestType; QualType FromRecordType; QualType FromType = From->getType(); bool PointerConversions = false; if (isa<FieldDecl>(Member)) { DestRecordType = Context.getCanonicalType(Context.getTypeDeclType(RD)); if (FromType->getAs<PointerType>()) { DestType = Context.getPointerType(DestRecordType); FromRecordType = FromType->getPointeeType(); PointerConversions = true; } else { DestType = DestRecordType; FromRecordType = FromType; } } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) { if (Method->isStatic()) return From; DestType = Method->getThisType(Context); DestRecordType = DestType->getPointeeType(); if (FromType->getAs<PointerType>()) { FromRecordType = FromType->getPointeeType(); PointerConversions = true; } else { FromRecordType = FromType; DestType = DestRecordType; } } else { // No conversion necessary. return From; } if (DestType->isDependentType() || FromType->isDependentType()) return From; // If the unqualified types are the same, no conversion is necessary. if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType)) return From; SourceRange FromRange = From->getSourceRange(); SourceLocation FromLoc = FromRange.getBegin(); ExprValueKind VK = From->getValueKind(); // C++ [class.member.lookup]p8: // [...] Ambiguities can often be resolved by qualifying a name with its // class name. // // If the member was a qualified name and the qualified referred to a // specific base subobject type, we'll cast to that intermediate type // first and then to the object in which the member is declared. That allows // one to resolve ambiguities in, e.g., a diamond-shaped hierarchy such as: // // class Base { public: int x; }; // class Derived1 : public Base { }; // class Derived2 : public Base { }; // class VeryDerived : public Derived1, public Derived2 { void f(); }; // // void VeryDerived::f() { // x = 17; // error: ambiguous base subobjects // Derived1::x = 17; // okay, pick the Base subobject of Derived1 // } if (Qualifier && Qualifier->getAsType()) { QualType QType = QualType(Qualifier->getAsType(), 0); assert(QType->isRecordType() && "lookup done with non-record type"); QualType QRecordType = QualType(QType->getAs<RecordType>(), 0); // In C++98, the qualifier type doesn't actually have to be a base // type of the object type, in which case we just ignore it. // Otherwise build the appropriate casts. if (IsDerivedFrom(FromRecordType, QRecordType)) { CXXCastPath BasePath; if (CheckDerivedToBaseConversion(FromRecordType, QRecordType, FromLoc, FromRange, &BasePath)) return ExprError(); if (PointerConversions) QType = Context.getPointerType(QType); From = ImpCastExprToType(From, QType, CK_UncheckedDerivedToBase, VK, &BasePath).get(); FromType = QType; FromRecordType = QRecordType; // If the qualifier type was the same as the destination type, // we're done. 
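// (In the diamond example above, 'Derived1::x' exercises this path: the
// object is first cast VeryDerived -> Derived1 by the qualifier block, and
// the remaining Derived1 -> Base leg is handled by the final
// derived-to-base conversion below.)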
if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType)) return From; } } bool IgnoreAccess = false; // If we actually found the member through a using declaration, cast // down to the using declaration's type. // // Pointer equality is fine here because only one declaration of a // class ever has member declarations. if (FoundDecl->getDeclContext() != Member->getDeclContext()) { assert(isa<UsingShadowDecl>(FoundDecl)); QualType URecordType = Context.getTypeDeclType( cast<CXXRecordDecl>(FoundDecl->getDeclContext())); // We only need to do this if the naming-class to declaring-class // conversion is non-trivial. if (!Context.hasSameUnqualifiedType(FromRecordType, URecordType)) { assert(IsDerivedFrom(FromRecordType, URecordType)); CXXCastPath BasePath; if (CheckDerivedToBaseConversion(FromRecordType, URecordType, FromLoc, FromRange, &BasePath)) return ExprError(); QualType UType = URecordType; if (PointerConversions) UType = Context.getPointerType(UType); From = ImpCastExprToType(From, UType, CK_UncheckedDerivedToBase, VK, &BasePath).get(); FromType = UType; FromRecordType = URecordType; } // We don't do access control for the conversion from the // declaring class to the true declaring class. IgnoreAccess = true; } CXXCastPath BasePath; // HLSL Change Begin // When a member of the element type is accessed through a // ConstantBuffer<T> or TextureBuffer<T>, convert the buffer to its // element type T with a flat conversion. if (getLangOpts().HLSL) { if (auto TSTy = dyn_cast<TemplateSpecializationType>(FromRecordType)) { if (TSTy->getTemplateName().getAsTemplateDecl()->getName() == "ConstantBuffer" || TSTy->getTemplateName().getAsTemplateDecl()->getName() == "TextureBuffer") { auto FirstArg = TSTy->getArgs()[0]; if (FirstArg.getKind() == TemplateArgument::Type && FirstArg.getAsType() == DestRecordType) { return ImpCastExprToType(From, DestType, CK_FlatConversion, VK, &BasePath); } } } } // HLSL Change End. if (CheckDerivedToBaseConversion(FromRecordType, DestRecordType, FromLoc, FromRange, &BasePath, IgnoreAccess)) return ExprError(); return ImpCastExprToType(From, DestType, CK_UncheckedDerivedToBase, VK, &BasePath); } bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen) { // Only when used directly as the postfix-expression of a call. if (!HasTrailingLParen) return false; // Never if a scope specifier was provided. if (SS.isSet()) { // HLSL Change begins // We want to be able to have intrinsics inside the "vk" namespace. const bool isVkNamespace = SS.getScopeRep() && SS.getScopeRep()->getAsNamespace() && SS.getScopeRep()->getAsNamespace()->getName() == "vk"; if (!isVkNamespace) // HLSL Change ends return false; } // Only in C++ or ObjC++. if (!getLangOpts().CPlusPlus) return false; // Turn off ADL when we find certain kinds of declarations during // normal lookup: for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) { NamedDecl *D = *I; // C++0x [basic.lookup.argdep]p3: // -- a declaration of a class member // Since using decls preserve this property, we check this on the // original decl. if (D->isCXXClassMember()) return false; // C++0x [basic.lookup.argdep]p3: // -- a block-scope function declaration that is not a // using-declaration // NOTE: we also trigger this for function templates (in fact, we // don't check the decl type at all, since all other decl types // turn off ADL anyway). 
if (isa<UsingShadowDecl>(D)) D = cast<UsingShadowDecl>(D)->getTargetDecl(); else if (D->getLexicalDeclContext()->isFunctionOrMethod()) return false; // C++0x [basic.lookup.argdep]p3: // -- a declaration that is neither a function nor a function // template // And also for builtin functions. if (isa<FunctionDecl>(D)) { FunctionDecl *FDecl = cast<FunctionDecl>(D); // But also builtin functions. if (FDecl->getBuiltinID() && FDecl->isImplicit()) return false; } else if (!isa<FunctionTemplateDecl>(D)) return false; } return true; } /// Diagnoses obvious problems with the use of the given declaration /// as an expression. This is only actually called for lookups that /// were not overloaded, and it doesn't promise that the declaration /// will in fact be used. static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D) { if (isa<TypedefNameDecl>(D)) { S.Diag(Loc, diag::err_unexpected_typedef) << D->getDeclName(); return true; } if (isa<ObjCInterfaceDecl>(D)) { S.Diag(Loc, diag::err_unexpected_interface) << D->getDeclName(); return true; } if (isa<NamespaceDecl>(D)) { S.Diag(Loc, diag::err_unexpected_namespace) << D->getDeclName(); return true; } return false; } ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl) { // If this is a single, fully-resolved result and we don't need ADL, // just build an ordinary singleton decl ref. if (!NeedsADL && R.isSingleResult() && !R.getAsSingle<FunctionTemplateDecl>()) return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), R.getFoundDecl(), R.getRepresentativeDecl(), nullptr, AcceptInvalidDecl); // We only need to check the declaration if there's exactly one // result, because in the overloaded case the results can only be // functions and function templates. if (R.isSingleResult() && CheckDeclInExpr(*this, R.getNameLoc(), R.getFoundDecl())) return ExprError(); // Otherwise, just build an unresolved lookup expression. Suppress // any lookup-related diagnostics; we'll hash these out later, when // we've picked a target. R.suppressDiagnostics(); UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(Context, R.getNamingClass(), SS.getWithLocInContext(Context), R.getLookupNameInfo(), NeedsADL, R.isOverloadedResult(), R.begin(), R.end()); return ULE; } /// \brief Complete semantic analysis for a reference to the given declaration. ExprResult Sema::BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD, const TemplateArgumentListInfo *TemplateArgs, bool AcceptInvalidDecl) { assert(D && "Cannot refer to a NULL declaration"); assert(!isa<FunctionTemplateDecl>(D) && "Cannot refer unambiguously to a function template"); SourceLocation Loc = NameInfo.getLoc(); if (CheckDeclInExpr(*this, Loc, D)) return ExprError(); if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) { // Specifically diagnose references to class templates that are missing // a template argument list. Diag(Loc, diag::err_template_decl_ref) << (isa<VarTemplateDecl>(D) ? 1 : 0) << Template << SS.getRange(); if (Template->getLocation().isValid()) { // HLSL Change - elide location notes for built-ins Diag(Template->getLocation(), diag::note_template_decl_here); } return ExprError(); } // Make sure that we're referring to a value. ValueDecl *VD = dyn_cast<ValueDecl>(D); if (!VD) { Diag(Loc, diag::err_ref_non_value) << D << SS.getRange(); Diag(D->getLocation(), diag::note_declared_at); return ExprError(); } // Check whether this declaration can be used. 
Note that we suppress // this check when we're going to perform argument-dependent lookup // on this function name, because this might not be the function // that overload resolution actually selects. if (DiagnoseUseOfDecl(VD, Loc)) return ExprError(); // Only create DeclRefExpr's for valid Decl's. if (VD->isInvalidDecl() && !AcceptInvalidDecl) return ExprError(); // Handle members of anonymous structs and unions. If we got here, // and the reference is to a class member indirect field, then this // must be the subject of a pointer-to-member expression. if (IndirectFieldDecl *indirectField = dyn_cast<IndirectFieldDecl>(VD)) if (!indirectField->isCXXClassMember()) return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(), indirectField); { QualType type = VD->getType(); ExprValueKind valueKind = VK_RValue; switch (D->getKind()) { // Ignore all the non-ValueDecl kinds. #define ABSTRACT_DECL(kind) #define VALUE(type, base) #define DECL(type, base) \ case Decl::type: #include "clang/AST/DeclNodes.inc" llvm_unreachable("invalid value decl kind"); // These shouldn't make it here. case Decl::ObjCAtDefsField: case Decl::ObjCIvar: llvm_unreachable("forming non-member reference to ivar?"); // Enum constants are always r-values and never references. // Unresolved using declarations are dependent. case Decl::EnumConstant: case Decl::UnresolvedUsingValue: valueKind = VK_RValue; break; // Fields and indirect fields that got here must be for // pointer-to-member expressions; we just call them l-values for // internal consistency, because this subexpression doesn't really // exist in the high-level semantics. case Decl::Field: case Decl::IndirectField: assert(getLangOpts().CPlusPlus && "building reference to field in C?"); // These can't have reference type in well-formed programs, but // for internal consistency we do this anyway. type = type.getNonReferenceType(); valueKind = VK_LValue; break; // Non-type template parameters are either l-values or r-values // depending on the type. case Decl::NonTypeTemplateParm: { if (const ReferenceType *reftype = type->getAs<ReferenceType>()) { type = reftype->getPointeeType(); valueKind = VK_LValue; // even if the parameter is an r-value reference break; } // For non-references, we need to strip qualifiers just in case // the template parameter was declared as 'const int' or whatever. valueKind = VK_RValue; type = type.getUnqualifiedType(); break; } case Decl::Var: case Decl::VarTemplateSpecialization: case Decl::VarTemplatePartialSpecialization: // In C, "extern void blah;" is valid and is an r-value. if (!getLangOpts().CPlusPlus && !type.hasQualifiers() && type->isVoidType()) { valueKind = VK_RValue; break; } LLVM_FALLTHROUGH; // HLSL Change case Decl::ImplicitParam: case Decl::ParmVar: { // These are always l-values. valueKind = VK_LValue; type = type.getNonReferenceType(); // FIXME: Does the addition of const really only apply in // potentially-evaluated contexts? Since the variable isn't actually // captured in an unevaluated context, it seems that the answer is no. 
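// (For illustration: in 'int x; [=]() { return x; }' the non-mutable
// lambda makes the captured 'x' const-qualified, so the reference built
// here gets type 'const int' via getCapturedDeclRefType.)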
if (!isUnevaluatedContext()) { QualType CapturedType = getCapturedDeclRefType(cast<VarDecl>(VD), Loc); if (!CapturedType.isNull()) type = CapturedType; } break; } case Decl::Function: { if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) { if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) { type = Context.BuiltinFnTy; valueKind = VK_RValue; break; } } const FunctionType *fty = type->castAs<FunctionType>(); // If we're referring to a function with an __unknown_anytype // result type, make the entire expression __unknown_anytype. if (fty->getReturnType() == Context.UnknownAnyTy) { type = Context.UnknownAnyTy; valueKind = VK_RValue; break; } // Functions are l-values in C++. if (getLangOpts().CPlusPlus) { valueKind = VK_LValue; break; } // C99 DR 316 says that, if a function type comes from a // function definition (without a prototype), that type is only // used for checking compatibility. Therefore, when referencing // the function, we pretend that we don't have the full function // type. if (!cast<FunctionDecl>(VD)->hasPrototype() && isa<FunctionProtoType>(fty)) type = Context.getFunctionNoProtoType(fty->getReturnType(), fty->getExtInfo()); // Functions are r-values in C. valueKind = VK_RValue; break; } case Decl::MSProperty: valueKind = VK_LValue; break; case Decl::CXXMethod: // If we're referring to a method with an __unknown_anytype // result type, make the entire expression __unknown_anytype. // This should only be possible with a type written directly. if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(VD->getType())) if (proto->getReturnType() == Context.UnknownAnyTy) { type = Context.UnknownAnyTy; valueKind = VK_RValue; break; } // C++ methods are l-values if static, r-values if non-static. if (cast<CXXMethodDecl>(VD)->isStatic()) { valueKind = VK_LValue; break; } LLVM_FALLTHROUGH; // HLSL Change case Decl::CXXConversion: case Decl::CXXDestructor: case Decl::CXXConstructor: valueKind = VK_RValue; break; } return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS, FoundD, TemplateArgs); } } static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source, SmallString<32> &Target) { Target.resize(CharByteWidth * (Source.size() + 1)); char *ResultPtr = &Target[0]; const UTF8 *ErrorPtr; bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr); (void)success; assert(success); Target.resize(ResultPtr - &Target[0]); } ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT) { // Pick the current block, lambda, captured statement or function. Decl *currentDecl = nullptr; if (const BlockScopeInfo *BSI = getCurBlock()) currentDecl = BSI->TheDecl; else if (const LambdaScopeInfo *LSI = getCurLambda()) currentDecl = LSI->CallOperator; else if (const CapturedRegionScopeInfo *CSI = getCurCapturedRegion()) currentDecl = CSI->TheCapturedDecl; else currentDecl = getCurFunctionOrMethodDecl(); if (!currentDecl) { Diag(Loc, diag::ext_predef_outside_function); currentDecl = Context.getTranslationUnitDecl(); } QualType ResTy; StringLiteral *SL = nullptr; if (cast<DeclContext>(currentDecl)->isDependentContext()) ResTy = Context.DependentTy; else { // Pre-defined identifiers are of type char[x], where x is the length of // the string. 
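// (Example: inside 'void foo()', '__func__' yields "foo" and therefore
// has type 'const char[4]' -- three characters plus the terminating NUL.)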
auto Str = PredefinedExpr::ComputeName(IT, currentDecl); unsigned Length = Str.length(); llvm::APInt LengthI(32, Length + 1); if (IT == PredefinedExpr::LFunction) { ResTy = Context.WideCharTy.withConst(); SmallString<32> RawChars; ConvertUTF8ToWideString(Context.getTypeSizeInChars(ResTy).getQuantity(), Str, RawChars); ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal, /*IndexTypeQuals*/ 0); SL = StringLiteral::Create(Context, RawChars, StringLiteral::Wide, /*Pascal*/ false, ResTy, Loc); } else { ResTy = Context.CharTy.withConst(); ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal, /*IndexTypeQuals*/ 0); SL = StringLiteral::Create(Context, Str, StringLiteral::Ascii, /*Pascal*/ false, ResTy, Loc); } } return new (Context) PredefinedExpr(Loc, ResTy, IT, SL); } ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) { PredefinedExpr::IdentType IT; switch (Kind) { default: llvm_unreachable("Unknown simple primary expr!"); case tok::kw___func__: IT = PredefinedExpr::Func; break; // [C99 6.4.2.2] case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break; case tok::kw___FUNCDNAME__: IT = PredefinedExpr::FuncDName; break; // [MS] case tok::kw___FUNCSIG__: IT = PredefinedExpr::FuncSig; break; // [MS] case tok::kw_L__FUNCTION__: IT = PredefinedExpr::LFunction; break; case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break; } return BuildPredefinedExpr(Loc, IT); } ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) { SmallString<16> CharBuffer; bool Invalid = false; StringRef ThisTok = PP.getSpelling(Tok, CharBuffer, &Invalid); if (Invalid) return ExprError(); CharLiteralParser Literal(ThisTok.begin(), ThisTok.end(), Tok.getLocation(), PP, Tok.getKind()); if (Literal.hadError()) return ExprError(); // HLSL Change Starts if (getLangOpts().HLSL) { if (Literal.isWide() || Literal.isUTF16() || Literal.isUTF32() || Literal.isMultiChar()) { Diag(Tok.getLocation(), diag::err_hlsl_unsupported_char_literal); return ExprError(); } Expr *CharLit = new (Context)CharacterLiteral(Literal.getValue(), CharacterLiteral::Ascii, Context.CharTy, Tok.getLocation()); Expr* Result = ImplicitCastExpr::Create(Context, Context.UnsignedIntTy, CK_IntegralCast, CharLit, nullptr, VK_RValue); return Result; } // HLSL Change Ends QualType Ty; if (Literal.isWide()) Ty = Context.WideCharTy; // L'x' -> wchar_t in C and C++. else if (Literal.isUTF16()) Ty = Context.Char16Ty; // u'x' -> char16_t in C11 and C++11. else if (Literal.isUTF32()) Ty = Context.Char32Ty; // U'x' -> char32_t in C11 and C++11. else if (!getLangOpts().CPlusPlus || Literal.isMultiChar()) Ty = Context.IntTy; // 'x' -> int in C, 'wxyz' -> int in C++. else Ty = Context.CharTy; // 'x' -> char in C++ CharacterLiteral::CharacterKind Kind = CharacterLiteral::Ascii; if (Literal.isWide()) Kind = CharacterLiteral::Wide; else if (Literal.isUTF16()) Kind = CharacterLiteral::UTF16; else if (Literal.isUTF32()) Kind = CharacterLiteral::UTF32; Expr *Lit = new (Context) CharacterLiteral(Literal.getValue(), Kind, Ty, Tok.getLocation()); if (Literal.getUDSuffix().empty()) return Lit; // We're building a user-defined literal. IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix()); SourceLocation UDSuffixLoc = getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset()); // Make sure we're allowed user-defined literals here. 
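// (For illustration, assuming a user-declared suffix:
//    constexpr char operator "" _code(char c) { return c; }
// makes 'A'_code a call of the form operator "" _code('A').)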
if (!UDLScope) return ExprError(Diag(UDSuffixLoc, diag::err_invalid_character_udl)); // C++11 [lex.ext]p6: The literal L is treated as a call of the form // operator "" X (ch) return BuildCookedLiteralOperatorCall(*this, UDLScope, UDSuffix, UDSuffixLoc, Lit, Tok.getLocation()); } ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) { unsigned IntSize = Context.getTargetInfo().getIntWidth(); // HLSL Change Starts - HLSL literal int QualType Ty; if (getLangOpts().HLSL && getLangOpts().HLSLVersion < hlsl::LangStd::v202x) { IntSize = 64; Ty = Context.LitIntTy; } else Ty = Context.IntTy; // HLSL Change Ends return IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val), Ty, Loc); // HLSL Change } static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal, QualType Ty, SourceLocation Loc) { const llvm::fltSemantics &Format = S.Context.getFloatTypeSemantics(Ty); using llvm::APFloat; APFloat Val(Format); APFloat::opStatus result = Literal.GetFloatValue(Val); // Overflow is always an error, but underflow is only an error if // we underflowed to zero (APFloat reports denormals as underflow). if ((result & APFloat::opOverflow) || ((result & APFloat::opUnderflow) && Val.isZero())) { unsigned diagnostic; SmallString<20> buffer; if (result & APFloat::opOverflow) { diagnostic = diag::warn_float_overflow; APFloat::getLargest(Format).toString(buffer); } else { diagnostic = diag::warn_float_underflow; APFloat::getSmallest(Format).toString(buffer); } S.Diag(Loc, diagnostic) << Ty << StringRef(buffer.data(), buffer.size()); } bool isExact = (result == APFloat::opOK); return FloatingLiteral::Create(S.Context, Val, isExact, Ty, Loc); } bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) { assert(E && "Invalid expression"); if (E->isValueDependent()) return false; QualType QT = E->getType(); if (!QT->isIntegerType() || QT->isBooleanType() || QT->isCharType()) { Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_type) << QT; return true; } llvm::APSInt ValueAPS; ExprResult R = VerifyIntegerConstantExpression(E, &ValueAPS); if (R.isInvalid()) return true; bool ValueIsPositive = ValueAPS.isStrictlyPositive(); if (!ValueIsPositive || ValueAPS.getActiveBits() > 31) { Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_value) << ValueAPS.toString(10) << ValueIsPositive; return true; } return false; } ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) { // Fast path for a single digit (which is quite common). A single digit // cannot have a trigraph, escaped newline, radix prefix, or suffix. if (Tok.getLength() == 1) { const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok); return ActOnIntegerConstant(Tok.getLocation(), Val-'0'); } SmallString<128> SpellingBuffer; // NumericLiteralParser wants to overread by one character. Add padding to // the buffer in case the token is copied to the buffer. If getSpelling() // returns a StringRef to the memory buffer, it should have a null char at // the EOF, so it is also safe. SpellingBuffer.resize(Tok.getLength() + 1); // Get the spelling of the token, which eliminates trigraphs, etc. bool Invalid = false; StringRef TokSpelling = PP.getSpelling(Tok, SpellingBuffer, &Invalid); if (Invalid) return ExprError(); NumericLiteralParser Literal(TokSpelling, Tok.getLocation(), PP); if (Literal.hadError) return ExprError(); if (Literal.hasUDSuffix()) { // We're building a user-defined literal. 
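// (For illustration, assuming a user-declared suffix '_km': for '1.5_km'
// the cooked form looked up below is operator "" _km(1.5L), and for an
// integer such as '42_km' it is operator "" _km(42ULL).)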
IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix()); SourceLocation UDSuffixLoc = getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset()); // Make sure we're allowed user-defined literals here. if (!UDLScope) return ExprError(Diag(UDSuffixLoc, diag::err_invalid_numeric_udl)); QualType CookedTy; if (Literal.isFloatingLiteral()) { // C++11 [lex.ext]p4: If S contains a literal operator with parameter type // long double, the literal is treated as a call of the form // operator "" X (f L) CookedTy = Context.LongDoubleTy; } else { // C++11 [lex.ext]p3: If S contains a literal operator with parameter type // unsigned long long, the literal is treated as a call of the form // operator "" X (n ULL) CookedTy = Context.UnsignedLongLongTy; } DeclarationName OpName = Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix); DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc); OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc); SourceLocation TokLoc = Tok.getLocation(); // Perform literal operator lookup to determine if we're building a raw // literal or a cooked one. LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName); switch (LookupLiteralOperator(UDLScope, R, CookedTy, /*AllowRaw*/true, /*AllowTemplate*/true, /*AllowStringTemplate*/false)) { case LOLR_Error: return ExprError(); case LOLR_Cooked: { Expr *Lit; if (Literal.isFloatingLiteral()) { Lit = BuildFloatingLiteral(*this, Literal, CookedTy, Tok.getLocation()); } else { llvm::APInt ResultVal(Context.getTargetInfo().getLongLongWidth(), 0); if (Literal.GetIntegerValue(ResultVal)) Diag(Tok.getLocation(), diag::err_integer_literal_too_large) << /* Unsigned */ 1; Lit = IntegerLiteral::Create(Context, ResultVal, CookedTy, Tok.getLocation()); } return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc); } case LOLR_Raw: { // C++11 [lex.ext]p3, p4: If S contains a raw literal operator, the // literal is treated as a call of the form // operator "" X ("n") unsigned Length = Literal.getUDSuffixOffset(); QualType StrTy = Context.getConstantArrayType( Context.CharTy.withConst(), llvm::APInt(32, Length + 1), ArrayType::Normal, 0); Expr *Lit = StringLiteral::Create( Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii, /*Pascal*/false, StrTy, &TokLoc, 1); return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc); } case LOLR_Template: { // C++11 [lex.ext]p3, p4: Otherwise (S contains a literal operator // template), L is treated as a call of the form // operator "" X <'c1', 'c2', ... 'ck'>() // where n is the source character sequence c1 c2 ... ck. 
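// (Concrete instance with a hypothetical suffix '_x': '123_x' becomes
//  operator "" _x<'1', '2', '3'>(), which is exactly the explicit
//  template argument list assembled below.)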
TemplateArgumentListInfo ExplicitArgs; unsigned CharBits = Context.getIntWidth(Context.CharTy); bool CharIsUnsigned = Context.CharTy->isUnsignedIntegerType(); llvm::APSInt Value(CharBits, CharIsUnsigned); for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) { Value = TokSpelling[I]; TemplateArgument Arg(Context, Value, Context.CharTy); TemplateArgumentLocInfo ArgInfo; ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo)); } return BuildLiteralOperatorCall(R, OpNameInfo, None, TokLoc, &ExplicitArgs); } case LOLR_StringTemplate: llvm_unreachable("unexpected literal operator lookup result"); } } Expr *Res; if (Literal.isFloatingLiteral()) { QualType Ty; if (Literal.isFloat) Ty = Context.FloatTy; // HLSL Change Starts else if (getLangOpts().HLSL && getLangOpts().HLSLVersion < hlsl::LangStd::v202x && !Literal.isLong && !Literal.isHalf) Ty = Context.LitFloatTy; else if (Literal.isLong) Ty = Context.DoubleTy; else if (Literal.isHalf) { if (getLangOpts().HLSL && getLangOpts().HLSLVersion < hlsl::LangStd::v202x) Ty = getLangOpts().UseMinPrecision ? Context.FloatTy : Context.HalfTy; else Ty = getLangOpts().UseMinPrecision ? Context.HalfFloatTy : Context.HalfTy; } else if (getLangOpts().HLSL) Ty = Context.FloatTy; // HLSL Change Ends else if (!Literal.isLong) Ty = Context.DoubleTy; else Ty = Context.LongDoubleTy; Res = BuildFloatingLiteral(*this, Literal, Ty, Tok.getLocation()); if (Ty == Context.DoubleTy) { if (getLangOpts().SinglePrecisionConstants) { Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get(); } else if (getLangOpts().OpenCL && !((getLangOpts().OpenCLVersion >= 120) || getOpenCLOptions().cl_khr_fp64)) { Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64); Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get(); } } } else if (!Literal.isIntegerLiteral()) { return ExprError(); // HLSL Change Starts } else if (getLangOpts().HLSL && getLangOpts().HLSLVersion < hlsl::LangStd::v202x) { QualType Ty; unsigned Width = 64; llvm::APInt ResultVal(Width, 0); if (!Literal.isLong && !Literal.isLongLong && !Literal.isUnsigned) { // in HLSL, unspecific literal ints are LitIntTy, using 64-bit Ty = Context.LitIntTy; if (Literal.GetIntegerValue(ResultVal)) { // If this value didn't fit into 64-bit literal int, report error. Diag(Tok.getLocation(), diag::err_integer_literal_too_large); } } else { if (Literal.GetIntegerValue(ResultVal)) { Diag(Tok.getLocation(), diag::err_integer_literal_too_large); } if (Literal.isLongLong) { if (Literal.isUnsigned) Ty = Context.UnsignedLongLongTy; else Ty = Context.LongLongTy; } else { // long is the same as int for HLSL, so ignore isLong here Width = 32; ResultVal = ResultVal.trunc(Width); if (Literal.isUnsigned || Literal.getRadix() != 10) Ty = Context.UnsignedIntTy; else Ty = Context.IntTy; } } if (Literal.getRadix() != 10) { uint64_t Val = ResultVal.getLimitedValue(); if (Val < std::numeric_limits<uint32_t>::max()) Width = 32; uint64_t MSB = 1ull << (Width - 1); if ((Val & MSB) != 0) Diag(Tok.getLocation(), diag::warn_hlsl_legacy_integer_literal_signedness); } return IntegerLiteral::Create(Context, ResultVal, Ty, Tok.getLocation()); // HLSL Change Ends } else { QualType Ty; // HLSL Change: disable the warning below. // 'long long' is a C99 or C++11 feature. if (!getLangOpts().HLSL && !getLangOpts().C99 && Literal.isLongLong) { if (getLangOpts().CPlusPlus) Diag(Tok.getLocation(), getLangOpts().CPlusPlus11 ? 
             diag::warn_cxx98_compat_longlong : diag::ext_cxx11_longlong);
      else
        Diag(Tok.getLocation(), diag::ext_c99_longlong);
    }

    // Get the value in the widest-possible width.
    unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
    // The microsoft literal suffix extensions support 128-bit literals, which
    // may be wider than [u]intmax_t.
    // FIXME: Actually, they don't. We seem to have accidentally invented the
    // i128 suffix.
    if (Literal.MicrosoftInteger == 128 && MaxWidth < 128 &&
        Context.getTargetInfo().hasInt128Type())
      MaxWidth = 128;
    llvm::APInt ResultVal(MaxWidth, 0);
    // HLSL Change - 202x integer warnings.
    uint64_t MSB = 1ull << (MaxWidth - 1);

    if (Literal.GetIntegerValue(ResultVal)) {
      // If this value didn't fit into uintmax_t, error and force to ull.
      Diag(Tok.getLocation(), diag::err_integer_literal_too_large)
          << /* Unsigned */ 1;
      Ty = Context.UnsignedLongLongTy;
      assert(Context.getTypeSize(Ty) == ResultVal.getBitWidth() &&
             "long long is not intmax_t?");
    } else {
      // If this value fits into a ULL, try to figure out what else it fits into
      // according to the rules of C99 6.4.4.1p5.

      // Octal, Hexadecimal, and integers with a U suffix are allowed to
      // be an unsigned int.
      bool AllowUnsigned = Literal.isUnsigned || Literal.getRadix() != 10;

      // Check from smallest to largest, picking the smallest type we can.
      unsigned Width = 0;

      // Microsoft specific integer suffixes are explicitly sized.
      if (Literal.MicrosoftInteger) {
        if (Literal.MicrosoftInteger > MaxWidth) {
          // If this target doesn't support __int128, error and force to ull.
          Diag(Tok.getLocation(), diag::err_int128_unsupported);
          Width = MaxWidth;
          Ty = Context.getIntMaxType();
        } else if (Literal.MicrosoftInteger == 8 && !Literal.isUnsigned) {
          Width = 8;
          Ty = Context.CharTy;
        } else {
          Width = Literal.MicrosoftInteger;
          Ty = Context.getIntTypeForBitwidth(Width,
                                             /*Signed=*/!Literal.isUnsigned);
        }
      }

      if (Ty.isNull() && !Literal.isLong && !Literal.isLongLong) {
        // Are int/unsigned possibilities?
        unsigned IntSize = Context.getTargetInfo().getIntWidth();

        // Does it fit in an unsigned int?
        if (ResultVal.isIntN(IntSize)) {
          // Does it fit in a signed int?
          if (!Literal.isUnsigned && ResultVal[IntSize-1] == 0)
            Ty = Context.IntTy;
          else if (AllowUnsigned)
            Ty = Context.UnsignedIntTy;
          Width = IntSize;
        }
      }

      // HLSL Change Begin - Treat `long` literal as `long long`
      // This is a bit hacky. HLSL doesn't really have a `long` or `long long`
      // type so the specification has simplified the suffixes. Unfortunately,
      // rather than just treating `ll` as `l` we need to do the inverse. This
      // is because we rely on the MSVC mangling which follows LLP64 (l being
      // 32-bit and ll 64-bit). We should find a better solution to this in
      // Clang.
      if (getLangOpts().HLSL && !Literal.isLongLong)
        Literal.isLongLong = Literal.isLong;
      // HLSL Change End

      // Are long/unsigned long possibilities?
      if (Ty.isNull() && !Literal.isLongLong) {
        unsigned LongSize = Context.getTargetInfo().getLongWidth();

        // Does it fit in an unsigned long?
        if (ResultVal.isIntN(LongSize)) {
          // Does it fit in a signed long?
          if (!Literal.isUnsigned && ResultVal[LongSize-1] == 0)
            Ty = Context.LongTy;
          else if (AllowUnsigned)
            Ty = Context.UnsignedLongTy;
          // Check according to the rules of C90 6.1.3.2p5. C++03 [lex.icon]p2
          // is compatible.
          // HLSL Change: HLSL will promote to the next signed integer type.
          else if (!getLangOpts().HLSL && !getLangOpts().C99 &&
                   !getLangOpts().CPlusPlus11) {
            const unsigned LongLongSize =
                Context.getTargetInfo().getLongLongWidth();
            Diag(Tok.getLocation(),
                 getLangOpts().CPlusPlus
                     ? Literal.isLong
                           ? diag::warn_old_implicitly_unsigned_long_cxx
                           : /*C++98 UB*/ diag::
                                 ext_old_implicitly_unsigned_long_cxx
                     : diag::warn_old_implicitly_unsigned_long)
                << (LongLongSize > LongSize ? /*will have type 'long long'*/ 0
                                            : /*will be ill-formed*/ 1);
            Ty = Context.UnsignedLongTy;
          }
          Width = LongSize;
        }
      }

      // Check long long if needed.
      if (Ty.isNull()) {
        unsigned LongLongSize = Context.getTargetInfo().getLongLongWidth();

        // Does it fit in an unsigned long long?
        if (ResultVal.isIntN(LongLongSize)) {
          // Does it fit in a signed long long?
          // To be compatible with MSVC, hex integer literals ending with the
          // LL or i64 suffix are always signed in Microsoft mode.
          if (!Literal.isUnsigned && (ResultVal[LongLongSize-1] == 0 ||
              (getLangOpts().MicrosoftExt && Literal.isLongLong)))
            Ty = Context.LongLongTy;
          else if (AllowUnsigned)
            Ty = Context.UnsignedLongLongTy;
          Width = LongLongSize;
        }
      }

      // If we still couldn't decide a type, we probably have something that
      // does not fit in a signed long long, but has no U suffix.
      if (Ty.isNull()) {
        Diag(Tok.getLocation(), diag::ext_integer_literal_too_large_for_signed);
        Ty = Context.UnsignedLongLongTy;
        Width = Context.getTargetInfo().getLongLongWidth();
      }

      if (ResultVal.getBitWidth() != Width)
        ResultVal = ResultVal.trunc(Width);
      MSB = 1ull << (Width - 1); // HLSL Change - 202x integer warnings.
    }
    // HLSL Change Begin - 202x integer warnings.
    if (Literal.getRadix() != 10) {
      uint64_t Val = ResultVal.getLimitedValue();
      if ((Val & MSB) != 0)
        Diag(Tok.getLocation(),
             diag::warn_hlsl_legacy_integer_literal_signedness);
    }
    // HLSL Change End - 202x integer warnings.
    Res = IntegerLiteral::Create(Context, ResultVal, Ty, Tok.getLocation());
  }

  // If this is an imaginary literal, create the ImaginaryLiteral wrapper.
  if (Literal.isImaginary)
    Res = new (Context) ImaginaryLiteral(Res,
                                        Context.getComplexType(Res->getType()));

  return Res;
}

ExprResult Sema::ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E) {
  assert(E && "ActOnParenExpr() missing expr");
  return new (Context) ParenExpr(L, R, E);
}

static bool CheckVecStepTraitOperandType(Sema &S, QualType T,
                                         SourceLocation Loc,
                                         SourceRange ArgRange) {
  // [OpenCL 1.1 6.11.12] "The vec_step built-in function takes a built-in
  // scalar or vector data type argument..."
  // Every built-in scalar type (OpenCL 1.1 6.1.1) is either an arithmetic
  // type (C99 6.2.5p18) or void.
  if (!(T->isArithmeticType() || T->isVoidType() || T->isVectorType())) {
    S.Diag(Loc, diag::err_vecstep_non_scalar_vector_type)
      << T << ArgRange;
    return true;
  }

  assert((T->isVoidType() || !T->isIncompleteType()) &&
         "Scalar types should always be complete");
  return false;
}

static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
                                           SourceLocation Loc,
                                           SourceRange ArgRange,
                                           UnaryExprOrTypeTrait TraitKind) {
  // Invalid types must be hard errors for SFINAE in C++.
  if (S.LangOpts.CPlusPlus)
    return true;

  // C99 6.5.3.4p1:
  if (T->isFunctionType() &&
      (TraitKind == UETT_SizeOf || TraitKind == UETT_AlignOf)) {
    // sizeof(function)/alignof(function) is allowed as an extension.
    S.Diag(Loc, diag::ext_sizeof_alignof_function_type)
      << TraitKind << ArgRange;
    return false;
  }

  // Allow sizeof(void)/alignof(void) as an extension, unless in OpenCL where
  // this is an error (OpenCL v1.1 s6.3.k)
  if (T->isVoidType()) {
    unsigned DiagID = S.LangOpts.OpenCL ?
diag::err_opencl_sizeof_alignof_type : diag::ext_sizeof_alignof_void_type; S.Diag(Loc, DiagID) << TraitKind << ArgRange; return false; } return true; } static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T, SourceLocation Loc, SourceRange ArgRange, UnaryExprOrTypeTrait TraitKind) { // Reject sizeof(interface) and sizeof(interface<proto>) if the // runtime doesn't allow it. if (!S.LangOpts.ObjCRuntime.allowsSizeofAlignof() && T->isObjCObjectType()) { S.Diag(Loc, diag::err_sizeof_nonfragile_interface) << T << (TraitKind == UETT_SizeOf) << ArgRange; return true; } return false; } /// \brief Check whether E is a pointer from a decayed array type (the decayed /// pointer type is equal to T) and emit a warning if it is. static void warnOnSizeofOnArrayDecay(Sema &S, SourceLocation Loc, QualType T, Expr *E) { // Don't warn if the operation changed the type. if (T != E->getType()) return; // Now look for array decays. ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E); if (!ICE || ICE->getCastKind() != CK_ArrayToPointerDecay) return; S.Diag(Loc, diag::warn_sizeof_array_decay) << ICE->getSourceRange() << ICE->getType() << ICE->getSubExpr()->getType(); } // HLSL Change Begins bool Sema::CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation Loc, UnaryExprOrTypeTrait ExprKind) { assert(ExprKind == UnaryExprOrTypeTrait::UETT_SizeOf); // "sizeof 42" is ill-defined because HLSL has literal int type which can decay to an int of any size. const BuiltinType* BuiltinTy = ExprType->getAs<BuiltinType>(); if (BuiltinTy != nullptr && (BuiltinTy->getKind() == BuiltinType::LitInt || BuiltinTy->getKind() == BuiltinType::LitFloat)) { Diag(Loc, diag::err_hlsl_sizeof_literal) << ExprType; return true; } if (!hlsl::IsHLSLNumericOrAggregateOfNumericType(ExprType)) { Diag(Loc, diag::err_hlsl_sizeof_nonnumeric) << ExprType; return true; } return false; } // HLSL Change Ends /// \brief Check the constraints on expression operands to unary type expression /// and type traits. /// /// Completes any types necessary and validates the constraints on the operand /// expression. The logic mostly mirrors the type-based overload, but may modify /// the expression as it completes the type for that expression through template /// instantiation, etc. bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind) { QualType ExprTy = E->getType(); assert(!ExprTy->isReferenceType()); if (ExprKind == UETT_VecStep) return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(), E->getSourceRange()); // Whitelist some types as extensions if (!CheckExtensionTraitOperandType(*this, ExprTy, E->getExprLoc(), E->getSourceRange(), ExprKind)) return false; // 'alignof' applied to an expression only requires the base element type of // the expression to be complete. 'sizeof' requires the expression's type to // be complete (and will attempt to complete it if it's an array of unknown // bound). if (ExprKind == UETT_AlignOf) { if (RequireCompleteType(E->getExprLoc(), Context.getBaseElementType(E->getType()), diag::err_sizeof_alignof_incomplete_type, ExprKind, E->getSourceRange())) return true; } else { if (RequireCompleteExprType(E, diag::err_sizeof_alignof_incomplete_type, ExprKind, E->getSourceRange())) return true; } // Completing the expression's type may have changed it. 
ExprTy = E->getType(); assert(!ExprTy->isReferenceType()); if (getLangOpts().HLSL && CheckHLSLUnaryExprOrTypeTraitOperand(ExprTy, E->getExprLoc(), ExprKind)) { return true; } if (ExprTy->isFunctionType()) { Diag(E->getExprLoc(), diag::err_sizeof_alignof_function_type) << ExprKind << E->getSourceRange(); return true; } // The operand for sizeof and alignof is in an unevaluated expression context, // so side effects could result in unintended consequences. if ((ExprKind == UETT_SizeOf || ExprKind == UETT_AlignOf) && ActiveTemplateInstantiations.empty() && E->HasSideEffects(Context, false)) Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context); if (CheckObjCTraitOperandConstraints(*this, ExprTy, E->getExprLoc(), E->getSourceRange(), ExprKind)) return true; if (ExprKind == UETT_SizeOf) { if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParens())) { if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) { QualType OType = PVD->getOriginalType(); QualType Type = PVD->getType(); if (Type->isPointerType() && OType->isArrayType()) { Diag(E->getExprLoc(), diag::warn_sizeof_array_param) << Type << OType; Diag(PVD->getLocation(), diag::note_declared_at); } } } // Warn on "sizeof(array op x)" and "sizeof(x op array)", where the array // decays into a pointer and returns an unintended result. This is most // likely a typo for "sizeof(array) op x". if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E->IgnoreParens())) { warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(), BO->getLHS()); warnOnSizeofOnArrayDecay(*this, BO->getOperatorLoc(), BO->getType(), BO->getRHS()); } } return false; } /// \brief Check the constraints on operands to unary expression and type /// traits. /// /// This will complete any types necessary, and validate the various constraints /// on those operands. /// /// The UsualUnaryConversions() function is *not* called by this routine. /// C99 6.3.2.1p[2-4] all state: /// Except when it is the operand of the sizeof operator ... /// /// C++ [expr.sizeof]p4 /// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer /// standard conversions are not applied to the operand of sizeof. /// /// This policy is followed for all of the unary trait expressions. bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind) { if (ExprType->isDependentType()) return false; // C++ [expr.sizeof]p2: // When applied to a reference or a reference type, the result // is the size of the referenced type. // C++11 [expr.alignof]p3: // When alignof is applied to a reference type, the result // shall be the alignment of the referenced type. if (const ReferenceType *Ref = ExprType->getAs<ReferenceType>()) ExprType = Ref->getPointeeType(); if (getLangOpts().HLSL && CheckHLSLUnaryExprOrTypeTraitOperand(ExprType, OpLoc, ExprKind)) { return true; } // C11 6.5.3.4/3, C++11 [expr.alignof]p3: // When alignof or _Alignof is applied to an array type, the result // is the alignment of the element type. 
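  // For example, alignof(int[10]) yields the alignment of int (typically 4),
  // not a value derived from the 40-byte array object as a whole.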
  if (ExprKind == UETT_AlignOf || ExprKind == UETT_OpenMPRequiredSimdAlign)
    ExprType = Context.getBaseElementType(ExprType);

  if (ExprKind == UETT_VecStep)
    return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);

  // Whitelist some types as extensions
  if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
                                      ExprKind))
    return false;

  if (RequireCompleteType(OpLoc, ExprType,
                          diag::err_sizeof_alignof_incomplete_type,
                          ExprKind, ExprRange))
    return true;

  if (ExprType->isFunctionType()) {
    Diag(OpLoc, diag::err_sizeof_alignof_function_type)
      << ExprKind << ExprRange;
    return true;
  }

  if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
                                       ExprKind))
    return true;

  return false;
}

static bool CheckAlignOfExpr(Sema &S, Expr *E) {
  E = E->IgnoreParens();

  // Cannot know anything else if the expression is dependent.
  if (E->isTypeDependent())
    return false;

  if (E->getObjectKind() == OK_BitField) {
    S.Diag(E->getExprLoc(), diag::err_sizeof_alignof_bitfield)
       << 1 << E->getSourceRange();
    return true;
  }

  ValueDecl *D = nullptr;
  if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    D = DRE->getDecl();
  } else if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
    D = ME->getMemberDecl();
  }

  // If it's a field, require the containing struct to have a
  // complete definition so that we can compute the layout.
  //
  // This can happen in C++11 onwards, either by naming the member
  // in a way that is not transformed into a member access expression
  // (in an unevaluated operand, for instance), or by naming the member
  // in a trailing-return-type.
  //
  // For the record, since __alignof__ on expressions is a GCC
  // extension, GCC seems to permit this but always gives the
  // nonsensical answer 0.
  //
  // We don't really need the layout here --- we could instead just
  // directly check for all the appropriate alignment-lowering
  // attributes --- but that would require duplicating a lot of
  // logic that just isn't worth duplicating for such a marginal
  // use-case.
  if (FieldDecl *FD = dyn_cast_or_null<FieldDecl>(D)) {
    // Fast path this check, since we at least know the record has a
    // definition if we can find a member of it.
    if (!FD->getParent()->isCompleteDefinition()) {
      S.Diag(E->getExprLoc(), diag::err_alignof_member_of_incomplete_type)
        << E->getSourceRange();
      return true;
    }

    // Otherwise, if it's a field, and the field doesn't have
    // reference type, then it must have a complete type (or be a
    // flexible array member, which we explicitly want to
    // white-list anyway), which makes the following checks trivial.
    if (!FD->getType()->isReferenceType())
      return false;
  }

  return S.CheckUnaryExprOrTypeTraitOperand(E, UETT_AlignOf);
}

bool Sema::CheckVecStepExpr(Expr *E) {
  E = E->IgnoreParens();

  // Cannot know anything else if the expression is dependent.
  if (E->isTypeDependent())
    return false;

  return CheckUnaryExprOrTypeTraitOperand(E, UETT_VecStep);
}

/// \brief Build a sizeof or alignof expression given a type operand.
ExprResult
Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                     SourceLocation OpLoc,
                                     UnaryExprOrTypeTrait ExprKind,
                                     SourceRange R) {
  if (!TInfo)
    return ExprError();

  QualType T = TInfo->getType();

  if (!T->isDependentType() &&
      CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind))
    return ExprError();

  // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
  return new (Context) UnaryExprOrTypeTraitExpr(
      ExprKind, TInfo, Context.getSizeType(), OpLoc, R.getEnd());
}

/// \brief Build a sizeof or alignof expression given an expression
/// operand.
ExprResult
Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                     UnaryExprOrTypeTrait ExprKind) {
  ExprResult PE = CheckPlaceholderExpr(E);
  if (PE.isInvalid())
    return ExprError();
  E = PE.get();

  // Verify that the operand is valid.
  bool isInvalid = false;
  if (E->isTypeDependent()) {
    // Delay type-checking for type-dependent expressions.
  } else if (ExprKind == UETT_AlignOf) {
    isInvalid = CheckAlignOfExpr(*this, E);
  } else if (ExprKind == UETT_VecStep) {
    isInvalid = CheckVecStepExpr(E);
  } else if (ExprKind == UETT_OpenMPRequiredSimdAlign) {
    Diag(E->getExprLoc(), diag::err_openmp_default_simd_align_expr);
    isInvalid = true;
  } else if (E->refersToBitField()) { // C99 6.5.3.4p1.
    Diag(E->getExprLoc(), diag::err_sizeof_alignof_bitfield) << 0;
    isInvalid = true;
  } else {
    isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_SizeOf);
  }

  if (isInvalid)
    return ExprError();

  if (ExprKind == UETT_SizeOf && E->getType()->isVariableArrayType()) {
    PE = TransformToPotentiallyEvaluated(E);
    if (PE.isInvalid()) return ExprError();
    E = PE.get();
  }

  // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
  return new (Context) UnaryExprOrTypeTraitExpr(
      ExprKind, E, Context.getSizeType(), OpLoc,
      E->getSourceRange().getEnd());
}

/// ActOnUnaryExprOrTypeTraitExpr - Handle @c sizeof(type) and @c sizeof @c
/// expr and the same for @c alignof and @c __alignof
/// Note that the ArgRange is invalid if isType is false.
ExprResult
Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                                    UnaryExprOrTypeTrait ExprKind, bool IsType,
                                    void *TyOrEx, const SourceRange &ArgRange) {
  // If error parsing type, ignore.
  if (!TyOrEx) return ExprError();

  if (IsType) {
    TypeSourceInfo *TInfo;
    (void) GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrEx), &TInfo);
    return CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, ArgRange);
  }

  Expr *ArgEx = (Expr *)TyOrEx;
  ExprResult Result = CreateUnaryExprOrTypeTraitExpr(ArgEx, OpLoc, ExprKind);
  return Result;
}

static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc,
                                     bool IsReal) {
  if (V.get()->isTypeDependent())
    return S.Context.DependentTy;

  // _Real and _Imag are only l-values for normal l-values.
  if (V.get()->getObjectKind() != OK_Ordinary) {
    V = S.DefaultLvalueConversion(V.get());
    if (V.isInvalid())
      return QualType();
  }

  // These operators return the element type of a complex type.
  if (const ComplexType *CT = V.get()->getType()->getAs<ComplexType>())
    return CT->getElementType();

  // Otherwise they pass through real integer and floating point types here.
  if (V.get()->getType()->isArithmeticType())
    return V.get()->getType();

  // Test for placeholders.
  ExprResult PR = S.CheckPlaceholderExpr(V.get());
  if (PR.isInvalid()) return QualType();
  if (PR.get() != V.get()) {
    V = PR;
    return CheckRealImagOperand(S, V, Loc, IsReal);
  }

  // Reject anything else.
  S.Diag(Loc, diag::err_realimag_invalid_type) << V.get()->getType()
    << (IsReal ? "__real" : "__imag");
  return QualType();
}

ExprResult
Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                          tok::TokenKind Kind, Expr *Input) {
  UnaryOperatorKind Opc;
  switch (Kind) {
  default: llvm_unreachable("Unknown unary op!");
  case tok::plusplus:   Opc = UO_PostInc; break;
  case tok::minusminus: Opc = UO_PostDec; break;
  }

  // Since this might be a postfix expression, get rid of ParenListExprs.
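  // (A ParenListExpr arises when a parenthesized, comma-separated list is
  // parsed before its meaning is known, e.g. in some initializer contexts;
  // it is rewrapped here so the operand type-checks as an ordinary
  // parenthesized expression.)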
ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Input); if (Result.isInvalid()) return ExprError(); Input = Result.get(); return BuildUnaryOp(S, OpLoc, Opc, Input); } /// \brief Diagnose if arithmetic on the given ObjC pointer is illegal. /// /// \return true on error static bool checkArithmeticOnObjCPointer(Sema &S, SourceLocation opLoc, Expr *op) { assert(op->getType()->isObjCObjectPointerType()); if (S.LangOpts.ObjCRuntime.allowsPointerArithmetic() && !S.LangOpts.ObjCSubscriptingLegacyRuntime) return false; S.Diag(opLoc, diag::err_arithmetic_nonfragile_interface) << op->getType()->castAs<ObjCObjectPointerType>()->getPointeeType() << op->getSourceRange(); return true; } ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc, Expr *idx, SourceLocation rbLoc) { // Since this might be a postfix expression, get rid of ParenListExprs. if (isa<ParenListExpr>(base)) { ExprResult result = MaybeConvertParenListExprToParenExpr(S, base); if (result.isInvalid()) return ExprError(); base = result.get(); } // Handle any non-overload placeholder types in the base and index // expressions. We can't handle overloads here because the other // operand might be an overloadable type, in which case the overload // resolution for the operator overload should get the first crack // at the overload. if (base->getType()->isNonOverloadPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(base); if (result.isInvalid()) return ExprError(); base = result.get(); } if (idx->getType()->isNonOverloadPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(idx); if (result.isInvalid()) return ExprError(); idx = result.get(); } // HLSL Change Starts - Check for subscript access of out indices // Disallow component access for out indices for DXIL path. We still allow // this in SPIR-V path. if (getLangOpts().HLSL && !getLangOpts().SPIRV && base->getType()->isRecordType() && IsExprAccessingOutIndicesArray(base)) { Diag(lbLoc, diag::err_hlsl_out_indices_array_incorrect_access); return ExprError(); } // HLSL Change Ends // Build an unanalyzed expression if either operand is type-dependent. if (getLangOpts().CPlusPlus && (base->isTypeDependent() || idx->isTypeDependent())) { return new (Context) ArraySubscriptExpr(base, idx, Context.DependentTy, VK_LValue, OK_Ordinary, rbLoc); } // Use C++ overloaded-operator rules if either operand has record // type. The spec says to do this if either type is *overloadable*, // but enum types can't declare subscript operators or conversion // operators, so there's nothing interesting for overload resolution // to do if there aren't any record types involved. // // ObjC pointers have their own subscripting logic that is not tied // to overload resolution and so should not take this path. if (getLangOpts().CPlusPlus && (base->getType()->isRecordType() || (!base->getType()->isObjCObjectPointerType() && idx->getType()->isRecordType()))) { return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, idx); } return CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc); } ExprResult Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc) { Expr *LHSExp = Base; Expr *RHSExp = Idx; // Perform default conversions. 
if (!LHSExp->getType()->getAs<VectorType>()) { ExprResult Result = DefaultFunctionArrayLvalueConversion(LHSExp); if (Result.isInvalid()) return ExprError(); LHSExp = Result.get(); } ExprResult Result = DefaultFunctionArrayLvalueConversion(RHSExp); if (Result.isInvalid()) return ExprError(); RHSExp = Result.get(); QualType LHSTy = LHSExp->getType(), RHSTy = RHSExp->getType(); ExprValueKind VK = VK_LValue; ExprObjectKind OK = OK_Ordinary; // C99 6.5.2.1p2: the expression e1[e2] is by definition precisely equivalent // to the expression *((e1)+(e2)). This means the array "Base" may actually be // in the subscript position. As a result, we need to derive the array base // and index from the expression types. Expr *BaseExpr, *IndexExpr; QualType ResultType; if (LHSTy->isDependentType() || RHSTy->isDependentType()) { BaseExpr = LHSExp; IndexExpr = RHSExp; ResultType = Context.DependentTy; } else if (const PointerType *PTy = LHSTy->getAs<PointerType>()) { BaseExpr = LHSExp; IndexExpr = RHSExp; ResultType = PTy->getPointeeType(); } else if (const ObjCObjectPointerType *PTy = LHSTy->getAs<ObjCObjectPointerType>()) { BaseExpr = LHSExp; IndexExpr = RHSExp; // Use custom logic if this should be the pseudo-object subscript // expression. if (!LangOpts.isSubscriptPointerArithmetic()) return BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, nullptr, nullptr); ResultType = PTy->getPointeeType(); } else if (const PointerType *PTy = RHSTy->getAs<PointerType>()) { // Handle the uncommon case of "123[Ptr]". BaseExpr = RHSExp; IndexExpr = LHSExp; ResultType = PTy->getPointeeType(); } else if (const ObjCObjectPointerType *PTy = RHSTy->getAs<ObjCObjectPointerType>()) { // Handle the uncommon case of "123[Ptr]". BaseExpr = RHSExp; IndexExpr = LHSExp; ResultType = PTy->getPointeeType(); if (!LangOpts.isSubscriptPointerArithmetic()) { Diag(LLoc, diag::err_subscript_nonfragile_interface) << ResultType << BaseExpr->getSourceRange(); return ExprError(); } } else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) { BaseExpr = LHSExp; // vectors: V[123] IndexExpr = RHSExp; VK = LHSExp->getValueKind(); if (VK != VK_RValue) OK = OK_VectorComponent; // FIXME: need to deal with const... ResultType = VTy->getElementType(); } else if (LHSTy->isArrayType()) { // If we see an array that wasn't promoted by // DefaultFunctionArrayLvalueConversion, it must be an array that // wasn't promoted because of the C90 rule that doesn't // allow promoting non-lvalue arrays. Warn, then // force the promotion here. // HLSL Change Starts - arrays won't decay if (getLangOpts().HLSL) { BaseExpr = LHSExp; IndexExpr = RHSExp; ResultType = LHSTy->getAsArrayTypeUnsafe()->getElementType(); // We need to make sure to preserve qualifiers on array types, since these // are in effect references. 
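      // For instance, indexing a `const float a[4]` parameter should yield a
      // const element, so assignments through the subscript still diagnose.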
if (LHSTy.hasQualifiers()) ResultType.setLocalFastQualifiers(LHSTy.getQualifiers().getFastQualifiers()); } else { // HLSL Change Ends Diag(LHSExp->getLocStart(), diag::ext_subscript_non_lvalue) << LHSExp->getSourceRange(); LHSExp = ImpCastExprToType(LHSExp, Context.getArrayDecayedType(LHSTy), CK_ArrayToPointerDecay).get(); LHSTy = LHSExp->getType(); BaseExpr = LHSExp; IndexExpr = RHSExp; ResultType = LHSTy->getAs<PointerType>()->getPointeeType(); } // HLSL Change - end else block } else if (RHSTy->isArrayType()) { // Same as previous, except for 123[f().a] case Diag(RHSExp->getLocStart(), diag::ext_subscript_non_lvalue) << RHSExp->getSourceRange(); RHSExp = ImpCastExprToType(RHSExp, Context.getArrayDecayedType(RHSTy), CK_ArrayToPointerDecay).get(); RHSTy = RHSExp->getType(); BaseExpr = RHSExp; IndexExpr = LHSExp; ResultType = RHSTy->getAs<PointerType>()->getPointeeType(); } else { // HLSL Change: use HLSL variation of error message return ExprError(Diag(LLoc, getLangOpts().HLSL ? diag::err_hlsl_typecheck_subscript_value : diag::err_typecheck_subscript_value) << LHSExp->getSourceRange() << RHSExp->getSourceRange()); } // HLSL Change Starts if (getLangOpts().HLSL && BaseExpr != LHSExp) { Diag(RHSExp->getLocStart(), diag::err_hlsl_unsupported_subscript_base_rhs); } if (!IndexExpr->getType()->isIntegerType() && !IndexExpr->isTypeDependent() && IndexExpr->getType()->isFloatingType()) { IndexExpr = ImpCastExprToType(IndexExpr, Context.UnsignedIntTy, CK_FloatingToIntegral).get(); } // HLSL Change Ends // C99 6.5.2.1p1 if (!IndexExpr->getType()->isIntegerType() && !IndexExpr->isTypeDependent()) return ExprError(Diag(LLoc, diag::err_typecheck_subscript_not_integer) << IndexExpr->getSourceRange()); if ((IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_S) || IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_U)) && !IndexExpr->isTypeDependent()) Diag(LLoc, diag::warn_subscript_is_char) << IndexExpr->getSourceRange(); // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly, // C++ [expr.sub]p1: The type "T" shall be a completely-defined object // type. Note that Functions are not objects, and that (in C99 parlance) // incomplete types are not object types. if (ResultType->isFunctionType()) { Diag(BaseExpr->getLocStart(), diag::err_subscript_function_type) << ResultType << BaseExpr->getSourceRange(); return ExprError(); } if (ResultType->isVoidType() && !getLangOpts().CPlusPlus) { // GNU extension: subscripting on pointer to void Diag(LLoc, diag::ext_gnu_subscript_void_type) << BaseExpr->getSourceRange(); // C forbids expressions of unqualified void type from being l-values. // See IsCForbiddenLValueType. 
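    // e.g. given `void *p;`, the GNU-extension subscript `p[0]` has type void
    // and is treated as an r-value here unless the pointee type is qualified.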
    if (!ResultType.hasQualifiers()) VK = VK_RValue;
  } else if (!ResultType->isDependentType() &&
      RequireCompleteType(LLoc, ResultType,
                          diag::err_subscript_incomplete_type, BaseExpr))
    return ExprError();

  assert(VK == VK_RValue || LangOpts.CPlusPlus ||
         !ResultType.isCForbiddenLValueType());

  if (getLangOpts().HLSL) RHSExp = IndexExpr; // HLSL Change - refer to right-hand side as indexer

  return new (Context)
      ArraySubscriptExpr(LHSExp, RHSExp, ResultType, VK, OK, RLoc);
}

ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                        FunctionDecl *FD, ParmVarDecl *Param) {
  if (Param->hasUnparsedDefaultArg()) {
    Diag(CallLoc,
         diag::err_use_of_default_argument_to_function_declared_later) <<
      FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
    Diag(UnparsedDefaultArgLocs[Param],
         diag::note_default_argument_declared_here);
    return ExprError();
  }

  if (Param->hasUninstantiatedDefaultArg()) {
    /// HLSL Change Begin - back ported from llvm-project/4409a83c2935.
    // Instantiate the expression.
    MultiLevelTemplateArgumentList MultiLevelArgList =
        getTemplateInstantiationArgs(FD, nullptr, /*RelativeToPrimary=*/true);

    if (SubstDefaultArgument(CallLoc, Param, MultiLevelArgList,
                             /*ForCallExpr*/ true))
      return ExprError();
    /// HLSL Change End - back ported from llvm-project/4409a83c2935.
  }

  // If the default expression creates temporaries, we need to
  // push them to the current stack of expression temporaries so they'll
  // be properly destroyed.
  // FIXME: We should really be rebuilding the default argument with new
  // bound temporaries; see the comment in PR5810.
  // We don't need to do that with block decls, though, because
  // blocks in default argument expression can never capture anything.
  if (isa<ExprWithCleanups>(Param->getInit())) {
    // Set the "needs cleanups" bit regardless of whether there are
    // any explicit objects.
    ExprNeedsCleanups = true;

    // Append all the objects to the cleanup list.  Right now, this
    // should always be a no-op, because blocks in default argument
    // expressions should never be able to capture anything.
    assert(!cast<ExprWithCleanups>(Param->getInit())->getNumObjects() &&
           "default argument expression has capturing blocks?");
  }

  // We already type-checked the argument, so we know it works.
  // Just mark all of the declarations in this potentially-evaluated expression
  // as being "referenced".
MarkDeclarationsReferencedInExpr(Param->getDefaultArg(), /*SkipLocalVariables=*/true); return CXXDefaultArgExpr::Create(Context, CallLoc, Param); } Sema::VariadicCallType Sema::getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn) { if (Proto && Proto->isVariadic()) { if (dyn_cast_or_null<CXXConstructorDecl>(FDecl)) return VariadicConstructor; else if (Fn && Fn->getType()->isBlockPointerType()) return VariadicBlock; else if (FDecl) { if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl)) if (Method->isInstance()) return VariadicMethod; } else if (Fn && Fn->getType() == Context.BoundMemberTy) return VariadicMethod; return VariadicFunction; } return VariadicDoesNotApply; } namespace { class FunctionCallCCC : public FunctionCallFilterCCC { public: FunctionCallCCC(Sema &SemaRef, const IdentifierInfo *FuncName, unsigned NumArgs, MemberExpr *ME) : FunctionCallFilterCCC(SemaRef, NumArgs, false, ME), FunctionName(FuncName) {} bool ValidateCandidate(const TypoCorrection &candidate) override { if (!candidate.getCorrectionSpecifier() || candidate.getCorrectionAsIdentifierInfo() != FunctionName) { return false; } return FunctionCallFilterCCC::ValidateCandidate(candidate); } private: const IdentifierInfo *const FunctionName; }; } static TypoCorrection TryTypoCorrectionForCall(Sema &S, Expr *Fn, FunctionDecl *FDecl, ArrayRef<Expr *> Args) { MemberExpr *ME = dyn_cast<MemberExpr>(Fn); DeclarationName FuncName = FDecl->getDeclName(); SourceLocation NameLoc = ME ? ME->getMemberLoc() : Fn->getLocStart(); if (TypoCorrection Corrected = S.CorrectTypo( DeclarationNameInfo(FuncName, NameLoc), Sema::LookupOrdinaryName, S.getScopeForContext(S.CurContext), nullptr, llvm::make_unique<FunctionCallCCC>(S, FuncName.getAsIdentifierInfo(), Args.size(), ME), Sema::CTK_ErrorRecovery)) { if (NamedDecl *ND = Corrected.getCorrectionDecl()) { if (Corrected.isOverloaded()) { OverloadCandidateSet OCS(NameLoc, OverloadCandidateSet::CSK_Normal); OverloadCandidateSet::iterator Best; for (TypoCorrection::decl_iterator CD = Corrected.begin(), CDEnd = Corrected.end(); CD != CDEnd; ++CD) { if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*CD)) S.AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none), Args, OCS); } switch (OCS.BestViableFunction(S, NameLoc, Best)) { case OR_Success: ND = Best->Function; Corrected.setCorrectionDecl(ND); break; default: break; } } if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) { return Corrected; } } } return TypoCorrection(); } /// ConvertArgumentsForCall - Converts the arguments specified in /// Args/NumArgs to the parameter types of the function FDecl with /// function prototype Proto. Call is the call expression itself, and /// Fn is the function expression. For a C++ member function, this /// routine does not attempt to convert the object argument. Returns /// true if the call is ill-formed. bool Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool IsExecConfig) { // Bail out early if calling a builtin with custom typechecking. if (FDecl) if (unsigned ID = FDecl->getBuiltinID()) if (Context.BuiltinInfo.hasCustomTypechecking(ID)) return false; // C99 6.5.2.2p7 - the arguments are implicitly converted, as if by // assignment, to the types of the corresponding parameter, ... unsigned NumParams = Proto->getNumParams(); bool Invalid = false; unsigned MinArgs = FDecl ? 
FDecl->getMinRequiredArguments() : NumParams; unsigned FnKind = Fn->getType()->isBlockPointerType() ? 1 /* block */ : (IsExecConfig ? 3 /* kernel function (exec config) */ : 0 /* function */); // If too few arguments are available (and we don't have default // arguments for the remaining parameters), don't make the call. if (Args.size() < NumParams) { if (Args.size() < MinArgs) { TypoCorrection TC; if (FDecl && (TC = TryTypoCorrectionForCall(*this, Fn, FDecl, Args))) { unsigned diag_id = MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_few_args_suggest : diag::err_typecheck_call_too_few_args_at_least_suggest; diagnoseTypo(TC, PDiag(diag_id) << FnKind << MinArgs << static_cast<unsigned>(Args.size()) << TC.getCorrectionRange()); } else if (MinArgs == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName()) Diag(RParenLoc, MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_few_args_one : diag::err_typecheck_call_too_few_args_at_least_one) << FnKind << FDecl->getParamDecl(0) << Fn->getSourceRange(); else Diag(RParenLoc, MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_few_args : diag::err_typecheck_call_too_few_args_at_least) << FnKind << MinArgs << static_cast<unsigned>(Args.size()) << Fn->getSourceRange(); // Emit the location of the prototype. if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig) Diag(FDecl->getLocStart(), diag::note_callee_decl) << FDecl; return true; } Call->setNumArgs(Context, NumParams); } // If too many are passed and not variadic, error on the extras and drop // them. if (Args.size() > NumParams) { if (!Proto->isVariadic()) { TypoCorrection TC; if (FDecl && (TC = TryTypoCorrectionForCall(*this, Fn, FDecl, Args))) { unsigned diag_id = MinArgs == NumParams && !Proto->isVariadic() ? diag::err_typecheck_call_too_many_args_suggest : diag::err_typecheck_call_too_many_args_at_most_suggest; diagnoseTypo(TC, PDiag(diag_id) << FnKind << NumParams << static_cast<unsigned>(Args.size()) << TC.getCorrectionRange()); } else if (NumParams == 1 && FDecl && FDecl->getParamDecl(0)->getDeclName()) Diag(Args[NumParams]->getLocStart(), MinArgs == NumParams ? diag::err_typecheck_call_too_many_args_one : diag::err_typecheck_call_too_many_args_at_most_one) << FnKind << FDecl->getParamDecl(0) << static_cast<unsigned>(Args.size()) << Fn->getSourceRange() << SourceRange(Args[NumParams]->getLocStart(), Args.back()->getLocEnd()); else Diag(Args[NumParams]->getLocStart(), MinArgs == NumParams ? diag::err_typecheck_call_too_many_args : diag::err_typecheck_call_too_many_args_at_most) << FnKind << NumParams << static_cast<unsigned>(Args.size()) << Fn->getSourceRange() << SourceRange(Args[NumParams]->getLocStart(), Args.back()->getLocEnd()); // Emit the location of the prototype. if (!TC && FDecl && !FDecl->getBuiltinID() && !IsExecConfig) Diag(FDecl->getLocStart(), diag::note_callee_decl) << FDecl; // This deletes the extra arguments. 
Call->setNumArgs(Context, NumParams); return true; } } SmallVector<Expr *, 8> AllArgs; VariadicCallType CallType = getVariadicCallType(FDecl, Proto, Fn); Invalid = GatherArgumentsForCall(Call->getLocStart(), FDecl, Proto, 0, Args, AllArgs, CallType); if (Invalid) return true; unsigned TotalNumArgs = AllArgs.size(); for (unsigned i = 0; i < TotalNumArgs; ++i) Call->setArg(i, AllArgs[i]); return false; } bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType, bool AllowExplicit, bool IsListInitialization) { unsigned NumParams = Proto->getNumParams(); bool Invalid = false; unsigned ArgIx = 0; // Continue to check argument types (even if we have too few/many args). for (unsigned i = FirstParam; i < NumParams; i++) { QualType ProtoArgType = Proto->getParamType(i); Expr *Arg; ParmVarDecl *Param = FDecl ? FDecl->getParamDecl(i) : nullptr; if (ArgIx < Args.size()) { Arg = Args[ArgIx++]; if (!(getLangOpts().HLSL && ProtoArgType->isIncompleteArrayType()) && // HLSL Change: allow incomplete array RequireCompleteType(Arg->getLocStart(), ProtoArgType, diag::err_call_incomplete_argument, Arg)) return true; // Strip the unbridged-cast placeholder expression off, if applicable. bool CFAudited = false; if (Arg->getType() == Context.ARCUnbridgedCastTy && FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() && (!Param || !Param->hasAttr<CFConsumedAttr>())) Arg = stripARCUnbridgedCast(Arg); else if (getLangOpts().ObjCAutoRefCount && FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() && (!Param || !Param->hasAttr<CFConsumedAttr>())) CFAudited = true; InitializedEntity Entity = Param ? InitializedEntity::InitializeParameter(Context, Param, ProtoArgType) : InitializedEntity::InitializeParameter( Context, ProtoArgType, Proto->isParamConsumed(i)); // Remember that parameter belongs to a CF audited API. if (CFAudited) Entity.setParameterCFAudited(); ExprResult ArgE = PerformCopyInitialization( Entity, SourceLocation(), Arg, IsListInitialization, AllowExplicit); if (ArgE.isInvalid()) return true; Arg = ArgE.getAs<Expr>(); } else { assert(Param && "can't use default arguments without a known callee"); ExprResult ArgExpr = BuildCXXDefaultArgExpr(CallLoc, FDecl, Param); if (ArgExpr.isInvalid()) return true; Arg = ArgExpr.getAs<Expr>(); } // Check for array bounds violations for each argument to the call. This // check only triggers warnings when the argument isn't a more complex Expr // with its own checking, such as a BinaryOperator. CheckArrayAccess(Arg); // Check for violations of C99 static array rules (C99 6.7.5.3p7). CheckStaticArrayArgument(CallLoc, Param, Arg); AllArgs.push_back(Arg); } // If this is a variadic call, handle args passed through "...". if (CallType != VariadicDoesNotApply) { // Assume that extern "C" functions with variadic arguments that // return __unknown_anytype aren't *really* variadic. if (Proto->getReturnType() == Context.UnknownAnyTy && FDecl && FDecl->isExternC()) { for (unsigned i = ArgIx, e = Args.size(); i != e; ++i) { QualType paramType; // ignored ExprResult arg = checkUnknownAnyArg(CallLoc, Args[i], paramType); Invalid |= arg.isInvalid(); AllArgs.push_back(arg.get()); } // Otherwise do argument promotion, (C99 6.5.2.2p7). 
} else { for (unsigned i = ArgIx, e = Args.size(); i != e; ++i) { ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], CallType, FDecl); Invalid |= Arg.isInvalid(); AllArgs.push_back(Arg.get()); } } // Check for array bounds violations. for (unsigned i = ArgIx, e = Args.size(); i != e; ++i) CheckArrayAccess(Args[i]); } return Invalid; } static void DiagnoseCalleeStaticArrayParam(Sema &S, ParmVarDecl *PVD) { TypeLoc TL = PVD->getTypeSourceInfo()->getTypeLoc(); if (DecayedTypeLoc DTL = TL.getAs<DecayedTypeLoc>()) TL = DTL.getOriginalLoc(); if (ArrayTypeLoc ATL = TL.getAs<ArrayTypeLoc>()) S.Diag(PVD->getLocation(), diag::note_callee_static_array) << ATL.getLocalSourceRange(); } /// CheckStaticArrayArgument - If the given argument corresponds to a static /// array parameter, check that it is non-null, and that if it is formed by /// array-to-pointer decay, the underlying array is sufficiently large. /// /// C99 6.7.5.3p7: If the keyword static also appears within the [ and ] of the /// array type derivation, then for each call to the function, the value of the /// corresponding actual argument shall provide access to the first element of /// an array with at least as many elements as specified by the size expression. void Sema::CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr) { // Static array parameters are not supported in C++. if (!Param || getLangOpts().CPlusPlus) return; QualType OrigTy = Param->getOriginalType(); const ArrayType *AT = Context.getAsArrayType(OrigTy); if (!AT || AT->getSizeModifier() != ArrayType::Static) return; if (ArgExpr->isNullPointerConstant(Context, Expr::NPC_NeverValueDependent)) { Diag(CallLoc, diag::warn_null_arg) << ArgExpr->getSourceRange(); DiagnoseCalleeStaticArrayParam(*this, Param); return; } const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT); if (!CAT) return; const ConstantArrayType *ArgCAT = Context.getAsConstantArrayType(ArgExpr->IgnoreParenImpCasts()->getType()); if (!ArgCAT) return; if (ArgCAT->getSize().ult(CAT->getSize())) { Diag(CallLoc, diag::warn_static_array_too_small) << ArgExpr->getSourceRange() << (unsigned) ArgCAT->getSize().getZExtValue() << (unsigned) CAT->getSize().getZExtValue(); DiagnoseCalleeStaticArrayParam(*this, Param); } } /// Given a function expression of unknown-any type, try to rebuild it /// to have a function type. static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *fn); /// Is the given type a placeholder that we need to lower out /// immediately during argument processing? static bool isPlaceholderToRemoveAsArg(QualType type) { // Placeholders are never sugared. const BuiltinType *placeholder = dyn_cast<BuiltinType>(type); if (!placeholder) return false; switch (placeholder->getKind()) { // Ignore all the non-placeholder types. #define PLACEHOLDER_TYPE(ID, SINGLETON_ID) #define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID: #include "clang/AST/BuiltinTypes.def" return false; // We cannot lower out overload sets; they might validly be resolved // by the call machinery. case BuiltinType::Overload: return false; // Unbridged casts in ARC can be handled in some call positions and // should be left in place. case BuiltinType::ARCUnbridgedCast: return false; // Pseudo-objects should be converted as soon as possible. case BuiltinType::PseudoObject: return true; // The debugger mode could theoretically but currently does not try // to resolve unknown-typed arguments based on known parameter types. 
  case BuiltinType::UnknownAny:
    return true;

  // These are always invalid as call arguments and should be reported.
  case BuiltinType::BoundMember:
  case BuiltinType::BuiltinFn:
    return true;
  }

  llvm_unreachable("bad builtin type kind");
}

/// Check an argument list for placeholders that we won't try to
/// handle later.
static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args) {
  // Apply this processing to all the arguments at once instead of
  // dying at the first failure.
  bool hasInvalid = false;
  for (size_t i = 0, e = args.size(); i != e; i++) {
    if (isPlaceholderToRemoveAsArg(args[i]->getType())) {
      ExprResult result = S.CheckPlaceholderExpr(args[i]);
      if (result.isInvalid()) hasInvalid = true;
      else args[i] = result.get();
    } else if (hasInvalid) {
      (void)S.CorrectDelayedTyposInExpr(args[i]);
    }
  }
  return hasInvalid;
}

/// If a builtin function has a pointer argument with no explicit address
/// space, then it should be able to accept a pointer to any address
/// space as input. In order to do this, we need to replace the
/// standard builtin declaration with one that uses the same address space
/// as the call.
///
/// \returns nullptr If this builtin is not a candidate for a rewrite i.e.
///                  it does not contain any pointer arguments without
///                  an address space qualifier.  Otherwise the rewritten
///                  FunctionDecl is returned.
/// TODO: Handle pointer return types.
static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
                                                const FunctionDecl *FDecl,
                                                MultiExprArg ArgExprs) {

  QualType DeclType = FDecl->getType();
  const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(DeclType);

  if (!Context.BuiltinInfo.hasPtrArgsOrResult(FDecl->getBuiltinID()) ||
      !FT || FT->isVariadic() || ArgExprs.size() != FT->getNumParams())
    return nullptr;

  bool NeedsNewDecl = false;
  unsigned i = 0;
  SmallVector<QualType, 8> OverloadParams;

  for (QualType ParamType : FT->param_types()) {

    // Convert array arguments to pointer to simplify type lookup.
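    // Illustrative intent: a builtin parameter declared `float *` that is
    // called with a (hypothetical) `__local float *` argument in OpenCL is
    // rewritten below to carry the argument's address space.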
Expr *Arg = Sema->DefaultFunctionArrayLvalueConversion(ArgExprs[i++]).get(); QualType ArgType = Arg->getType(); if (!ParamType->isPointerType() || ParamType.getQualifiers().hasAddressSpace() || !ArgType->isPointerType() || !ArgType->getPointeeType().getQualifiers().hasAddressSpace()) { OverloadParams.push_back(ParamType); continue; } NeedsNewDecl = true; unsigned AS = ArgType->getPointeeType().getQualifiers().getAddressSpace(); QualType PointeeType = ParamType->getPointeeType(); PointeeType = Context.getAddrSpaceQualType(PointeeType, AS); OverloadParams.push_back(Context.getPointerType(PointeeType)); } if (!NeedsNewDecl) return nullptr; FunctionProtoType::ExtProtoInfo EPI; QualType OverloadTy = Context.getFunctionType(FT->getReturnType(), OverloadParams, EPI, FT->getParamMods()); // HLSL Change DeclContext *Parent = Context.getTranslationUnitDecl(); FunctionDecl *OverloadDecl = FunctionDecl::Create(Context, Parent, FDecl->getLocation(), FDecl->getLocation(), FDecl->getIdentifier(), OverloadTy, /*TInfo=*/nullptr, SC_Extern, false, /*hasPrototype=*/true); SmallVector<ParmVarDecl*, 16> Params; FT = cast<FunctionProtoType>(OverloadTy); for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) { QualType ParamType = FT->getParamType(i); ParmVarDecl *Parm = ParmVarDecl::Create(Context, OverloadDecl, SourceLocation(), SourceLocation(), nullptr, ParamType, /*TInfo=*/nullptr, SC_None, nullptr); Parm->setScopeInfo(0, i); Params.push_back(Parm); } OverloadDecl->setParams(Params); return OverloadDecl; } /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig, bool IsExecConfig) { // Since this might be a postfix expression, get rid of ParenListExprs. ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Fn); if (Result.isInvalid()) return ExprError(); Fn = Result.get(); if (checkArgsForPlaceholders(*this, ArgExprs)) return ExprError(); if (getLangOpts().CPlusPlus) { // If this is a pseudo-destructor expression, build the call immediately. if (isa<CXXPseudoDestructorExpr>(Fn)) { if (!ArgExprs.empty()) { // Pseudo-destructor calls should not have any arguments. Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args) << FixItHint::CreateRemoval( SourceRange(ArgExprs[0]->getLocStart(), ArgExprs.back()->getLocEnd())); } return new (Context) CallExpr(Context, Fn, None, Context.VoidTy, VK_RValue, RParenLoc); } if (Fn->getType() == Context.PseudoObjectTy) { ExprResult result = CheckPlaceholderExpr(Fn); if (result.isInvalid()) return ExprError(); Fn = result.get(); } // Determine whether this is a dependent call inside a C++ template, // in which case we won't do any semantic analysis now. // FIXME: Will need to cache the results of name lookup (including ADL) in // Fn. bool Dependent = false; if (Fn->isTypeDependent()) Dependent = true; else if (Expr::hasAnyTypeDependentArguments(ArgExprs)) Dependent = true; if (Dependent) { if (ExecConfig) { return new (Context) CUDAKernelCallExpr( Context, Fn, cast<CallExpr>(ExecConfig), ArgExprs, Context.DependentTy, VK_RValue, RParenLoc); } else { return new (Context) CallExpr( Context, Fn, ArgExprs, Context.DependentTy, VK_RValue, RParenLoc); } } // Determine whether this is a call to an object (C++ [over.call.object]). 
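    // e.g. for `struct Callable { int operator()(int); } c;`, the call `c(1)`
    // has a record-typed callee and is routed through
    // BuildCallToObjectOfClassType below.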
if (Fn->getType()->isRecordType()) return BuildCallToObjectOfClassType(S, Fn, LParenLoc, ArgExprs, RParenLoc); if (Fn->getType() == Context.UnknownAnyTy) { ExprResult result = rebuildUnknownAnyFunction(*this, Fn); if (result.isInvalid()) return ExprError(); Fn = result.get(); } if (Fn->getType() == Context.BoundMemberTy) { return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs, RParenLoc); } } // Check for overloaded calls. This can happen even in C due to extensions. if (Fn->getType() == Context.OverloadTy) { OverloadExpr::FindResult find = OverloadExpr::find(Fn); // We aren't supposed to apply this logic for if there's an '&' involved. if (!find.HasFormOfMemberPointer) { OverloadExpr *ovl = find.Expression; if (isa<UnresolvedLookupExpr>(ovl)) { UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(ovl); return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, ArgExprs, RParenLoc, ExecConfig); } else { return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs, RParenLoc); } } } // If we're directly calling a function, get the appropriate declaration. if (Fn->getType() == Context.UnknownAnyTy) { ExprResult result = rebuildUnknownAnyFunction(*this, Fn); if (result.isInvalid()) return ExprError(); Fn = result.get(); } Expr *NakedFn = Fn->IgnoreParens(); NamedDecl *NDecl = nullptr; if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(NakedFn)) if (UnOp->getOpcode() == UO_AddrOf) NakedFn = UnOp->getSubExpr()->IgnoreParens(); if (isa<DeclRefExpr>(NakedFn)) { NDecl = cast<DeclRefExpr>(NakedFn)->getDecl(); FunctionDecl *FDecl = dyn_cast<FunctionDecl>(NDecl); if (FDecl && FDecl->getBuiltinID()) { // Rewrite the function decl for this builtin by replacing paramaters // with no explicit address space with the address space of the arguments // in ArgExprs. if ((FDecl = rewriteBuiltinFunctionDecl(this, Context, FDecl, ArgExprs))) { NDecl = FDecl; Fn = DeclRefExpr::Create(Context, FDecl->getQualifierLoc(), SourceLocation(), FDecl, false, SourceLocation(), FDecl->getType(), Fn->getValueKind(), FDecl); } } } else if (isa<MemberExpr>(NakedFn)) NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl(); if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(NDecl)) { if (FD->hasAttr<EnableIfAttr>()) { if (const EnableIfAttr *Attr = CheckEnableIf(FD, ArgExprs, true)) { Diag(Fn->getLocStart(), isa<CXXMethodDecl>(FD) ? diag::err_ovl_no_viable_member_function_in_call : diag::err_ovl_no_viable_function_in_call) << FD << FD->getSourceRange(); Diag(FD->getLocation(), diag::note_ovl_candidate_disabled_by_enable_if_attr) << Attr->getCond()->getSourceRange() << Attr->getMessage(); } } } return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, ArgExprs, RParenLoc, ExecConfig, IsExecConfig); } /// ActOnAsTypeExpr - create a new asType (bitcast) from the arguments. /// /// __builtin_astype( value, dst type ) /// ExprResult Sema::ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc) { ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; QualType DstTy = GetTypeFromParser(ParsedDestTy); QualType SrcTy = E->getType(); if (Context.getTypeSize(DstTy) != Context.getTypeSize(SrcTy)) return ExprError(Diag(BuiltinLoc, diag::err_invalid_astype_of_different_size) << DstTy << SrcTy << E->getSourceRange()); return new (Context) AsTypeExpr(E, DstTy, VK, OK, BuiltinLoc, RParenLoc); } /// ActOnConvertVectorExpr - create a new convert-vector expression from the /// provided arguments. 
/// /// __builtin_convertvector( value, dst type ) /// ExprResult Sema::ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc) { TypeSourceInfo *TInfo; GetTypeFromParser(ParsedDestTy, &TInfo); return SemaConvertVectorExpr(E, TInfo, BuiltinLoc, RParenLoc); } /// BuildResolvedCallExpr - Build a call to a resolved expression, /// i.e. an expression not of \p OverloadTy. The expression should /// unary-convert to an expression of function-pointer or /// block-pointer type. /// /// \param NDecl the declaration being called, if available ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, Expr *Config, bool IsExecConfig) { FunctionDecl *FDecl = dyn_cast_or_null<FunctionDecl>(NDecl); unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0); // Promote the function operand. // We special-case function promotion here because we only allow promoting // builtin functions to function pointers in the callee of a call. ExprResult Result; if (BuiltinID && Fn->getType()->isSpecificBuiltinType(BuiltinType::BuiltinFn)) { Result = ImpCastExprToType(Fn, Context.getPointerType(FDecl->getType()), CK_BuiltinFnToFnPtr).get(); } else { Result = CallExprUnaryConversions(Fn); } if (Result.isInvalid()) return ExprError(); Fn = Result.get(); // Make the call expr early, before semantic checks. This guarantees cleanup // of arguments and function on error. CallExpr *TheCall; if (Config) TheCall = new (Context) CUDAKernelCallExpr(Context, Fn, cast<CallExpr>(Config), Args, Context.BoolTy, VK_RValue, RParenLoc); else TheCall = new (Context) CallExpr(Context, Fn, Args, Context.BoolTy, VK_RValue, RParenLoc); if (!getLangOpts().CPlusPlus) { // C cannot always handle TypoExpr nodes in builtin calls and direct // function calls as their argument checking don't necessarily handle // dependent types properly, so make sure any TypoExprs have been // dealt with. ExprResult Result = CorrectDelayedTyposInExpr(TheCall); if (!Result.isUsable()) return ExprError(); TheCall = dyn_cast<CallExpr>(Result.get()); if (!TheCall) return Result; Args = ArrayRef<Expr *>(TheCall->getArgs(), TheCall->getNumArgs()); } // Bail out early if calling a builtin with custom typechecking. if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID)) return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall); retry: const FunctionType *FuncT; if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) { // C99 6.5.2.2p1 - "The expression that denotes the called function shall // have type pointer to function". FuncT = PT->getPointeeType()->getAs<FunctionType>(); if (!FuncT) return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function) << Fn->getType() << Fn->getSourceRange()); } else if (const BlockPointerType *BPT = Fn->getType()->getAs<BlockPointerType>()) { FuncT = BPT->getPointeeType()->castAs<FunctionType>(); } else { // Handle calls to expressions of unknown-any type. 
if (Fn->getType() == Context.UnknownAnyTy) { ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn); if (rewrite.isInvalid()) return ExprError(); Fn = rewrite.get(); TheCall->setCallee(Fn); goto retry; } return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function) << Fn->getType() << Fn->getSourceRange()); } if (getLangOpts().CUDA) { if (Config) { // CUDA: Kernel calls must be to global functions if (FDecl && !FDecl->hasAttr<CUDAGlobalAttr>()) return ExprError(Diag(LParenLoc,diag::err_kern_call_not_global_function) << FDecl->getName() << Fn->getSourceRange()); // CUDA: Kernel function must have 'void' return type if (!FuncT->getReturnType()->isVoidType()) return ExprError(Diag(LParenLoc, diag::err_kern_type_not_void_return) << Fn->getType() << Fn->getSourceRange()); } else { // CUDA: Calls to global functions must be configured if (FDecl && FDecl->hasAttr<CUDAGlobalAttr>()) return ExprError(Diag(LParenLoc, diag::err_global_call_not_config) << FDecl->getName() << Fn->getSourceRange()); } } // Check for a valid return type if (CheckCallReturnType(FuncT->getReturnType(), Fn->getLocStart(), TheCall, FDecl)) return ExprError(); // We know the result type of the call, set it. TheCall->setType(FuncT->getCallResultType(Context)); TheCall->setValueKind(Expr::getValueKindForType(FuncT->getReturnType())); const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT); if (Proto) { if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, RParenLoc, IsExecConfig)) return ExprError(); } else { assert(isa<FunctionNoProtoType>(FuncT) && "Unknown FunctionType!"); if (FDecl) { // Check if we have too few/too many template arguments, based // on our knowledge of the function definition. const FunctionDecl *Def = nullptr; if (FDecl->hasBody(Def) && Args.size() != Def->param_size()) { Proto = Def->getType()->getAs<FunctionProtoType>(); if (!Proto || !(Proto->isVariadic() && Args.size() >= Def->param_size())) Diag(RParenLoc, diag::warn_call_wrong_number_of_arguments) << (Args.size() > Def->param_size()) << FDecl << Fn->getSourceRange(); } // If the function we're calling isn't a function prototype, but we have // a function prototype from a prior declaratiom, use that prototype. if (!FDecl->hasPrototype()) Proto = FDecl->getType()->getAs<FunctionProtoType>(); } // Promote the arguments (C99 6.5.2.2p6). for (unsigned i = 0, e = Args.size(); i != e; i++) { Expr *Arg = Args[i]; if (Proto && i < Proto->getNumParams()) { InitializedEntity Entity = InitializedEntity::InitializeParameter( Context, Proto->getParamType(i), Proto->isParamConsumed(i)); ExprResult ArgE = PerformCopyInitialization(Entity, SourceLocation(), Arg); if (ArgE.isInvalid()) return true; Arg = ArgE.getAs<Expr>(); } else { ExprResult ArgE = DefaultArgumentPromotion(Arg); if (ArgE.isInvalid()) return true; Arg = ArgE.getAs<Expr>(); } if (RequireCompleteType(Arg->getLocStart(), Arg->getType(), diag::err_call_incomplete_argument, Arg)) return ExprError(); TheCall->setArg(i, Arg); } } if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl)) if (!Method->isStatic()) return ExprError(Diag(LParenLoc, diag::err_member_call_without_object) << Fn->getSourceRange()); // Check for sentinels if (NDecl) DiagnoseSentinelCalls(NDecl, LParenLoc, Args); // Do special checking on direct calls to functions. 
  if (FDecl) {
    if (CheckFunctionCall(FDecl, TheCall, Proto))
      return ExprError();

    if (CheckHLSLFunctionCall(FDecl, TheCall))
      return ExprError();

    if (BuiltinID)
      return CheckBuiltinFunctionCall(FDecl, BuiltinID, TheCall);
  } else if (NDecl) {
    if (CheckPointerCall(NDecl, TheCall, Proto))
      return ExprError();
  } else {
    if (CheckOtherCall(TheCall, Proto))
      return ExprError();
  }

  return MaybeBindToTemporary(TheCall);
}

ExprResult
Sema::ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
                           SourceLocation RParenLoc, Expr *InitExpr) {
  assert(Ty && "ActOnCompoundLiteral(): missing type");
  // FIXME: put back this assert when initializers are worked out.
  //assert((InitExpr != 0) && "ActOnCompoundLiteral(): missing expression");

  TypeSourceInfo *TInfo;
  QualType literalType = GetTypeFromParser(Ty, &TInfo);
  if (!TInfo)
    TInfo = Context.getTrivialTypeSourceInfo(literalType);

  return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, InitExpr);
}

ExprResult
Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
                               SourceLocation RParenLoc, Expr *LiteralExpr) {
  QualType literalType = TInfo->getType();

  if (literalType->isArrayType()) {
    if (RequireCompleteType(LParenLoc, Context.getBaseElementType(literalType),
          diag::err_illegal_decl_array_incomplete_type,
          SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
      return ExprError();
    if (literalType->isVariableArrayType())
      return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
        << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()));
  } else if (!literalType->isDependentType() &&
             RequireCompleteType(LParenLoc, literalType,
               diag::err_typecheck_decl_incomplete_type,
               SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
    return ExprError();

  InitializedEntity Entity
    = InitializedEntity::InitializeCompoundLiteralInit(TInfo);
  InitializationKind Kind
    = InitializationKind::CreateCStyleCast(LParenLoc,
                                           SourceRange(LParenLoc, RParenLoc),
                                           /*InitList=*/true);
  InitializationSequence InitSeq(*this, Entity, Kind, LiteralExpr);
  ExprResult Result = InitSeq.Perform(*this, Entity, Kind, LiteralExpr,
                                      &literalType);
  if (Result.isInvalid())
    return ExprError();
  LiteralExpr = Result.get();

  bool isFileScope = getCurFunctionOrMethodDecl() == nullptr;
  if (isFileScope &&
      !LiteralExpr->isTypeDependent() &&
      !LiteralExpr->isValueDependent() &&
      !literalType->isDependentType()) { // 6.5.2.5p3
    if (CheckForConstantInitializer(LiteralExpr, literalType))
      return ExprError();
  }

  // In C, compound literals are l-values for some reason.
  ExprValueKind VK = getLangOpts().CPlusPlus ? VK_RValue : VK_LValue;

  return MaybeBindToTemporary(
           new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
                                             VK, LiteralExpr, isFileScope));
}

ExprResult
Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
                    SourceLocation RBraceLoc) {
  // Immediately handle non-overload placeholders. Overloads can be
  // resolved contextually, but everything else here can't.
  for (unsigned I = 0, E = InitArgList.size(); I != E; ++I) {
    if (InitArgList[I]->getType()->isNonOverloadPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(InitArgList[I]);

      // Ignore failures; dropping the entire initializer list because
      // of one failure would be terrible for indexing/etc.
      if (result.isInvalid()) continue;

      InitArgList[I] = result.get();
    }
  }

  // Semantic analysis for initializers is done by ActOnDeclarator() and
  // CheckInitializer() - it requires knowledge of the object being
  // initialized.
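  // Illustrative sketch (not from the original source): for
  //   int a[] = {1, 2, 3};
  // the brace list alone determines neither the array bound nor the element
  // conversions; those are computed later against the declared type, so the
  // InitListExpr built here carries only a placeholder type.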
InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitArgList, RBraceLoc); E->setType(Context.VoidTy); // FIXME: just a place holder for now. return E; } /// Do an explicit extend of the given block pointer if we're in ARC. void Sema::maybeExtendBlockObject(ExprResult &E) { assert(E.get()->getType()->isBlockPointerType()); assert(E.get()->isRValue()); // Only do this in an r-value context. if (!getLangOpts().ObjCAutoRefCount) return; E = ImplicitCastExpr::Create(Context, E.get()->getType(), CK_ARCExtendBlockObject, E.get(), /*base path*/ nullptr, VK_RValue); ExprNeedsCleanups = true; } /// Prepare a conversion of the given expression to an ObjC object /// pointer type. CastKind Sema::PrepareCastToObjCObjectPointer(ExprResult &E) { QualType type = E.get()->getType(); if (type->isObjCObjectPointerType()) { return CK_BitCast; } else if (type->isBlockPointerType()) { maybeExtendBlockObject(E); return CK_BlockPointerToObjCPointerCast; } else { assert(type->isPointerType()); return CK_CPointerToObjCPointerCast; } } /// Prepares for a scalar cast, performing all the necessary stages /// except the final cast and returning the kind required. CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) { // Both Src and Dest are scalar types, i.e. arithmetic or pointer. // Also, callers should have filtered out the invalid cases with // pointers. Everything else should be possible. QualType SrcTy = Src.get()->getType(); if (Context.hasSameUnqualifiedType(SrcTy, DestTy)) return CK_NoOp; switch (Type::ScalarTypeKind SrcKind = SrcTy->getScalarTypeKind()) { case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); case Type::STK_CPointer: case Type::STK_BlockPointer: case Type::STK_ObjCObjectPointer: switch (DestTy->getScalarTypeKind()) { case Type::STK_CPointer: { unsigned SrcAS = SrcTy->getPointeeType().getAddressSpace(); unsigned DestAS = DestTy->getPointeeType().getAddressSpace(); if (SrcAS != DestAS) return CK_AddressSpaceConversion; return CK_BitCast; } case Type::STK_BlockPointer: return (SrcKind == Type::STK_BlockPointer ? 
CK_BitCast : CK_AnyPointerToBlockPointerCast); case Type::STK_ObjCObjectPointer: if (SrcKind == Type::STK_ObjCObjectPointer) return CK_BitCast; if (SrcKind == Type::STK_CPointer) return CK_CPointerToObjCPointerCast; maybeExtendBlockObject(Src); return CK_BlockPointerToObjCPointerCast; case Type::STK_Bool: return CK_PointerToBoolean; case Type::STK_Integral: return CK_PointerToIntegral; case Type::STK_Floating: case Type::STK_FloatingComplex: case Type::STK_IntegralComplex: case Type::STK_MemberPointer: llvm_unreachable("illegal cast from pointer"); } llvm_unreachable("Should have returned before this"); case Type::STK_Bool: // casting from bool is like casting from an integer case Type::STK_Integral: switch (DestTy->getScalarTypeKind()) { case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: if (Src.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) return CK_NullToPointer; return CK_IntegralToPointer; case Type::STK_Bool: return CK_IntegralToBoolean; case Type::STK_Integral: return CK_IntegralCast; case Type::STK_Floating: return CK_IntegralToFloating; case Type::STK_IntegralComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs<ComplexType>()->getElementType(), CK_IntegralCast); return CK_IntegralRealToComplex; case Type::STK_FloatingComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs<ComplexType>()->getElementType(), CK_IntegralToFloating); return CK_FloatingRealToComplex; case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); } llvm_unreachable("Should have returned before this"); case Type::STK_Floating: switch (DestTy->getScalarTypeKind()) { case Type::STK_Floating: return CK_FloatingCast; case Type::STK_Bool: return CK_FloatingToBoolean; case Type::STK_Integral: return CK_FloatingToIntegral; case Type::STK_FloatingComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs<ComplexType>()->getElementType(), CK_FloatingCast); return CK_FloatingRealToComplex; case Type::STK_IntegralComplex: Src = ImpCastExprToType(Src.get(), DestTy->castAs<ComplexType>()->getElementType(), CK_FloatingToIntegral); return CK_IntegralRealToComplex; case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: llvm_unreachable("valid float->pointer cast?"); case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); } llvm_unreachable("Should have returned before this"); case Type::STK_FloatingComplex: switch (DestTy->getScalarTypeKind()) { case Type::STK_FloatingComplex: return CK_FloatingComplexCast; case Type::STK_IntegralComplex: return CK_FloatingComplexToIntegralComplex; case Type::STK_Floating: { QualType ET = SrcTy->castAs<ComplexType>()->getElementType(); if (Context.hasSameType(ET, DestTy)) return CK_FloatingComplexToReal; Src = ImpCastExprToType(Src.get(), ET, CK_FloatingComplexToReal); return CK_FloatingCast; } case Type::STK_Bool: return CK_FloatingComplexToBoolean; case Type::STK_Integral: Src = ImpCastExprToType(Src.get(), SrcTy->castAs<ComplexType>()->getElementType(), CK_FloatingComplexToReal); return CK_FloatingToIntegral; case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: llvm_unreachable("valid complex float->pointer cast?"); case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); } llvm_unreachable("Should have returned before this"); case Type::STK_IntegralComplex: switch (DestTy->getScalarTypeKind()) { case Type::STK_FloatingComplex: return CK_IntegralComplexToFloatingComplex; case 
Type::STK_IntegralComplex: return CK_IntegralComplexCast; case Type::STK_Integral: { QualType ET = SrcTy->castAs<ComplexType>()->getElementType(); if (Context.hasSameType(ET, DestTy)) return CK_IntegralComplexToReal; Src = ImpCastExprToType(Src.get(), ET, CK_IntegralComplexToReal); return CK_IntegralCast; } case Type::STK_Bool: return CK_IntegralComplexToBoolean; case Type::STK_Floating: Src = ImpCastExprToType(Src.get(), SrcTy->castAs<ComplexType>()->getElementType(), CK_IntegralComplexToReal); return CK_IntegralToFloating; case Type::STK_CPointer: case Type::STK_ObjCObjectPointer: case Type::STK_BlockPointer: llvm_unreachable("valid complex int->pointer cast?"); case Type::STK_MemberPointer: llvm_unreachable("member pointer type in C"); } llvm_unreachable("Should have returned before this"); } llvm_unreachable("Unhandled scalar cast"); } static bool breakDownVectorType(QualType type, uint64_t &len, QualType &eltType) { // Vectors are simple. if (const VectorType *vecType = type->getAs<VectorType>()) { len = vecType->getNumElements(); eltType = vecType->getElementType(); assert(eltType->isScalarType()); return true; } // We allow lax conversion to and from non-vector types, but only if // they're real types (i.e. non-complex, non-pointer scalar types). if (!type->isRealType()) return false; len = 1; eltType = type; return true; } static bool VectorTypesMatch(Sema &S, QualType srcTy, QualType destTy) { uint64_t srcLen, destLen; QualType srcElt, destElt; if (!breakDownVectorType(srcTy, srcLen, srcElt)) return false; if (!breakDownVectorType(destTy, destLen, destElt)) return false; // ASTContext::getTypeSize will return the size rounded up to a // power of 2, so instead of using that, we need to use the raw // element size multiplied by the element count. uint64_t srcEltSize = S.Context.getTypeSize(srcElt); uint64_t destEltSize = S.Context.getTypeSize(destElt); return (srcLen * srcEltSize == destLen * destEltSize); } /// Is this a legal conversion between two known vector types? bool Sema::isLaxVectorConversion(QualType srcTy, QualType destTy) { assert(destTy->isVectorType() || srcTy->isVectorType()); if (!Context.getLangOpts().LaxVectorConversions) return false; return VectorTypesMatch(*this, srcTy, destTy); } bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind) { assert(VectorTy->isVectorType() && "Not a vector type!"); if (Ty->isVectorType() || Ty->isIntegerType()) { if (!VectorTypesMatch(*this, Ty, VectorTy)) return Diag(R.getBegin(), Ty->isVectorType() ? diag::err_invalid_conversion_between_vectors : diag::err_invalid_conversion_between_vector_and_integer) << VectorTy << Ty << R; } else return Diag(R.getBegin(), diag::err_invalid_conversion_between_vector_and_scalar) << VectorTy << Ty << R; Kind = CK_BitCast; return false; } ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind) { assert(DestTy->isExtVectorType() && "Not an extended vector type!"); QualType SrcTy = CastExpr->getType(); // If SrcTy is a VectorType, the total size must match to explicitly cast to // an ExtVectorType. // In OpenCL, casts between vectors of different types are not allowed. // (See OpenCL 6.2). 
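  // Illustrative example (hypothetical typedefs, not from the original
  // source): given
  //   typedef float float4 __attribute__((ext_vector_type(4)));
  //   typedef int   int4   __attribute__((ext_vector_type(4)));
  // a cast (int4)someFloat4 passes the total-size check (128 bits on both
  // sides) and lowers to a bitcast, while OpenCL additionally requires the
  // canonical types to match exactly.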
if (SrcTy->isVectorType()) { if (!VectorTypesMatch(*this, SrcTy, DestTy) || (getLangOpts().OpenCL && (DestTy.getCanonicalType() != SrcTy.getCanonicalType()))) { Diag(R.getBegin(),diag::err_invalid_conversion_between_ext_vectors) << DestTy << SrcTy << R; return ExprError(); } Kind = CK_BitCast; return CastExpr; } // All non-pointer scalars can be cast to ExtVector type. The appropriate // conversion will take place first from scalar to elt type, and then // splat from elt type to vector. if (SrcTy->isPointerType()) return Diag(R.getBegin(), diag::err_invalid_conversion_between_vector_and_scalar) << DestTy << SrcTy << R; QualType DestElemTy = DestTy->getAs<ExtVectorType>()->getElementType(); ExprResult CastExprRes = CastExpr; CastKind CK = PrepareScalarCast(CastExprRes, DestElemTy); if (CastExprRes.isInvalid()) return ExprError(); CastExpr = ImpCastExprToType(CastExprRes.get(), DestElemTy, CK).get(); Kind = CK_VectorSplat; return CastExpr; } ExprResult Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr) { assert(!D.isInvalidType() && (CastExpr != nullptr) && "ActOnCastExpr(): missing type or expr"); TypeSourceInfo *castTInfo = GetTypeForDeclaratorCast(D, CastExpr->getType()); if (D.isInvalidType()) return ExprError(); if (getLangOpts().CPlusPlus) { // Check that there are no default arguments (C++ only). CheckExtraCXXDefaultArguments(D); } else { // Make sure any TypoExprs have been dealt with. ExprResult Res = CorrectDelayedTyposInExpr(CastExpr); if (!Res.isUsable()) return ExprError(); CastExpr = Res.get(); } checkUnusedDeclAttributes(D); QualType castType = castTInfo->getType(); Ty = CreateParsedType(castType, castTInfo); bool isVectorLiteral = false; // Check for an altivec or OpenCL literal, // i.e. all the elements are integer constants. ParenExpr *PE = dyn_cast<ParenExpr>(CastExpr); ParenListExpr *PLE = dyn_cast<ParenListExpr>(CastExpr); if ((getLangOpts().AltiVec || getLangOpts().ZVector || getLangOpts().OpenCL) && castType->isVectorType() && (PE || PLE)) { if (PLE && PLE->getNumExprs() == 0) { Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer); return ExprError(); } if (PE || PLE->getNumExprs() == 1) { Expr *E = (PE ? PE->getSubExpr() : PLE->getExpr(0)); if (!E->getType()->isVectorType()) isVectorLiteral = true; } else isVectorLiteral = true; } // If this is a vector initializer, '(' type ')' '(' init, ..., init ')' // then handle it as such. if (isVectorLiteral) return BuildVectorLiteral(LParenLoc, RParenLoc, CastExpr, castTInfo); // If the Expr being casted is a ParenListExpr, handle it specially. // This is not an AltiVec-style cast, so turn the ParenListExpr into a // sequence of BinOp comma operators. 
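  // Illustrative example (not from the original source): a cast written as
  //   (int)(a, b, c)
  // is not a vector literal, so the paren list is rebuilt as the left-nested
  // comma expression ((a, b), c) before the cast is applied.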
if (isa<ParenListExpr>(CastExpr)) { ExprResult Result = MaybeConvertParenListExprToParenExpr(S, CastExpr); if (Result.isInvalid()) return ExprError(); CastExpr = Result.get(); } if (getLangOpts().CPlusPlus && !castType->isVoidType() && !getSourceManager().isInSystemMacro(LParenLoc)) Diag(LParenLoc, diag::warn_old_style_cast) << CastExpr->getSourceRange(); #if 0 // HLSL Change - no support for ObjC constructs CheckTollFreeBridgeCast(castType, CastExpr); CheckObjCBridgeRelatedCast(castType, CastExpr); #endif // HLSL Change return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, CastExpr); } ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo) { assert((isa<ParenListExpr>(E) || isa<ParenExpr>(E)) && "Expected paren or paren list expression"); Expr **exprs; unsigned numExprs; Expr *subExpr; SourceLocation LiteralLParenLoc, LiteralRParenLoc; if (ParenListExpr *PE = dyn_cast<ParenListExpr>(E)) { LiteralLParenLoc = PE->getLParenLoc(); LiteralRParenLoc = PE->getRParenLoc(); exprs = PE->getExprs(); numExprs = PE->getNumExprs(); } else { // isa<ParenExpr> by assertion at function entrance LiteralLParenLoc = cast<ParenExpr>(E)->getLParen(); LiteralRParenLoc = cast<ParenExpr>(E)->getRParen(); subExpr = cast<ParenExpr>(E)->getSubExpr(); exprs = &subExpr; numExprs = 1; } QualType Ty = TInfo->getType(); assert(Ty->isVectorType() && "Expected vector type"); SmallVector<Expr *, 8> initExprs; const VectorType *VTy = Ty->getAs<VectorType>(); unsigned numElems = Ty->getAs<VectorType>()->getNumElements(); // '(...)' form of vector initialization in AltiVec: the number of // initializers must be one or must match the size of the vector. // If a single value is specified in the initializer then it will be // replicated to all the components of the vector if (VTy->getVectorKind() == VectorType::AltiVecVector) { // The number of initializers must be one or must match the size of the // vector. If a single value is specified in the initializer then it will // be replicated to all the components of the vector if (numExprs == 1) { QualType ElemTy = Ty->getAs<VectorType>()->getElementType(); ExprResult Literal = DefaultLvalueConversion(exprs[0]); if (Literal.isInvalid()) return ExprError(); Literal = ImpCastExprToType(Literal.get(), ElemTy, PrepareScalarCast(Literal, ElemTy)); return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get()); } else if (numExprs < numElems) { Diag(E->getExprLoc(), diag::err_incorrect_number_of_vector_initializers); return ExprError(); } else initExprs.append(exprs, exprs + numExprs); } else { // For OpenCL, when the number of initializers is a single value, // it will be replicated to all components of the vector. if (getLangOpts().OpenCL && VTy->getVectorKind() == VectorType::GenericVector && numExprs == 1) { QualType ElemTy = Ty->getAs<VectorType>()->getElementType(); ExprResult Literal = DefaultLvalueConversion(exprs[0]); if (Literal.isInvalid()) return ExprError(); Literal = ImpCastExprToType(Literal.get(), ElemTy, PrepareScalarCast(Literal, ElemTy)); return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.get()); } initExprs.append(exprs, exprs + numExprs); } // FIXME: This means that pretty-printing the final AST will produce curly // braces instead of the original commas. 
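  // Illustrative example (assumption, not from the original source): an
  // AltiVec literal such as
  //   (vector int)(1, 2, 3, 4)
  // reaches this point with four collected initializers, so the
  // pretty-printer would render the InitListExpr built below as
  //   (vector int){1, 2, 3, 4}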
  InitListExpr *initE = new (Context) InitListExpr(Context, LiteralLParenLoc,
                                                   initExprs, LiteralRParenLoc);
  initE->setType(Ty);
  return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE);
}

/// This is not an AltiVec-style cast or C++ direct-initialization, so turn
/// the ParenListExpr into a sequence of comma binary operators.
ExprResult
Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
  ParenListExpr *E = dyn_cast<ParenListExpr>(OrigExpr);
  if (!E)
    return OrigExpr;

  ExprResult Result(E->getExpr(0));

  for (unsigned i = 1, e = E->getNumExprs(); i != e && !Result.isInvalid(); ++i)
    Result = ActOnBinOp(S, E->getExprLoc(), tok::comma, Result.get(),
                        E->getExpr(i));

  if (Result.isInvalid()) return ExprError();

  return ActOnParenExpr(E->getLParenLoc(), E->getRParenLoc(), Result.get());
}

ExprResult Sema::ActOnParenListExpr(SourceLocation L,
                                    SourceLocation R,
                                    MultiExprArg Val) {
  Expr *expr = new (Context) ParenListExpr(Context, L, Val, R);
  return expr;
}

/// \brief Emit a specialized diagnostic when one expression is a null pointer
/// constant and the other is not a pointer. Returns true if a diagnostic is
/// emitted.
bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                      SourceLocation QuestionLoc) {
  Expr *NullExpr = LHSExpr;
  Expr *NonPointerExpr = RHSExpr;
  Expr::NullPointerConstantKind NullKind =
      NullExpr->isNullPointerConstant(Context,
                                      Expr::NPC_ValueDependentIsNotNull);

  if (NullKind == Expr::NPCK_NotNull) {
    NullExpr = RHSExpr;
    NonPointerExpr = LHSExpr;
    NullKind =
        NullExpr->isNullPointerConstant(Context,
                                        Expr::NPC_ValueDependentIsNotNull);
  }

  if (NullKind == Expr::NPCK_NotNull)
    return false;

  if (NullKind == Expr::NPCK_ZeroExpression)
    return false;

  if (NullKind == Expr::NPCK_ZeroLiteral) {
    // In this case, check to make sure that we got here from a "NULL"
    // string in the source code.
    NullExpr = NullExpr->IgnoreParenImpCasts();
    SourceLocation loc = NullExpr->getExprLoc();
    if (!findMacroSpelling(loc, "NULL"))
      return false;
  }

  int DiagType = (NullKind == Expr::NPCK_CXX11_nullptr);
  Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands_null)
      << NonPointerExpr->getType() << DiagType
      << NonPointerExpr->getSourceRange();
  return true;
}

/// \brief Return false if the condition expression is valid, true otherwise.
static bool checkCondition(Sema &S, Expr *Cond, SourceLocation QuestionLoc) {
  QualType CondTy = Cond->getType();

  // OpenCL v1.1 s6.3.i says the condition cannot be a floating point type.
  if (S.getLangOpts().OpenCL && CondTy->isFloatingType()) {
    S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat)
      << CondTy << Cond->getSourceRange();
    return true;
  }

  // C99 6.5.15p2
  if (CondTy->isScalarType()) return false;

  S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_scalar)
    << CondTy << Cond->getSourceRange();
  return true;
}

/// \brief Handle when one or both operands are void type.
static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS,
                                         ExprResult &RHS) {
  Expr *LHSExpr = LHS.get();
  Expr *RHSExpr = RHS.get();

  if (!LHSExpr->getType()->isVoidType())
    S.Diag(RHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
      << RHSExpr->getSourceRange();
  if (!RHSExpr->getType()->isVoidType())
    S.Diag(LHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
      << LHSExpr->getSourceRange();
  LHS = S.ImpCastExprToType(LHS.get(), S.Context.VoidTy, CK_ToVoid);
  RHS = S.ImpCastExprToType(RHS.get(), S.Context.VoidTy, CK_ToVoid);
  return S.Context.VoidTy;
}

/// \brief Return false if the NullExpr can be promoted to PointerTy,
/// true otherwise.
static bool checkConditionalNullPointer(Sema &S, ExprResult &NullExpr, QualType PointerTy) { if ((!PointerTy->isAnyPointerType() && !PointerTy->isBlockPointerType()) || !NullExpr.get()->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNull)) return true; NullExpr = S.ImpCastExprToType(NullExpr.get(), PointerTy, CK_NullToPointer); return false; } /// \brief Checks compatibility between two pointers and return the resulting /// type. static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); if (S.Context.hasSameType(LHSTy, RHSTy)) { // Two identical pointers types are always compatible. return LHSTy; } QualType lhptee, rhptee; // Get the pointee types. bool IsBlockPointer = false; if (const BlockPointerType *LHSBTy = LHSTy->getAs<BlockPointerType>()) { lhptee = LHSBTy->getPointeeType(); rhptee = RHSTy->castAs<BlockPointerType>()->getPointeeType(); IsBlockPointer = true; } else { lhptee = LHSTy->castAs<PointerType>()->getPointeeType(); rhptee = RHSTy->castAs<PointerType>()->getPointeeType(); } // C99 6.5.15p6: If both operands are pointers to compatible types or to // differently qualified versions of compatible types, the result type is // a pointer to an appropriately qualified version of the composite // type. // Only CVR-qualifiers exist in the standard, and the differently-qualified // clause doesn't make sense for our extensions. E.g. address space 2 should // be incompatible with address space 3: they may live on different devices or // anything. Qualifiers lhQual = lhptee.getQualifiers(); Qualifiers rhQual = rhptee.getQualifiers(); unsigned MergedCVRQual = lhQual.getCVRQualifiers() | rhQual.getCVRQualifiers(); lhQual.removeCVRQualifiers(); rhQual.removeCVRQualifiers(); lhptee = S.Context.getQualifiedType(lhptee.getUnqualifiedType(), lhQual); rhptee = S.Context.getQualifiedType(rhptee.getUnqualifiedType(), rhQual); QualType CompositeTy = S.Context.mergeTypes(lhptee, rhptee); if (CompositeTy.isNull()) { S.Diag(Loc, diag::ext_typecheck_cond_incompatible_pointers) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); // In this situation, we assume void* type. No especially good // reason, but this is what gcc does, and we do have to pick // to get a consistent AST. QualType incompatTy = S.Context.getPointerType(S.Context.VoidTy); LHS = S.ImpCastExprToType(LHS.get(), incompatTy, CK_BitCast); RHS = S.ImpCastExprToType(RHS.get(), incompatTy, CK_BitCast); return incompatTy; } // The pointer types are compatible. QualType ResultTy = CompositeTy.withCVRQualifiers(MergedCVRQual); if (IsBlockPointer) ResultTy = S.Context.getBlockPointerType(ResultTy); else ResultTy = S.Context.getPointerType(ResultTy); LHS = S.ImpCastExprToType(LHS.get(), ResultTy, CK_BitCast); RHS = S.ImpCastExprToType(RHS.get(), ResultTy, CK_BitCast); return ResultTy; } /// \brief Return the resulting type when the operands are both block pointers. 
static QualType checkConditionalBlockPointerCompatibility(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); if (!LHSTy->isBlockPointerType() || !RHSTy->isBlockPointerType()) { if (LHSTy->isVoidPointerType() || RHSTy->isVoidPointerType()) { QualType destType = S.Context.getPointerType(S.Context.VoidTy); LHS = S.ImpCastExprToType(LHS.get(), destType, CK_BitCast); RHS = S.ImpCastExprToType(RHS.get(), destType, CK_BitCast); return destType; } S.Diag(Loc, diag::err_typecheck_cond_incompatible_operands) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // We have 2 block pointer types. return checkConditionalPointerCompatibility(S, LHS, RHS, Loc); } /// \brief Return the resulting type when the operands are both pointers. static QualType checkConditionalObjectPointersCompatibility(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { // get the pointer types QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); // get the "pointed to" types QualType lhptee = LHSTy->getAs<PointerType>()->getPointeeType(); QualType rhptee = RHSTy->getAs<PointerType>()->getPointeeType(); // ignore qualifiers on void (C99 6.5.15p3, clause 6) if (lhptee->isVoidType() && rhptee->isIncompleteOrObjectType()) { // Figure out necessary qualifiers (C99 6.5.15p6) QualType destPointee = S.Context.getQualifiedType(lhptee, rhptee.getQualifiers()); QualType destType = S.Context.getPointerType(destPointee); // Add qualifiers if necessary. LHS = S.ImpCastExprToType(LHS.get(), destType, CK_NoOp); // Promote to void*. RHS = S.ImpCastExprToType(RHS.get(), destType, CK_BitCast); return destType; } if (rhptee->isVoidType() && lhptee->isIncompleteOrObjectType()) { QualType destPointee = S.Context.getQualifiedType(rhptee, lhptee.getQualifiers()); QualType destType = S.Context.getPointerType(destPointee); // Add qualifiers if necessary. RHS = S.ImpCastExprToType(RHS.get(), destType, CK_NoOp); // Promote to void*. LHS = S.ImpCastExprToType(LHS.get(), destType, CK_BitCast); return destType; } return checkConditionalPointerCompatibility(S, LHS, RHS, Loc); } /// \brief Return false if the first expression is not an integer and the second /// expression is not a pointer, true otherwise. static bool checkPointerIntegerMismatch(Sema &S, ExprResult &Int, Expr* PointerExpr, SourceLocation Loc, bool IsIntFirstExpr) { if (!PointerExpr->getType()->isPointerType() || !Int.get()->getType()->isIntegerType()) return false; Expr *Expr1 = IsIntFirstExpr ? Int.get() : PointerExpr; Expr *Expr2 = IsIntFirstExpr ? PointerExpr : Int.get(); S.Diag(Loc, diag::ext_typecheck_cond_pointer_integer_mismatch) << Expr1->getType() << Expr2->getType() << Expr1->getSourceRange() << Expr2->getSourceRange(); Int = S.ImpCastExprToType(Int.get(), PointerExpr->getType(), CK_IntegralToPointer); return true; } /// \brief Simple conversion between integer and floating point types. /// /// Used when handling the OpenCL conditional operator where the /// condition is a vector while the other operands are scalar. /// /// OpenCL v1.1 s6.3.i and s6.11.6 together require that the scalar /// types are either integer or floating type. Between the two /// operands, the type with the higher rank is defined as the "result /// type". The other operand needs to be promoted to the same type. No /// other type promotion is allowed. 
We cannot use /// UsualArithmeticConversions() for this purpose, since it always /// promotes promotable types. static QualType OpenCLArithmeticConversions(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { LHS = S.DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); // For conversion purposes, we ignore any qualifiers. // For example, "const float" and "float" are equivalent. QualType LHSType = S.Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType(); QualType RHSType = S.Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType(); if (!LHSType->isIntegerType() && !LHSType->isRealFloatingType()) { S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float) << LHSType << LHS.get()->getSourceRange(); return QualType(); } if (!RHSType->isIntegerType() && !RHSType->isRealFloatingType()) { S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_int_float) << RHSType << RHS.get()->getSourceRange(); return QualType(); } // If both types are identical, no conversion is needed. if (LHSType == RHSType) return LHSType; // Now handle "real" floating types (i.e. float, double, long double). if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType()) return handleFloatConversion(S, LHS, RHS, LHSType, RHSType, /*IsCompAssign = */ false); // Finally, we have two differing integer types. return handleIntegerConversion<doIntegralCast, doIntegralCast> (S, LHS, RHS, LHSType, RHSType, /*IsCompAssign = */ false); } /// \brief Convert scalar operands to a vector that matches the /// condition in length. /// /// Used when handling the OpenCL conditional operator where the /// condition is a vector while the other operands are scalar. /// /// We first compute the "result type" for the scalar operands /// according to OpenCL v1.1 s6.3.i. Both operands are then converted /// into a vector of that type where the length matches the condition /// vector type. s6.11.6 requires that the element types of the result /// and the condition must have the same number of bits. static QualType OpenCLConvertScalarsToVectors(Sema &S, ExprResult &LHS, ExprResult &RHS, QualType CondTy, SourceLocation QuestionLoc) { QualType ResTy = OpenCLArithmeticConversions(S, LHS, RHS, QuestionLoc); if (ResTy.isNull()) return QualType(); const VectorType *CV = CondTy->getAs<VectorType>(); assert(CV); // Determine the vector result type unsigned NumElements = CV->getNumElements(); QualType VectorTy = S.Context.getExtVectorType(ResTy, NumElements); // Ensure that all types have the same number of bits if (S.Context.getTypeSize(CV->getElementType()) != S.Context.getTypeSize(ResTy)) { // Since VectorTy is created internally, it does not pretty print // with an OpenCL name. Instead, we just print a description. 
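    // For example (illustrative), a mismatch against an int4 condition is
    // reported against the synthesized description "(vector of 4 'int'
    // values)" rather than any source-level typedef name.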
std::string EleTyName = ResTy.getUnqualifiedType().getAsString(); SmallString<64> Str; llvm::raw_svector_ostream OS(Str); OS << "(vector of " << NumElements << " '" << EleTyName << "' values)"; S.Diag(QuestionLoc, diag::err_conditional_vector_element_size) << CondTy << OS.str(); return QualType(); } // Convert operands to the vector result type LHS = S.ImpCastExprToType(LHS.get(), VectorTy, CK_VectorSplat); RHS = S.ImpCastExprToType(RHS.get(), VectorTy, CK_VectorSplat); return VectorTy; } /// \brief Return false if this is a valid OpenCL condition vector static bool checkOpenCLConditionVector(Sema &S, Expr *Cond, SourceLocation QuestionLoc) { // OpenCL v1.1 s6.11.6 says the elements of the vector must be of // integral type. const VectorType *CondTy = Cond->getType()->getAs<VectorType>(); assert(CondTy); QualType EleTy = CondTy->getElementType(); if (EleTy->isIntegerType()) return false; S.Diag(QuestionLoc, diag::err_typecheck_cond_expect_nonfloat) << Cond->getType() << Cond->getSourceRange(); return true; } /// \brief Return false if the vector condition type and the vector /// result type are compatible. /// /// OpenCL v1.1 s6.11.6 requires that both vector types have the same /// number of elements, and their element types have the same number /// of bits. static bool checkVectorResult(Sema &S, QualType CondTy, QualType VecResTy, SourceLocation QuestionLoc) { const VectorType *CV = CondTy->getAs<VectorType>(); const VectorType *RV = VecResTy->getAs<VectorType>(); assert(CV && RV); if (CV->getNumElements() != RV->getNumElements()) { S.Diag(QuestionLoc, diag::err_conditional_vector_size) << CondTy << VecResTy; return true; } QualType CVE = CV->getElementType(); QualType RVE = RV->getElementType(); if (S.Context.getTypeSize(CVE) != S.Context.getTypeSize(RVE)) { S.Diag(QuestionLoc, diag::err_conditional_vector_element_size) << CondTy << VecResTy; return true; } return false; } /// \brief Return the resulting type for the conditional operator in /// OpenCL (aka "ternary selection operator", OpenCL v1.1 /// s6.3.i) when the condition is a vector type. static QualType OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { Cond = S.DefaultFunctionArrayLvalueConversion(Cond.get()); if (Cond.isInvalid()) return QualType(); QualType CondTy = Cond.get()->getType(); if (checkOpenCLConditionVector(S, Cond.get(), QuestionLoc)) return QualType(); // If either operand is a vector then find the vector type of the // result as specified in OpenCL v1.1 s6.3.i. if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); if (VecResTy.isNull()) return QualType(); // The result type must match the condition type as specified in // OpenCL v1.1 s6.11.6. if (checkVectorResult(S, CondTy, VecResTy, QuestionLoc)) return QualType(); return VecResTy; } // Both operands are scalar. return OpenCLConvertScalarsToVectors(S, LHS, RHS, CondTy, QuestionLoc); } /// Note that LHS is not null here, even if this is the gnu "x ?: y" extension. /// In that case, LHS = cond. 
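/// (Illustrative note, not from the original source: in the GNU form
/// 'x ?: y' the shared 'x' is evaluated only once; ActOnConditionalOp
/// materializes it as an OpaqueValueExpr before calling this routine.)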
/// C99 6.5.15 QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc) { ExprResult LHSResult = CheckPlaceholderExpr(LHS.get()); if (!LHSResult.isUsable()) return QualType(); LHS = LHSResult; ExprResult RHSResult = CheckPlaceholderExpr(RHS.get()); if (!RHSResult.isUsable()) return QualType(); RHS = RHSResult; // HLSL Change Starts: HLSL supports a vector condition and is // sufficiently different to merit its own checker. if (getLangOpts().HLSL) { // For HLSL 202x+ in a ternary operator we follow C++ rules unless both the // right and left are minimum precision types, or either type is not a // builtin scalar integer or float (e.g. vector, matrix, UDT). QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); if (getLangOpts().HLSLVersion < hlsl::LangStd::v202x || !LHSTy->isBuiltinType() || !RHSTy->isBuiltinType() || (hlsl::IsHLSLMinPrecision(LHSTy) && hlsl::IsHLSLMinPrecision(RHSTy))) return hlsl::CheckVectorConditional(this, Cond, LHS, RHS, QuestionLoc); } // HLSL Change Ends // C++ is sufficiently different to merit its own checker. if (getLangOpts().CPlusPlus) return CXXCheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc); VK = VK_RValue; OK = OK_Ordinary; // The OpenCL operator with a vector condition is sufficiently // different to merit its own checker. if (getLangOpts().OpenCL && Cond.get()->getType()->isVectorType()) return OpenCLCheckVectorConditional(*this, Cond, LHS, RHS, QuestionLoc); // First, check the condition. Cond = UsualUnaryConversions(Cond.get()); if (Cond.isInvalid()) return QualType(); if (checkCondition(*this, Cond.get(), QuestionLoc)) return QualType(); // Now check the two expressions. if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); QualType ResTy = UsualArithmeticConversions(LHS, RHS); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); // If both operands have arithmetic type, do the usual arithmetic conversions // to find a common type: C99 6.5.15p3,5. if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) { LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy)); RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy)); return ResTy; } // If both operands are the same structure or union type, the result is that // type. if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3 if (const RecordType *RHSRT = RHSTy->getAs<RecordType>()) if (LHSRT->getDecl() == RHSRT->getDecl()) // "If both the operands have structure or union type, the result has // that type." This implies that CV qualifiers are dropped. return LHSTy.getUnqualifiedType(); // FIXME: Type of conditional expression must be complete in C mode. } // C99 6.5.15p5: "If both operands have void type, the result has void type." // The following || allows only one side to be void (a GCC-ism). if (LHSTy->isVoidType() || RHSTy->isVoidType()) { return checkConditionalVoidType(*this, LHS, RHS); } // C99 6.5.15p6 - "if one operand is a null pointer constant, the result has // the type of the other operand." 
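  // Illustrative example (not from the original source): in
  //   cond ? (char *)p : 0
  // the literal 0 is a null pointer constant, so the conditional has type
  // 'char *' and the null side is converted with CK_NullToPointer below.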
if (!checkConditionalNullPointer(*this, RHS, LHSTy)) return LHSTy; if (!checkConditionalNullPointer(*this, LHS, RHSTy)) return RHSTy; // All objective-c pointer type analysis is done here. QualType compositeType = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (!compositeType.isNull()) return compositeType; // Handle block pointer types. if (LHSTy->isBlockPointerType() || RHSTy->isBlockPointerType()) return checkConditionalBlockPointerCompatibility(*this, LHS, RHS, QuestionLoc); // Check constraints for C object pointers types (C99 6.5.15p3,6). if (LHSTy->isPointerType() && RHSTy->isPointerType()) return checkConditionalObjectPointersCompatibility(*this, LHS, RHS, QuestionLoc); // GCC compatibility: soften pointer/integer mismatch. Note that // null pointers have been filtered out by this point. if (checkPointerIntegerMismatch(*this, LHS, RHS.get(), QuestionLoc, /*isIntFirstExpr=*/true)) return RHSTy; if (checkPointerIntegerMismatch(*this, RHS, LHS.get(), QuestionLoc, /*isIntFirstExpr=*/false)) return LHSTy; // Emit a better diagnostic if one of the expressions is a null pointer // constant and the other is not a pointer type. In this case, the user most // likely forgot to take the address of the other expression. if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc)) return QualType(); // Otherwise, the operands are not compatible. Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } /// FindCompositeObjCPointerType - Helper method to find composite type of /// two objective-c pointer types of the two input expressions. QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { QualType LHSTy = LHS.get()->getType(); QualType RHSTy = RHS.get()->getType(); // Handle things like Class and struct objc_class*. Here we case the result // to the pseudo-builtin, because that will be implicitly cast back to the // redefinition type if an attempt is made to access its fields. if (LHSTy->isObjCClassType() && (Context.hasSameType(RHSTy, Context.getObjCClassRedefinitionType()))) { RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast); return LHSTy; } if (RHSTy->isObjCClassType() && (Context.hasSameType(LHSTy, Context.getObjCClassRedefinitionType()))) { LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast); return RHSTy; } // And the same for struct objc_object* / id if (LHSTy->isObjCIdType() && (Context.hasSameType(RHSTy, Context.getObjCIdRedefinitionType()))) { RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_CPointerToObjCPointerCast); return LHSTy; } if (RHSTy->isObjCIdType() && (Context.hasSameType(LHSTy, Context.getObjCIdRedefinitionType()))) { LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_CPointerToObjCPointerCast); return RHSTy; } // And the same for struct objc_selector* / SEL if (Context.isObjCSelType(LHSTy) && (Context.hasSameType(RHSTy, Context.getObjCSelRedefinitionType()))) { RHS = ImpCastExprToType(RHS.get(), LHSTy, CK_BitCast); return LHSTy; } if (Context.isObjCSelType(RHSTy) && (Context.hasSameType(LHSTy, Context.getObjCSelRedefinitionType()))) { LHS = ImpCastExprToType(LHS.get(), RHSTy, CK_BitCast); return RHSTy; } // Check constraints for Objective-C object pointers types. 
if (LHSTy->isObjCObjectPointerType() && RHSTy->isObjCObjectPointerType()) { if (Context.getCanonicalType(LHSTy) == Context.getCanonicalType(RHSTy)) { // Two identical object pointer types are always compatible. return LHSTy; } const ObjCObjectPointerType *LHSOPT = LHSTy->castAs<ObjCObjectPointerType>(); const ObjCObjectPointerType *RHSOPT = RHSTy->castAs<ObjCObjectPointerType>(); QualType compositeType = LHSTy; // If both operands are interfaces and either operand can be // assigned to the other, use that type as the composite // type. This allows // xxx ? (A*) a : (B*) b // where B is a subclass of A. // // Additionally, as for assignment, if either type is 'id' // allow silent coercion. Finally, if the types are // incompatible then make sure to use 'id' as the composite // type so the result is acceptable for sending messages to. // FIXME: Consider unifying with 'areComparableObjCPointerTypes'. // It could return the composite type. if (!(compositeType = Context.areCommonBaseCompatible(LHSOPT, RHSOPT)).isNull()) { // Nothing more to do. } else if (Context.canAssignObjCInterfaces(LHSOPT, RHSOPT)) { compositeType = RHSOPT->isObjCBuiltinType() ? RHSTy : LHSTy; } else if (Context.canAssignObjCInterfaces(RHSOPT, LHSOPT)) { compositeType = LHSOPT->isObjCBuiltinType() ? LHSTy : RHSTy; } else if ((LHSTy->isObjCQualifiedIdType() || RHSTy->isObjCQualifiedIdType()) && Context.ObjCQualifiedIdTypesAreCompatible(LHSTy, RHSTy, true)) { // Need to handle "id<xx>" explicitly. // GCC allows qualified id and any Objective-C type to devolve to // id. Currently localizing to here until clear this should be // part of ObjCQualifiedIdTypesAreCompatible. compositeType = Context.getObjCIdType(); } else if (LHSTy->isObjCIdType() || RHSTy->isObjCIdType()) { compositeType = Context.getObjCIdType(); } else { Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); QualType incompatTy = Context.getObjCIdType(); LHS = ImpCastExprToType(LHS.get(), incompatTy, CK_BitCast); RHS = ImpCastExprToType(RHS.get(), incompatTy, CK_BitCast); return incompatTy; } // The object pointer types are compatible. LHS = ImpCastExprToType(LHS.get(), compositeType, CK_BitCast); RHS = ImpCastExprToType(RHS.get(), compositeType, CK_BitCast); return compositeType; } // Check Objective-C object pointer types and 'void *' if (LHSTy->isVoidPointerType() && RHSTy->isObjCObjectPointerType()) { if (getLangOpts().ObjCAutoRefCount) { // ARC forbids the implicit conversion of object pointers to 'void *', // so these types are not compatible. Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); LHS = RHS = true; return QualType(); } QualType lhptee = LHSTy->getAs<PointerType>()->getPointeeType(); QualType rhptee = RHSTy->getAs<ObjCObjectPointerType>()->getPointeeType(); QualType destPointee = Context.getQualifiedType(lhptee, rhptee.getQualifiers()); QualType destType = Context.getPointerType(destPointee); // Add qualifiers if necessary. LHS = ImpCastExprToType(LHS.get(), destType, CK_NoOp); // Promote to void*. RHS = ImpCastExprToType(RHS.get(), destType, CK_BitCast); return destType; } if (LHSTy->isObjCObjectPointerType() && RHSTy->isVoidPointerType()) { if (getLangOpts().ObjCAutoRefCount) { // ARC forbids the implicit conversion of object pointers to 'void *', // so these types are not compatible. 
Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); LHS = RHS = true; return QualType(); } QualType lhptee = LHSTy->getAs<ObjCObjectPointerType>()->getPointeeType(); QualType rhptee = RHSTy->getAs<PointerType>()->getPointeeType(); QualType destPointee = Context.getQualifiedType(rhptee, lhptee.getQualifiers()); QualType destType = Context.getPointerType(destPointee); // Add qualifiers if necessary. RHS = ImpCastExprToType(RHS.get(), destType, CK_NoOp); // Promote to void*. LHS = ImpCastExprToType(LHS.get(), destType, CK_BitCast); return destType; } return QualType(); } /// SuggestParentheses - Emit a note with a fixit hint that wraps /// ParenRange in parentheses. static void SuggestParentheses(Sema &Self, SourceLocation Loc, const PartialDiagnostic &Note, SourceRange ParenRange) { SourceLocation EndLoc = Self.PP.getLocForEndOfToken(ParenRange.getEnd()); if (ParenRange.getBegin().isFileID() && ParenRange.getEnd().isFileID() && EndLoc.isValid()) { Self.Diag(Loc, Note) << FixItHint::CreateInsertion(ParenRange.getBegin(), "(") << FixItHint::CreateInsertion(EndLoc, ")"); } else { // We can't display the parentheses, so just show the bare note. Self.Diag(Loc, Note) << ParenRange; } } static bool IsArithmeticOp(BinaryOperatorKind Opc) { return Opc >= BO_Mul && Opc <= BO_Shr; } /// IsArithmeticBinaryExpr - Returns true if E is an arithmetic binary /// expression, either using a built-in or overloaded operator, /// and sets *OpCode to the opcode and *RHSExprs to the right-hand side /// expression. static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode, Expr **RHSExprs) { // Don't strip parenthesis: we should not warn if E is in parenthesis. E = E->IgnoreImpCasts(); E = E->IgnoreConversionOperator(); E = E->IgnoreImpCasts(); // Built-in binary operator. if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E)) { if (IsArithmeticOp(OP->getOpcode())) { *Opcode = OP->getOpcode(); *RHSExprs = OP->getRHS(); return true; } } // Overloaded operator. if (CXXOperatorCallExpr *Call = dyn_cast<CXXOperatorCallExpr>(E)) { if (Call->getNumArgs() != 2) return false; // Make sure this is really a binary operator that is safe to pass into // BinaryOperator::getOverloadedOpcode(), e.g. it's not a subscript op. OverloadedOperatorKind OO = Call->getOperator(); if (OO < OO_Plus || OO > OO_Arrow || OO == OO_PlusPlus || OO == OO_MinusMinus) return false; BinaryOperatorKind OpKind = BinaryOperator::getOverloadedOpcode(OO); if (IsArithmeticOp(OpKind)) { *Opcode = OpKind; *RHSExprs = Call->getArg(1); return true; } } return false; } static bool IsLogicOp(BinaryOperatorKind Opc) { return (Opc >= BO_LT && Opc <= BO_NE) || (Opc >= BO_LAnd && Opc <= BO_LOr); } /// ExprLooksBoolean - Returns true if E looks boolean, i.e. it has boolean type /// or is a logical expression such as (x==y) which has int type, but is /// commonly interpreted as boolean. 
static bool ExprLooksBoolean(Expr *E) {
  E = E->IgnoreParenImpCasts();

  if (E->getType()->isBooleanType())
    return true;
  if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E))
    return IsLogicOp(OP->getOpcode());
  if (UnaryOperator *OP = dyn_cast<UnaryOperator>(E))
    return OP->getOpcode() == UO_LNot;
  if (E->getType()->isPointerType())
    return true;

  return false;
}

/// DiagnoseConditionalPrecedence - Emit a warning when a conditional operator
/// and binary operator are mixed in a way that suggests the programmer assumed
/// the conditional operator has higher precedence, for example:
/// "int x = a + someBinaryCondition ? 1 : 2".
static void DiagnoseConditionalPrecedence(Sema &Self,
                                          SourceLocation OpLoc,
                                          Expr *Condition,
                                          Expr *LHSExpr,
                                          Expr *RHSExpr) {
  BinaryOperatorKind CondOpcode;
  Expr *CondRHS;

  if (!IsArithmeticBinaryExpr(Condition, &CondOpcode, &CondRHS))
    return;
  if (!ExprLooksBoolean(CondRHS))
    return;

  // The condition is an arithmetic binary expression, with a right-
  // hand side that looks boolean, so warn.

  Self.Diag(OpLoc, diag::warn_precedence_conditional)
      << Condition->getSourceRange()
      << BinaryOperator::getOpcodeStr(CondOpcode);

  SuggestParentheses(Self, OpLoc,
    Self.PDiag(diag::note_precedence_silence)
      << BinaryOperator::getOpcodeStr(CondOpcode),
    SourceRange(Condition->getLocStart(), Condition->getLocEnd()));

  SuggestParentheses(Self, OpLoc,
    Self.PDiag(diag::note_precedence_conditional_first),
    SourceRange(CondRHS->getLocStart(), RHSExpr->getLocEnd()));
}

/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
                                    SourceLocation ColonLoc,
                                    Expr *CondExpr, Expr *LHSExpr,
                                    Expr *RHSExpr) {
  if (!getLangOpts().CPlusPlus) {
    // C cannot handle TypoExpr nodes in the condition because it
    // doesn't handle dependent types properly, so make sure any TypoExprs have
    // been dealt with before checking the operands.
    ExprResult CondResult = CorrectDelayedTyposInExpr(CondExpr);
    if (!CondResult.isUsable()) return ExprError();
    CondExpr = CondResult.get();
  }

  // If this is the gnu "x ?: y" extension, analyze the types as though the LHS
  // was the condition.
  OpaqueValueExpr *opaqueValue = nullptr;
  Expr *commonExpr = nullptr;
  if (!LHSExpr) {
    commonExpr = CondExpr;
    // Lower out placeholder types first. This is important so that we don't
    // try to capture a placeholder. This happens in a few cases in C++, such
    // as Objective-C++'s dictionary subscripting syntax.
    if (commonExpr->hasPlaceholderType()) {
      ExprResult result = CheckPlaceholderExpr(commonExpr);
      if (!result.isUsable()) return ExprError();
      commonExpr = result.get();
    }
    // We usually want to apply unary conversions *before* saving, except
    // in the special case of a C++ l-value conditional.
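    // Illustrative example (assumption, not from the original source): for
    // C++ code such as
    //   (x ?: y) = 0;
    // with int lvalues x and y, the result must stay an lvalue, so the
    // common expression is saved without lvalue-to-rvalue conversion; the
    // condition below detects exactly that glvalue case.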
    if (!(getLangOpts().CPlusPlus
          && !commonExpr->isTypeDependent()
          && commonExpr->getValueKind() == RHSExpr->getValueKind()
          && commonExpr->isGLValue()
          && commonExpr->isOrdinaryOrBitFieldObject()
          && RHSExpr->isOrdinaryOrBitFieldObject()
          && Context.hasSameType(commonExpr->getType(), RHSExpr->getType()))) {
      ExprResult commonRes = UsualUnaryConversions(commonExpr);
      if (commonRes.isInvalid())
        return ExprError();
      commonExpr = commonRes.get();
    }

    opaqueValue = new (Context) OpaqueValueExpr(commonExpr->getExprLoc(),
                                                commonExpr->getType(),
                                                commonExpr->getValueKind(),
                                                commonExpr->getObjectKind(),
                                                commonExpr);
    LHSExpr = CondExpr = opaqueValue;
  }

  ExprValueKind VK = VK_RValue;
  ExprObjectKind OK = OK_Ordinary;
  ExprResult Cond = CondExpr, LHS = LHSExpr, RHS = RHSExpr;
  QualType result = CheckConditionalOperands(Cond, LHS, RHS,
                                             VK, OK, QuestionLoc);
  if (result.isNull() || Cond.isInvalid() || LHS.isInvalid() ||
      RHS.isInvalid())
    return ExprError();

  DiagnoseConditionalPrecedence(*this, QuestionLoc, Cond.get(), LHS.get(),
                                RHS.get());

  CheckBoolLikeConversion(Cond.get(), QuestionLoc);

  if (!commonExpr)
    return new (Context)
        ConditionalOperator(Cond.get(), QuestionLoc, LHS.get(), ColonLoc,
                            RHS.get(), result, VK, OK);

  return new (Context) BinaryConditionalOperator(
      commonExpr, opaqueValue, Cond.get(), LHS.get(), RHS.get(), QuestionLoc,
      ColonLoc, result, VK, OK);
}

// checkPointerTypesForAssignment - This is a very tricky routine (despite
// being closely modeled after the C99 spec:-). The odd characteristic of this
// routine is it effectively ignores the qualifiers on the top level pointee.
// This circumvents the usual type rules specified in 6.2.7p1 & 6.7.5.[1-3].
// FIXME: add a couple examples in this comment.
static Sema::AssignConvertType
checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
  assert(LHSType.isCanonical() && "LHS not canonicalized!");
  assert(RHSType.isCanonical() && "RHS not canonicalized!");

  // get the "pointed to" type (ignoring qualifiers at the top level)
  const Type *lhptee, *rhptee;
  Qualifiers lhq, rhq;
  std::tie(lhptee, lhq) =
      cast<PointerType>(LHSType)->getPointeeType().split().asPair();
  std::tie(rhptee, rhq) =
      cast<PointerType>(RHSType)->getPointeeType().split().asPair();

  Sema::AssignConvertType ConvTy = Sema::Compatible;

  // C99 6.5.16.1p1: This following citation is common to constraints
  // 3 & 4 (below). ...and the type *pointed to* by the left has all the
  // qualifiers of the type *pointed to* by the right;

  // As a special case, 'non-__weak A *' -> 'non-__weak const *' is okay.
  if (lhq.getObjCLifetime() != rhq.getObjCLifetime() &&
      lhq.compatiblyIncludesObjCLifetime(rhq)) {
    // Ignore lifetime for further calculation.
    lhq.removeObjCLifetime();
    rhq.removeObjCLifetime();
  }

  if (!lhq.compatiblyIncludes(rhq)) {
    // Treat address-space mismatches as fatal. TODO: address subspaces
    if (!lhq.isAddressSpaceSupersetOf(rhq))
      ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;

    // It's okay to add or remove GC or lifetime qualifiers when converting to
    // and from void*.
    else if (lhq.withoutObjCGCAttr().withoutObjCLifetime()
                 .compatiblyIncludes(
                     rhq.withoutObjCGCAttr().withoutObjCLifetime())
             && (lhptee->isVoidType() || rhptee->isVoidType()))
      ; // keep old

    // Treat lifetime mismatches as fatal.
    else if (lhq.getObjCLifetime() != rhq.getObjCLifetime())
      ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;

    // For GCC compatibility, other qualifier mismatches are treated
    // as still compatible in C.
    else ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
  }

  // C99 6.5.16.1p1 (constraint 4): If one operand is a pointer to an object or
  // incomplete type and the other is a pointer to a qualified or unqualified
  // version of void...
  if (lhptee->isVoidType()) {
    if (rhptee->isIncompleteOrObjectType())
      return ConvTy;

    // As an extension, we allow cast to/from void* to function pointer.
    assert(rhptee->isFunctionType());
    return Sema::FunctionVoidPointer;
  }

  if (rhptee->isVoidType()) {
    if (lhptee->isIncompleteOrObjectType())
      return ConvTy;

    // As an extension, we allow cast to/from void* to function pointer.
    assert(lhptee->isFunctionType());
    return Sema::FunctionVoidPointer;
  }

  // C99 6.5.16.1p1 (constraint 3): both operands are pointers to qualified or
  // unqualified versions of compatible types, ...
  QualType ltrans = QualType(lhptee, 0), rtrans = QualType(rhptee, 0);
  if (!S.Context.typesAreCompatible(ltrans, rtrans)) {
    // Check if the pointee types are compatible ignoring the sign.
    // We explicitly check for char so that we catch "char" vs
    // "unsigned char" on systems where "char" is unsigned.
    if (lhptee->isCharType())
      ltrans = S.Context.UnsignedCharTy;
    else if (lhptee->hasSignedIntegerRepresentation())
      ltrans = S.Context.getCorrespondingUnsignedType(ltrans);

    if (rhptee->isCharType())
      rtrans = S.Context.UnsignedCharTy;
    else if (rhptee->hasSignedIntegerRepresentation())
      rtrans = S.Context.getCorrespondingUnsignedType(rtrans);

    if (ltrans == rtrans) {
      // Types are compatible ignoring the sign. Qualifier incompatibility
      // takes priority over sign incompatibility because the sign
      // warning can be disabled.
      if (ConvTy != Sema::Compatible)
        return ConvTy;

      return Sema::IncompatiblePointerSign;
    }

    // If we are a multi-level pointer, it's possible that our issue is simply
    // one of qualification - e.g. char ** -> const char ** is not allowed. If
    // the eventual target type is the same and the pointers have the same
    // level of indirection, this must be the issue.
    if (isa<PointerType>(lhptee) && isa<PointerType>(rhptee)) {
      do {
        lhptee = cast<PointerType>(lhptee)->getPointeeType().getTypePtr();
        rhptee = cast<PointerType>(rhptee)->getPointeeType().getTypePtr();
      } while (isa<PointerType>(lhptee) && isa<PointerType>(rhptee));

      if (lhptee == rhptee)
        return Sema::IncompatibleNestedPointerQualifiers;
    }

    // General pointer incompatibility takes priority over qualifiers.
    return Sema::IncompatiblePointer;
  }
  if (!S.getLangOpts().CPlusPlus &&
      S.IsNoReturnConversion(ltrans, rtrans, ltrans))
    return Sema::IncompatiblePointer;
  return ConvTy;
}

/// checkBlockPointerTypesForAssignment - This routine determines whether two
/// block pointer types are compatible or whether a block and normal pointer
/// are compatible. It is more restrictive than comparing two function pointer
/// types.
static Sema::AssignConvertType
checkBlockPointerTypesForAssignment(Sema &S, QualType LHSType,
                                    QualType RHSType) {
  assert(LHSType.isCanonical() && "LHS not canonicalized!");
  assert(RHSType.isCanonical() && "RHS not canonicalized!");

  QualType lhptee, rhptee;

  // get the "pointed to" type (ignoring qualifiers at the top level)
  lhptee = cast<BlockPointerType>(LHSType)->getPointeeType();
  rhptee = cast<BlockPointerType>(RHSType)->getPointeeType();

  // In C++, the types have to match exactly.
  if (S.getLangOpts().CPlusPlus)
    return Sema::IncompatibleBlockPointer;

  Sema::AssignConvertType ConvTy = Sema::Compatible;

  // For blocks we enforce that qualifiers are identical.
if (lhptee.getLocalQualifiers() != rhptee.getLocalQualifiers()) ConvTy = Sema::CompatiblePointerDiscardsQualifiers; if (!S.Context.typesAreBlockPointerCompatible(LHSType, RHSType)) return Sema::IncompatibleBlockPointer; return ConvTy; } /// checkObjCPointerTypesForAssignment - Compares two objective-c pointer types /// for assignment compatibility. static Sema::AssignConvertType checkObjCPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) { assert(LHSType.isCanonical() && "LHS was not canonicalized!"); assert(RHSType.isCanonical() && "RHS was not canonicalized!"); if (LHSType->isObjCBuiltinType()) { // Class is not compatible with ObjC object pointers. if (LHSType->isObjCClassType() && !RHSType->isObjCBuiltinType() && !RHSType->isObjCQualifiedClassType()) return Sema::IncompatiblePointer; return Sema::Compatible; } if (RHSType->isObjCBuiltinType()) { if (RHSType->isObjCClassType() && !LHSType->isObjCBuiltinType() && !LHSType->isObjCQualifiedClassType()) return Sema::IncompatiblePointer; return Sema::Compatible; } QualType lhptee = LHSType->getAs<ObjCObjectPointerType>()->getPointeeType(); QualType rhptee = RHSType->getAs<ObjCObjectPointerType>()->getPointeeType(); if (!lhptee.isAtLeastAsQualifiedAs(rhptee) && // make an exception for id<P> !LHSType->isObjCQualifiedIdType()) return Sema::CompatiblePointerDiscardsQualifiers; if (S.Context.typesAreCompatible(LHSType, RHSType)) return Sema::Compatible; if (LHSType->isObjCQualifiedIdType() || RHSType->isObjCQualifiedIdType()) return Sema::IncompatibleObjCQualifiedId; return Sema::IncompatiblePointer; } Sema::AssignConvertType Sema::CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType) { // Fake up an opaque expression. We don't actually care about what // cast operations are required, so if CheckAssignmentConstraints // adds casts to this they'll be wasted, but fortunately that doesn't // usually happen on valid code. OpaqueValueExpr RHSExpr(Loc, RHSType, VK_RValue); ExprResult RHSPtr = &RHSExpr; CastKind K = CK_Invalid; return CheckAssignmentConstraints(LHSType, RHSPtr, K); } /// CheckAssignmentConstraints (C99 6.5.16) - This routine currently /// has code to accommodate several GCC extensions when type checking /// pointers. Here are some objectionable examples that GCC considers warnings: /// /// int a, *pint; /// short *pshort; /// struct foo *pfoo; /// /// pint = pshort; // warning: assignment from incompatible pointer type /// a = pint; // warning: assignment makes integer from pointer without a cast /// pint = a; // warning: assignment makes pointer from integer without a cast /// pint = pfoo; // warning: assignment from incompatible pointer type /// /// As a result, the code for dealing with pointers is more complex than the /// C99 spec dictates. /// /// Sets 'Kind' for any result kind except Incompatible. Sema::AssignConvertType Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind) { QualType RHSType = RHS.get()->getType(); QualType OrigLHSType = LHSType; // Get canonical types. We're not formatting these types, just comparing // them. LHSType = Context.getCanonicalType(LHSType).getUnqualifiedType(); RHSType = Context.getCanonicalType(RHSType).getUnqualifiedType(); // Common case: no conversion required. if (LHSType == RHSType) { Kind = CK_NoOp; return Compatible; } // If we have an atomic type, try a non-atomic assignment, then just add an // atomic qualification step. 
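// Illustrative example (added commentary): for '_Atomic int a; float f;',
// 'a = f;' is first checked as a plain 'int = float' assignment and then
// wrapped in a CK_NonAtomicToAtomic step.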
if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) { Sema::AssignConvertType result = CheckAssignmentConstraints(AtomicTy->getValueType(), RHS, Kind); if (result != Compatible) return result; if (Kind != CK_NoOp) RHS = ImpCastExprToType(RHS.get(), AtomicTy->getValueType(), Kind); Kind = CK_NonAtomicToAtomic; return Compatible; } // If the left-hand side is a reference type, then we are in a // (rare!) case where we've allowed the use of references in C, // e.g., as a parameter type in a built-in function. In this case, // just make sure that the type referenced is compatible with the // right-hand side type. The caller is responsible for adjusting // LHSType so that the resulting expression does not have reference // type. if (const ReferenceType *LHSTypeRef = LHSType->getAs<ReferenceType>()) { if (Context.typesAreCompatible(LHSTypeRef->getPointeeType(), RHSType)) { Kind = CK_LValueBitCast; return Compatible; } return Incompatible; } // Allow scalar to ExtVector assignments, and assignments of an ExtVector type // to the same ExtVector type. if (LHSType->isExtVectorType()) { if (RHSType->isExtVectorType()) return Incompatible; if (RHSType->isArithmeticType()) { // CK_VectorSplat does T -> vector T, so first cast to the // element type. QualType elType = cast<ExtVectorType>(LHSType)->getElementType(); if (elType != RHSType) { Kind = PrepareScalarCast(RHS, elType); RHS = ImpCastExprToType(RHS.get(), elType, Kind); } Kind = CK_VectorSplat; return Compatible; } } // Conversions to or from vector type. if (LHSType->isVectorType() || RHSType->isVectorType()) { if (LHSType->isVectorType() && RHSType->isVectorType()) { // Allow assignments of an AltiVec vector type to an equivalent GCC // vector type and vice versa if (Context.areCompatibleVectorTypes(LHSType, RHSType)) { Kind = CK_BitCast; return Compatible; } // If we are allowing lax vector conversions, and LHS and RHS are both // vectors, the total size only needs to be the same. This is a bitcast; // no bits are changed but the result type is different. if (isLaxVectorConversion(RHSType, LHSType)) { Kind = CK_BitCast; return IncompatibleVectors; } } return Incompatible; } // Arithmetic conversions. if (LHSType->isArithmeticType() && RHSType->isArithmeticType() && !(getLangOpts().CPlusPlus && LHSType->isEnumeralType())) { Kind = PrepareScalarCast(RHS, LHSType); return Compatible; } // Conversions to normal pointers. if (const PointerType *LHSPointer = dyn_cast<PointerType>(LHSType)) { // U* -> T* if (isa<PointerType>(RHSType)) { unsigned AddrSpaceL = LHSPointer->getPointeeType().getAddressSpace(); unsigned AddrSpaceR = RHSType->getPointeeType().getAddressSpace(); Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast; return checkPointerTypesForAssignment(*this, LHSType, RHSType); } // int -> T* if (RHSType->isIntegerType()) { Kind = CK_IntegralToPointer; // FIXME: null? 
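// e.g. (illustration, not upstream): 'int *p = 42;' in C reaches this path;
// it is reported as IntToPointer but still formed with an
// integral-to-pointer cast.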
return IntToPointer; } // C pointers are not compatible with ObjC object pointers, // with two exceptions: if (isa<ObjCObjectPointerType>(RHSType)) { // - conversions to void* if (LHSPointer->getPointeeType()->isVoidType()) { Kind = CK_BitCast; return Compatible; } // - conversions from 'Class' to the redefinition type if (RHSType->isObjCClassType() && Context.hasSameType(LHSType, Context.getObjCClassRedefinitionType())) { Kind = CK_BitCast; return Compatible; } Kind = CK_BitCast; return IncompatiblePointer; } // U^ -> void* if (RHSType->getAs<BlockPointerType>()) { if (LHSPointer->getPointeeType()->isVoidType()) { Kind = CK_BitCast; return Compatible; } } return Incompatible; } // Conversions to block pointers. if (isa<BlockPointerType>(LHSType)) { // U^ -> T^ if (RHSType->isBlockPointerType()) { Kind = CK_BitCast; return checkBlockPointerTypesForAssignment(*this, LHSType, RHSType); } // int or null -> T^ if (RHSType->isIntegerType()) { Kind = CK_IntegralToPointer; // FIXME: null return IntToBlockPointer; } // id -> T^ if (getLangOpts().ObjC1 && RHSType->isObjCIdType()) { Kind = CK_AnyPointerToBlockPointerCast; return Compatible; } // void* -> T^ if (const PointerType *RHSPT = RHSType->getAs<PointerType>()) if (RHSPT->getPointeeType()->isVoidType()) { Kind = CK_AnyPointerToBlockPointerCast; return Compatible; } return Incompatible; } // Conversions to Objective-C pointers. if (isa<ObjCObjectPointerType>(LHSType)) { // A* -> B* if (RHSType->isObjCObjectPointerType()) { Kind = CK_BitCast; Sema::AssignConvertType result = checkObjCPointerTypesForAssignment(*this, LHSType, RHSType); if (getLangOpts().ObjCAutoRefCount && result == Compatible && !CheckObjCARCUnavailableWeakConversion(OrigLHSType, RHSType)) result = IncompatibleObjCWeakRef; return result; } // int or null -> A* if (RHSType->isIntegerType()) { Kind = CK_IntegralToPointer; // FIXME: null return IntToPointer; } // In general, C pointers are not compatible with ObjC object pointers, // with two exceptions: if (isa<PointerType>(RHSType)) { Kind = CK_CPointerToObjCPointerCast; // - conversions from 'void*' if (RHSType->isVoidPointerType()) { return Compatible; } // - conversions to 'Class' from its redefinition type if (LHSType->isObjCClassType() && Context.hasSameType(RHSType, Context.getObjCClassRedefinitionType())) { return Compatible; } return IncompatiblePointer; } // Only under strict conditions is T^ compatible with an Objective-C pointer. if (RHSType->isBlockPointerType() && LHSType->isBlockCompatibleObjCPointerType(Context)) { maybeExtendBlockObject(RHS); Kind = CK_BlockPointerToObjCPointerCast; return Compatible; } return Incompatible; } // Conversions from pointers that are not covered by the above. if (isa<PointerType>(RHSType)) { // T* -> _Bool if (LHSType == Context.BoolTy) { Kind = CK_PointerToBoolean; return Compatible; } // T* -> int if (LHSType->isIntegerType()) { Kind = CK_PointerToIntegral; return PointerToInt; } return Incompatible; } // Conversions from Objective-C pointers that are not covered by the above.
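// e.g. (illustration): given 'id obj;', '_Bool b = obj;' takes the
// pointer-to-boolean path below, while 'long n = obj;' is diagnosed as
// PointerToInt.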
if (isa<ObjCObjectPointerType>(RHSType)) { // T* -> _Bool if (LHSType == Context.BoolTy) { Kind = CK_PointerToBoolean; return Compatible; } // T* -> int if (LHSType->isIntegerType()) { Kind = CK_PointerToIntegral; return PointerToInt; } return Incompatible; } // struct A -> struct B if (isa<TagType>(LHSType) && isa<TagType>(RHSType)) { if (Context.typesAreCompatible(LHSType, RHSType)) { Kind = CK_NoOp; return Compatible; } } return Incompatible; } /// \brief Constructs a transparent union from an expression that is /// used to initialize the transparent union. static void ConstructTransparentUnion(Sema &S, ASTContext &C, ExprResult &EResult, QualType UnionType, FieldDecl *Field) { // Build an initializer list that designates the appropriate member // of the transparent union. Expr *E = EResult.get(); InitListExpr *Initializer = new (C) InitListExpr(C, SourceLocation(), E, SourceLocation()); Initializer->setType(UnionType); Initializer->setInitializedFieldInUnion(Field); // Build a compound literal constructing a value of the transparent // union type from this initializer list. TypeSourceInfo *unionTInfo = C.getTrivialTypeSourceInfo(UnionType); EResult = new (C) CompoundLiteralExpr(SourceLocation(), unionTInfo, UnionType, VK_RValue, Initializer, false); } Sema::AssignConvertType Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS) { QualType RHSType = RHS.get()->getType(); // If the ArgType is a Union type, we want to handle a potential // transparent_union GCC extension. const RecordType *UT = ArgType->getAsUnionType(); if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>()) return Incompatible; // The field to initialize within the transparent union. RecordDecl *UD = UT->getDecl(); FieldDecl *InitField = nullptr; // It's compatible if the expression matches any of the fields. for (auto *it : UD->fields()) { if (it->getType()->isPointerType()) { // If the transparent union contains a pointer type, we allow: // 1) void pointer // 2) null pointer constant if (RHSType->isPointerType()) if (RHSType->castAs<PointerType>()->getPointeeType()->isVoidType()) { RHS = ImpCastExprToType(RHS.get(), it->getType(), CK_BitCast); InitField = it; break; } if (RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { RHS = ImpCastExprToType(RHS.get(), it->getType(), CK_NullToPointer); InitField = it; break; } } CastKind Kind = CK_Invalid; if (CheckAssignmentConstraints(it->getType(), RHS, Kind) == Compatible) { RHS = ImpCastExprToType(RHS.get(), it->getType(), Kind); InitField = it; break; } } if (!InitField) return Incompatible; ConstructTransparentUnion(*this, Context, RHS, ArgType, InitField); return Compatible; } Sema::AssignConvertType Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose, bool DiagnoseCFAudited) { // HLSL Change Starts if (getLangOpts().HLSL) { // implicit conversion will take care of diagnostics. return Compatible; } // HLSL Change Ends if (getLangOpts().CPlusPlus) { if (!LHSType->isRecordType() && !LHSType->isAtomicType()) { // C++ 5.17p3: If the left operand is not of class type, the // expression is implicitly converted (C++ 4) to the // cv-unqualified type of the left operand. 
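// e.g. (illustration): for 'int i; double d;' in C++, 'i = d;' converts the
// RHS to 'int' right here instead of falling through to the C paths below.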
ExprResult Res; if (Diagnose) { Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(), AA_Assigning); } else { ImplicitConversionSequence ICS = TryImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(), /*SuppressUserConversions=*/false, /*AllowExplicit=*/false, /*InOverloadResolution=*/false, /*CStyle=*/false, /*AllowObjCWritebackConversion=*/false); if (ICS.isFailure()) return Incompatible; Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(), ICS, AA_Assigning); } if (Res.isInvalid()) return Incompatible; Sema::AssignConvertType result = Compatible; if (getLangOpts().ObjCAutoRefCount && !CheckObjCARCUnavailableWeakConversion(LHSType, RHS.get()->getType())) result = IncompatibleObjCWeakRef; RHS = Res; return result; } // FIXME: Currently, we fall through and treat C++ classes like C // structures. // FIXME: We also fall through for atomics; not sure what should // happen there, though. } // C99 6.5.16.1p1: the left operand is a pointer and the right is // a null pointer constant. if ((LHSType->isPointerType() || LHSType->isObjCObjectPointerType() || LHSType->isBlockPointerType()) && RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { CastKind Kind; CXXCastPath Path; CheckPointerConversion(RHS.get(), LHSType, Kind, Path, false); RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path); return Compatible; } // This check seems unnatural; however, it is necessary to ensure the proper // conversion of functions/arrays. If the conversion were done for all // DeclExpr's (created by ActOnIdExpression), it would mess up the unary // expressions that suppress this implicit conversion (&, sizeof). // // Suppress this for references: C++ 8.5.3p5. if (!LHSType->isReferenceType()) { RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return Incompatible; } Expr *PRE = RHS.get()->IgnoreParenCasts(); if (ObjCProtocolExpr *OPE = dyn_cast<ObjCProtocolExpr>(PRE)) { ObjCProtocolDecl *PDecl = OPE->getProtocol(); if (PDecl && !PDecl->hasDefinition()) { Diag(PRE->getExprLoc(), diag::warn_atprotocol_protocol) << PDecl->getName(); Diag(PDecl->getLocation(), diag::note_entity_declared_at) << PDecl; } } CastKind Kind = CK_Invalid; Sema::AssignConvertType result = CheckAssignmentConstraints(LHSType, RHS, Kind); // C99 6.5.16.1p2: The value of the right operand is converted to the // type of the assignment expression. // CheckAssignmentConstraints allows the left-hand side to be a reference, // so that we can use references in built-in functions even in C. // The getNonLValueExprType() call makes sure that the resulting expression // does not have reference type.
if (result != Incompatible && RHS.get()->getType() != LHSType) { QualType Ty = LHSType.getNonLValueExprType(Context); Expr *E = RHS.get(); if (getLangOpts().ObjCAutoRefCount) CheckObjCARCConversion(SourceRange(), Ty, E, CCK_ImplicitConversion, DiagnoseCFAudited); if (getLangOpts().ObjC1 && (CheckObjCBridgeRelatedConversions(E->getLocStart(), LHSType, E->getType(), E) || ConversionToObjCStringLiteralCheck(LHSType, E))) { RHS = E; return Compatible; } RHS = ImpCastExprToType(E, Ty, Kind); } return result; } QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS) { Diag(Loc, diag::err_typecheck_invalid_operands) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } /// Try to convert a value of non-vector type to a vector type by converting /// the type to the element type of the vector and then performing a splat. /// If the language is OpenCL, we only use conversions that promote scalar /// rank; for C, Obj-C, and C++ we allow any real scalar conversion except /// for float->int. /// /// \param scalar - if non-null, actually perform the conversions /// \return true if the operation fails (but without diagnosing the failure) static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar, QualType scalarTy, QualType vectorEltTy, QualType vectorTy) { // The conversion to apply to the scalar before splatting it, // if necessary. CastKind scalarCast = CK_Invalid; if (vectorEltTy->isIntegralType(S.Context)) { if (!scalarTy->isIntegralType(S.Context)) return true; if (S.getLangOpts().OpenCL && S.Context.getIntegerTypeOrder(vectorEltTy, scalarTy) < 0) return true; scalarCast = CK_IntegralCast; } else if (vectorEltTy->isRealFloatingType()) { if (scalarTy->isRealFloatingType()) { if (S.getLangOpts().OpenCL && S.Context.getFloatingTypeOrder(vectorEltTy, scalarTy) < 0) return true; scalarCast = CK_FloatingCast; } else if (scalarTy->isIntegralType(S.Context)) scalarCast = CK_IntegralToFloating; else return true; } else { return true; } // Adjust scalar if desired. if (scalar) { if (scalarCast != CK_Invalid) *scalar = S.ImpCastExprToType(scalar->get(), vectorEltTy, scalarCast); *scalar = S.ImpCastExprToType(scalar->get(), vectorTy, CK_VectorSplat); } return false; } QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversions) { if (!IsCompAssign) { LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); // For conversion purposes, we ignore any qualifiers. // For example, "const float" and "float" are equivalent. QualType LHSType = LHS.get()->getType().getUnqualifiedType(); QualType RHSType = RHS.get()->getType().getUnqualifiedType(); const VectorType *LHSVecType = LHSType->getAs<VectorType>(); const VectorType *RHSVecType = RHSType->getAs<VectorType>(); assert(LHSVecType || RHSVecType); // AltiVec-style "vector bool op vector bool" combinations are allowed // for some operators but not others. if (!AllowBothBool && LHSVecType && LHSVecType->getVectorKind() == VectorType::AltiVecBool && RHSVecType && RHSVecType->getVectorKind() == VectorType::AltiVecBool) return InvalidOperands(Loc, LHS, RHS); // If the vector types are identical, return. if (Context.hasSameType(LHSType, RHSType)) return LHSType; // If we have compatible AltiVec and GCC vector types, use the AltiVec type. 
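// e.g. (illustration): an AltiVec '__vector int' and a GCC-style
// 'int __attribute__((vector_size(16)))' share a layout, so one operand is
// bitcast rather than the operation being rejected.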
if (LHSVecType && RHSVecType && Context.areCompatibleVectorTypes(LHSType, RHSType)) { if (isa<ExtVectorType>(LHSVecType)) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return LHSType; } if (!IsCompAssign) LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast); return RHSType; } // AllowBoolConversions says that bool and non-bool AltiVec vectors // can be mixed, with the result being the non-bool type. The non-bool // operand must have integer element type. if (AllowBoolConversions && LHSVecType && RHSVecType && LHSVecType->getNumElements() == RHSVecType->getNumElements() && (Context.getTypeSize(LHSVecType->getElementType()) == Context.getTypeSize(RHSVecType->getElementType()))) { if (LHSVecType->getVectorKind() == VectorType::AltiVecVector && LHSVecType->getElementType()->isIntegerType() && RHSVecType->getVectorKind() == VectorType::AltiVecBool) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return LHSType; } if (!IsCompAssign && LHSVecType->getVectorKind() == VectorType::AltiVecBool && RHSVecType->getVectorKind() == VectorType::AltiVecVector && RHSVecType->getElementType()->isIntegerType()) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast); return RHSType; } } // If there's an ext-vector type and a scalar, try to convert the scalar to // the vector element type and splat. if (!RHSVecType && isa<ExtVectorType>(LHSVecType)) { if (!tryVectorConvertAndSplat(*this, &RHS, RHSType, LHSVecType->getElementType(), LHSType)) return LHSType; } if (!LHSVecType && isa<ExtVectorType>(RHSVecType)) { if (!tryVectorConvertAndSplat(*this, (IsCompAssign ? nullptr : &LHS), LHSType, RHSVecType->getElementType(), RHSType)) return RHSType; } // If we're allowing lax vector conversions, only the total (data) size // needs to be the same. // FIXME: Should we really be allowing this? // FIXME: We really just pick the LHS type arbitrarily? if (isLaxVectorConversion(RHSType, LHSType)) { QualType resultType = LHSType; RHS = ImpCastExprToType(RHS.get(), resultType, CK_BitCast); return resultType; } // Okay, the expression is invalid. // If there's a non-vector, non-real operand, diagnose that. if ((!RHSVecType && !RHSType->isRealType()) || (!LHSVecType && !LHSType->isRealType())) { Diag(Loc, diag::err_typecheck_vector_not_convertable_non_scalar) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // Otherwise, use the generic diagnostic. Diag(Loc, diag::err_typecheck_vector_not_convertable) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // checkArithmeticNull - Detect when a NULL constant is used improperly in an // expression. These are mainly cases where the null pointer is used as an // integer instead of a pointer. static void checkArithmeticNull(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompare) { // The canonical way to check for a GNU null is with isNullPointerConstant, // but we use a bit of a hack here for speed; this is a relatively // hot path, and isNullPointerConstant is slow. bool LHSNull = isa<GNUNullExpr>(LHS.get()->IgnoreParenImpCasts()); bool RHSNull = isa<GNUNullExpr>(RHS.get()->IgnoreParenImpCasts()); QualType NonNullType = LHSNull ? RHS.get()->getType() : LHS.get()->getType(); // Avoid analyzing cases where the result will either be invalid (and // diagnosed as such) or entirely valid and not something to warn about. 
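// e.g. (illustration): 'NULL + 1' with GNU '__null' uses the null constant
// as an integer and warrants a warning, while 'p == NULL' is filtered out
// below as entirely valid.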
if ((!LHSNull && !RHSNull) || NonNullType->isBlockPointerType() || NonNullType->isMemberPointerType() || NonNullType->isFunctionType()) return; // Comparison operations would not make sense with a null pointer no matter // what the other expression is. if (!IsCompare) { S.Diag(Loc, diag::warn_null_in_arithmetic_operation) << (LHSNull ? LHS.get()->getSourceRange() : SourceRange()) << (RHSNull ? RHS.get()->getSourceRange() : SourceRange()); return; } // The rest of the operations only make sense with a null pointer // if the other expression is a pointer. if (LHSNull == RHSNull || NonNullType->isAnyPointerType() || NonNullType->canDecayToPointerType()) return; S.Diag(Loc, diag::warn_null_in_comparison_operation) << LHSNull /* LHS is NULL */ << NonNullType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDiv) { checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/false); QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (compType.isNull() || !compType->isArithmeticType()) return InvalidOperands(Loc, LHS, RHS); // Check for division by zero. llvm::APSInt RHSValue; if (IsDiv && !RHS.get()->isValueDependent() && RHS.get()->EvaluateAsInt(RHSValue, Context) && RHSValue == 0) DiagRuntimeBehavior(Loc, RHS.get(), PDiag(diag::warn_division_by_zero) << RHS.get()->getSourceRange()); return compType; } QualType Sema::CheckRemainderOperands( ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) { checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { if (LHS.get()->getType()->hasIntegerRepresentation() && RHS.get()->getType()->hasIntegerRepresentation()) return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/false); return InvalidOperands(Loc, LHS, RHS); } QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); if (compType.isNull() || !compType->isIntegerType()) return InvalidOperands(Loc, LHS, RHS); // Check for remainder by zero. llvm::APSInt RHSValue; if (!RHS.get()->isValueDependent() && RHS.get()->EvaluateAsInt(RHSValue, Context) && RHSValue == 0) DiagRuntimeBehavior(Loc, RHS.get(), PDiag(diag::warn_remainder_by_zero) << RHS.get()->getSourceRange()); return compType; } /// \brief Diagnose invalid arithmetic on two void pointers. static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc, Expr *LHSExpr, Expr *RHSExpr) { S.Diag(Loc, S.getLangOpts().CPlusPlus ? diag::err_typecheck_pointer_arith_void_type : diag::ext_gnu_void_ptr) << 1 /* two pointers */ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); } /// \brief Diagnose invalid arithmetic on a void pointer. static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc, Expr *Pointer) { S.Diag(Loc, S.getLangOpts().CPlusPlus ? diag::err_typecheck_pointer_arith_void_type : diag::ext_gnu_void_ptr) << 0 /* one pointer */ << Pointer->getSourceRange(); } /// \brief Diagnose invalid arithmetic on two function pointers. 
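/// e.g. (illustration): 'void (*pf)(void), (*pg)(void); pf - pg;' — a GNU
/// extension in C (sizeof a function is treated as 1), an error in C++.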
static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc, Expr *LHS, Expr *RHS) { assert(LHS->getType()->isAnyPointerType()); assert(RHS->getType()->isAnyPointerType()); S.Diag(Loc, S.getLangOpts().CPlusPlus ? diag::err_typecheck_pointer_arith_function_type : diag::ext_gnu_ptr_func_arith) << 1 /* two pointers */ << LHS->getType()->getPointeeType() // We only show the second type if it differs from the first. << (unsigned)!S.Context.hasSameUnqualifiedType(LHS->getType(), RHS->getType()) << RHS->getType()->getPointeeType() << LHS->getSourceRange() << RHS->getSourceRange(); } /// \brief Diagnose invalid arithmetic on a function pointer. static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc, Expr *Pointer) { assert(Pointer->getType()->isAnyPointerType()); S.Diag(Loc, S.getLangOpts().CPlusPlus ? diag::err_typecheck_pointer_arith_function_type : diag::ext_gnu_ptr_func_arith) << 0 /* one pointer */ << Pointer->getType()->getPointeeType() << 0 /* one pointer, so only one type */ << Pointer->getSourceRange(); } /// \brief Emit error if Operand is incomplete pointer type /// /// \returns True if pointer has incomplete type static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc, Expr *Operand) { QualType ResType = Operand->getType(); if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>()) ResType = ResAtomicType->getValueType(); assert(ResType->isAnyPointerType() && !ResType->isDependentType()); QualType PointeeTy = ResType->getPointeeType(); return S.RequireCompleteType(Loc, PointeeTy, diag::err_typecheck_arithmetic_incomplete_type, PointeeTy, Operand->getSourceRange()); } /// \brief Check the validity of an arithmetic pointer operand. /// /// If the operand has pointer type, this code will check for pointer types /// which are invalid in arithmetic operations. These will be diagnosed /// appropriately, including whether or not the use is supported as an /// extension. /// /// \returns True when the operand is valid to use (even if as an extension). static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc, Expr *Operand) { QualType ResType = Operand->getType(); if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>()) ResType = ResAtomicType->getValueType(); if (!ResType->isAnyPointerType()) return true; QualType PointeeTy = ResType->getPointeeType(); if (PointeeTy->isVoidType()) { diagnoseArithmeticOnVoidPointer(S, Loc, Operand); return !S.getLangOpts().CPlusPlus; } if (PointeeTy->isFunctionType()) { diagnoseArithmeticOnFunctionPointer(S, Loc, Operand); return !S.getLangOpts().CPlusPlus; } if (checkArithmeticIncompletePointerType(S, Loc, Operand)) return false; return true; } /// \brief Check the validity of a binary arithmetic operation w.r.t. pointer /// operands. /// /// This routine will diagnose any invalid arithmetic on pointer operands much /// like \see checkArithmeticOpPointerOperand. However, it has special logic /// for emitting a single diagnostic even for operations where both LHS and RHS /// are (potentially problematic) pointers. /// /// \returns True when the operand is valid to use (even if as an extension). 
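/// e.g. (illustration): 'void *p, *q; p - q;' emits a single combined
/// "two void pointers" diagnostic here instead of one per operand.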
static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc, Expr *LHSExpr, Expr *RHSExpr) { bool isLHSPointer = LHSExpr->getType()->isAnyPointerType(); bool isRHSPointer = RHSExpr->getType()->isAnyPointerType(); if (!isLHSPointer && !isRHSPointer) return true; QualType LHSPointeeTy, RHSPointeeTy; if (isLHSPointer) LHSPointeeTy = LHSExpr->getType()->getPointeeType(); if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType(); // if both are pointers check if operation is valid wrt address spaces if (isLHSPointer && isRHSPointer) { const PointerType *lhsPtr = LHSExpr->getType()->getAs<PointerType>(); const PointerType *rhsPtr = RHSExpr->getType()->getAs<PointerType>(); if (!lhsPtr->isAddressSpaceOverlapping(*rhsPtr)) { S.Diag(Loc, diag::err_typecheck_op_on_nonoverlapping_address_space_pointers) << LHSExpr->getType() << RHSExpr->getType() << 1 /*arithmetic op*/ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); return false; } } // Check for arithmetic on pointers to incomplete types. bool isLHSVoidPtr = isLHSPointer && LHSPointeeTy->isVoidType(); bool isRHSVoidPtr = isRHSPointer && RHSPointeeTy->isVoidType(); if (isLHSVoidPtr || isRHSVoidPtr) { if (!isRHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, LHSExpr); else if (!isLHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, RHSExpr); else diagnoseArithmeticOnTwoVoidPointers(S, Loc, LHSExpr, RHSExpr); return !S.getLangOpts().CPlusPlus; } bool isLHSFuncPtr = isLHSPointer && LHSPointeeTy->isFunctionType(); bool isRHSFuncPtr = isRHSPointer && RHSPointeeTy->isFunctionType(); if (isLHSFuncPtr || isRHSFuncPtr) { if (!isRHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, LHSExpr); else if (!isLHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, RHSExpr); else diagnoseArithmeticOnTwoFunctionPointers(S, Loc, LHSExpr, RHSExpr); return !S.getLangOpts().CPlusPlus; } if (isLHSPointer && checkArithmeticIncompletePointerType(S, Loc, LHSExpr)) return false; if (isRHSPointer && checkArithmeticIncompletePointerType(S, Loc, RHSExpr)) return false; return true; } /// diagnoseStringPlusInt - Emit a warning when adding an integer to a string /// literal. static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { StringLiteral* StrExpr = dyn_cast<StringLiteral>(LHSExpr->IgnoreImpCasts()); Expr* IndexExpr = RHSExpr; if (!StrExpr) { StrExpr = dyn_cast<StringLiteral>(RHSExpr->IgnoreImpCasts()); IndexExpr = LHSExpr; } bool IsStringPlusInt = StrExpr && IndexExpr->getType()->isIntegralOrUnscopedEnumerationType(); if (!IsStringPlusInt || IndexExpr->isValueDependent()) return; llvm::APSInt index; if (IndexExpr->EvaluateAsInt(index, Self.getASTContext())) { unsigned StrLenWithNull = StrExpr->getLength() + 1; if (index.isNonNegative() && index <= llvm::APSInt(llvm::APInt(index.getBitWidth(), StrLenWithNull), index.isUnsigned())) return; } SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd()); Self.Diag(OpLoc, diag::warn_string_plus_int) << DiagRange << IndexExpr->IgnoreImpCasts()->getType(); // Only print a fixit for "str" + int, not for int + "str". 
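// e.g. (illustration): for '"abc" + 2' the note below suggests '&"abc"[2]';
// no rewrite of that shape exists for '2 + "abc"', so no fixit is attached.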
if (IndexExpr == RHSExpr) { SourceLocation EndLoc = Self.PP.getLocForEndOfToken(RHSExpr->getLocEnd()); Self.Diag(OpLoc, diag::note_string_plus_scalar_silence) << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&") << FixItHint::CreateReplacement(SourceRange(OpLoc), "[") << FixItHint::CreateInsertion(EndLoc, "]"); } else Self.Diag(OpLoc, diag::note_string_plus_scalar_silence); } /// \brief Emit a warning when adding a char literal to a string. static void diagnoseStringPlusChar(Sema &Self, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { const Expr *StringRefExpr = LHSExpr; const CharacterLiteral *CharExpr = dyn_cast<CharacterLiteral>(RHSExpr->IgnoreImpCasts()); if (!CharExpr) { CharExpr = dyn_cast<CharacterLiteral>(LHSExpr->IgnoreImpCasts()); StringRefExpr = RHSExpr; } if (!CharExpr || !StringRefExpr) return; const QualType StringType = StringRefExpr->getType(); // Return if not a PointerType. if (!StringType->isAnyPointerType()) return; // Return if not a CharacterType. if (!StringType->getPointeeType()->isAnyCharacterType()) return; ASTContext &Ctx = Self.getASTContext(); SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd()); const QualType CharType = CharExpr->getType(); if (!CharType->isAnyCharacterType() && CharType->isIntegerType() && llvm::isUIntN(Ctx.getCharWidth(), CharExpr->getValue())) { Self.Diag(OpLoc, diag::warn_string_plus_char) << DiagRange << Ctx.CharTy; } else { Self.Diag(OpLoc, diag::warn_string_plus_char) << DiagRange << CharExpr->getType(); } // Only print a fixit for str + char, not for char + str. if (isa<CharacterLiteral>(RHSExpr->IgnoreImpCasts())) { SourceLocation EndLoc = Self.PP.getLocForEndOfToken(RHSExpr->getLocEnd()); Self.Diag(OpLoc, diag::note_string_plus_scalar_silence) << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&") << FixItHint::CreateReplacement(SourceRange(OpLoc), "[") << FixItHint::CreateInsertion(EndLoc, "]"); } else { Self.Diag(OpLoc, diag::note_string_plus_scalar_silence); } } /// \brief Emit error when two pointers are incompatible. static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc, Expr *LHSExpr, Expr *RHSExpr) { assert(LHSExpr->getType()->isAnyPointerType()); assert(RHSExpr->getType()->isAnyPointerType()); S.Diag(Loc, diag::err_typecheck_sub_ptr_compatible) << LHSExpr->getType() << RHSExpr->getType() << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); } QualType Sema::CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy) { checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { QualType compType = CheckVectorOperands( LHS, RHS, Loc, CompLHSTy, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/getLangOpts().ZVector); if (CompLHSTy) *CompLHSTy = compType; return compType; } QualType compType = UsualArithmeticConversions(LHS, RHS, CompLHSTy); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); // Diagnose "string literal" '+' int and string '+' "char literal". if (Opc == BO_Add) { diagnoseStringPlusInt(*this, Loc, LHS.get(), RHS.get()); diagnoseStringPlusChar(*this, Loc, LHS.get(), RHS.get()); } // handle the common case first (both operands are arithmetic). if (!compType.isNull() && compType->isArithmeticType()) { if (CompLHSTy) *CompLHSTy = compType; return compType; } // Type-checking. Ultimately the pointer's going to be in PExp; // note that we bias towards the LHS being the pointer. 
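// e.g. (illustration): both 'p + 1' and '1 + p' are normalized below so
// that PExp holds the pointer and IExp the integer operand.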
Expr *PExp = LHS.get(), *IExp = RHS.get(); bool isObjCPointer; if (PExp->getType()->isPointerType()) { isObjCPointer = false; } else if (PExp->getType()->isObjCObjectPointerType()) { isObjCPointer = true; } else { std::swap(PExp, IExp); if (PExp->getType()->isPointerType()) { isObjCPointer = false; } else if (PExp->getType()->isObjCObjectPointerType()) { isObjCPointer = true; } else { return InvalidOperands(Loc, LHS, RHS); } } assert(PExp->getType()->isAnyPointerType()); if (!IExp->getType()->isIntegerType()) return InvalidOperands(Loc, LHS, RHS); if (!checkArithmeticOpPointerOperand(*this, Loc, PExp)) return QualType(); if (isObjCPointer && checkArithmeticOnObjCPointer(*this, Loc, PExp)) return QualType(); // Check array bounds for pointer arithmetic CheckArrayAccess(PExp, IExp); if (CompLHSTy) { QualType LHSTy = Context.isPromotableBitField(LHS.get()); if (LHSTy.isNull()) { LHSTy = LHS.get()->getType(); if (LHSTy->isPromotableIntegerType()) LHSTy = Context.getPromotedIntegerType(LHSTy); } *CompLHSTy = LHSTy; } return PExp->getType(); } // C99 6.5.6 QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy) { checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { QualType compType = CheckVectorOperands( LHS, RHS, Loc, CompLHSTy, /*AllowBothBool*/getLangOpts().AltiVec, /*AllowBoolConversions*/getLangOpts().ZVector); if (CompLHSTy) *CompLHSTy = compType; return compType; } QualType compType = UsualArithmeticConversions(LHS, RHS, CompLHSTy); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); // Enforce type constraints: C99 6.5.6p3. // Handle the common case first (both operands are arithmetic). if (!compType.isNull() && compType->isArithmeticType()) { if (CompLHSTy) *CompLHSTy = compType; return compType; } // Either ptr - int or ptr - ptr. if (LHS.get()->getType()->isAnyPointerType()) { QualType lpointee = LHS.get()->getType()->getPointeeType(); // Diagnose bad cases where we step over interface counts. if (LHS.get()->getType()->isObjCObjectPointerType() && checkArithmeticOnObjCPointer(*this, Loc, LHS.get())) return QualType(); // The result type of a pointer-int computation is the pointer type. if (RHS.get()->getType()->isIntegerType()) { if (!checkArithmeticOpPointerOperand(*this, Loc, LHS.get())) return QualType(); // Check array bounds for pointer arithmetic CheckArrayAccess(LHS.get(), RHS.get(), /*ArraySubscriptExpr*/nullptr, /*AllowOnePastEnd*/true, /*IndexNegated*/true); if (CompLHSTy) *CompLHSTy = LHS.get()->getType(); return LHS.get()->getType(); } // Handle pointer-pointer subtractions. if (const PointerType *RHSPTy = RHS.get()->getType()->getAs<PointerType>()) { QualType rpointee = RHSPTy->getPointeeType(); if (getLangOpts().CPlusPlus) { // Pointee types must be the same: C++ [expr.add] if (!Context.hasSameUnqualifiedType(lpointee, rpointee)) { diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get()); } } else { // Pointee types must be compatible C99 6.5.6p3 if (!Context.typesAreCompatible( Context.getCanonicalType(lpointee).getUnqualifiedType(), Context.getCanonicalType(rpointee).getUnqualifiedType())) { diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get()); return QualType(); } } if (!checkArithmeticBinOpPointerOperands(*this, Loc, LHS.get(), RHS.get())) return QualType(); // The pointee type may have zero size.
As an extension, a structure or // union may have zero size or an array may have zero length. In this // case subtraction does not make sense. if (!rpointee->isVoidType() && !rpointee->isFunctionType()) { CharUnits ElementSize = Context.getTypeSizeInChars(rpointee); if (ElementSize.isZero()) { Diag(Loc,diag::warn_sub_ptr_zero_size_types) << rpointee.getUnqualifiedType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } } if (CompLHSTy) *CompLHSTy = LHS.get()->getType(); return Context.getPointerDiffType(); } } return InvalidOperands(Loc, LHS, RHS); } static bool isScopedEnumerationType(QualType T) { if (const EnumType *ET = T->getAs<EnumType>()) return ET->getDecl()->isScoped(); return false; } static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType LHSType) { // OpenCL 6.3j: shift values are effectively % word size of LHS (more defined), // so skip remaining warnings as we don't want to modify values within Sema. if (S.getLangOpts().OpenCL) return; llvm::APSInt Right; // Check right/shifter operand if (RHS.get()->isValueDependent() || !RHS.get()->EvaluateAsInt(Right, S.Context)) return; if (Right.isNegative()) { S.DiagRuntimeBehavior(Loc, RHS.get(), S.PDiag(diag::warn_shift_negative) << RHS.get()->getSourceRange()); return; } llvm::APInt LeftBits(Right.getBitWidth(), S.Context.getTypeSize(LHS.get()->getType())); if (Right.uge(LeftBits)) { S.DiagRuntimeBehavior(Loc, RHS.get(), S.PDiag(diag::warn_shift_gt_typewidth) << RHS.get()->getSourceRange()); return; } if (Opc != BO_Shl) return; // When left shifting an ICE which is signed, we can check for overflow which // according to C++ has undefined behavior ([expr.shift] 5.8/2). Unsigned // integers have defined behavior modulo one more than the maximum value // representable in the result type, so never warn for those. llvm::APSInt Left; if (LHS.get()->isValueDependent() || LHSType->hasUnsignedIntegerRepresentation() || !LHS.get()->EvaluateAsInt(Left, S.Context)) return; // If the LHS is signed and its value is negative, the behavior is // undefined. Warn about it. if (Left.isNegative()) { S.DiagRuntimeBehavior(Loc, LHS.get(), S.PDiag(diag::warn_shift_lhs_negative) << LHS.get()->getSourceRange()); return; } llvm::APInt ResultBits = static_cast<llvm::APInt&>(Right) + Left.getMinSignedBits(); if (LeftBits.uge(ResultBits)) return; llvm::APSInt Result = Left.extend(ResultBits.getLimitedValue()); Result = Result.shl(Right); // Print the bit representation of the signed integer as an unsigned // hexadecimal number. SmallString<40> HexResult; Result.toString(HexResult, 16, /*Signed =*/false, /*Literal =*/true); // If we are only missing a sign bit, this is less likely to result in actual // bugs -- if the result is cast back to an unsigned type, it will have the // expected value. Thus we place this behind a different warning that can be // turned off separately if needed. if (LeftBits == ResultBits - 1) { S.Diag(Loc, diag::warn_shift_result_sets_sign_bit) << HexResult << LHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return; } S.Diag(Loc, diag::warn_shift_result_gt_typewidth) << HexResult.str() << Result.getMinSignedBits() << LHSType << Left.getBitWidth() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } /// \brief Return the resulting type when an OpenCL vector is shifted /// by a scalar or vector shift amount.
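/// e.g. (illustration, OpenCL): with 'int4 v;', 'v << 2' first splats the
/// scalar to an int4, while 'v << w' for 'int4 w' shifts component-wise.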
static QualType checkOpenCLVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) { // OpenCL v1.1 s6.3.j says RHS can be a vector only if LHS is a vector. if (!LHS.get()->getType()->isVectorType()) { S.Diag(Loc, diag::err_shift_rhs_only_vector) << RHS.get()->getType() << LHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } if (!IsCompAssign) { LHS = S.UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); } RHS = S.UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); QualType LHSType = LHS.get()->getType(); const VectorType *LHSVecTy = LHSType->getAs<VectorType>(); QualType LHSEleType = LHSVecTy->getElementType(); // Note that RHS might not be a vector. QualType RHSType = RHS.get()->getType(); const VectorType *RHSVecTy = RHSType->getAs<VectorType>(); QualType RHSEleType = RHSVecTy ? RHSVecTy->getElementType() : RHSType; // OpenCL v1.1 s6.3.j says that the operands need to be integers. if (!LHSEleType->isIntegerType()) { S.Diag(Loc, diag::err_typecheck_expect_int) << LHS.get()->getType() << LHS.get()->getSourceRange(); return QualType(); } if (!RHSEleType->isIntegerType()) { S.Diag(Loc, diag::err_typecheck_expect_int) << RHS.get()->getType() << RHS.get()->getSourceRange(); return QualType(); } if (RHSVecTy) { // OpenCL v1.1 s6.3.j says that for vector types, the operators // are applied component-wise. So if RHS is a vector, then ensure // that the number of elements is the same as LHS... if (RHSVecTy->getNumElements() != LHSVecTy->getNumElements()) { S.Diag(Loc, diag::err_typecheck_vector_lengths_not_equal) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } } else { // ...else expand RHS to match the number of elements in LHS. QualType VecTy = S.Context.getExtVectorType(RHSEleType, LHSVecTy->getNumElements()); RHS = S.ImpCastExprToType(RHS.get(), VecTy, CK_VectorSplat); } return LHSType; } // C99 6.5.7 QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign) { checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false); // Vector shifts promote their scalar inputs to vector type. if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { if (LangOpts.OpenCL) return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign); if (LangOpts.ZVector) { // The shift operators for the z vector extensions work basically // like OpenCL shifts, except that neither the LHS nor the RHS is // allowed to be a "vector bool". if (auto LHSVecType = LHS.get()->getType()->getAs<VectorType>()) if (LHSVecType->getVectorKind() == VectorType::AltiVecBool) return InvalidOperands(Loc, LHS, RHS); if (auto RHSVecType = RHS.get()->getType()->getAs<VectorType>()) if (RHSVecType->getVectorKind() == VectorType::AltiVecBool) return InvalidOperands(Loc, LHS, RHS); return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign); } return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, /*AllowBothBool*/true, /*AllowBoolConversions*/false); } // Shifts don't perform usual arithmetic conversions, they just do integer // promotions on each operand. C99 6.5.7p3 // For the LHS, do usual unary conversions, but then reset them away // if this is a compound assignment. 
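// e.g. (illustration): in 'short s; s <<= 1;' the LHS is promoted to 'int'
// only to compute the result type; the store still targets the 'short'.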
ExprResult OldLHS = LHS; LHS = UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); QualType LHSType = LHS.get()->getType(); if (IsCompAssign) LHS = OldLHS; // The RHS is simpler. RHS = UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); QualType RHSType = RHS.get()->getType(); // C99 6.5.7p2: Each of the operands shall have integer type. if (!LHSType->hasIntegerRepresentation() || !RHSType->hasIntegerRepresentation()) return InvalidOperands(Loc, LHS, RHS); // C++0x: Don't allow scoped enums. FIXME: Use something better than // hasIntegerRepresentation() above instead of this. if (isScopedEnumerationType(LHSType) || isScopedEnumerationType(RHSType)) { return InvalidOperands(Loc, LHS, RHS); } // Sanity-check shift operands DiagnoseBadShiftValues(*this, LHS, RHS, Loc, Opc, LHSType); // "The type of the result is that of the promoted left operand." return LHSType; } static bool IsWithinTemplateSpecialization(Decl *D) { if (DeclContext *DC = D->getDeclContext()) { if (isa<ClassTemplateSpecializationDecl>(DC)) return true; if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) return FD->isFunctionTemplateSpecialization(); } return false; } /// If two different enums are compared, raise a warning. static void checkEnumComparison(Sema &S, SourceLocation Loc, Expr *LHS, Expr *RHS) { QualType LHSStrippedType = LHS->IgnoreParenImpCasts()->getType(); QualType RHSStrippedType = RHS->IgnoreParenImpCasts()->getType(); const EnumType *LHSEnumType = LHSStrippedType->getAs<EnumType>(); if (!LHSEnumType) return; const EnumType *RHSEnumType = RHSStrippedType->getAs<EnumType>(); if (!RHSEnumType) return; // Ignore anonymous enums. if (!LHSEnumType->getDecl()->getIdentifier()) return; if (!RHSEnumType->getDecl()->getIdentifier()) return; if (S.Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType)) return; S.Diag(Loc, diag::warn_comparison_of_mixed_enum_types) << LHSStrippedType << RHSStrippedType << LHS->getSourceRange() << RHS->getSourceRange(); } /// \brief Diagnose bad pointer comparisons. static void diagnoseDistinctPointerComparison(Sema &S, SourceLocation Loc, ExprResult &LHS, ExprResult &RHS, bool IsError) { S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_distinct_pointers : diag::ext_typecheck_comparison_of_distinct_pointers) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } /// \brief Returns false if the pointers are converted to a composite type, /// true otherwise. static bool convertPointersToCompositeType(Sema &S, SourceLocation Loc, ExprResult &LHS, ExprResult &RHS) { // C++ [expr.rel]p2: // [...] Pointer conversions (4.10) and qualification // conversions (4.4) are performed on pointer operands (or on // a pointer operand and a null pointer constant) to bring // them to their composite pointer type. [...] // // C++ [expr.eq]p1 uses the same notion for (in)equality // comparisons of pointers. // C++ [expr.eq]p2: // In addition, pointers to members can be compared, or a pointer to // member and a null pointer constant. Pointer to member conversions // (4.11) and qualification conversions (4.4) are performed to bring // them to a common type. If one operand is a null pointer constant, // the common type is the type of the other operand. Otherwise, the // common type is a pointer to member type similar (4.4) to the type // of one of the operands, with a cv-qualification signature (4.4) // that is the union of the cv-qualification signatures of the operand // types. 
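// e.g. (illustration): comparing 'const int *' with 'volatile int *' brings
// both operands to the composite type 'const volatile int *'.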
QualType LHSType = LHS.get()->getType(); QualType RHSType = RHS.get()->getType(); assert((LHSType->isPointerType() && RHSType->isPointerType()) || (LHSType->isMemberPointerType() && RHSType->isMemberPointerType())); bool NonStandardCompositeType = false; bool *BoolPtr = S.isSFINAEContext() ? nullptr : &NonStandardCompositeType; QualType T = S.FindCompositePointerType(Loc, LHS, RHS, BoolPtr); if (T.isNull()) { diagnoseDistinctPointerComparison(S, Loc, LHS, RHS, /*isError*/true); return true; } if (NonStandardCompositeType) S.Diag(Loc, diag::ext_typecheck_comparison_of_distinct_pointers_nonstandard) << LHSType << RHSType << T << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); LHS = S.ImpCastExprToType(LHS.get(), T, CK_BitCast); RHS = S.ImpCastExprToType(RHS.get(), T, CK_BitCast); return false; } static void diagnoseFunctionPointerToVoidComparison(Sema &S, SourceLocation Loc, ExprResult &LHS, ExprResult &RHS, bool IsError) { S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_fptr_to_void : diag::ext_typecheck_comparison_of_fptr_to_void) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } #if 1 // HLSL Change Starts Sema::ObjCLiteralKind Sema::CheckLiteralKind(Expr *FromE) { return LK_None; } #else static bool isObjCObjectLiteral(ExprResult &E) { switch (E.get()->IgnoreParenImpCasts()->getStmtClass()) { case Stmt::ObjCArrayLiteralClass: case Stmt::ObjCDictionaryLiteralClass: case Stmt::ObjCStringLiteralClass: case Stmt::ObjCBoxedExprClass: return true; default: // Note that ObjCBoolLiteral is NOT an object literal! return false; } } static bool hasIsEqualMethod(Sema &S, const Expr *LHS, const Expr *RHS) { const ObjCObjectPointerType *Type = LHS->getType()->getAs<ObjCObjectPointerType>(); // If this is not actually an Objective-C object, bail out. if (!Type) return false; // Get the LHS object's interface type. QualType InterfaceType = Type->getPointeeType(); // If the RHS isn't an Objective-C object, bail out. if (!RHS->getType()->isObjCObjectPointerType()) return false; // Try to find the -isEqual: method. Selector IsEqualSel = S.NSAPIObj->getIsEqualSelector(); ObjCMethodDecl *Method = S.LookupMethodInObjectType(IsEqualSel, InterfaceType, /*instance=*/true); if (!Method) { if (Type->isObjCIdType()) { // For 'id', just check the global pool. Method = S.LookupInstanceMethodInGlobalPool(IsEqualSel, SourceRange(), /*receiverId=*/true); } else { // Check protocols. 
Method = S.LookupMethodInQualifiedType(IsEqualSel, Type, /*instance=*/true); } } if (!Method) return false; QualType T = Method->parameters()[0]->getType(); if (!T->isObjCObjectPointerType()) return false; QualType R = Method->getReturnType(); if (!R->isScalarType()) return false; return true; } Sema::ObjCLiteralKind Sema::CheckLiteralKind(Expr *FromE) { FromE = FromE->IgnoreParenImpCasts(); switch (FromE->getStmtClass()) { default: break; case Stmt::ObjCStringLiteralClass: // "string literal" return LK_String; case Stmt::ObjCArrayLiteralClass: // "array literal" return LK_Array; case Stmt::ObjCDictionaryLiteralClass: // "dictionary literal" return LK_Dictionary; case Stmt::BlockExprClass: return LK_Block; case Stmt::ObjCBoxedExprClass: { Expr *Inner = cast<ObjCBoxedExpr>(FromE)->getSubExpr()->IgnoreParens(); switch (Inner->getStmtClass()) { case Stmt::IntegerLiteralClass: case Stmt::FloatingLiteralClass: case Stmt::CharacterLiteralClass: case Stmt::ObjCBoolLiteralExprClass: case Stmt::CXXBoolLiteralExprClass: // "numeric literal" return LK_Numeric; case Stmt::ImplicitCastExprClass: { CastKind CK = cast<CastExpr>(Inner)->getCastKind(); // Boolean literals can be represented by implicit casts. if (CK == CK_IntegralToBoolean || CK == CK_IntegralCast) return LK_Numeric; break; } default: break; } return LK_Boxed; } } return LK_None; } static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc, ExprResult &LHS, ExprResult &RHS, BinaryOperator::Opcode Opc){ Expr *Literal; Expr *Other; if (isObjCObjectLiteral(LHS)) { Literal = LHS.get(); Other = RHS.get(); } else { Literal = RHS.get(); Other = LHS.get(); } // Don't warn on comparisons against nil. Other = Other->IgnoreParenCasts(); if (Other->isNullPointerConstant(S.getASTContext(), Expr::NPC_ValueDependentIsNotNull)) return; // This should be kept in sync with warn_objc_literal_comparison. // LK_String should always be after the other literals, since it has its own // warning flag. Sema::ObjCLiteralKind LiteralKind = S.CheckLiteralKind(Literal); assert(LiteralKind != Sema::LK_Block); if (LiteralKind == Sema::LK_None) { llvm_unreachable("Unknown Objective-C object literal kind"); } if (LiteralKind == Sema::LK_String) S.Diag(Loc, diag::warn_objc_string_literal_comparison) << Literal->getSourceRange(); else S.Diag(Loc, diag::warn_objc_literal_comparison) << LiteralKind << Literal->getSourceRange(); if (BinaryOperator::isEqualityOp(Opc) && hasIsEqualMethod(S, LHS.get(), RHS.get())) { SourceLocation Start = LHS.get()->getLocStart(); SourceLocation End = S.PP.getLocForEndOfToken(RHS.get()->getLocEnd()); CharSourceRange OpRange = CharSourceRange::getCharRange(Loc, S.PP.getLocForEndOfToken(Loc)); S.Diag(Loc, diag::note_objc_literal_comparison_isequal) << FixItHint::CreateInsertion(Start, Opc == BO_EQ ? "[" : "![") << FixItHint::CreateReplacement(OpRange, " isEqual:") << FixItHint::CreateInsertion(End, "]"); } } #endif // HLSL Change Ends static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc) { // This checking requires bools. if (!S.getLangOpts().Bool) return; // Check that left hand side is !something. UnaryOperator *UO = dyn_cast<UnaryOperator>(LHS.get()->IgnoreImpCasts()); if (!UO || UO->getOpcode() != UO_LNot) return; // Only check if the right hand side is non-bool arithmetic type. if (RHS.get()->getType()->isBooleanType()) return; // Make sure that the something in !something is not bool. 
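// e.g. (illustration): '!x < 3' with an integer 'x' warns here (the user
// likely meant '!(x < 3)'), while '!b < 3' with a bool 'b' stays silent.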
Expr *SubExpr = UO->getSubExpr()->IgnoreImpCasts(); if (SubExpr->getType()->isBooleanType()) return; // Emit warning. S.Diag(UO->getOperatorLoc(), diag::warn_logical_not_on_lhs_of_comparison) << Loc; // First note suggests !(x < y) SourceLocation FirstOpen = SubExpr->getLocStart(); SourceLocation FirstClose = RHS.get()->getLocEnd(); FirstClose = S.getPreprocessor().getLocForEndOfToken(FirstClose); if (FirstClose.isInvalid()) FirstOpen = SourceLocation(); S.Diag(UO->getOperatorLoc(), diag::note_logical_not_fix) << FixItHint::CreateInsertion(FirstOpen, "(") << FixItHint::CreateInsertion(FirstClose, ")"); // Second note suggests (!x) < y SourceLocation SecondOpen = LHS.get()->getLocStart(); SourceLocation SecondClose = LHS.get()->getLocEnd(); SecondClose = S.getPreprocessor().getLocForEndOfToken(SecondClose); if (SecondClose.isInvalid()) SecondOpen = SourceLocation(); S.Diag(UO->getOperatorLoc(), diag::note_logical_not_silence_with_parens) << FixItHint::CreateInsertion(SecondOpen, "(") << FixItHint::CreateInsertion(SecondClose, ")"); } // Get the decl for a simple expression: a reference to a variable, // an implicit C++ field reference, or an implicit ObjC ivar reference. static ValueDecl *getCompareDecl(Expr *E) { if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(E)) return DR->getDecl(); if (ObjCIvarRefExpr* Ivar = dyn_cast<ObjCIvarRefExpr>(E)) { if (Ivar->isFreeIvar()) return Ivar->getDecl(); } if (MemberExpr* Mem = dyn_cast<MemberExpr>(E)) { if (Mem->isImplicitAccess()) return Mem->getMemberDecl(); } return nullptr; } // C99 6.5.8, C++ [expr.rel] QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool IsRelational) { checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/true); BinaryOperatorKind Opc = (BinaryOperatorKind) OpaqueOpc; // Handle vector comparisons separately. if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorCompareOperands(LHS, RHS, Loc, IsRelational); QualType LHSType = LHS.get()->getType(); QualType RHSType = RHS.get()->getType(); Expr *LHSStripped = LHS.get()->IgnoreParenImpCasts(); Expr *RHSStripped = RHS.get()->IgnoreParenImpCasts(); checkEnumComparison(*this, Loc, LHS.get(), RHS.get()); diagnoseLogicalNotOnLHSofComparison(*this, LHS, RHS, Loc, OpaqueOpc); if (!LHSType->hasFloatingRepresentation() && !(LHSType->isBlockPointerType() && IsRelational) && !LHS.get()->getLocStart().isMacroID() && !RHS.get()->getLocStart().isMacroID() && ActiveTemplateInstantiations.empty()) { // For non-floating point types, check for self-comparisons of the form // x == x, x != x, x < x, etc. These always evaluate to a constant, and // often indicate logic errors in the program. // // NOTE: Don't warn about comparison expressions resulting from macro // expansion. Also don't warn about comparisons which are only self // comparisons within a template specialization. The warnings should catch // obvious cases in the definition of the template anyway. The idea is to // warn when the typed comparison operator will always evaluate to the same // result.
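// e.g. (illustration): 'x == x' always yields true, and comparing two
// distinct arrays such as 'arr1 < arr2' always evaluates to a constant;
// both usually indicate a typo.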
ValueDecl *DL = getCompareDecl(LHSStripped); ValueDecl *DR = getCompareDecl(RHSStripped); if (DL && DR && DL == DR && !IsWithinTemplateSpecialization(DL)) { DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_comparison_always) << 0 // self- << (Opc == BO_EQ || Opc == BO_LE || Opc == BO_GE)); } else if (DL && DR && LHSType->isArrayType() && RHSType->isArrayType() && !DL->getType()->isReferenceType() && !DR->getType()->isReferenceType()) { // what is it always going to eval to? char always_evals_to; switch (Opc) { case BO_EQ: // e.g. array1 == array2 always_evals_to = 0; // false break; case BO_NE: // e.g. array1 != array2 always_evals_to = 1; // true break; default: // best we can say is 'a constant' always_evals_to = 2; // e.g. array1 <= array2 break; } DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_comparison_always) << 1 // array << always_evals_to); } if (isa<CastExpr>(LHSStripped)) LHSStripped = LHSStripped->IgnoreParenCasts(); if (isa<CastExpr>(RHSStripped)) RHSStripped = RHSStripped->IgnoreParenCasts(); // Warn about comparisons against a string constant (unless the other // operand is null); the user probably wants strcmp. Expr *literalString = nullptr; Expr *literalStringStripped = nullptr; if ((isa<StringLiteral>(LHSStripped) || isa<ObjCEncodeExpr>(LHSStripped)) && !RHSStripped->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { literalString = LHS.get(); literalStringStripped = LHSStripped; } else if ((isa<StringLiteral>(RHSStripped) || isa<ObjCEncodeExpr>(RHSStripped)) && !LHSStripped->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { literalString = RHS.get(); literalStringStripped = RHSStripped; } if (literalString) { DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_stringcompare) << isa<ObjCEncodeExpr>(literalStringStripped) << literalString->getSourceRange()); } } // C99 6.5.8p3 / C99 6.5.9p4 UsualArithmeticConversions(LHS, RHS); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); LHSType = LHS.get()->getType(); RHSType = RHS.get()->getType(); // The result of comparisons is 'bool' in C++, 'int' in C. QualType ResultTy = Context.getLogicalOperationType(); if (IsRelational) { if (LHSType->isRealType() && RHSType->isRealType()) return ResultTy; } else { // Check for comparisons of floating point operands using != and ==. if (LHSType->hasFloatingRepresentation()) CheckFloatComparison(Loc, LHS.get(), RHS.get()); if (LHSType->isArithmeticType() && RHSType->isArithmeticType()) return ResultTy; } const Expr::NullPointerConstantKind LHSNullKind = LHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull); const Expr::NullPointerConstantKind RHSNullKind = RHS.get()->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull); bool LHSIsNull = LHSNullKind != Expr::NPCK_NotNull; bool RHSIsNull = RHSNullKind != Expr::NPCK_NotNull; if (!IsRelational && LHSIsNull != RHSIsNull) { bool IsEquality = Opc == BO_EQ; if (RHSIsNull) DiagnoseAlwaysNonNullPointer(LHS.get(), RHSNullKind, IsEquality, RHS.get()->getSourceRange()); else DiagnoseAlwaysNonNullPointer(RHS.get(), LHSNullKind, IsEquality, LHS.get()->getSourceRange()); } // All of the following pointer-related warnings are GCC extensions, except // when handling null pointer constants.
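// Illustrative note (added commentary, not from the original source): the
// pointer cases below cover, e.g.:
//   int *ip; float *fp;  ip == fp;   // distinct pointee types: warning in C,
//                                    // composite-type conversion in C++
//   void (*f)(); void *vp; f == vp;  // function vs. void* equality: GCC
//                                    // compatibility extension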
if (LHSType->isPointerType() && RHSType->isPointerType()) { // C99 6.5.8p2 QualType LCanPointeeTy = LHSType->castAs<PointerType>()->getPointeeType().getCanonicalType(); QualType RCanPointeeTy = RHSType->castAs<PointerType>()->getPointeeType().getCanonicalType(); if (getLangOpts().CPlusPlus) { if (LCanPointeeTy == RCanPointeeTy) return ResultTy; if (!IsRelational && (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) { // Valid unless comparison between non-null pointer and function pointer // This is a gcc extension compatibility comparison. // In a SFINAE context, we treat this as a hard error to maintain // conformance with the C++ standard. if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType()) && !LHSIsNull && !RHSIsNull) { diagnoseFunctionPointerToVoidComparison( *this, Loc, LHS, RHS, /*isError*/ (bool)isSFINAEContext()); if (isSFINAEContext()) return QualType(); RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return ResultTy; } } if (convertPointersToCompositeType(*this, Loc, LHS, RHS)) return QualType(); else return ResultTy; } // C99 6.5.9p2 and C99 6.5.8p2 if (Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(), RCanPointeeTy.getUnqualifiedType())) { // Valid unless a relational comparison of function pointers if (IsRelational && LCanPointeeTy->isFunctionType()) { Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } } else if (!IsRelational && (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) { // Valid unless comparison between non-null pointer and function pointer if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType()) && !LHSIsNull && !RHSIsNull) diagnoseFunctionPointerToVoidComparison(*this, Loc, LHS, RHS, /*isError*/false); } else { // Invalid diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false); } if (LCanPointeeTy != RCanPointeeTy) { const PointerType *lhsPtr = LHSType->getAs<PointerType>(); if (!lhsPtr->isAddressSpaceOverlapping(*RHSType->getAs<PointerType>())) { Diag(Loc, diag::err_typecheck_op_on_nonoverlapping_address_space_pointers) << LHSType << RHSType << 0 /* comparison */ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } unsigned AddrSpaceL = LCanPointeeTy.getAddressSpace(); unsigned AddrSpaceR = RCanPointeeTy.getAddressSpace(); CastKind Kind = AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast; if (LHSIsNull && !RHSIsNull) LHS = ImpCastExprToType(LHS.get(), RHSType, Kind); else RHS = ImpCastExprToType(RHS.get(), LHSType, Kind); } return ResultTy; } if (getLangOpts().CPlusPlus) { // Comparison of nullptr_t with itself. if (LHSType->isNullPtrType() && RHSType->isNullPtrType()) return ResultTy; // Comparison of pointers with null pointer constants and equality // comparisons of member pointers to null pointer constants. if (RHSIsNull && ((LHSType->isAnyPointerType() || LHSType->isNullPtrType()) || (!IsRelational && (LHSType->isMemberPointerType() || LHSType->isBlockPointerType())))) { RHS = ImpCastExprToType(RHS.get(), LHSType, LHSType->isMemberPointerType() ? CK_NullToMemberPointer : CK_NullToPointer); return ResultTy; } if (LHSIsNull && ((RHSType->isAnyPointerType() || RHSType->isNullPtrType()) || (!IsRelational && (RHSType->isMemberPointerType() || RHSType->isBlockPointerType())))) { LHS = ImpCastExprToType(LHS.get(), RHSType, RHSType->isMemberPointerType() ? 
CK_NullToMemberPointer : CK_NullToPointer); return ResultTy; } // Comparison of member pointers. if (!IsRelational && LHSType->isMemberPointerType() && RHSType->isMemberPointerType()) { if (convertPointersToCompositeType(*this, Loc, LHS, RHS)) return QualType(); else return ResultTy; } // Handle scoped enumeration types specifically, since they don't promote // to integers. if (LHS.get()->getType()->isEnumeralType() && Context.hasSameUnqualifiedType(LHS.get()->getType(), RHS.get()->getType())) return ResultTy; } // Handle block pointer types. if (!IsRelational && LHSType->isBlockPointerType() && RHSType->isBlockPointerType()) { QualType lpointee = LHSType->castAs<BlockPointerType>()->getPointeeType(); QualType rpointee = RHSType->castAs<BlockPointerType>()->getPointeeType(); if (!LHSIsNull && !RHSIsNull && !Context.typesAreCompatible(lpointee, rpointee)) { Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return ResultTy; } // Allow block pointers to be compared with null pointer constants. if (!IsRelational && ((LHSType->isBlockPointerType() && RHSType->isPointerType()) || (LHSType->isPointerType() && RHSType->isBlockPointerType()))) { if (!LHSIsNull && !RHSIsNull) { if (!((RHSType->isPointerType() && RHSType->castAs<PointerType>() ->getPointeeType()->isVoidType()) || (LHSType->isPointerType() && LHSType->castAs<PointerType>() ->getPointeeType()->isVoidType()))) Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); } if (LHSIsNull && !RHSIsNull) LHS = ImpCastExprToType(LHS.get(), RHSType, RHSType->isPointerType() ? CK_BitCast : CK_AnyPointerToBlockPointerCast); else RHS = ImpCastExprToType(RHS.get(), LHSType, LHSType->isPointerType() ? CK_BitCast : CK_AnyPointerToBlockPointerCast); return ResultTy; } if (LHSType->isObjCObjectPointerType() || RHSType->isObjCObjectPointerType()) { const PointerType *LPT = LHSType->getAs<PointerType>(); const PointerType *RPT = RHSType->getAs<PointerType>(); if (LPT || RPT) { bool LPtrToVoid = LPT ? LPT->getPointeeType()->isVoidType() : false; bool RPtrToVoid = RPT ? RPT->getPointeeType()->isVoidType() : false; if (!LPtrToVoid && !RPtrToVoid && !Context.typesAreCompatible(LHSType, RHSType)) { diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false); } if (LHSIsNull && !RHSIsNull) { Expr *E = LHS.get(); #if 0 // HLSL Change - no ObjC support if (getLangOpts().ObjCAutoRefCount) CheckObjCARCConversion(SourceRange(), RHSType, E, CCK_ImplicitConversion); #endif // HLSL Change LHS = ImpCastExprToType(E, RHSType, RPT ? CK_BitCast :CK_CPointerToObjCPointerCast); } else { Expr *E = RHS.get(); #if 0 // HLSL Change - no ObjC support if (getLangOpts().ObjCAutoRefCount) CheckObjCARCConversion(SourceRange(), LHSType, E, CCK_ImplicitConversion, false, Opc); #endif // HLSL Change RHS = ImpCastExprToType(E, LHSType, LPT ? 
CK_BitCast :CK_CPointerToObjCPointerCast); } return ResultTy; } #if 0 // HLSL Change - no ObjC support if (LHSType->isObjCObjectPointerType() && RHSType->isObjCObjectPointerType()) { if (!Context.areComparableObjCPointerTypes(LHSType, RHSType)) diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false); if (isObjCObjectLiteral(LHS) || isObjCObjectLiteral(RHS)) diagnoseObjCLiteralComparison(*this, Loc, LHS, RHS, Opc); if (LHSIsNull && !RHSIsNull) LHS = ImpCastExprToType(LHS.get(), RHSType, CK_BitCast); else RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast); return ResultTy; } #endif // HLSL Change } if ((LHSType->isAnyPointerType() && RHSType->isIntegerType()) || (LHSType->isIntegerType() && RHSType->isAnyPointerType())) { unsigned DiagID = 0; bool isError = false; if (LangOpts.DebuggerSupport) { // Under a debugger, allow the comparison of pointers to integers, // since users tend to want to compare addresses. } else if ((LHSIsNull && LHSType->isIntegerType()) || (RHSIsNull && RHSType->isIntegerType())) { if (IsRelational && !getLangOpts().CPlusPlus) DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero; } else if (IsRelational && !getLangOpts().CPlusPlus) DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer; else if (getLangOpts().CPlusPlus) { DiagID = diag::err_typecheck_comparison_of_pointer_integer; isError = true; } else DiagID = diag::ext_typecheck_comparison_of_pointer_integer; if (DiagID) { Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); if (isError) return QualType(); } if (LHSType->isIntegerType()) LHS = ImpCastExprToType(LHS.get(), RHSType, LHSIsNull ? CK_NullToPointer : CK_IntegralToPointer); else RHS = ImpCastExprToType(RHS.get(), LHSType, RHSIsNull ? CK_NullToPointer : CK_IntegralToPointer); return ResultTy; } // Handle block pointers. if (!IsRelational && RHSIsNull && LHSType->isBlockPointerType() && RHSType->isIntegerType()) { RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer); return ResultTy; } if (!IsRelational && LHSIsNull && LHSType->isIntegerType() && RHSType->isBlockPointerType()) { LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer); return ResultTy; } return InvalidOperands(Loc, LHS, RHS); } // Return a signed type that is of identical size and number of elements. // For floating point vectors, return an integer type of identical size // and number of elements. QualType Sema::GetSignedVectorType(QualType V) { const VectorType *VTy = V->getAs<VectorType>(); unsigned TypeSize = Context.getTypeSize(VTy->getElementType()); if (TypeSize == Context.getTypeSize(Context.CharTy)) return Context.getExtVectorType(Context.CharTy, VTy->getNumElements()); else if (TypeSize == Context.getTypeSize(Context.ShortTy)) return Context.getExtVectorType(Context.ShortTy, VTy->getNumElements()); else if (TypeSize == Context.getTypeSize(Context.IntTy)) return Context.getExtVectorType(Context.IntTy, VTy->getNumElements()); else if (TypeSize == Context.getTypeSize(Context.LongTy)) return Context.getExtVectorType(Context.LongTy, VTy->getNumElements()); assert(TypeSize == Context.getTypeSize(Context.LongLongTy) && "Unhandled vector element size in vector compare"); return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements()); } /// CheckVectorCompareOperands - vector comparisons are a clang extension that /// operates on extended vector types. 
Instead of producing an IntTy result, /// like a scalar comparison, a vector comparison produces a vector of integer /// types. QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsRelational) { // Check to make sure we're operating on vectors of the same type and width, // allowing one side to be a scalar of the element type. QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/getLangOpts().ZVector); if (vType.isNull()) return vType; QualType LHSType = LHS.get()->getType(); // If AltiVec, the comparison results in a numeric type, i.e. // bool for C++, int for C if (getLangOpts().AltiVec && vType->getAs<VectorType>()->getVectorKind() == VectorType::AltiVecVector) return Context.getLogicalOperationType(); // For non-floating point types, check for self-comparisons of the form // x == x, x != x, x < x, etc. These always evaluate to a constant, and // often indicate logic errors in the program. if (!LHSType->hasFloatingRepresentation() && ActiveTemplateInstantiations.empty()) { if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHS.get()->IgnoreParenImpCasts())) if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHS.get()->IgnoreParenImpCasts())) if (DRL->getDecl() == DRR->getDecl()) DiagRuntimeBehavior(Loc, nullptr, PDiag(diag::warn_comparison_always) << 0 // self- << 2 // "a constant" ); } // Check for comparisons of floating point operands using != and ==. if (!IsRelational && LHSType->hasFloatingRepresentation()) { assert(RHS.get()->getType()->hasFloatingRepresentation()); CheckFloatComparison(Loc, LHS.get(), RHS.get()); } // Return a signed type for the vector. return GetSignedVectorType(LHSType); } QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { // Ensure that either both operands are of the same vector type, or // one operand is of a vector type and the other is of its element type. QualType vType = CheckVectorOperands(LHS, RHS, Loc, false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); if (vType.isNull()) return InvalidOperands(Loc, LHS, RHS); if (getLangOpts().OpenCL && getLangOpts().OpenCLVersion < 120 && vType->hasFloatingRepresentation()) return InvalidOperands(Loc, LHS, RHS); return GetSignedVectorType(LHS.get()->getType()); } inline QualType Sema::CheckBitwiseOperands( ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) { checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false); if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) { if (LHS.get()->getType()->hasIntegerRepresentation() && RHS.get()->getType()->hasIntegerRepresentation()) return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign, /*AllowBothBool*/true, /*AllowBoolConversions*/getLangOpts().ZVector); return InvalidOperands(Loc, LHS, RHS); } ExprResult LHSResult = LHS, RHSResult = RHS; QualType compType = UsualArithmeticConversions(LHSResult, RHSResult, IsCompAssign); if (LHSResult.isInvalid() || RHSResult.isInvalid()) return QualType(); LHS = LHSResult.get(); RHS = RHSResult.get(); if (!compType.isNull() && compType->isIntegralOrUnscopedEnumerationType()) return compType; return InvalidOperands(Loc, LHS, RHS); } inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc) { // Check vector operands differently.
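// Illustrative note (added commentary, not from the original source): for
// ext_vector operands the result is a signed integer vector of matching
// element width rather than bool, e.g. with
//   typedef int int4 __attribute__((ext_vector_type(4)));
//   int4 a, b;
// the type of (a && b) is int4.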
if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType()) return CheckVectorLogicalOperands(LHS, RHS, Loc); // Diagnose cases where the user writes a logical and/or but probably meant a // bitwise one. We do this when the LHS is a non-bool integer and the RHS // is a constant. if (LHS.get()->getType()->isIntegerType() && !LHS.get()->getType()->isBooleanType() && RHS.get()->getType()->isIntegerType() && !RHS.get()->isValueDependent() && // Don't warn in macros or template instantiations. !Loc.isMacroID() && ActiveTemplateInstantiations.empty()) { // If the RHS can be constant folded, and if it constant folds to something // that isn't 0 or 1 (which indicate a potential logical operation that // happened to fold to true/false) then warn. // Parens on the RHS are ignored. llvm::APSInt Result; if (RHS.get()->EvaluateAsInt(Result, Context)) if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType() && !RHS.get()->getExprLoc().isMacroID()) || (Result != 0 && Result != 1)) { Diag(Loc, diag::warn_logical_instead_of_bitwise) << RHS.get()->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||"); // Suggest replacing the logical operator with the bitwise version Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator) << (Opc == BO_LAnd ? "&" : "|") << FixItHint::CreateReplacement(SourceRange( Loc, Lexer::getLocForEndOfToken(Loc, 0, getSourceManager(), getLangOpts())), Opc == BO_LAnd ? "&" : "|"); if (Opc == BO_LAnd) // Suggest replacing "Foo() && kNonZero" with "Foo()" Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant) << FixItHint::CreateRemoval( SourceRange( Lexer::getLocForEndOfToken(LHS.get()->getLocEnd(), 0, getSourceManager(), getLangOpts()), RHS.get()->getLocEnd())); } } if (!Context.getLangOpts().CPlusPlus) { // OpenCL v1.1 s6.3.g: The logical operators and (&&), or (||) do // not operate on the built-in scalar and vector float types. if (Context.getLangOpts().OpenCL && Context.getLangOpts().OpenCLVersion < 120) { if (LHS.get()->getType()->isFloatingType() || RHS.get()->getType()->isFloatingType()) return InvalidOperands(Loc, LHS, RHS); } LHS = UsualUnaryConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); RHS = UsualUnaryConversions(RHS.get()); if (RHS.isInvalid()) return QualType(); if (!LHS.get()->getType()->isScalarType() || !RHS.get()->getType()->isScalarType()) return InvalidOperands(Loc, LHS, RHS); return Context.IntTy; } // The following is safe because we only use this method for // non-overloadable operands. // C++ [expr.log.and]p1 // C++ [expr.log.or]p1 // The operands are both contextually converted to type bool. ExprResult LHSRes = PerformContextuallyConvertToBool(LHS.get()); if (LHSRes.isInvalid()) return InvalidOperands(Loc, LHS, RHS); LHS = LHSRes; ExprResult RHSRes = PerformContextuallyConvertToBool(RHS.get()); if (RHSRes.isInvalid()) return InvalidOperands(Loc, LHS, RHS); RHS = RHSRes; // C++ [expr.log.and]p2 // C++ [expr.log.or]p2 // The result is a bool. return Context.BoolTy; } static bool IsReadonlyMessage(Expr *E, Sema &S) { const MemberExpr *ME = dyn_cast<MemberExpr>(E); if (!ME) return false; if (!isa<FieldDecl>(ME->getMemberDecl())) return false; ObjCMessageExpr *Base = dyn_cast<ObjCMessageExpr>(ME->getBase()->IgnoreParenImpCasts()); if (!Base) return false; return Base->getMethodDecl() != nullptr; } /// Is the given expression (which must be 'const') a reference to a /// variable which was originally non-const, but which has become /// 'const' due to being captured within a block?
enum NonConstCaptureKind { NCCK_None, NCCK_Block, NCCK_Lambda }; static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) { assert(E->isLValue() && E->getType().isConstQualified()); E = E->IgnoreParens(); // Must be a reference to a declaration from an enclosing scope. DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E); if (!DRE) return NCCK_None; if (!DRE->refersToEnclosingVariableOrCapture()) return NCCK_None; // The declaration must be a variable which is not declared 'const'. VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl()); if (!var) return NCCK_None; if (var->getType().isConstQualified()) return NCCK_None; assert(var->hasLocalStorage() && "capture added 'const' to non-local?"); // Decide whether the first capture was for a block or a lambda. DeclContext *DC = S.CurContext, *Prev = nullptr; while (DC != var->getDeclContext()) { Prev = DC; DC = DC->getParent(); } // Unless we have an init-capture, we've gone one step too far. if (!var->isInitCapture()) DC = Prev; return (isa<BlockDecl>(DC) ? NCCK_Block : NCCK_Lambda); } static bool IsTypeModifiable(QualType Ty, bool IsDereference) { Ty = Ty.getNonReferenceType(); if (IsDereference && Ty->isPointerType()) Ty = Ty->getPointeeType(); return !Ty.isConstQualified(); } /// Emit the "read-only variable not assignable" error and print notes to give /// more information about why the variable is not assignable, such as pointing /// to the declaration of a const variable, showing that a method is const, or /// that the function is returning a const reference. static void DiagnoseConstAssignment(Sema &S, const Expr *E, SourceLocation Loc) { // Update err_typecheck_assign_const and note_typecheck_assign_const // when this enum is changed. enum { ConstFunction, ConstVariable, ConstMember, ConstMethod, ConstUnknown, // Keep as last element }; SourceRange ExprRange = E->getSourceRange(); // Only emit one error on the first const found. All other consts will emit // a note to the error. bool DiagnosticEmitted = false; // Track if the current expression is the result of a dereference, and if the // next checked expression is the result of a dereference. bool IsDereference = false; bool NextIsDereference = false; // Loop to process MemberExpr chains. while (true) { IsDereference = NextIsDereference; NextIsDereference = false; E = E->IgnoreParenImpCasts(); if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) { NextIsDereference = ME->isArrow(); const ValueDecl *VD = ME->getMemberDecl(); if (const FieldDecl *Field = dyn_cast<FieldDecl>(VD)) { // Mutable fields can be modified even if the class is const.
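// Illustrative note (added commentary, not from the original source):
//   struct S { mutable int Counter; };
//   void f(const S &s) { s.Counter = 1; }   // OK: no const diagnostic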
if (Field->isMutable()) { assert(DiagnosticEmitted && "Expected diagnostic not emitted."); break; } if (!IsTypeModifiable(Field->getType(), IsDereference)) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstMember << false /*static*/ << Field << Field->getType(); DiagnosticEmitted = true; } S.Diag(VD->getLocation(), diag::note_typecheck_assign_const) << ConstMember << false /*static*/ << Field << Field->getType() << Field->getSourceRange(); } E = ME->getBase(); continue; } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(VD)) { if (VDecl->getType().isConstQualified()) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstMember << true /*static*/ << VDecl << VDecl->getType(); DiagnosticEmitted = true; } S.Diag(VD->getLocation(), diag::note_typecheck_assign_const) << ConstMember << true /*static*/ << VDecl << VDecl->getType() << VDecl->getSourceRange(); } // Static fields do not inherit constness from parents. break; } break; } // End MemberExpr break; } if (const CallExpr *CE = dyn_cast<CallExpr>(E)) { // Function calls const FunctionDecl *FD = CE->getDirectCallee(); if (!IsTypeModifiable(FD->getReturnType(), IsDereference)) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstFunction << FD; DiagnosticEmitted = true; } S.Diag(FD->getReturnTypeSourceRange().getBegin(), diag::note_typecheck_assign_const) << ConstFunction << FD << FD->getReturnType() << FD->getReturnTypeSourceRange(); } } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { // Point to variable declaration. if (const ValueDecl *VD = DRE->getDecl()) { if (!IsTypeModifiable(VD->getType(), IsDereference)) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstVariable << VD << VD->getType(); DiagnosticEmitted = true; } S.Diag(VD->getLocation(), diag::note_typecheck_assign_const) << ConstVariable << VD << VD->getType() << VD->getSourceRange(); } } } else if (isa<CXXThisExpr>(E)) { if (const DeclContext *DC = S.getFunctionLevelDeclContext()) { if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC)) { if (MD->isConst()) { if (!DiagnosticEmitted) { S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstMethod << MD; DiagnosticEmitted = true; } S.Diag(MD->getLocation(), diag::note_typecheck_assign_const) << ConstMethod << MD << MD->getSourceRange(); } } } } if (DiagnosticEmitted) return; // Can't determine a more specific message, so display the generic error. S.Diag(Loc, diag::err_typecheck_assign_const) << ExprRange << ConstUnknown; } static bool HLSLCheckForModifiableLValue( Expr *E, SourceLocation Loc, Sema &S ) { assert(isa<CXXOperatorCallExpr>(E)); const CXXOperatorCallExpr *expr = cast<CXXOperatorCallExpr>(E); const Expr *LHS = expr->getArg(0); QualType qt = LHS->getType(); // Check modifying const matrix with double subscript operator calls if (isa<CXXOperatorCallExpr>(expr->getArg(0))) return HLSLCheckForModifiableLValue(const_cast<Expr *>(expr->getArg(0)), Loc, S); if (qt.isConstQualified() && (hlsl::IsMatrixType(&S, qt) || hlsl::IsVectorType(&S, qt))) { DiagnoseConstAssignment(S, LHS, Loc); return true; } if (!LHS->isLValue()) { S.Diag(Loc, diag::err_typecheck_expression_not_modifiable_lvalue); return true; } return false; } /// CheckForModifiableLvalue - Verify that E is a modifiable lvalue. If not, /// emit an error and return true. If so, return false. 
bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) { // HLSL Change: export this function assert(!E->hasPlaceholderType(BuiltinType::PseudoObject)); // HLSL Change Starts - check const for array subscript operator for HLSL vector/matrix if (S.Context.getLangOpts().HLSL && E->getStmtClass() == Stmt::CXXOperatorCallExprClass) { // check if it's a vector or matrix const CXXOperatorCallExpr *expr = cast<CXXOperatorCallExpr>(E); QualType qt = expr->getArg(0)->getType(); if ((hlsl::IsMatrixType(&S, qt) || hlsl::IsVectorType(&S, qt))) return HLSLCheckForModifiableLValue(E, Loc, S); } // HLSL Change Ends SourceLocation OrigLoc = Loc; Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(S.Context, &Loc); if (IsLV == Expr::MLV_ClassTemporary && IsReadonlyMessage(E, S)) IsLV = Expr::MLV_InvalidMessageExpression; if (IsLV == Expr::MLV_Valid) return false; unsigned DiagID = 0; bool NeedType = false; switch (IsLV) { // C99 6.5.16p2 case Expr::MLV_ConstQualified: // Use a specialized diagnostic when we're assigning to an object // from an enclosing function or block. if (NonConstCaptureKind NCCK = isReferenceToNonConstCapture(S, E)) { if (NCCK == NCCK_Block) DiagID = diag::err_block_decl_ref_not_modifiable_lvalue; else DiagID = diag::err_lambda_decl_ref_not_modifiable_lvalue; break; } // In ARC, use some specialized diagnostics for occasions where we // infer 'const'. These are always pseudo-strong variables. if (S.getLangOpts().ObjCAutoRefCount) { DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()); if (declRef && isa<VarDecl>(declRef->getDecl())) { VarDecl *var = cast<VarDecl>(declRef->getDecl()); // Use the normal diagnostic if it's pseudo-__strong but the // user actually wrote 'const'. if (var->isARCPseudoStrong() && (!var->getTypeSourceInfo() || !var->getTypeSourceInfo()->getType().isConstQualified())) { // There are two pseudo-strong cases: // - self ObjCMethodDecl *method = S.getCurMethodDecl(); if (method && var == method->getSelfDecl()) DiagID = method->isClassMethod() ? diag::err_typecheck_arc_assign_self_class_method : diag::err_typecheck_arc_assign_self; // - fast enumeration variables else DiagID = diag::err_typecheck_arr_assign_enumeration; SourceRange Assign; if (Loc != OrigLoc) Assign = SourceRange(OrigLoc, OrigLoc); S.Diag(Loc, DiagID) << E->getSourceRange() << Assign; // We need to preserve the AST regardless, so the migration tool // can do its job. return false; } } } // If none of the special cases above are triggered, then this is a // simple const assignment.
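// Illustrative note (added commentary, not from the original source):
//   const int x = 0; x = 1;   // err_typecheck_assign_const, with a note
//                             // pointing at the declaration of 'x'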
if (DiagID == 0) { DiagnoseConstAssignment(S, E, Loc); return true; } break; case Expr::MLV_ConstAddrSpace: DiagnoseConstAssignment(S, E, Loc); return true; case Expr::MLV_ArrayType: case Expr::MLV_ArrayTemporary: DiagID = diag::err_typecheck_array_not_modifiable_lvalue; NeedType = true; break; case Expr::MLV_NotObjectType: DiagID = diag::err_typecheck_non_object_not_modifiable_lvalue; NeedType = true; break; case Expr::MLV_LValueCast: DiagID = diag::err_typecheck_lvalue_casts_not_supported; break; case Expr::MLV_Valid: llvm_unreachable("did not take early return for MLV_Valid"); case Expr::MLV_InvalidExpression: case Expr::MLV_MemberFunction: case Expr::MLV_ClassTemporary: DiagID = diag::err_typecheck_expression_not_modifiable_lvalue; break; case Expr::MLV_IncompleteType: case Expr::MLV_IncompleteVoidType: return S.RequireCompleteType(Loc, E->getType(), diag::err_typecheck_incomplete_type_not_modifiable_lvalue, E); case Expr::MLV_DuplicateVectorComponents: DiagID = diag::err_typecheck_duplicate_vector_components_not_mlvalue; break; // HLSL Change Starts case Expr::MLV_DuplicateMatrixComponents: DiagID = diag::err_hlsl_typecheck_duplicate_matrix_components_not_mlvalue; break; // HLSL Change Ends case Expr::MLV_NoSetterProperty: llvm_unreachable("readonly properties should be processed differently"); case Expr::MLV_InvalidMessageExpression: DiagID = diag::error_readonly_message_assignment; break; case Expr::MLV_SubObjCPropertySetting: DiagID = diag::error_no_subobject_property_setting; break; } SourceRange Assign; if (Loc != OrigLoc) Assign = SourceRange(OrigLoc, OrigLoc); if (NeedType) S.Diag(Loc, DiagID) << E->getType() << E->getSourceRange() << Assign; else S.Diag(Loc, DiagID) << E->getSourceRange() << Assign; return true; } static void CheckIdentityFieldAssignment(Expr *LHSExpr, Expr *RHSExpr, SourceLocation Loc, Sema &Sema) { // C / C++ fields MemberExpr *ML = dyn_cast<MemberExpr>(LHSExpr); MemberExpr *MR = dyn_cast<MemberExpr>(RHSExpr); if (ML && MR && ML->getMemberDecl() == MR->getMemberDecl()) { if (isa<CXXThisExpr>(ML->getBase()) && isa<CXXThisExpr>(MR->getBase())) Sema.Diag(Loc, diag::warn_identity_field_assign) << 0; } // Objective-C instance variables ObjCIvarRefExpr *OL = dyn_cast<ObjCIvarRefExpr>(LHSExpr); ObjCIvarRefExpr *OR = dyn_cast<ObjCIvarRefExpr>(RHSExpr); if (OL && OR && OL->getDecl() == OR->getDecl()) { DeclRefExpr *RL = dyn_cast<DeclRefExpr>(OL->getBase()->IgnoreImpCasts()); DeclRefExpr *RR = dyn_cast<DeclRefExpr>(OR->getBase()->IgnoreImpCasts()); if (RL && RR && RL->getDecl() == RR->getDecl()) Sema.Diag(Loc, diag::warn_identity_field_assign) << 1; } } // C99 6.5.16.1 QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType) { assert(!LHSExpr->hasPlaceholderType(BuiltinType::PseudoObject)); // Verify that LHS is a modifiable lvalue, and emit error if not. if (CheckForModifiableLvalue(LHSExpr, Loc, *this)) return QualType(); QualType LHSType = LHSExpr->getType(); QualType RHSType = CompoundType.isNull() ? RHS.get()->getType() : CompoundType; AssignConvertType ConvTy; if (CompoundType.isNull()) { Expr *RHSCheck = RHS.get(); CheckIdentityFieldAssignment(LHSExpr, RHSCheck, Loc, *this); QualType LHSTy(LHSType); ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS); if (RHS.isInvalid()) return QualType(); // Special case of NSObject attributes on c-style pointer types. 
if (ConvTy == IncompatiblePointer && ((Context.isObjCNSObjectType(LHSType) && RHSType->isObjCObjectPointerType()) || (Context.isObjCNSObjectType(RHSType) && LHSType->isObjCObjectPointerType()))) ConvTy = Compatible; if (ConvTy == Compatible && LHSType->isObjCObjectType()) Diag(Loc, diag::err_objc_object_assignment) << LHSType; // If the RHS is a unary plus or minus, check to see if the = and + are // right next to each other. If so, the user may have typo'd "x =+ 4" // instead of "x += 4". if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RHSCheck)) RHSCheck = ICE->getSubExpr(); if (UnaryOperator *UO = dyn_cast<UnaryOperator>(RHSCheck)) { if ((UO->getOpcode() == UO_Plus || UO->getOpcode() == UO_Minus) && Loc.isFileID() && UO->getOperatorLoc().isFileID() && // Only if the two operators are exactly adjacent. Loc.getLocWithOffset(1) == UO->getOperatorLoc() && // And there is a space or other character before the subexpr of the // unary +/-. We don't want to warn on "x=-1". Loc.getLocWithOffset(2) != UO->getSubExpr()->getLocStart() && UO->getSubExpr()->getLocStart().isFileID()) { Diag(Loc, diag::warn_not_compound_assign) << (UO->getOpcode() == UO_Plus ? "+" : "-") << SourceRange(UO->getOperatorLoc(), UO->getOperatorLoc()); } } #if 0 // HLSL Change Starts if (ConvTy == Compatible) { if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong) { // Warn about retain cycles where a block captures the LHS, but // not if the LHS is a simple variable into which the block is // being stored...unless that variable can be captured by reference! const Expr *InnerLHS = LHSExpr->IgnoreParenCasts(); const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InnerLHS); if (!DRE || DRE->getDecl()->hasAttr<BlocksAttr>()) checkRetainCycles(LHSExpr, RHS.get()); // It is safe to assign a weak reference into a strong variable. // Although this code can still have problems: // id x = self.weakProp; // id y = self.weakProp; // we do not want to warn spuriously when 'x' and 'y' are on separate // paths through the function. This should be revisited if // -Wrepeated-use-of-weak is made flow-sensitive. if (!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, RHS.get()->getLocStart())) getCurFunction()->markSafeWeakUse(RHS.get()); } else if (getLangOpts().ObjCAutoRefCount) { checkUnsafeExprAssigns(Loc, LHSExpr, RHS.get()); } } #endif // HLSL Change Ends } else { // Compound assignment "x += y" ConvTy = CheckAssignmentConstraints(Loc, LHSType, RHSType); } if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType, RHS.get(), AA_Assigning)) return QualType(); CheckForNullPointerDereference(*this, LHSExpr); // C99 6.5.16p3: The type of an assignment expression is the type of the // left operand unless the left operand has qualified type, in which case // it is the unqualified version of the type of the left operand. // C99 6.5.16.1p2: In simple assignment, the value of the right operand // is converted to the type of the assignment expression (above). // C++ 5.17p1: the type of the assignment expression is that of its left // operand. return (getLangOpts().CPlusPlus ? LHSType : LHSType.getUnqualifiedType()); } // C99 6.5.17 static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS, SourceLocation Loc) { LHS = S.CheckPlaceholderExpr(LHS.get()); RHS = S.CheckPlaceholderExpr(RHS.get()); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); // C's comma performs lvalue conversion (C99 6.3.2.1) on both its // operands, but not unary promotions.
// C++'s comma does not do any conversions at all (C++ [expr.comma]p1). // So we treat the LHS as an ignored value, and in C++ we allow the // containing site to determine what should be done with the RHS. LHS = S.IgnoredValueConversions(LHS.get()); if (LHS.isInvalid()) return QualType(); S.DiagnoseUnusedExprResult(LHS.get()); if (!S.getLangOpts().CPlusPlus) { RHS = S.DefaultFunctionArrayLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); if (!RHS.get()->getType()->isVoidType()) S.RequireCompleteType(Loc, RHS.get()->getType(), diag::err_incomplete_type); } return RHS.get()->getType(); } /// CheckIncrementDecrementOperand - unlike most "Check" methods, this routine /// doesn't need to call UsualUnaryConversions or UsualArithmeticConversions. static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation OpLoc, bool IsInc, bool IsPrefix) { if (Op->isTypeDependent()) return S.Context.DependentTy; QualType ResType = Op->getType(); // Atomic types can be used for increment / decrement where the non-atomic // versions can, so ignore the _Atomic() specifier for the purpose of // checking. if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>()) ResType = ResAtomicType->getValueType(); assert(!ResType.isNull() && "no type for increment/decrement expression"); if (S.getLangOpts().CPlusPlus && ResType->isBooleanType()) { // Decrement of bool is not allowed. if (!IsInc) { S.Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange(); return QualType(); } // Increment of bool sets it to true, but is deprecated. S.Diag(OpLoc, diag::warn_increment_bool) << Op->getSourceRange(); } else if (S.getLangOpts().CPlusPlus && ResType->isEnumeralType()) { // Error on enum increments and decrements in C++ mode. S.Diag(OpLoc, diag::err_increment_decrement_enum) << IsInc << ResType; return QualType(); } else if (ResType->isRealType()) { // OK! } else if (ResType->isPointerType()) { // C99 6.5.2.4p2, 6.5.6p2 if (!checkArithmeticOpPointerOperand(S, OpLoc, Op)) return QualType(); } else if (ResType->isObjCObjectPointerType()) { // On modern runtimes, ObjC pointer arithmetic is forbidden. // Otherwise, we just need a complete type. if (checkArithmeticIncompletePointerType(S, OpLoc, Op) || checkArithmeticOnObjCPointer(S, OpLoc, Op)) return QualType(); } else if (ResType->isAnyComplexType()) { // C99 does not support ++/-- on complex types; we allow it as an extension. S.Diag(OpLoc, diag::ext_integer_increment_complex) << ResType << Op->getSourceRange(); } else if (ResType->isPlaceholderType()) { ExprResult PR = S.CheckPlaceholderExpr(Op); if (PR.isInvalid()) return QualType(); return CheckIncrementDecrementOperand(S, PR.get(), VK, OK, OpLoc, IsInc, IsPrefix); } else if (S.getLangOpts().AltiVec && ResType->isVectorType()) { // OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 ) } else if (S.getLangOpts().ZVector && ResType->isVectorType() && (ResType->getAs<VectorType>()->getVectorKind() != VectorType::AltiVecBool)) { // The z vector extensions allow ++ and -- for non-bool vectors. } else if (S.getLangOpts().OpenCL && ResType->isVectorType() && ResType->getAs<VectorType>()->getElementType()->isIntegerType()) { // OpenCL V1.2 6.3 says dec/inc ops operate on integer vector types. } else { S.Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement) << ResType << int(IsInc) << Op->getSourceRange(); return QualType(); } // At this point, we know we have a real, complex or pointer type.
// Now make sure the operand is a modifiable lvalue. if (CheckForModifiableLvalue(Op, OpLoc, S)) return QualType(); // In C++, a prefix increment is the same type as the operand. Otherwise // (in C or with postfix), the increment is the unqualified type of the // operand. if (IsPrefix && S.getLangOpts().CPlusPlus) { VK = VK_LValue; OK = Op->getObjectKind(); return ResType; } else { VK = VK_RValue; return ResType.getUnqualifiedType(); } } /// getPrimaryDecl - Helper function for CheckAddressOfOperand(). /// This routine allows us to typecheck complex/recursive expressions /// where the declaration is needed for type checking. We only need to /// handle cases when the expression references a function designator /// or is an lvalue. Here are some examples: /// - &(x) => x /// - &*****f => f for f a function designator. /// - &s.xx => s /// - &s.zz[1].yy -> s, if zz is an array /// - *(x + 1) -> x, if x is an array /// - &"123"[2] -> 0 /// - & __real__ x -> x static ValueDecl *getPrimaryDecl(Expr *E) { switch (E->getStmtClass()) { case Stmt::DeclRefExprClass: return cast<DeclRefExpr>(E)->getDecl(); case Stmt::MemberExprClass: // If this is an arrow operator, the address is an offset from // the base's value, so the object the base refers to is // irrelevant. if (cast<MemberExpr>(E)->isArrow()) return nullptr; // Otherwise, the expression refers to a part of the base return getPrimaryDecl(cast<MemberExpr>(E)->getBase()); case Stmt::ArraySubscriptExprClass: { // FIXME: This code shouldn't be necessary! We should catch the implicit // promotion of register arrays earlier. Expr* Base = cast<ArraySubscriptExpr>(E)->getBase(); if (ImplicitCastExpr* ICE = dyn_cast<ImplicitCastExpr>(Base)) { if (ICE->getSubExpr()->getType()->isArrayType()) return getPrimaryDecl(ICE->getSubExpr()); } return nullptr; } case Stmt::UnaryOperatorClass: { UnaryOperator *UO = cast<UnaryOperator>(E); switch(UO->getOpcode()) { case UO_Real: case UO_Imag: case UO_Extension: return getPrimaryDecl(UO->getSubExpr()); default: return nullptr; } } case Stmt::ParenExprClass: return getPrimaryDecl(cast<ParenExpr>(E)->getSubExpr()); case Stmt::ImplicitCastExprClass: // If the result of an implicit cast is an l-value, we care about // the sub-expression; otherwise, the result here doesn't matter. return getPrimaryDecl(cast<ImplicitCastExpr>(E)->getSubExpr()); default: return nullptr; } } namespace { enum { AO_Bit_Field = 0, AO_Vector_Element = 1, AO_Property_Expansion = 2, AO_Register_Variable = 3, AO_No_Error = 4 }; } /// \brief Diagnose invalid operand for address of operations. /// /// \param Type The type of operand which cannot have its address taken. static void diagnoseAddressOfInvalidType(Sema &S, SourceLocation Loc, Expr *E, unsigned Type) { S.Diag(Loc, diag::err_typecheck_address_of) << Type << E->getSourceRange(); } /// CheckAddressOfOperand - The operand of & must be either a function /// designator or an lvalue designating an object. If it is an lvalue, the /// object cannot be declared with storage class register or be a bit field. /// Note: The usual conversions are *not* applied to the operand of the & /// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue. /// In C++, the operand might be an overloaded function name, in which case /// we allow the '&' but retain the overloaded-function type. 
QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) { if (const BuiltinType *PTy = OrigOp.get()->getType()->getAsPlaceholderType()){ if (PTy->getKind() == BuiltinType::Overload) { Expr *E = OrigOp.get()->IgnoreParens(); if (!isa<OverloadExpr>(E)) { assert(cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf); Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof_addrof_function) << OrigOp.get()->getSourceRange(); return QualType(); } OverloadExpr *Ovl = cast<OverloadExpr>(E); if (isa<UnresolvedMemberExpr>(Ovl)) if (!ResolveSingleFunctionTemplateSpecialization(Ovl)) { Diag(OpLoc, diag::err_invalid_form_pointer_member_function) << OrigOp.get()->getSourceRange(); return QualType(); } return Context.OverloadTy; } if (PTy->getKind() == BuiltinType::UnknownAny) return Context.UnknownAnyTy; if (PTy->getKind() == BuiltinType::BoundMember) { Diag(OpLoc, diag::err_invalid_form_pointer_member_function) << OrigOp.get()->getSourceRange(); return QualType(); } OrigOp = CheckPlaceholderExpr(OrigOp.get()); if (OrigOp.isInvalid()) return QualType(); } if (OrigOp.get()->isTypeDependent()) return Context.DependentTy; assert(!OrigOp.get()->getType()->isPlaceholderType()); // Make sure to ignore parentheses in subsequent checks Expr *op = OrigOp.get()->IgnoreParens(); // OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed. if (LangOpts.OpenCL && op->getType()->isFunctionType()) { Diag(op->getExprLoc(), diag::err_opencl_taking_function_address); return QualType(); } if (getLangOpts().C99) { // Implement C99-only parts of addressof rules. if (UnaryOperator* uOp = dyn_cast<UnaryOperator>(op)) { if (uOp->getOpcode() == UO_Deref) // Per C99 6.5.3.2, the address of a deref always returns a valid result // (assuming the deref expression is valid). return uOp->getSubExpr()->getType(); } // Technically, there should be a check for array subscript // expressions here, but the result of one is always an lvalue anyway. } ValueDecl *dcl = getPrimaryDecl(op); Expr::LValueClassification lval = op->ClassifyLValue(Context); unsigned AddressOfError = AO_No_Error; if (lval == Expr::LV_ClassTemporary || lval == Expr::LV_ArrayTemporary) { bool sfinae = (bool)isSFINAEContext(); Diag(OpLoc, isSFINAEContext() ? diag::err_typecheck_addrof_temporary : diag::ext_typecheck_addrof_temporary) << op->getType() << op->getSourceRange(); if (sfinae) return QualType(); // Materialize the temporary as an lvalue so that we can take its address. OrigOp = op = new (Context) MaterializeTemporaryExpr(op->getType(), OrigOp.get(), true); } else if (isa<ObjCSelectorExpr>(op)) { return Context.getPointerType(op->getType()); } else if (lval == Expr::LV_MemberFunction) { // If it's an instance method, make a member pointer. // The expression must have exactly the form &A::foo. // If the underlying expression isn't a decl ref, give up. if (!isa<DeclRefExpr>(op)) { Diag(OpLoc, diag::err_invalid_form_pointer_member_function) << OrigOp.get()->getSourceRange(); return QualType(); } DeclRefExpr *DRE = cast<DeclRefExpr>(op); CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl()); // The id-expression was parenthesized. if (OrigOp.get() != DRE) { Diag(OpLoc, diag::err_parens_pointer_member_function) << OrigOp.get()->getSourceRange(); // The method was named without a qualifier. 
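// Illustrative note (added commentary, not from the original source): a
// pointer to an instance method must be formed with a qualified name and
// no extra parentheses, e.g.
//   struct A { void f(); };
//   auto p = &A::f;    // OK
//   auto q = &(A::f);  // error: parenthesized form (diagnosed just above)
//   // inside A: &f    // error: unqualified (diagnosed below, with a fix-it)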
} else if (!DRE->getQualifier()) { if (MD->getParent()->getName().empty()) Diag(OpLoc, diag::err_unqualified_pointer_member_function) << op->getSourceRange(); else { SmallString<32> Str; StringRef Qual = (MD->getParent()->getName() + "::").toStringRef(Str); Diag(OpLoc, diag::err_unqualified_pointer_member_function) << op->getSourceRange() << FixItHint::CreateInsertion(op->getSourceRange().getBegin(), Qual); } } // Taking the address of a dtor is illegal per C++ [class.dtor]p2. if (isa<CXXDestructorDecl>(MD)) Diag(OpLoc, diag::err_typecheck_addrof_dtor) << op->getSourceRange(); QualType MPTy = Context.getMemberPointerType( op->getType(), Context.getTypeDeclType(MD->getParent()).getTypePtr()); if (Context.getTargetInfo().getCXXABI().isMicrosoft()) RequireCompleteType(OpLoc, MPTy, 0); return MPTy; } else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) { // C99 6.5.3.2p1 // The operand must be either an l-value or a function designator if (!op->getType()->isFunctionType()) { // Use a special diagnostic for loads from property references. if (isa<PseudoObjectExpr>(op)) { AddressOfError = AO_Property_Expansion; } else { Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof) << op->getType() << op->getSourceRange(); return QualType(); } } } else if (op->getObjectKind() == OK_BitField) { // C99 6.5.3.2p1 // The operand cannot be a bit-field AddressOfError = AO_Bit_Field; } else if (op->getObjectKind() == OK_VectorComponent) { // The operand cannot be an element of a vector AddressOfError = AO_Vector_Element; } else if (dcl) { // C99 6.5.3.2p1 // We have an lvalue with a decl. Make sure the decl is not declared // with the register storage-class specifier. if (const VarDecl *vd = dyn_cast<VarDecl>(dcl)) { // in C++ it is not error to take address of a register // variable (c++03 7.1.1P3) if (vd->getStorageClass() == SC_Register && !getLangOpts().CPlusPlus) { AddressOfError = AO_Register_Variable; } } else if (isa<MSPropertyDecl>(dcl)) { AddressOfError = AO_Property_Expansion; } else if (isa<FunctionTemplateDecl>(dcl)) { return Context.OverloadTy; } else if (isa<FieldDecl>(dcl) || isa<IndirectFieldDecl>(dcl)) { // Okay: we can take the address of a field. // Could be a pointer to member, though, if there is an explicit // scope qualifier for the class. if (isa<DeclRefExpr>(op) && cast<DeclRefExpr>(op)->getQualifier()) { DeclContext *Ctx = dcl->getDeclContext(); if (Ctx && Ctx->isRecord()) { if (dcl->getType()->isReferenceType()) { Diag(OpLoc, diag::err_cannot_form_pointer_to_member_of_reference_type) << dcl->getDeclName() << dcl->getType(); return QualType(); } while (cast<RecordDecl>(Ctx)->isAnonymousStructOrUnion()) Ctx = Ctx->getParent(); QualType MPTy = Context.getMemberPointerType( op->getType(), Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr()); if (Context.getTargetInfo().getCXXABI().isMicrosoft()) RequireCompleteType(OpLoc, MPTy, 0); return MPTy; } } } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl)) llvm_unreachable("Unknown/unexpected decl type"); } if (AddressOfError != AO_No_Error) { diagnoseAddressOfInvalidType(*this, OpLoc, op, AddressOfError); return QualType(); } if (lval == Expr::LV_IncompleteVoidType) { // Taking the address of a void variable is technically illegal, but we // allow it in cases which are otherwise valid. // Example: "extern void x; void* y = &x;". Diag(OpLoc, diag::ext_typecheck_addrof_void) << op->getSourceRange(); } // If the operand has type "type", the result has type "pointer to type". 
if (op->getType()->isObjCObjectType()) return Context.getObjCObjectPointerType(op->getType()); return Context.getPointerType(op->getType()); } static void RecordModifiableNonNullParam(Sema &S, const Expr *Exp) { const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp); if (!DRE) return; const Decl *D = DRE->getDecl(); if (!D) return; const ParmVarDecl *Param = dyn_cast<ParmVarDecl>(D); if (!Param) return; if (const FunctionDecl* FD = dyn_cast<FunctionDecl>(Param->getDeclContext())) if (!FD->hasAttr<NonNullAttr>() && !Param->hasAttr<NonNullAttr>()) return; if (FunctionScopeInfo *FD = S.getCurFunction()) if (!FD->ModifiedNonNullParams.count(Param)) FD->ModifiedNonNullParams.insert(Param); } /// CheckIndirectionOperand - Type check unary indirection (prefix '*'). static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK, SourceLocation OpLoc) { if (Op->isTypeDependent()) return S.Context.DependentTy; ExprResult ConvResult = S.UsualUnaryConversions(Op); if (ConvResult.isInvalid()) return QualType(); Op = ConvResult.get(); QualType OpTy = Op->getType(); QualType Result; if (isa<CXXReinterpretCastExpr>(Op)) { QualType OpOrigType = Op->IgnoreParenCasts()->getType(); S.CheckCompatibleReinterpretCast(OpOrigType, OpTy, /*IsDereference*/true, Op->getSourceRange()); } if (const PointerType *PT = OpTy->getAs<PointerType>()) Result = PT->getPointeeType(); else if (const ObjCObjectPointerType *OPT = OpTy->getAs<ObjCObjectPointerType>()) Result = OPT->getPointeeType(); else { ExprResult PR = S.CheckPlaceholderExpr(Op); if (PR.isInvalid()) return QualType(); if (PR.get() != Op) return CheckIndirectionOperand(S, PR.get(), VK, OpLoc); } if (Result.isNull()) { S.Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer) << OpTy << Op->getSourceRange(); return QualType(); } // Note that per both C89 and C99, indirection is always legal, even if Result // is an incomplete type or void. It would be possible to warn about // dereferencing a void pointer, but it's completely well-defined, and such a // warning is unlikely to catch any mistakes. In C++, indirection is not valid // for pointers to 'void' but is fine for any other pointer type: // // C++ [expr.unary.op]p1: // [...] the expression to which [the unary * operator] is applied shall // be a pointer to an object type, or a pointer to a function type if (S.getLangOpts().CPlusPlus && Result->isVoidType()) S.Diag(OpLoc, diag::ext_typecheck_indirection_through_void_pointer) << OpTy << Op->getSourceRange(); // Dereferences are usually l-values... VK = VK_LValue; // ...except that certain expressions are never l-values in C. 
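// Illustrative note (added commentary, not from the original source): in C,
// the 'forbidden' dereference results are function and void types, e.g.
//   void (*fp)(void);
//   (*fp)();   // *fp is a function designator, not an lvalue in C
//              // (it is an lvalue in C++)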
if (!S.getLangOpts().CPlusPlus && Result.isCForbiddenLValueType()) VK = VK_RValue; return Result; } BinaryOperatorKind Sema::ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind) { BinaryOperatorKind Opc; switch (Kind) { default: llvm_unreachable("Unknown binop!"); case tok::periodstar: Opc = BO_PtrMemD; break; case tok::arrowstar: Opc = BO_PtrMemI; break; case tok::star: Opc = BO_Mul; break; case tok::slash: Opc = BO_Div; break; case tok::percent: Opc = BO_Rem; break; case tok::plus: Opc = BO_Add; break; case tok::minus: Opc = BO_Sub; break; case tok::lessless: Opc = BO_Shl; break; case tok::greatergreater: Opc = BO_Shr; break; case tok::lessequal: Opc = BO_LE; break; case tok::less: Opc = BO_LT; break; case tok::greaterequal: Opc = BO_GE; break; case tok::greater: Opc = BO_GT; break; case tok::exclaimequal: Opc = BO_NE; break; case tok::equalequal: Opc = BO_EQ; break; case tok::amp: Opc = BO_And; break; case tok::caret: Opc = BO_Xor; break; case tok::pipe: Opc = BO_Or; break; case tok::ampamp: Opc = BO_LAnd; break; case tok::pipepipe: Opc = BO_LOr; break; case tok::equal: Opc = BO_Assign; break; case tok::starequal: Opc = BO_MulAssign; break; case tok::slashequal: Opc = BO_DivAssign; break; case tok::percentequal: Opc = BO_RemAssign; break; case tok::plusequal: Opc = BO_AddAssign; break; case tok::minusequal: Opc = BO_SubAssign; break; case tok::lesslessequal: Opc = BO_ShlAssign; break; case tok::greatergreaterequal: Opc = BO_ShrAssign; break; case tok::ampequal: Opc = BO_AndAssign; break; case tok::caretequal: Opc = BO_XorAssign; break; case tok::pipeequal: Opc = BO_OrAssign; break; case tok::comma: Opc = BO_Comma; break; } return Opc; } static inline UnaryOperatorKind ConvertTokenKindToUnaryOpcode( tok::TokenKind Kind) { UnaryOperatorKind Opc; switch (Kind) { default: llvm_unreachable("Unknown unary op!"); case tok::plusplus: Opc = UO_PreInc; break; case tok::minusminus: Opc = UO_PreDec; break; case tok::amp: Opc = UO_AddrOf; break; case tok::star: Opc = UO_Deref; break; case tok::plus: Opc = UO_Plus; break; case tok::minus: Opc = UO_Minus; break; case tok::tilde: Opc = UO_Not; break; case tok::exclaim: Opc = UO_LNot; break; case tok::kw___real: Opc = UO_Real; break; case tok::kw___imag: Opc = UO_Imag; break; case tok::kw___extension__: Opc = UO_Extension; break; } return Opc; } /// DiagnoseSelfAssignment - Emits a warning if a value is assigned to itself. /// This warning is only emitted for builtin assignment operations. It is also /// suppressed in the event of macro expansions. 
static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr, SourceLocation OpLoc) { if (!S.ActiveTemplateInstantiations.empty()) return; if (OpLoc.isInvalid() || OpLoc.isMacroID()) return; LHSExpr = LHSExpr->IgnoreParenImpCasts(); RHSExpr = RHSExpr->IgnoreParenImpCasts(); const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr); const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr); if (!LHSDeclRef || !RHSDeclRef || LHSDeclRef->getLocation().isMacroID() || RHSDeclRef->getLocation().isMacroID()) return; const ValueDecl *LHSDecl = cast<ValueDecl>(LHSDeclRef->getDecl()->getCanonicalDecl()); const ValueDecl *RHSDecl = cast<ValueDecl>(RHSDeclRef->getDecl()->getCanonicalDecl()); if (LHSDecl != RHSDecl) return; if (LHSDecl->getType().isVolatileQualified()) return; if (const ReferenceType *RefTy = LHSDecl->getType()->getAs<ReferenceType>()) if (RefTy->getPointeeType().isVolatileQualified()) return; S.Diag(OpLoc, diag::warn_self_assignment) << LHSDeclRef->getType() << LHSExpr->getSourceRange() << RHSExpr->getSourceRange(); } /// Check if a bitwise-& is performed on an Objective-C pointer. This /// is usually indicative of introspection within the Objective-C pointer. static void checkObjCPointerIntrospection(Sema &S, ExprResult &L, ExprResult &R, SourceLocation OpLoc) { if (!S.getLangOpts().ObjC1) return; const Expr *ObjCPointerExpr = nullptr, *OtherExpr = nullptr; const Expr *LHS = L.get(); const Expr *RHS = R.get(); if (LHS->IgnoreParenCasts()->getType()->isObjCObjectPointerType()) { ObjCPointerExpr = LHS; OtherExpr = RHS; } else if (RHS->IgnoreParenCasts()->getType()->isObjCObjectPointerType()) { ObjCPointerExpr = RHS; OtherExpr = LHS; } // This warning is deliberately made very specific to reduce false // positives with logic that uses '&' for hashing. This logic mainly // looks for code trying to introspect into tagged pointers, which // code should generally never do. if (ObjCPointerExpr && isa<IntegerLiteral>(OtherExpr->IgnoreParenCasts())) { unsigned Diag = diag::warn_objc_pointer_masking; // Determine if we are introspecting the result of performSelectorXXX. const Expr *Ex = ObjCPointerExpr->IgnoreParenCasts(); // Special case messages to -performSelector and friends, which // can return non-pointer values boxed in a pointer value. // Some clients may wish to silence warnings in this subcase. if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Ex)) { Selector S = ME->getSelector(); StringRef SelArg0 = S.getNameForSlot(0); if (SelArg0.startswith("performSelector")) Diag = diag::warn_objc_pointer_masking_performSelector; } S.Diag(OpLoc, Diag) << ObjCPointerExpr->getSourceRange(); } } static NamedDecl *getDeclFromExpr(Expr *E) { if (!E) return nullptr; if (auto *DRE = dyn_cast<DeclRefExpr>(E)) return DRE->getDecl(); if (auto *ME = dyn_cast<MemberExpr>(E)) return ME->getMemberDecl(); if (auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) return IRE->getDecl(); return nullptr; } /// CreateBuiltinBinOp - Creates a new built-in binary operation with /// operator @p Opc at location @c TokLoc. This routine only supports /// built-in operations; ActOnBinOp handles overloaded operators. ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr) { if (getLangOpts().CPlusPlus11 && isa<InitListExpr>(RHSExpr)) { // The syntax only allows initializer lists on the RHS of assignment, // so we don't need to worry about accepting invalid code for // non-assignment operators. // C++11 5.17p9: // The meaning of x = {v} [...] 
is that of x = T(v) [...]. The meaning // of x = {} is x = T(). InitializationKind Kind = InitializationKind::CreateDirectList(RHSExpr->getLocStart()); InitializedEntity Entity = InitializedEntity::InitializeTemporary(LHSExpr->getType()); InitializationSequence InitSeq(*this, Entity, Kind, RHSExpr); ExprResult Init = InitSeq.Perform(*this, Entity, Kind, RHSExpr); if (Init.isInvalid()) return Init; RHSExpr = Init.get(); } ExprResult LHS = LHSExpr, RHS = RHSExpr; QualType ResultTy; // Result type of the binary operator. // The following two variables are used for compound assignment operators QualType CompLHSTy; // Type of LHS after promotions for computation QualType CompResultTy; // Type of computation result ExprValueKind VK = VK_RValue; ExprObjectKind OK = OK_Ordinary; // HLSL Change Starts // Handle HLSL binary operands differently if ((getLangOpts().HLSL && (getLangOpts().HLSLVersion < hlsl::LangStd::v2021 || !hlsl::IsUserDefinedRecordType(LHSExpr->getType()))) || !hlsl::DoesTypeDefineOverloadedOperator( LHSExpr->getType(), clang::BinaryOperator::getOverloadedOperator(Opc), RHSExpr->getType())) { hlsl::CheckBinOpForHLSL(*this, OpLoc, Opc, LHS, RHS, ResultTy, CompLHSTy, CompResultTy); if (!ResultTy.isNull() && Opc == BO_Comma) { // In C/C++, the RHS value kind should propagate. In HLSL, it should yield an r-value. // VK = RHS.get()->getValueKind(); OK = RHS.get()->getObjectKind(); } goto CasesHandled; } // HLSL Change Ends if (!getLangOpts().CPlusPlus) { // C cannot handle TypoExpr nodes on either side of a binop because it // doesn't handle dependent types properly, so make sure any TypoExprs have // been dealt with before checking the operands. LHS = CorrectDelayedTyposInExpr(LHSExpr); RHS = CorrectDelayedTyposInExpr(RHSExpr, [Opc, LHS](Expr *E) { if (Opc != BO_Assign) return ExprResult(E); // Avoid correcting the RHS to the same Expr as the LHS. Decl *D = getDeclFromExpr(E); return (D && D == getDeclFromExpr(LHS.get())) ? 
ExprError() : E; }); if (!LHS.isUsable() || !RHS.isUsable()) return ExprError(); } switch (Opc) { case BO_Assign: ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType()); if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() != OK_ObjCProperty) { VK = LHS.get()->getValueKind(); OK = LHS.get()->getObjectKind(); } if (!ResultTy.isNull()) { DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc); DiagnoseSelfMove(LHS.get(), RHS.get(), OpLoc); } RecordModifiableNonNullParam(*this, LHS.get()); break; case BO_PtrMemD: case BO_PtrMemI: ResultTy = CheckPointerToMemberOperands(LHS, RHS, VK, OpLoc, Opc == BO_PtrMemI); break; case BO_Mul: case BO_Div: ResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, false, Opc == BO_Div); break; case BO_Rem: ResultTy = CheckRemainderOperands(LHS, RHS, OpLoc); break; case BO_Add: ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc); break; case BO_Sub: ResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc); break; case BO_Shl: case BO_Shr: ResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc); break; case BO_LE: case BO_LT: case BO_GE: case BO_GT: ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, true); break; case BO_EQ: case BO_NE: ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, false); break; case BO_And: checkObjCPointerIntrospection(*this, LHS, RHS, OpLoc); LLVM_FALLTHROUGH; // HLSL Change case BO_Xor: case BO_Or: ResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc); break; case BO_LAnd: case BO_LOr: ResultTy = CheckLogicalOperands(LHS, RHS, OpLoc, Opc); break; case BO_MulAssign: case BO_DivAssign: CompResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, true, Opc == BO_DivAssign); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_RemAssign: CompResultTy = CheckRemainderOperands(LHS, RHS, OpLoc, true); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_AddAssign: CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy); if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_SubAssign: CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, &CompLHSTy); if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_ShlAssign: case BO_ShrAssign: CompResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc, true); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_AndAssign: case BO_OrAssign: // fallthrough DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc); LLVM_FALLTHROUGH; // HLSL Change case BO_XorAssign: CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, true); CompLHSTy = CompResultTy; if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid()) ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy); break; case BO_Comma: ResultTy = CheckCommaOperands(*this, LHS, RHS, OpLoc); if (getLangOpts().CPlusPlus && !RHS.isInvalid()) { VK = RHS.get()->getValueKind(); OK = RHS.get()->getObjectKind(); } break; } CasesHandled: // HLSL Change: minimize code changes by avoiding a branch above if (ResultTy.isNull() || 
LHS.isInvalid() || RHS.isInvalid()) return ExprError(); // Check for array bounds violations for both sides of the BinaryOperator CheckArrayAccess(LHS.get()); CheckArrayAccess(RHS.get()); if (const ObjCIsaExpr *OISA = dyn_cast<ObjCIsaExpr>(LHS.get()->IgnoreParenCasts())) { NamedDecl *ObjectSetClass = LookupSingleName(TUScope, &Context.Idents.get("object_setClass"), SourceLocation(), LookupOrdinaryName); if (ObjectSetClass && isa<ObjCIsaExpr>(LHS.get())) { SourceLocation RHSLocEnd = PP.getLocForEndOfToken(RHS.get()->getLocEnd()); Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign) << FixItHint::CreateInsertion(LHS.get()->getLocStart(), "object_setClass(") << FixItHint::CreateReplacement(SourceRange(OISA->getOpLoc(), OpLoc), ",") << FixItHint::CreateInsertion(RHSLocEnd, ")"); } else Diag(LHS.get()->getExprLoc(), diag::warn_objc_isa_assign); } else if (const ObjCIvarRefExpr *OIRE = dyn_cast<ObjCIvarRefExpr>(LHS.get()->IgnoreParenCasts())) DiagnoseDirectIsaAccess(*this, OIRE, OpLoc, RHS.get()); if (CompResultTy.isNull()) return new (Context) BinaryOperator(LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, OpLoc, FPFeatures.fp_contract); if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() != OK_ObjCProperty) { VK = VK_LValue; OK = LHS.get()->getObjectKind(); } return new (Context) CompoundAssignOperator( LHS.get(), RHS.get(), Opc, ResultTy, VK, OK, CompLHSTy, CompResultTy, OpLoc, FPFeatures.fp_contract); } /// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison /// operators are mixed in a way that suggests that the programmer forgot that /// comparison operators have higher precedence. The most typical example of /// such code is "flags & 0x0020 != 0", which is equivalent to "flags & 1". static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { BinaryOperator *LHSBO = dyn_cast<BinaryOperator>(LHSExpr); BinaryOperator *RHSBO = dyn_cast<BinaryOperator>(RHSExpr); // Check that one of the sides is a comparison operator. bool isLeftComp = LHSBO && LHSBO->isComparisonOp(); bool isRightComp = RHSBO && RHSBO->isComparisonOp(); if (!isLeftComp && !isRightComp) return; // Bitwise operations are sometimes used as eager logical ops. // Don't diagnose this. bool isLeftBitwise = LHSBO && LHSBO->isBitwiseOp(); bool isRightBitwise = RHSBO && RHSBO->isBitwiseOp(); if ((isLeftComp || isLeftBitwise) && (isRightComp || isRightBitwise)) return; SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getLocStart(), OpLoc) : SourceRange(OpLoc, RHSExpr->getLocEnd()); StringRef OpStr = isLeftComp ? LHSBO->getOpcodeStr() : RHSBO->getOpcodeStr(); SourceRange ParensRange = isLeftComp ? SourceRange(LHSBO->getRHS()->getLocStart(), RHSExpr->getLocEnd()) : SourceRange(LHSExpr->getLocStart(), RHSBO->getLHS()->getLocEnd()); Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel) << DiagRange << BinaryOperator::getOpcodeStr(Opc) << OpStr; SuggestParentheses(Self, OpLoc, Self.PDiag(diag::note_precedence_silence) << OpStr, (isLeftComp ? LHSExpr : RHSExpr)->getSourceRange()); SuggestParentheses(Self, OpLoc, Self.PDiag(diag::note_precedence_bitwise_first) << BinaryOperator::getOpcodeStr(Opc), ParensRange); } /// \brief It accepts a '&' expr that is inside a '|' one. /// Emit a diagnostic together with a fixit hint that wraps the '&' expression /// in parentheses. 
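/// For example, for 'a & b | c' the note suggests parenthesizing the
/// expression as '(a & b) | c'.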
static void EmitDiagnosticForBitwiseAndInBitwiseOr(Sema &Self, SourceLocation OpLoc, BinaryOperator *Bop) { assert(Bop->getOpcode() == BO_And); Self.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_and_in_bitwise_or) << Bop->getSourceRange() << OpLoc; SuggestParentheses(Self, Bop->getOperatorLoc(), Self.PDiag(diag::note_precedence_silence) << Bop->getOpcodeStr(), Bop->getSourceRange()); } /// \brief It accepts a '&&' expr that is inside a '||' one. /// Emit a diagnostic together with a fixit hint that wraps the '&&' expression /// in parentheses. static void EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc, BinaryOperator *Bop) { assert(Bop->getOpcode() == BO_LAnd); Self.Diag(Bop->getOperatorLoc(), diag::warn_logical_and_in_logical_or) << Bop->getSourceRange() << OpLoc; SuggestParentheses(Self, Bop->getOperatorLoc(), Self.PDiag(diag::note_precedence_silence) << Bop->getOpcodeStr(), Bop->getSourceRange()); } /// \brief Returns true if the given expression can be evaluated as a constant /// 'true'. static bool EvaluatesAsTrue(Sema &S, Expr *E) { bool Res; return !E->isValueDependent() && E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && Res; } /// \brief Returns true if the given expression can be evaluated as a constant /// 'false'. static bool EvaluatesAsFalse(Sema &S, Expr *E) { bool Res; return !E->isValueDependent() && E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && !Res; } /// \brief Look for '&&' in the left hand of a '||' expr. static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(LHSExpr)) { if (Bop->getOpcode() == BO_LAnd) { // If it's "a && b || 0" don't warn since the precedence doesn't matter. if (EvaluatesAsFalse(S, RHSExpr)) return; // If it's "1 && a || b" don't warn since the precedence doesn't matter. if (!EvaluatesAsTrue(S, Bop->getLHS())) return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop); } else if (Bop->getOpcode() == BO_LOr) { if (BinaryOperator *RBop = dyn_cast<BinaryOperator>(Bop->getRHS())) { // If it's "a || b && 1 || c" we didn't warn earlier for // "a || b && 1", but warn now. if (RBop->getOpcode() == BO_LAnd && EvaluatesAsTrue(S, RBop->getRHS())) return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, RBop); } } } } /// \brief Look for '&&' in the right hand of a '||' expr. static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(RHSExpr)) { if (Bop->getOpcode() == BO_LAnd) { // If it's "0 || a && b" don't warn since the precedence doesn't matter. if (EvaluatesAsFalse(S, LHSExpr)) return; // If it's "a || b && 1" don't warn since the precedence doesn't matter. if (!EvaluatesAsTrue(S, Bop->getRHS())) return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop); } } } /// \brief Look for '&' in the left or right hand of a '|' expr. 
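/// DiagnoseBinOpPrecedence invokes this once per operand of the '|', so both
/// 'a & b | c' and 'a | b & c' are caught.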
static void DiagnoseBitwiseAndInBitwiseOr(Sema &S, SourceLocation OpLoc, Expr *OrArg) { if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(OrArg)) { if (Bop->getOpcode() == BO_And) return EmitDiagnosticForBitwiseAndInBitwiseOr(S, OpLoc, Bop); } } static void DiagnoseAdditionInShift(Sema &S, SourceLocation OpLoc, Expr *SubExpr, StringRef Shift) { if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(SubExpr)) { if (Bop->getOpcode() == BO_Add || Bop->getOpcode() == BO_Sub) { StringRef Op = Bop->getOpcodeStr(); S.Diag(Bop->getOperatorLoc(), diag::warn_addition_in_bitshift) << Bop->getSourceRange() << OpLoc << Shift << Op; SuggestParentheses(S, Bop->getOperatorLoc(), S.PDiag(diag::note_precedence_silence) << Op, Bop->getSourceRange()); } } } static void DiagnoseShiftCompare(Sema &S, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr) { CXXOperatorCallExpr *OCE = dyn_cast<CXXOperatorCallExpr>(LHSExpr); if (!OCE) return; FunctionDecl *FD = OCE->getDirectCallee(); if (!FD || !FD->isOverloadedOperator()) return; OverloadedOperatorKind Kind = FD->getOverloadedOperator(); if (Kind != OO_LessLess && Kind != OO_GreaterGreater) return; S.Diag(OpLoc, diag::warn_overloaded_shift_in_comparison) << LHSExpr->getSourceRange() << RHSExpr->getSourceRange() << (Kind == OO_LessLess); SuggestParentheses(S, OCE->getOperatorLoc(), S.PDiag(diag::note_precedence_silence) << (Kind == OO_LessLess ? "<<" : ">>"), OCE->getSourceRange()); SuggestParentheses(S, OpLoc, S.PDiag(diag::note_evaluate_comparison_first), SourceRange(OCE->getArg(1)->getLocStart(), RHSExpr->getLocEnd())); } /// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky /// precedence. static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc, SourceLocation OpLoc, Expr *LHSExpr, Expr *RHSExpr){ // Diagnose "arg1 'bitwise' arg2 'eq' arg3". if (BinaryOperator::isBitwiseOp(Opc)) DiagnoseBitwisePrecedence(Self, Opc, OpLoc, LHSExpr, RHSExpr); // Diagnose "arg1 & arg2 | arg3" if (Opc == BO_Or && !OpLoc.isMacroID()/* Don't warn in macros. */) { DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, LHSExpr); DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, RHSExpr); } // Warn about arg1 || arg2 && arg3, as GCC 4.3+ does. // We don't warn for 'assert(a || b && "bad")' since this is safe. if (Opc == BO_LOr && !OpLoc.isMacroID()/* Don't warn in macros. */) { DiagnoseLogicalAndInLogicalOrLHS(Self, OpLoc, LHSExpr, RHSExpr); DiagnoseLogicalAndInLogicalOrRHS(Self, OpLoc, LHSExpr, RHSExpr); } if ((Opc == BO_Shl && LHSExpr->getType()->isIntegralType(Self.getASTContext())) || Opc == BO_Shr) { StringRef Shift = BinaryOperator::getOpcodeStr(Opc); DiagnoseAdditionInShift(Self, OpLoc, LHSExpr, Shift); DiagnoseAdditionInShift(Self, OpLoc, RHSExpr, Shift); } // Warn on overloaded shift operators and comparisons, such as: // cout << 5 == 4; if (BinaryOperator::isComparisonOp(Opc)) DiagnoseShiftCompare(Self, OpLoc, LHSExpr, RHSExpr); } // Binary Operators. 'Tok' is the token for the operator. ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr) { BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Kind); assert(LHSExpr && "ActOnBinOp(): missing left expression"); assert(RHSExpr && "ActOnBinOp(): missing right expression"); // Emit warnings for tricky precedence issues, e.g. "bitfield & 0x4 == 0" DiagnoseBinOpPrecedence(*this, Opc, TokLoc, LHSExpr, RHSExpr); return BuildBinOp(S, TokLoc, Opc, LHSExpr, RHSExpr); } /// Build an overloaded binary operator expression in the given scope. 
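/// Note that 'operator=' is deliberately not looked up here: an assignment
/// operator can only be declared as a non-static member function, so it is
/// found via member lookup during overload resolution instead.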
static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHS, Expr *RHS) { // Find all of the overloaded operators visible from this // point. We perform both an operator-name lookup from the local // scope and an argument-dependent lookup based on the types of // the arguments. UnresolvedSet<16> Functions; OverloadedOperatorKind OverOp = BinaryOperator::getOverloadedOperator(Opc); if (Sc && OverOp != OO_None && OverOp != OO_Equal) S.LookupOverloadedOperatorName(OverOp, Sc, LHS->getType(), RHS->getType(), Functions); // Build the (potentially-overloaded, potentially-dependent) // binary operation. return S.CreateOverloadedBinOp(OpLoc, Opc, Functions, LHS, RHS); } ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr) { // We want to end up calling one of checkPseudoObjectAssignment // (if the LHS is a pseudo-object), BuildOverloadedBinOp (if // both expressions are overloadable or either is type-dependent), // or CreateBuiltinBinOp (in any other case). We also want to get // any placeholder types out of the way. // Handle pseudo-objects in the LHS. if (const BuiltinType *pty = LHSExpr->getType()->getAsPlaceholderType()) { // Assignments with a pseudo-object l-value need special analysis. if (pty->getKind() == BuiltinType::PseudoObject && BinaryOperator::isAssignmentOp(Opc)) return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr); // Don't resolve overloads if the other type is overloadable. if (pty->getKind() == BuiltinType::Overload) { // We can't actually test that if we still have a placeholder, // though. Fortunately, none of the exceptions we see in that // code below are valid when the LHS is an overload set. Note // that an overload set can be dependently-typed, but it never // instantiates to having an overloadable type. ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr); if (resolvedRHS.isInvalid()) return ExprError(); RHSExpr = resolvedRHS.get(); if (RHSExpr->isTypeDependent() || RHSExpr->getType()->isOverloadableType()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); } ExprResult LHS = CheckPlaceholderExpr(LHSExpr); if (LHS.isInvalid()) return ExprError(); LHSExpr = LHS.get(); } // Handle pseudo-objects in the RHS. if (const BuiltinType *pty = RHSExpr->getType()->getAsPlaceholderType()) { // An overload in the RHS can potentially be resolved by the type // being assigned to. if (Opc == BO_Assign && pty->getKind() == BuiltinType::Overload) { if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); if (LHSExpr->getType()->isOverloadableType()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr); } // Don't resolve overloads if the other type is overloadable. if (pty->getKind() == BuiltinType::Overload && LHSExpr->getType()->isOverloadableType()) return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr); ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr); if (!resolvedRHS.isUsable()) return ExprError(); RHSExpr = resolvedRHS.get(); } // HLSL Change: The condition of this if-statement must be false for the // binary operator overloading for HLSL-specific resource types because they // must be handled by the following CreateBuiltinBinOp(). 
If it is a
  // user-defined type with operator overloading methods, we know it is not a
  // binary operator overloading for a HLSL-specific resource type. This
  // if-statement condition does not perfectly check all the cases, but it
  // simply checks whether it is a user-defined type with operator overloading
  // methods or not.
  if (getLangOpts().CPlusPlus &&
      (!getLangOpts().HLSL ||
       getLangOpts().HLSLVersion >= hlsl::LangStd::v2021) &&
      hlsl::IsUserDefinedRecordType(LHSExpr->getType()) &&
      hlsl::DoesTypeDefineOverloadedOperator(
          LHSExpr->getType(), clang::BinaryOperator::getOverloadedOperator(Opc),
          RHSExpr->getType())) {
    // If either expression is type-dependent, always build an
    // overloaded op.
    if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
      return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);

    // Otherwise, build an overloaded op if either expression has an
    // overloadable type.
    if (LHSExpr->getType()->isOverloadableType() ||
        RHSExpr->getType()->isOverloadableType())
      return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
  }

  // Build a built-in binary operation.
  return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
}

ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
                                      UnaryOperatorKind Opc,
                                      Expr *InputExpr) {
  ExprResult Input = InputExpr;
  ExprValueKind VK = VK_RValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType resultType;

  // HLSL Change Starts
  if (getLangOpts().HLSL) {
    resultType = hlsl::CheckUnaryOpForHLSL(*this, OpLoc, Opc, Input, VK, OK);
    goto CasesHandled;
  }
  // HLSL Change Ends

  switch (Opc) {
  case UO_PreInc:
  case UO_PreDec:
  case UO_PostInc:
  case UO_PostDec:
    resultType = CheckIncrementDecrementOperand(*this, Input.get(), VK, OK,
                                                OpLoc,
                                                Opc == UO_PreInc ||
                                                Opc == UO_PostInc,
                                                Opc == UO_PreInc ||
                                                Opc == UO_PreDec);
    break;
  case UO_AddrOf:
    resultType = CheckAddressOfOperand(Input, OpLoc);
    RecordModifiableNonNullParam(*this, InputExpr);
    break;
  case UO_Deref: {
    Input = DefaultFunctionArrayLvalueConversion(Input.get());
    if (Input.isInvalid()) return ExprError();
    resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc);
    break;
  }
  case UO_Plus:
  case UO_Minus:
    Input = UsualUnaryConversions(Input.get());
    if (Input.isInvalid()) return ExprError();
    resultType = Input.get()->getType();
    if (resultType->isDependentType())
      break;
    if (resultType->isArithmeticType()) // C99 6.5.3.3p1
      break;
    else if (resultType->isVectorType() &&
             // The z vector extensions don't allow + or - with bool vectors.
             (!Context.getLangOpts().ZVector ||
              resultType->getAs<VectorType>()->getVectorKind() !=
              VectorType::AltiVecBool))
      break;
    else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
             Opc == UO_Plus &&
             resultType->isPointerType())
      break;

    return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
      << resultType << Input.get()->getSourceRange());

  case UO_Not: // bitwise complement
    Input = UsualUnaryConversions(Input.get());
    if (Input.isInvalid())
      return ExprError();
    resultType = Input.get()->getType();
    if (resultType->isDependentType())
      break;
    // C99 6.5.3.3p1. We allow complex int and float as a GCC extension.
    if (resultType->isComplexType() || resultType->isComplexIntegerType())
      // C99 does not support '~' for complex conjugation.
      Diag(OpLoc, diag::ext_integer_complement_complex)
          << resultType << Input.get()->getSourceRange();
    else if (resultType->hasIntegerRepresentation())
      break;
    else if (resultType->isExtVectorType()) {
      if (Context.getLangOpts().OpenCL) {
        // OpenCL v1.1 s6.3.f: The bitwise operator not (~) does not operate
        // on vector float types.
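        // For example, '~' applied to an OpenCL 'float4' is rejected by the
        // element-type check below.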
QualType T = resultType->getAs<ExtVectorType>()->getElementType(); if (!T->isIntegerType()) return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } break; } else { return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } break; case UO_LNot: // logical negation // Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5). Input = DefaultFunctionArrayLvalueConversion(Input.get()); if (Input.isInvalid()) return ExprError(); resultType = Input.get()->getType(); // Though we still have to promote half FP to float... if (resultType->isHalfType() && !Context.getLangOpts().NativeHalfType) { Input = ImpCastExprToType(Input.get(), Context.FloatTy, CK_FloatingCast).get(); resultType = Context.FloatTy; } if (resultType->isDependentType()) break; if (resultType->isScalarType() && !isScopedEnumerationType(resultType)) { // C99 6.5.3.3p1: ok, fallthrough; if (Context.getLangOpts().CPlusPlus) { // C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9: // operand contextually converted to bool. Input = ImpCastExprToType(Input.get(), Context.BoolTy, ScalarTypeToBooleanCastKind(resultType)); } else if (Context.getLangOpts().OpenCL && Context.getLangOpts().OpenCLVersion < 120) { // OpenCL v1.1 6.3.h: The logical operator not (!) does not // operate on scalar float types. if (!resultType->isIntegerType()) return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } } else if (resultType->isExtVectorType()) { if (Context.getLangOpts().OpenCL && Context.getLangOpts().OpenCLVersion < 120) { // OpenCL v1.1 6.3.h: The logical operator not (!) does not // operate on vector float types. QualType T = resultType->getAs<ExtVectorType>()->getElementType(); if (!T->isIntegerType()) return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } // Vector logical not returns the signed variant of the operand type. resultType = GetSignedVectorType(resultType); break; } else { return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr) << resultType << Input.get()->getSourceRange()); } // LNot always has type int. C99 6.5.3.3p5. // In C++, it's bool. C++ 5.3.1p8 resultType = Context.getLogicalOperationType(); break; case UO_Real: case UO_Imag: resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real); // _Real maps ordinary l-values into ordinary l-values. _Imag maps ordinary // complex l-values to ordinary l-values and all other values to r-values. if (Input.isInvalid()) return ExprError(); if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) { if (Input.get()->getValueKind() != VK_RValue && Input.get()->getObjectKind() == OK_Ordinary) VK = Input.get()->getValueKind(); } else if (!getLangOpts().CPlusPlus) { // In C, a volatile scalar is read by __imag. In C++, it is not. Input = DefaultLvalueConversion(Input.get()); } break; case UO_Extension: resultType = Input.get()->getType(); VK = Input.get()->getValueKind(); OK = Input.get()->getObjectKind(); break; } CasesHandled: // HLSL Change: add label to skip C/C++ unary operator processing if (resultType.isNull() || Input.isInvalid()) return ExprError(); // Check for array bounds violations in the operand of the UnaryOperator, // except for the '*' and '&' operators that have to be handled specially // by CheckArrayAccess (as there are special cases like &array[arraysize] // that are explicitly defined as valid by the standard). 
if (Opc != UO_AddrOf && Opc != UO_Deref) CheckArrayAccess(Input.get()); return new (Context) UnaryOperator(Input.get(), Opc, resultType, VK, OK, OpLoc); } /// \brief Determine whether the given expression is a qualified member /// access expression, of a form that could be turned into a pointer to member /// with the address-of operator. static bool isQualifiedMemberAccess(Expr *E) { if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) { if (!DRE->getQualifier()) return false; ValueDecl *VD = DRE->getDecl(); if (!VD->isCXXClassMember()) return false; if (isa<FieldDecl>(VD) || isa<IndirectFieldDecl>(VD)) return true; if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(VD)) return Method->isInstance(); return false; } if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) { if (!ULE->getQualifier()) return false; for (UnresolvedLookupExpr::decls_iterator D = ULE->decls_begin(), DEnd = ULE->decls_end(); D != DEnd; ++D) { if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*D)) { if (Method->isInstance()) return true; } else { // Overload set does not contain methods. break; } } return false; } return false; } ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input) { // HLSL Change Starts - placeholders and overloaded operators not supported if (getLangOpts().HLSL) return CreateBuiltinUnaryOp(OpLoc, Opc, Input); // HLSL Change Ends // First things first: handle placeholders so that the // overloaded-operator check considers the right type. if (const BuiltinType *pty = Input->getType()->getAsPlaceholderType()) { // Increment and decrement of pseudo-object references. if (pty->getKind() == BuiltinType::PseudoObject && UnaryOperator::isIncrementDecrementOp(Opc)) return checkPseudoObjectIncDec(S, OpLoc, Opc, Input); // extension is always a builtin operator. if (Opc == UO_Extension) return CreateBuiltinUnaryOp(OpLoc, Opc, Input); // & gets special logic for several kinds of placeholder. // The builtin code knows what to do. if (Opc == UO_AddrOf && (pty->getKind() == BuiltinType::Overload || pty->getKind() == BuiltinType::UnknownAny || pty->getKind() == BuiltinType::BoundMember)) return CreateBuiltinUnaryOp(OpLoc, Opc, Input); // Anything else needs to be handled now. ExprResult Result = CheckPlaceholderExpr(Input); if (Result.isInvalid()) return ExprError(); Input = Result.get(); } if (getLangOpts().CPlusPlus && Input->getType()->isOverloadableType() && UnaryOperator::getOverloadedOperator(Opc) != OO_None && !(Opc == UO_AddrOf && isQualifiedMemberAccess(Input))) { // Find all of the overloaded operators visible from this // point. We perform both an operator-name lookup from the local // scope and an argument-dependent lookup based on the types of // the arguments. UnresolvedSet<16> Functions; OverloadedOperatorKind OverOp = UnaryOperator::getOverloadedOperator(Opc); if (S && OverOp != OO_None) LookupOverloadedOperatorName(OverOp, S, Input->getType(), QualType(), Functions); return CreateOverloadedUnaryOp(OpLoc, Opc, Functions, Input); } return CreateBuiltinUnaryOp(OpLoc, Opc, Input); } // Unary Operators. 'Tok' is the token for the operator. ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input) { return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input); } /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl) { TheDecl->markUsed(Context); // Create the AST node. 
The address of a label always has type 'void*'. return new (Context) AddrLabelExpr(OpLoc, LabLoc, TheDecl, Context.getPointerType(Context.VoidTy)); } /// Given the last statement in a statement-expression, check whether /// the result is a producing expression (like a call to an /// ns_returns_retained function) and, if so, rebuild it to hoist the /// release out of the full-expression. Otherwise, return null. /// Cannot fail. static Expr *maybeRebuildARCConsumingStmt(Stmt *Statement) { // Should always be wrapped with one of these. ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(Statement); if (!cleanups) return nullptr; ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(cleanups->getSubExpr()); if (!cast || cast->getCastKind() != CK_ARCConsumeObject) return nullptr; // Splice out the cast. This shouldn't modify any interesting // features of the statement. Expr *producer = cast->getSubExpr(); assert(producer->getType() == cast->getType()); assert(producer->getValueKind() == cast->getValueKind()); cleanups->setSubExpr(producer); return cleanups; } void Sema::ActOnStartStmtExpr() { PushExpressionEvaluationContext(ExprEvalContexts.back().Context); } void Sema::ActOnStmtExprError() { // Note that function is also called by TreeTransform when leaving a // StmtExpr scope without rebuilding anything. DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); } ExprResult Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc) { // "({..})" assert(SubStmt && isa<CompoundStmt>(SubStmt) && "Invalid action invocation!"); CompoundStmt *Compound = cast<CompoundStmt>(SubStmt); if (hasAnyUnrecoverableErrorsInThisFunction()) DiscardCleanupsInEvaluationContext(); assert(!ExprNeedsCleanups && "cleanups within StmtExpr not correctly bound!"); PopExpressionEvaluationContext(); // FIXME: there are a variety of strange constraints to enforce here, for // example, it is not possible to goto into a stmt expression apparently. // More semantic analysis is needed. // If there are sub-stmts in the compound stmt, take the type of the last one // as the type of the stmtexpr. QualType Ty = Context.VoidTy; bool StmtExprMayBindToTemp = false; if (!Compound->body_empty()) { Stmt *LastStmt = Compound->body_back(); LabelStmt *LastLabelStmt = nullptr; // If LastStmt is a label, skip down through into the body. while (LabelStmt *Label = dyn_cast<LabelStmt>(LastStmt)) { LastLabelStmt = Label; LastStmt = Label->getSubStmt(); } if (Expr *LastE = dyn_cast<Expr>(LastStmt)) { // Do function/array conversion on the last expression, but not // lvalue-to-rvalue. However, initialize an unqualified type. ExprResult LastExpr = DefaultFunctionArrayConversion(LastE); if (LastExpr.isInvalid()) return ExprError(); Ty = LastExpr.get()->getType().getUnqualifiedType(); if (!Ty->isDependentType() && !LastExpr.get()->isTypeDependent()) { // In ARC, if the final expression ends in a consume, splice // the consume out and bind it later. In the alternate case // (when dealing with a retainable type), the result // initialization will create a produce. In both cases the // result will be +1, and we'll need to balance that out with // a bind. 
        if (Expr *rebuiltLastStmt
              = maybeRebuildARCConsumingStmt(LastExpr.get())) {
          LastExpr = rebuiltLastStmt;
        } else {
          LastExpr = PerformCopyInitialization(
              InitializedEntity::InitializeResult(LPLoc, Ty, false),
              SourceLocation(), LastExpr);
        }

        if (LastExpr.isInvalid())
          return ExprError();
        if (LastExpr.get() != nullptr) {
          if (!LastLabelStmt)
            Compound->setLastStmt(LastExpr.get());
          else
            LastLabelStmt->setSubStmt(LastExpr.get());
          StmtExprMayBindToTemp = true;
        }
      }
    }
  }

  // FIXME: Check that expression type is complete/non-abstract; statement
  // expressions are not lvalues.
  Expr *ResStmtExpr = new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc);
  if (StmtExprMayBindToTemp)
    return MaybeBindToTemporary(ResStmtExpr);
  return ResStmtExpr;
}

ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                      TypeSourceInfo *TInfo,
                                      OffsetOfComponent *CompPtr,
                                      unsigned NumComponents,
                                      SourceLocation RParenLoc) {
  QualType ArgTy = TInfo->getType();
  bool Dependent = ArgTy->isDependentType();
  SourceRange TypeRange = TInfo->getTypeLoc().getLocalSourceRange();

  // We must have at least one component that refers to the type, and the first
  // one is known to be a field designator.  Verify that the ArgTy represents
  // a struct/union/class.
  if (!Dependent && !ArgTy->isRecordType())
    return ExprError(Diag(BuiltinLoc, diag::err_offsetof_record_type)
                       << ArgTy << TypeRange);

  // Type must be complete per C99 7.17p3 because declaring a variable
  // with an incomplete type would be ill-formed.
  if (!Dependent
      && RequireCompleteType(BuiltinLoc, ArgTy,
                             diag::err_offsetof_incomplete_type, TypeRange))
    return ExprError();

  // offsetof with non-identifier designators (e.g. "offsetof(x, a.b[c])") is a
  // GCC extension; diagnose it.
  // FIXME: This diagnostic isn't actually visible because the location is in
  // a system header!
  if (NumComponents != 1)
    Diag(BuiltinLoc, diag::ext_offsetof_extended_field_designator)
      << SourceRange(CompPtr[1].LocStart, CompPtr[NumComponents-1].LocEnd);

  bool DidWarnAboutNonPOD = false;
  QualType CurrentType = ArgTy;
  typedef OffsetOfExpr::OffsetOfNode OffsetOfNode;
  SmallVector<OffsetOfNode, 4> Comps;
  SmallVector<Expr*, 4> Exprs;
  for (unsigned i = 0; i != NumComponents; ++i) {
    const OffsetOfComponent &OC = CompPtr[i];
    if (OC.isBrackets) {
      // Offset of an array sub-field.  TODO: Should we allow vector elements?
      if (!CurrentType->isDependentType()) {
        const ArrayType *AT = Context.getAsArrayType(CurrentType);
        if (!AT)
          return ExprError(Diag(OC.LocEnd, diag::err_offsetof_array_type)
                           << CurrentType);
        CurrentType = AT->getElementType();
      } else
        CurrentType = Context.DependentTy;

      ExprResult IdxRval = DefaultLvalueConversion(static_cast<Expr*>(OC.U.E));
      if (IdxRval.isInvalid())
        return ExprError();
      Expr *Idx = IdxRval.get();

      // The expression must be an integral expression.
      // FIXME: An integral constant expression?
      if (!Idx->isTypeDependent() && !Idx->isValueDependent() &&
          !Idx->getType()->isIntegerType())
        return ExprError(Diag(Idx->getLocStart(),
                              diag::err_typecheck_subscript_not_integer)
                         << Idx->getSourceRange());

      // Record this array index.
      Comps.push_back(OffsetOfNode(OC.LocStart, Exprs.size(), OC.LocEnd));
      Exprs.push_back(Idx);
      continue;
    }

    // Offset of a field.
    if (CurrentType->isDependentType()) {
      // We have the offset of a field, but we can't look into the dependent
      // type. Just record the identifier of the field.
      Comps.push_back(OffsetOfNode(OC.LocStart, OC.U.IdentInfo, OC.LocEnd));
      CurrentType = Context.DependentTy;
      continue;
    }

    // We need to have a complete type to look into.
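    // For example, in 'offsetof(Outer, inner.field)', the type of 'inner'
    // must be complete before 'field' can be looked up inside it.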
if (RequireCompleteType(OC.LocStart, CurrentType, diag::err_offsetof_incomplete_type)) return ExprError(); // Look for the designated field. const RecordType *RC = CurrentType->getAs<RecordType>(); if (!RC) return ExprError(Diag(OC.LocEnd, diag::err_offsetof_record_type) << CurrentType); RecordDecl *RD = RC->getDecl(); // C++ [lib.support.types]p5: // The macro offsetof accepts a restricted set of type arguments in this // International Standard. type shall be a POD structure or a POD union // (clause 9). // C++11 [support.types]p4: // If type is not a standard-layout class (Clause 9), the results are // undefined. if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { bool IsSafe = LangOpts.CPlusPlus11? CRD->isStandardLayout() : CRD->isPOD(); unsigned DiagID = LangOpts.CPlusPlus11? diag::ext_offsetof_non_standardlayout_type : diag::ext_offsetof_non_pod_type; if (!IsSafe && !DidWarnAboutNonPOD && DiagRuntimeBehavior(BuiltinLoc, nullptr, PDiag(DiagID) << SourceRange(CompPtr[0].LocStart, OC.LocEnd) << CurrentType)) DidWarnAboutNonPOD = true; } // Look for the field. LookupResult R(*this, OC.U.IdentInfo, OC.LocStart, LookupMemberName); LookupQualifiedName(R, RD); FieldDecl *MemberDecl = R.getAsSingle<FieldDecl>(); IndirectFieldDecl *IndirectMemberDecl = nullptr; if (!MemberDecl) { if ((IndirectMemberDecl = R.getAsSingle<IndirectFieldDecl>())) MemberDecl = IndirectMemberDecl->getAnonField(); } if (!MemberDecl) return ExprError(Diag(BuiltinLoc, diag::err_no_member) << OC.U.IdentInfo << RD << SourceRange(OC.LocStart, OC.LocEnd)); // C99 7.17p3: // (If the specified member is a bit-field, the behavior is undefined.) // // We diagnose this as an error. if (MemberDecl->isBitField()) { Diag(OC.LocEnd, diag::err_offsetof_bitfield) << MemberDecl->getDeclName() << SourceRange(BuiltinLoc, RParenLoc); Diag(MemberDecl->getLocation(), diag::note_bitfield_decl); return ExprError(); } RecordDecl *Parent = MemberDecl->getParent(); if (IndirectMemberDecl) Parent = cast<RecordDecl>(IndirectMemberDecl->getDeclContext()); // If the member was found in a base class, introduce OffsetOfNodes for // the base class indirections. 
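    // For example, given 'struct B { int x; }; struct D : B {};',
    // 'offsetof(D, x)' records a D->B base node before the field node.
    // Fields of virtual bases are rejected below, since their offsets need
    // not be compile-time constants.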
    CXXBasePaths Paths;
    if (IsDerivedFrom(CurrentType, Context.getTypeDeclType(Parent), Paths)) {
      if (Paths.getDetectedVirtual()) {
        Diag(OC.LocEnd, diag::err_offsetof_field_of_virtual_base)
          << MemberDecl->getDeclName()
          << SourceRange(BuiltinLoc, RParenLoc);
        return ExprError();
      }

      CXXBasePath &Path = Paths.front();
      for (CXXBasePath::iterator B = Path.begin(), BEnd = Path.end();
           B != BEnd; ++B)
        Comps.push_back(OffsetOfNode(B->Base));
    }

    if (IndirectMemberDecl) {
      for (auto *FI : IndirectMemberDecl->chain()) {
        assert(isa<FieldDecl>(FI));
        Comps.push_back(OffsetOfNode(OC.LocStart,
                                     cast<FieldDecl>(FI), OC.LocEnd));
      }
    } else
      Comps.push_back(OffsetOfNode(OC.LocStart, MemberDecl, OC.LocEnd));

    CurrentType = MemberDecl->getType().getNonReferenceType();
  }

  return OffsetOfExpr::Create(Context, Context.getSizeType(), BuiltinLoc, TInfo,
                              Comps, Exprs, RParenLoc);
}

ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
                                      SourceLocation BuiltinLoc,
                                      SourceLocation TypeLoc,
                                      ParsedType ParsedArgTy,
                                      OffsetOfComponent *CompPtr,
                                      unsigned NumComponents,
                                      SourceLocation RParenLoc) {
  TypeSourceInfo *ArgTInfo;
  QualType ArgTy = GetTypeFromParser(ParsedArgTy, &ArgTInfo);
  if (ArgTy.isNull())
    return ExprError();

  if (!ArgTInfo)
    ArgTInfo = Context.getTrivialTypeSourceInfo(ArgTy, TypeLoc);

  return BuildBuiltinOffsetOf(BuiltinLoc, ArgTInfo, CompPtr, NumComponents,
                              RParenLoc);
}

ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
                                 Expr *CondExpr,
                                 Expr *LHSExpr, Expr *RHSExpr,
                                 SourceLocation RPLoc) {
  assert((CondExpr && LHSExpr && RHSExpr) && "Missing type argument(s)");

  ExprValueKind VK = VK_RValue;
  ExprObjectKind OK = OK_Ordinary;
  QualType resType;
  bool ValueDependent = false;
  bool CondIsTrue = false;
  if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) {
    resType = Context.DependentTy;
    ValueDependent = true;
  } else {
    // The conditional expression is required to be a constant expression.
    llvm::APSInt condEval(32);
    ExprResult CondICE
      = VerifyIntegerConstantExpression(CondExpr, &condEval,
          diag::err_typecheck_choose_expr_requires_constant, false);
    if (CondICE.isInvalid())
      return ExprError();
    CondExpr = CondICE.get();
    CondIsTrue = condEval.getZExtValue();

    // If the condition is nonzero, then the AST type is the same as the
    // LHSExpr.
    Expr *ActiveExpr = CondIsTrue ? LHSExpr : RHSExpr;

    resType = ActiveExpr->getType();
    ValueDependent = ActiveExpr->isValueDependent();
    VK = ActiveExpr->getValueKind();
    OK = ActiveExpr->getObjectKind();
  }

  return new (Context) ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr,
                                  resType, VK, OK, RPLoc, CondIsTrue,
                                  resType->isDependentType(),
                                  ValueDependent);
}

//===----------------------------------------------------------------------===//
// Clang Extensions.
//===----------------------------------------------------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is started.
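/// It creates the BlockDecl, pushes a block scope, and enters a fresh
/// potentially-evaluated expression context; ActOnBlockError and
/// ActOnBlockStmtExpr perform the matching teardown.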
void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) { BlockDecl *Block = BlockDecl::Create(Context, CurContext, CaretLoc); if (LangOpts.CPlusPlus) { Decl *ManglingContextDecl; if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(Block->getDeclContext(), ManglingContextDecl)) { unsigned ManglingNumber = MCtx->getManglingNumber(Block); Block->setBlockMangling(ManglingNumber, ManglingContextDecl); } } PushBlockScope(CurScope, Block); CurContext->addDecl(Block); if (CurScope) PushDeclContext(CurScope, Block); else CurContext = Block; getCurBlock()->HasImplicitReturnType = true; // Enter a new evaluation context to insulate the block from any // cleanups from the enclosing full-expression. PushExpressionEvaluationContext(PotentiallyEvaluated); } void Sema::ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope) { assert(ParamInfo.getIdentifier() == nullptr && "block-id should have no identifier!"); assert(ParamInfo.getContext() == Declarator::BlockLiteralContext); BlockScopeInfo *CurBlock = getCurBlock(); TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope); QualType T = Sig->getType(); // FIXME: We should allow unexpanded parameter packs here, but that would, // in turn, make the block expression contain unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(CaretLoc, Sig, UPPC_Block)) { // Drop the parameters. FunctionProtoType::ExtProtoInfo EPI; EPI.HasTrailingReturn = false; EPI.TypeQuals |= DeclSpec::TQ_const; T = Context.getFunctionType(Context.DependentTy, None, EPI, None); // HLSL Change - add param mods Sig = Context.getTrivialTypeSourceInfo(T); } // GetTypeForDeclarator always produces a function type for a block // literal signature. Furthermore, it is always a FunctionProtoType // unless the function was written with a typedef. assert(T->isFunctionType() && "GetTypeForDeclarator made a non-function block signature"); // Look for an explicit signature in that function type. FunctionProtoTypeLoc ExplicitSignature; TypeLoc tmp = Sig->getTypeLoc().IgnoreParens(); if ((ExplicitSignature = tmp.getAs<FunctionProtoTypeLoc>())) { // Check whether that explicit signature was synthesized by // GetTypeForDeclarator. If so, don't save that as part of the // written signature. if (ExplicitSignature.getLocalRangeBegin() == ExplicitSignature.getLocalRangeEnd()) { // This would be much cheaper if we stored TypeLocs instead of // TypeSourceInfos. TypeLoc Result = ExplicitSignature.getReturnLoc(); unsigned Size = Result.getFullDataSize(); Sig = Context.CreateTypeSourceInfo(Result.getType(), Size); Sig->getTypeLoc().initializeFullCopy(Result, Size); ExplicitSignature = FunctionProtoTypeLoc(); } } CurBlock->TheDecl->setSignatureAsWritten(Sig); CurBlock->FunctionType = T; const FunctionType *Fn = T->getAs<FunctionType>(); QualType RetTy = Fn->getReturnType(); bool isVariadic = (isa<FunctionProtoType>(Fn) && cast<FunctionProtoType>(Fn)->isVariadic()); CurBlock->TheDecl->setIsVariadic(isVariadic); // Context.DependentTy is used as a placeholder for a missing block // return type. TODO: what should we do with declarators like: // ^ * { ... } // If the answer is "apply template argument deduction".... if (RetTy != Context.DependentTy) { CurBlock->ReturnType = RetTy; CurBlock->TheDecl->setBlockMissingReturnType(false); CurBlock->HasImplicitReturnType = false; } // Push block parameters from the declarator if we had them. 
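  // These come either from an explicit signature, e.g. ^(int x) { ... }, or
  // are synthesized from a typedef'd prototype, e.g. ^fntype { ... }.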
SmallVector<ParmVarDecl*, 8> Params; if (ExplicitSignature) { for (unsigned I = 0, E = ExplicitSignature.getNumParams(); I != E; ++I) { ParmVarDecl *Param = ExplicitSignature.getParam(I); if (Param->getIdentifier() == nullptr && !Param->isImplicit() && !Param->isInvalidDecl() && !getLangOpts().CPlusPlus) Diag(Param->getLocation(), diag::err_parameter_name_omitted); Params.push_back(Param); } // Fake up parameter variables if we have a typedef, like // ^ fntype { ... } } else if (const FunctionProtoType *Fn = T->getAs<FunctionProtoType>()) { for (const auto &I : Fn->param_types()) { ParmVarDecl *Param = BuildParmVarDeclForTypedef( CurBlock->TheDecl, ParamInfo.getLocStart(), I); Params.push_back(Param); } } // Set the parameters on the block decl. if (!Params.empty()) { CurBlock->TheDecl->setParams(Params); CheckParmsForFunctionDef(CurBlock->TheDecl->param_begin(), CurBlock->TheDecl->param_end(), /*CheckParameterNames=*/false); } // Finally we can process decl attributes. ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo); // Put the parameter variables in scope. for (auto AI : CurBlock->TheDecl->params()) { AI->setOwningFunction(CurBlock->TheDecl); // If this has an identifier, add it to the scope stack. if (AI->getIdentifier()) { CheckShadow(CurBlock->TheScope, AI); PushOnScopeChains(AI, CurBlock->TheScope); } } } /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) { // Leave the expression-evaluation context. DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); // Pop off CurBlock, handle nested blocks. PopDeclContext(); PopFunctionScopeInfo(); } /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope) { // HLSL Changes Start llvm_unreachable("block statements unsupported and unreachable in HLSL"); #if 0 // HLSL Changes End // If blocks are disabled, emit an error. if (!LangOpts.Blocks) Diag(CaretLoc, diag::err_blocks_disable); // Leave the expression-evaluation context. if (hasAnyUnrecoverableErrorsInThisFunction()) DiscardCleanupsInEvaluationContext(); assert(!ExprNeedsCleanups && "cleanups within block not correctly bound!"); PopExpressionEvaluationContext(); if (getLangOpts().HLSL) { return ExprError(); } BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back()); if (BSI->HasImplicitReturnType) deduceClosureReturnType(*BSI); PopDeclContext(); QualType RetTy = Context.VoidTy; if (!BSI->ReturnType.isNull()) RetTy = BSI->ReturnType; bool NoReturn = BSI->TheDecl->hasAttr<NoReturnAttr>(); QualType BlockTy; // Set the captured variables on the block. // FIXME: Share capture structure between BlockDecl and CapturingScopeInfo! SmallVector<BlockDecl::Capture, 4> Captures; for (unsigned i = 0, e = BSI->Captures.size(); i != e; i++) { CapturingScopeInfo::Capture &Cap = BSI->Captures[i]; if (Cap.isThisCapture()) continue; BlockDecl::Capture NewCap(Cap.getVariable(), Cap.isBlockCapture(), Cap.isNested(), Cap.getInitExpr()); Captures.push_back(NewCap); } BSI->TheDecl->setCaptures(Context, Captures.begin(), Captures.end(), BSI->CXXThisCaptureIndex != 0); // If the user wrote a function type in some form, try to use that. 
if (!BSI->FunctionType.isNull()) { const FunctionType *FTy = BSI->FunctionType->getAs<FunctionType>(); FunctionType::ExtInfo Ext = FTy->getExtInfo(); if (NoReturn && !Ext.getNoReturn()) Ext = Ext.withNoReturn(true); // Turn protoless block types into nullary block types. if (isa<FunctionNoProtoType>(FTy)) { FunctionProtoType::ExtProtoInfo EPI; EPI.ExtInfo = Ext; BlockTy = Context.getFunctionType(RetTy, None, EPI); // Otherwise, if we don't need to change anything about the function type, // preserve its sugar structure. } else if (FTy->getReturnType() == RetTy && (!NoReturn || FTy->getNoReturnAttr())) { BlockTy = BSI->FunctionType; // Otherwise, make the minimal modifications to the function type. } else { const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy); FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo(); EPI.TypeQuals = 0; // FIXME: silently? EPI.ExtInfo = Ext; BlockTy = Context.getFunctionType(RetTy, FPT->getParamTypes(), EPI); } // If we don't have a function type, just build one from nothing. } else { FunctionProtoType::ExtProtoInfo EPI; EPI.ExtInfo = FunctionType::ExtInfo().withNoReturn(NoReturn); BlockTy = Context.getFunctionType(RetTy, None, EPI); } DiagnoseUnusedParameters(BSI->TheDecl->param_begin(), BSI->TheDecl->param_end()); BlockTy = Context.getBlockPointerType(BlockTy); // If needed, diagnose invalid gotos and switches in the block. if (getCurFunction()->NeedsScopeChecking() && !PP.isCodeCompletionEnabled()) DiagnoseInvalidJumps(cast<CompoundStmt>(Body)); BSI->TheDecl->setBody(cast<CompoundStmt>(Body)); // Try to apply the named return value optimization. We have to check again // if we can do this, though, because blocks keep return statements around // to deduce an implicit return type. if (getLangOpts().CPlusPlus && RetTy->isRecordType() && !BSI->TheDecl->isDependentContext()) computeNRVO(Body, BSI); BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy); AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy(); PopFunctionScopeInfo(&WP, Result->getBlockDecl(), Result); // If the block isn't obviously global, i.e. it captures anything at // all, then we need to do a few things in the surrounding context: if (Result->getBlockDecl()->hasCaptures()) { // First, this expression has a new cleanup object. ExprCleanupObjects.push_back(Result->getBlockDecl()); ExprNeedsCleanups = true; // It also gets a branch-protected scope if any of the captured // variables needs destruction. for (const auto &CI : Result->getBlockDecl()->captures()) { const VarDecl *var = CI.getVariable(); if (var->getType().isDestructedType() != QualType::DK_none) { getCurFunction()->setHasBranchProtectedScope(); break; } } } return Result; #endif // HLSL Change } ExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc) { TypeSourceInfo *TInfo; GetTypeFromParser(Ty, &TInfo); return BuildVAArgExpr(BuiltinLoc, E, TInfo, RPLoc); } ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc) { Expr *OrigExpr = E; // Get the va_list type QualType VaListType = Context.getBuiltinVaListType(); if (VaListType->isArrayType()) { // Deal with implicit array decay; for example, on x86-64, // va_list is an array, but it's supposed to decay to // a pointer for va_arg. VaListType = Context.getArrayDecayedType(VaListType); // Make sure the input expression also decays appropriately. 
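    // UsualUnaryConversions performs the matching array-to-pointer decay on
    // the argument expression itself.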
ExprResult Result = UsualUnaryConversions(E); if (Result.isInvalid()) return ExprError(); E = Result.get(); } else if (VaListType->isRecordType() && getLangOpts().CPlusPlus) { // If va_list is a record type and we are compiling in C++ mode, // check the argument using reference binding. InitializedEntity Entity = InitializedEntity::InitializeParameter(Context, Context.getLValueReferenceType(VaListType), false); ExprResult Init = PerformCopyInitialization(Entity, SourceLocation(), E); if (Init.isInvalid()) return ExprError(); E = Init.getAs<Expr>(); } else { // Otherwise, the va_list argument must be an l-value because // it is modified by va_arg. if (!E->isTypeDependent() && CheckForModifiableLvalue(E, BuiltinLoc, *this)) return ExprError(); } if (!E->isTypeDependent() && !Context.hasSameType(VaListType, E->getType())) { return ExprError(Diag(E->getLocStart(), diag::err_first_argument_to_va_arg_not_of_type_va_list) << OrigExpr->getType() << E->getSourceRange()); } if (!TInfo->getType()->isDependentType()) { if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(), diag::err_second_parameter_to_va_arg_incomplete, TInfo->getTypeLoc())) return ExprError(); if (RequireNonAbstractType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(), diag::err_second_parameter_to_va_arg_abstract, TInfo->getTypeLoc())) return ExprError(); if (!TInfo->getType().isPODType(Context)) { Diag(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType()->isObjCLifetimeType() ? diag::warn_second_parameter_to_va_arg_ownership_qualified : diag::warn_second_parameter_to_va_arg_not_pod) << TInfo->getType() << TInfo->getTypeLoc().getSourceRange(); } // Check for va_arg where arguments of the given type will be promoted // (i.e. this va_arg is guaranteed to have undefined behavior). QualType PromoteType; if (TInfo->getType()->isPromotableIntegerType()) { PromoteType = Context.getPromotedIntegerType(TInfo->getType()); if (Context.typesAreCompatible(PromoteType, TInfo->getType())) PromoteType = QualType(); } if (TInfo->getType()->isSpecificBuiltinType(BuiltinType::Float)) PromoteType = Context.DoubleTy; if (!PromoteType.isNull()) DiagRuntimeBehavior(TInfo->getTypeLoc().getBeginLoc(), E, PDiag(diag::warn_second_parameter_to_va_arg_never_compatible) << TInfo->getType() << PromoteType << TInfo->getTypeLoc().getSourceRange()); } QualType T = TInfo->getType().getNonLValueExprType(Context); return new (Context) VAArgExpr(BuiltinLoc, E, TInfo, RPLoc, T); } ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) { // The type of __null will be int or long, depending on the size of // pointers on the target. QualType Ty; unsigned pw = Context.getTargetInfo().getPointerWidth(0); if (pw == Context.getTargetInfo().getIntWidth()) Ty = Context.IntTy; else if (pw == Context.getTargetInfo().getLongWidth()) Ty = Context.LongTy; else if (pw == Context.getTargetInfo().getLongLongWidth()) Ty = Context.LongLongTy; else { llvm_unreachable("I don't know size of pointer!"); } return new (Context) GNUNullExpr(Ty, TokenLoc); } bool Sema::ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&Exp) { if (!getLangOpts().ObjC1) return false; const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>(); if (!PT) return false; if (!PT->isObjCIdType()) { // Check if the destination is the 'NSString' interface. 
const ObjCInterfaceDecl *ID = PT->getInterfaceDecl(); if (!ID || !ID->getIdentifier()->isStr("NSString")) return false; } // Ignore any parens, implicit casts (should only be // array-to-pointer decays), and not-so-opaque values. The last is // important for making this trigger for property assignments. Expr *SrcExpr = Exp->IgnoreParenImpCasts(); if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(SrcExpr)) if (OV->getSourceExpr()) SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts(); StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr); if (!SL || !SL->isAscii()) return false; Diag(SL->getLocStart(), diag::err_missing_atsign_prefix) << FixItHint::CreateInsertion(SL->getLocStart(), "@"); Exp = BuildObjCStringLiteral(SL->getLocStart(), SL).get(); return true; } bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained) { if (Complained) *Complained = false; // Decode the result (notice that AST's are still created for extensions). bool CheckInferredResultType = false; bool isInvalid = false; unsigned DiagKind = 0; FixItHint Hint; ConversionFixItGenerator ConvHints; bool MayHaveConvFixit = false; bool MayHaveFunctionDiff = false; const ObjCInterfaceDecl *IFace = nullptr; const ObjCProtocolDecl *PDecl = nullptr; switch (ConvTy) { case Compatible: DiagnoseAssignmentEnum(DstType, SrcType, SrcExpr); return false; case PointerToInt: DiagKind = diag::ext_typecheck_convert_pointer_int; ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); MayHaveConvFixit = true; break; case IntToPointer: DiagKind = diag::ext_typecheck_convert_int_pointer; ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); MayHaveConvFixit = true; break; case IncompatiblePointer: DiagKind = (Action == AA_Passing_CFAudited ? diag::err_arc_typecheck_convert_incompatible_pointer : diag::ext_typecheck_convert_incompatible_pointer); CheckInferredResultType = DstType->isObjCObjectPointerType() && SrcType->isObjCObjectPointerType(); if (Hint.isNull() && !CheckInferredResultType) { ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); } else if (CheckInferredResultType) { SrcType = SrcType.getUnqualifiedType(); DstType = DstType.getUnqualifiedType(); } MayHaveConvFixit = true; break; case IncompatiblePointerSign: DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign; break; case FunctionVoidPointer: DiagKind = diag::ext_typecheck_convert_pointer_void_func; break; case IncompatiblePointerDiscardsQualifiers: { // Perform array-to-pointer decay if necessary. if (SrcType->isArrayType()) SrcType = Context.getArrayDecayedType(SrcType); Qualifiers lhq = SrcType->getPointeeType().getQualifiers(); Qualifiers rhq = DstType->getPointeeType().getQualifiers(); if (lhq.getAddressSpace() != rhq.getAddressSpace()) { DiagKind = diag::err_typecheck_incompatible_address_space; break; } else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) { DiagKind = diag::err_typecheck_incompatible_ownership; break; } llvm_unreachable("unknown error case for discarding qualifiers!"); // fallthrough } case CompatiblePointerDiscardsQualifiers: // If the qualifiers lost were because we were applying the // (deprecated) C++ conversion from a string literal to a char* // (or wchar_t*), then there was no error (C++ 4.2p2). FIXME: // Ideally, this check would be performed in // checkPointerTypesForAssignment. 
However, that would require a // bit of refactoring (so that the second argument is an // expression, rather than a type), which should be done as part // of a larger effort to fix checkPointerTypesForAssignment for // C++ semantics. if (getLangOpts().CPlusPlus && IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType)) return false; DiagKind = diag::ext_typecheck_convert_discards_qualifiers; break; case IncompatibleNestedPointerQualifiers: DiagKind = diag::ext_nested_pointer_qualifier_mismatch; break; case IntToBlockPointer: DiagKind = diag::err_int_to_block_pointer; break; case IncompatibleBlockPointer: DiagKind = diag::err_typecheck_convert_incompatible_block_pointer; break; case IncompatibleObjCQualifiedId: { if (SrcType->isObjCQualifiedIdType()) { const ObjCObjectPointerType *srcOPT = SrcType->getAs<ObjCObjectPointerType>(); for (auto *srcProto : srcOPT->quals()) { PDecl = srcProto; break; } if (const ObjCInterfaceType *IFaceT = DstType->getAs<ObjCObjectPointerType>()->getInterfaceType()) IFace = IFaceT->getDecl(); } else if (DstType->isObjCQualifiedIdType()) { const ObjCObjectPointerType *dstOPT = DstType->getAs<ObjCObjectPointerType>(); for (auto *dstProto : dstOPT->quals()) { PDecl = dstProto; break; } if (const ObjCInterfaceType *IFaceT = SrcType->getAs<ObjCObjectPointerType>()->getInterfaceType()) IFace = IFaceT->getDecl(); } DiagKind = diag::warn_incompatible_qualified_id; break; } case IncompatibleVectors: DiagKind = diag::warn_incompatible_vectors; break; case IncompatibleObjCWeakRef: DiagKind = diag::err_arc_weak_unavailable_assign; break; case Incompatible: DiagKind = diag::err_typecheck_convert_incompatible; ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this); MayHaveConvFixit = true; isInvalid = true; MayHaveFunctionDiff = true; break; } QualType FirstType, SecondType; switch (Action) { case AA_Assigning: case AA_Initializing: // The destination type comes first. FirstType = DstType; SecondType = SrcType; break; case AA_Returning: case AA_Passing: case AA_Passing_CFAudited: case AA_Converting: case AA_Sending: case AA_Casting: // The source type comes first. FirstType = SrcType; SecondType = DstType; break; } PartialDiagnostic FDiag = PDiag(DiagKind); if (Action == AA_Passing_CFAudited) FDiag << FirstType << SecondType << AA_Passing << SrcExpr->getSourceRange(); else FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange(); // If we can fix the conversion, suggest the FixIts. 
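// For example (hypothetical user code), an int-to-pointer assignment such as
//   int n; int *p = n;
// is diagnosed below, and ConvHints may carry a fix-it, such as taking the
// address with '&n'.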
assert(ConvHints.isNull() || Hint.isNull()); if (!ConvHints.isNull()) { for (std::vector<FixItHint>::iterator HI = ConvHints.Hints.begin(), HE = ConvHints.Hints.end(); HI != HE; ++HI) FDiag << *HI; } else { FDiag << Hint; } if (MayHaveConvFixit) { FDiag << (unsigned) (ConvHints.Kind); } if (MayHaveFunctionDiff) HandleFunctionTypeMismatch(FDiag, SecondType, FirstType); Diag(Loc, FDiag); if (DiagKind == diag::warn_incompatible_qualified_id && PDecl && IFace && !IFace->hasDefinition()) Diag(IFace->getLocation(), diag::not_incomplete_class_and_qualified_id) << IFace->getName() << PDecl->getName(); if (SecondType == Context.OverloadTy) NoteAllOverloadCandidates(OverloadExpr::find(SrcExpr).Expression, FirstType); if (CheckInferredResultType) EmitRelatedResultTypeNote(SrcExpr); if (Action == AA_Returning && ConvTy == IncompatiblePointer) EmitRelatedResultTypeNoteForReturn(DstType); if (Complained) *Complained = true; return isInvalid; } ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result) { class SimpleICEDiagnoser : public VerifyICEDiagnoser { public: void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override { S.Diag(Loc, diag::err_expr_not_ice) << S.LangOpts.CPlusPlus << SR; } } Diagnoser; return VerifyIntegerConstantExpression(E, Result, Diagnoser); } ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold) { class IDDiagnoser : public VerifyICEDiagnoser { unsigned DiagID; public: IDDiagnoser(unsigned DiagID) : VerifyICEDiagnoser(DiagID == 0), DiagID(DiagID) { } void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) override { S.Diag(Loc, DiagID) << SR; } } Diagnoser(DiagID); return VerifyIntegerConstantExpression(E, Result, Diagnoser, AllowFold); } void Sema::VerifyICEDiagnoser::diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR) { S.Diag(Loc, diag::ext_expr_not_ice) << SR << S.LangOpts.CPlusPlus; } ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold) { SourceLocation DiagLoc = E->getLocStart(); if (getLangOpts().CPlusPlus11) { // C++11 [expr.const]p5: // If an expression of literal class type is used in a context where an // integral constant expression is required, then that class type shall // have a single non-explicit conversion function to an integral or // unscoped enumeration type ExprResult Converted; class CXX11ConvertDiagnoser : public ICEConvertDiagnoser { public: CXX11ConvertDiagnoser(bool Silent) : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, Silent, true) {} SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ice_not_integral) << T; } SemaDiagnosticBuilder diagnoseIncomplete( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ice_incomplete_type) << T; } SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, diag::err_ice_explicit_conversion) << T << ConvTy; } SemaDiagnosticBuilder noteExplicitConv( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseAmbiguous( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ice_ambiguous_conversion) << T; } SemaDiagnosticBuilder noteAmbiguous( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override 
{ return S.Diag(Conv->getLocation(), diag::note_ice_conversion_here) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { llvm_unreachable("conversion functions are permitted"); } } ConvertDiagnoser(Diagnoser.Suppress); Converted = PerformContextualImplicitConversion(DiagLoc, E, ConvertDiagnoser); if (Converted.isInvalid()) return Converted; E = Converted.get(); if (!E->getType()->isIntegralOrUnscopedEnumerationType()) return ExprError(); } else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) { // An ICE must be of integral or unscoped enumeration type. if (!Diagnoser.Suppress) Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange()); return ExprError(); } // Circumvent ICE checking in C++11 to avoid evaluating the expression twice // in the non-ICE case. if (!getLangOpts().CPlusPlus11 && E->isIntegerConstantExpr(Context)) { if (Result) *Result = E->EvaluateKnownConstInt(Context); return E; } Expr::EvalResult EvalResult; SmallVector<PartialDiagnosticAt, 8> Notes; EvalResult.Diag = &Notes; // Try to evaluate the expression, and produce diagnostics explaining why it's // not a constant expression as a side-effect. bool Folded = E->EvaluateAsRValue(EvalResult, Context) && EvalResult.Val.isInt() && !EvalResult.HasSideEffects; // In C++11, we can rely on diagnostics being produced for any expression // which is not a constant expression. If no diagnostics were produced, then // this is a constant expression. if (Folded && getLangOpts().CPlusPlus11 && Notes.empty()) { if (Result) *Result = EvalResult.Val.getInt(); return E; } // If our only note is the usual "invalid subexpression" note, just point // the caret at its location rather than producing an essentially // redundant note. if (Notes.size() == 1 && Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr) { DiagLoc = Notes[0].first; Notes.clear(); } if (!Folded || !AllowFold) { if (!Diagnoser.Suppress) { Diagnoser.diagnoseNotICE(*this, DiagLoc, E->getSourceRange()); for (unsigned I = 0, N = Notes.size(); I != N; ++I) Diag(Notes[I].first, Notes[I].second); } return ExprError(); } Diagnoser.diagnoseFold(*this, DiagLoc, E->getSourceRange()); for (unsigned I = 0, N = Notes.size(); I != N; ++I) Diag(Notes[I].first, Notes[I].second); if (Result) *Result = EvalResult.Val.getInt(); return E; } namespace { // Handle the case where we conclude that an expression which we speculatively // considered to be unevaluated is actually evaluated. class TransformToPE : public TreeTransform<TransformToPE> { typedef TreeTransform<TransformToPE> BaseTransform; public: TransformToPE(Sema &SemaRef) : BaseTransform(SemaRef) { } // Make sure we redo semantic analysis bool AlwaysRebuild() { return true; } // Make sure we handle LabelStmts correctly. // FIXME: This does the right thing, but maybe we need a more general // fix to TreeTransform? StmtResult TransformLabelStmt(LabelStmt *S) { S->getDecl()->setStmt(nullptr); return BaseTransform::TransformLabelStmt(S); } // We need to special-case DeclRefExprs referring to FieldDecls which // are not part of a member pointer formation; normal TreeTransforming // doesn't catch this case because of the way we represent them in the AST. // FIXME: This is a bit ugly; is it really the best way to handle this // case? // // Error on DeclRefExprs referring to FieldDecls.
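// Illustrative case (hypothetical): C++11 allows naming a non-static data
// member in an unevaluated operand,
//   struct S { int i; };
//   int n = sizeof(S::i);   // OK while the operand stays unevaluated
// but if the context turns out to be evaluated, the bare member reference
// must be rejected, which the override below does.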
ExprResult TransformDeclRefExpr(DeclRefExpr *E) { if (isa<FieldDecl>(E->getDecl()) && !SemaRef.isUnevaluatedContext()) return SemaRef.Diag(E->getLocation(), diag::err_invalid_non_static_member_use) << E->getDecl() << E->getSourceRange(); return BaseTransform::TransformDeclRefExpr(E); } // Exception: filter out member pointer formation ExprResult TransformUnaryOperator(UnaryOperator *E) { if (E->getOpcode() == UO_AddrOf && E->getType()->isMemberPointerType()) return E; return BaseTransform::TransformUnaryOperator(E); } ExprResult TransformLambdaExpr(LambdaExpr *E) { // Lambdas never need to be transformed. return E; } }; } ExprResult Sema::TransformToPotentiallyEvaluated(Expr *E) { assert(isUnevaluatedContext() && "Should only transform unevaluated expressions"); ExprEvalContexts.back().Context = ExprEvalContexts[ExprEvalContexts.size()-2].Context; if (isUnevaluatedContext()) return E; return TransformToPE(*this).TransformExpr(E); } void Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl, bool IsDecltype) { ExprEvalContexts.emplace_back(NewContext, ExprCleanupObjects.size(), ExprNeedsCleanups, LambdaContextDecl, IsDecltype); ExprNeedsCleanups = false; if (!MaybeODRUseExprs.empty()) std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs); } void Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype) { Decl *ClosureContextDecl = ExprEvalContexts.back().ManglingContextDecl; PushExpressionEvaluationContext(NewContext, ClosureContextDecl, IsDecltype); } void Sema::PopExpressionEvaluationContext() { ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back(); unsigned NumTypos = Rec.NumTypos; if (!Rec.Lambdas.empty()) { if (Rec.isUnevaluated() || Rec.Context == ConstantEvaluated) { unsigned D; if (Rec.isUnevaluated()) { // C++11 [expr.prim.lambda]p2: // A lambda-expression shall not appear in an unevaluated operand // (Clause 5). D = diag::err_lambda_unevaluated_operand; } else { // C++1y [expr.const]p2: // A conditional-expression e is a core constant expression unless the // evaluation of e, following the rules of the abstract machine, would // evaluate [...] a lambda-expression. D = diag::err_lambda_in_constant_expression; } for (const auto *L : Rec.Lambdas) Diag(L->getLocStart(), D); } else { // Mark the capture expressions odr-used. This was deferred // during lambda expression creation. for (auto *Lambda : Rec.Lambdas) { for (auto *C : Lambda->capture_inits()) MarkDeclarationsReferencedInExpr(C); } } } // When we are coming out of an unevaluated context, clear out any // temporaries that we may have created as part of the evaluation of // the expression in that context: they aren't relevant because they // will never be constructed. if (Rec.isUnevaluated() || Rec.Context == ConstantEvaluated) { ExprCleanupObjects.erase(ExprCleanupObjects.begin() + Rec.NumCleanupObjects, ExprCleanupObjects.end()); ExprNeedsCleanups = Rec.ParentNeedsCleanups; CleanupVarDeclMarking(); std::swap(MaybeODRUseExprs, Rec.SavedMaybeODRUseExprs); // Otherwise, merge the contexts together. } else { ExprNeedsCleanups |= Rec.ParentNeedsCleanups; MaybeODRUseExprs.insert(Rec.SavedMaybeODRUseExprs.begin(), Rec.SavedMaybeODRUseExprs.end()); } // Pop the current expression evaluation context off the stack.
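// Note on the code below: the typo count recorded while this context was
// active is propagated to the parent context, so delayed typo corrections
// created here are still diagnosed after the pop.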
ExprEvalContexts.pop_back(); if (!ExprEvalContexts.empty()) ExprEvalContexts.back().NumTypos += NumTypos; else assert(NumTypos == 0 && "There are outstanding typos after popping the " "last ExpressionEvaluationContextRecord"); } void Sema::DiscardCleanupsInEvaluationContext() { ExprCleanupObjects.erase( ExprCleanupObjects.begin() + ExprEvalContexts.back().NumCleanupObjects, ExprCleanupObjects.end()); ExprNeedsCleanups = false; MaybeODRUseExprs.clear(); } ExprResult Sema::HandleExprEvaluationContextForTypeof(Expr *E) { if (!E->getType()->isVariablyModifiedType()) return E; return TransformToPotentiallyEvaluated(E); } static bool IsPotentiallyEvaluatedContext(Sema &SemaRef) { // Do not mark anything as "used" within a dependent context; wait for // an instantiation. if (SemaRef.CurContext->isDependentContext()) return false; switch (SemaRef.ExprEvalContexts.back().Context) { case Sema::Unevaluated: case Sema::UnevaluatedAbstract: // We are in an expression that is not potentially evaluated; do nothing. // (Depending on how you read the standard, we actually do need to do // something here for null pointer constants, but the standard's // definition of a null pointer constant is completely crazy.) return false; case Sema::ConstantEvaluated: case Sema::PotentiallyEvaluated: // We are in a potentially evaluated expression (or a constant-expression // in C++03); we need to do implicit template instantiation, implicitly // define class members, and mark most declarations as used. return true; case Sema::PotentiallyEvaluatedIfUsed: // Referenced declarations will only be used if the construct in the // containing expression is used. return false; } llvm_unreachable("Invalid context"); } /// \brief Mark a function referenced, and check whether it is odr-used /// (C++ [basic.def.odr]p2, C99 6.9p3) void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse) { assert(Func && "No function?"); Func->setReferenced(); // C++11 [basic.def.odr]p3: // A function whose name appears as a potentially-evaluated expression is // odr-used if it is the unique lookup result or the selected member of a // set of overloaded functions [...]. // // We (incorrectly) mark overload resolution as an unevaluated context, so we // can just check that here. Skip the rest of this function if we've already // marked the function as used. if (Func->isUsed(/*CheckUsedAttr=*/false) || !IsPotentiallyEvaluatedContext(*this)) { // C++11 [temp.inst]p3: // Unless a function template specialization has been explicitly // instantiated or explicitly specialized, the function template // specialization is implicitly instantiated when the specialization is // referenced in a context that requires a function definition to exist. // // We consider constexpr function templates to be referenced in a context // that requires a definition to exist whenever they are referenced. // // FIXME: This instantiates constexpr functions too frequently. If this is // really an unevaluated context (and we're not just in the definition of a // function template or overload resolution or other cases which we // incorrectly consider to be unevaluated contexts), and we're not in a // subexpression which we actually need to evaluate (for instance, a // template argument, array bound or an expression in a braced-init-list), // we are not permitted to instantiate this constexpr function definition. // // FIXME: This also implicitly defines special members too frequently. 
They // are only supposed to be implicitly defined if they are odr-used, but they // are not odr-used from constant expressions in unevaluated contexts. // However, they cannot be referenced if they are deleted, and they are // deleted whenever the implicit definition of the special member would // fail. if (!Func->isConstexpr() || Func->getBody()) return; CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Func); if (!Func->isImplicitlyInstantiable() && (!MD || MD->isUserProvided())) return; } // Note that this declaration has been used. if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) { Constructor = cast<CXXConstructorDecl>(Constructor->getFirstDecl()); if (Constructor->isDefaulted() && !Constructor->isDeleted()) { if (Constructor->isDefaultConstructor()) { if (Constructor->isTrivial() && !Constructor->hasAttr<DLLExportAttr>()) return; DefineImplicitDefaultConstructor(Loc, Constructor); } else if (Constructor->isCopyConstructor()) { DefineImplicitCopyConstructor(Loc, Constructor); } else if (Constructor->isMoveConstructor()) { DefineImplicitMoveConstructor(Loc, Constructor); } } else if (Constructor->getInheritedConstructor()) { DefineInheritingConstructor(Loc, Constructor); } } else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(Func)) { Destructor = cast<CXXDestructorDecl>(Destructor->getFirstDecl()); if (Destructor->isDefaulted() && !Destructor->isDeleted()) { if (Destructor->isTrivial() && !Destructor->hasAttr<DLLExportAttr>()) return; DefineImplicitDestructor(Loc, Destructor); } if (Destructor->isVirtual() && getLangOpts().AppleKext) MarkVTableUsed(Loc, Destructor->getParent()); } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) { if (MethodDecl->isOverloadedOperator() && MethodDecl->getOverloadedOperator() == OO_Equal) { MethodDecl = cast<CXXMethodDecl>(MethodDecl->getFirstDecl()); if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted()) { if (MethodDecl->isCopyAssignmentOperator()) DefineImplicitCopyAssignment(Loc, MethodDecl); else DefineImplicitMoveAssignment(Loc, MethodDecl); } } else if (isa<CXXConversionDecl>(MethodDecl) && MethodDecl->getParent()->isLambda()) { CXXConversionDecl *Conversion = cast<CXXConversionDecl>(MethodDecl->getFirstDecl()); if (Conversion->isLambdaToBlockPointerConversion()) DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion); else DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion); } else if (MethodDecl->isVirtual() && getLangOpts().AppleKext) MarkVTableUsed(Loc, MethodDecl->getParent()); } // Recursive functions should be marked when used from another function. // FIXME: Is this really right? if (CurContext == Func) return; // Resolve the exception specification for any function which is // used: CodeGen will need it. const FunctionProtoType *FPT = Func->getType()->getAs<FunctionProtoType>(); if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType())) ResolveExceptionSpec(Loc, FPT); if (!OdrUse) return; // Implicit instantiation of function templates and member functions of // class templates. 
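// Illustrative example (hypothetical):
//   template <typename T> T twice(T v) { return v + v; }
//   int n = twice(21);   // marks twice<int> referenced; its implicit
//                        // instantiation is requested below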
if (Func->isImplicitlyInstantiable()) { bool AlreadyInstantiated = false; SourceLocation PointOfInstantiation = Loc; if (FunctionTemplateSpecializationInfo *SpecInfo = Func->getTemplateSpecializationInfo()) { if (SpecInfo->getPointOfInstantiation().isInvalid()) SpecInfo->setPointOfInstantiation(Loc); else if (SpecInfo->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) { AlreadyInstantiated = true; PointOfInstantiation = SpecInfo->getPointOfInstantiation(); } } else if (MemberSpecializationInfo *MSInfo = Func->getMemberSpecializationInfo()) { if (MSInfo->getPointOfInstantiation().isInvalid()) MSInfo->setPointOfInstantiation(Loc); else if (MSInfo->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) { AlreadyInstantiated = true; PointOfInstantiation = MSInfo->getPointOfInstantiation(); } } if (!AlreadyInstantiated || Func->isConstexpr()) { if (isa<CXXRecordDecl>(Func->getDeclContext()) && cast<CXXRecordDecl>(Func->getDeclContext())->isLocalClass() && ActiveTemplateInstantiations.size()) PendingLocalImplicitInstantiations.push_back( std::make_pair(Func, PointOfInstantiation)); else if (Func->isConstexpr()) // Do not defer instantiations of constexpr functions, to avoid the // expression evaluator needing to call back into Sema if it sees a // call to such a function. InstantiateFunctionDefinition(PointOfInstantiation, Func); else { PendingInstantiations.push_back(std::make_pair(Func, PointOfInstantiation)); // Notify the consumer that a function was implicitly instantiated. Consumer.HandleCXXImplicitFunctionInstantiation(Func); } } } else { // Walk redefinitions, as some of them may be instantiable. for (auto i : Func->redecls()) { if (!i->isUsed(false) && i->isImplicitlyInstantiable()) MarkFunctionReferenced(Loc, i); } } // Keep track of used but undefined functions. if (!Func->isDefined()) { if (mightHaveNonExternalLinkage(Func)) UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc)); else if (Func->getMostRecentDecl()->isInlined() && !LangOpts.GNUInline && !Func->getMostRecentDecl()->hasAttr<GNUInlineAttr>()) UndefinedButUsed.insert(std::make_pair(Func->getCanonicalDecl(), Loc)); } // Normally the most current decl is marked used while processing the use and // any subsequent decls are marked used by decl merging. This fails with // template instantiation since marking can happen at the end of the file // and, because of the two phase lookup, this function is called with a // decl in the middle of a decl chain. We loop to maintain the invariant // that once a decl is used, all decls after it are also used. for (FunctionDecl *F = Func->getMostRecentDecl();; F = F->getPreviousDecl()) { F->markUsed(Context); if (F == Func) break; } } static void diagnoseUncapturableValueReference(Sema &S, SourceLocation loc, VarDecl *var, DeclContext *DC) { DeclContext *VarDC = var->getDeclContext(); // If the parameter still belongs to the translation unit, then // we're actually just using one parameter in the declaration of // the next. if (isa<ParmVarDecl>(var) && isa<TranslationUnitDecl>(VarDC)) return; // For C code, don't diagnose about capture if we're not actually in code // right now; it's impossible to write a non-constant expression outside of // function context, so we'll get other (more useful) diagnostics later. // // For C++, things get a bit more nasty... it would be nice to suppress this // diagnostic for certain cases like using a local variable in an array bound // for a member of a local class, but the correct predicate is not obvious.
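// A sketch of the awkward C++ case alluded to above (hypothetical code):
//   void f() {
//     const int n = 4;
//     struct Local { int arr[n]; };   // local variable used in a member's
//   }                                 //   array bound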
if (!S.getLangOpts().CPlusPlus && !S.CurContext->isFunctionOrMethod()) return; if (isa<CXXMethodDecl>(VarDC) && cast<CXXRecordDecl>(VarDC->getParent())->isLambda()) { S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_lambda) << var->getIdentifier(); } else if (FunctionDecl *fn = dyn_cast<FunctionDecl>(VarDC)) { S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_function) << var->getIdentifier() << fn->getDeclName(); } else if (isa<BlockDecl>(VarDC)) { S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_block) << var->getIdentifier(); } else { // FIXME: Is there any other context where a local variable can be // declared? S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_context) << var->getIdentifier(); } S.Diag(var->getLocation(), diag::note_entity_declared_at) << var->getIdentifier(); // FIXME: Add additional diagnostic info about class etc. which prevents // capture. } static bool isVariableAlreadyCapturedInScopeInfo(CapturingScopeInfo *CSI, VarDecl *Var, bool &SubCapturesAreNested, QualType &CaptureType, QualType &DeclRefType) { // Check whether we've already captured it. if (CSI->CaptureMap.count(Var)) { // If we found a capture, any subcaptures are nested. SubCapturesAreNested = true; // Retrieve the capture type for this variable. CaptureType = CSI->getCapture(Var).getCaptureType(); // Compute the type of an expression that refers to this variable. DeclRefType = CaptureType.getNonReferenceType(); const CapturingScopeInfo::Capture &Cap = CSI->getCapture(Var); if (Cap.isCopyCapture() && !(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable)) DeclRefType.addConst(); return true; } return false; } // Only block literals, captured statements, and lambda expressions can // capture; other scopes don't work. static DeclContext *getParentOfCapturingContextOrNull(DeclContext *DC, VarDecl *Var, SourceLocation Loc, const bool Diagnose, Sema &S) { if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC) || isLambdaCallOperator(DC)) return getLambdaAwareParentOfDeclContext(DC); else if (Var->hasLocalStorage()) { if (Diagnose) diagnoseUncapturableValueReference(S, Loc, Var, DC); } return nullptr; } // Certain capturing entities (lambdas, blocks etc.) are not allowed to capture // certain types of variables (unnamed, variably modified types etc.) // so check for eligibility. static bool isVariableCapturable(CapturingScopeInfo *CSI, VarDecl *Var, SourceLocation Loc, const bool Diagnose, Sema &S) { bool IsBlock = isa<BlockScopeInfo>(CSI); bool IsLambda = isa<LambdaScopeInfo>(CSI); // Lambdas are not allowed to capture unnamed variables // (e.g. anonymous unions). // FIXME: The C++11 rules don't actually state this explicitly, but I'm // assuming that's the intent. if (IsLambda && !Var->getDeclName()) { if (Diagnose) { S.Diag(Loc, diag::err_lambda_capture_anonymous_var); S.Diag(Var->getLocation(), diag::note_declared_at); } return false; } // Prohibit variably-modified types in blocks; they're difficult to deal with. if (Var->getType()->isVariablyModifiedType() && IsBlock) { if (Diagnose) { S.Diag(Loc, diag::err_ref_vm_type); S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } // Prohibit structs with flexible array members too. // We cannot capture what is in the tail end of the struct.
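// Illustrative example (hypothetical):
//   struct Msg { unsigned Len; char Data[]; };   // flexible array member
// Capturing a 'Msg' by value would slice off the trailing storage, so the
// check below rejects the capture.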
if (const RecordType *VTTy = Var->getType()->getAs<RecordType>()) { if (VTTy->getDecl()->hasFlexibleArrayMember()) { if (Diagnose) { if (IsBlock) S.Diag(Loc, diag::err_ref_flexarray_type); else S.Diag(Loc, diag::err_lambda_capture_flexarray_type) << Var->getDeclName(); S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } } const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>(); // Lambdas and captured statements are not allowed to capture __block // variables; they don't support the expected semantics. if (HasBlocksAttr && (IsLambda || isa<CapturedRegionScopeInfo>(CSI))) { if (Diagnose) { S.Diag(Loc, diag::err_capture_block_variable) << Var->getDeclName() << !IsLambda; S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } return true; } // Returns true if the capture by block was successful. static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var, SourceLocation Loc, const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const bool Nested, Sema &S) { Expr *CopyExpr = nullptr; bool ByRef = false; // Blocks are not allowed to capture arrays. if (CaptureType->isArrayType()) { if (BuildAndDiagnose) { S.Diag(Loc, diag::err_ref_array_type); S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } // Forbid the block-capture of autoreleasing variables. if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) { if (BuildAndDiagnose) { S.Diag(Loc, diag::err_arc_autoreleasing_capture) << /*block*/ 0; S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>(); if (HasBlocksAttr || CaptureType->isReferenceType()) { // Block capture by reference does not change the capture or // declaration reference types. ByRef = true; } else { // Block capture by copy introduces 'const'. CaptureType = CaptureType.getNonReferenceType().withConst(); DeclRefType = CaptureType; if (S.getLangOpts().CPlusPlus && BuildAndDiagnose) { if (const RecordType *Record = DeclRefType->getAs<RecordType>()) { // The capture logic needs the destructor, so make sure we mark it. // Usually this is unnecessary because most local variables have // their destructors marked at declaration time, but parameters are // an exception because it's technically only the call site that // actually requires the destructor. if (isa<ParmVarDecl>(Var)) S.FinalizeVarWithDestructor(Var, Record); // Enter a new evaluation context to insulate the copy // full-expression. EnterExpressionEvaluationContext scope(S, S.PotentiallyEvaluated); // According to the blocks spec, the capture of a variable from // the stack requires a const copy constructor. This is not true // of the copy/move done to move a __block variable to the heap. Expr *DeclRef = new (S.Context) DeclRefExpr(Var, Nested, DeclRefType.withConst(), VK_LValue, Loc); ExprResult Result = S.PerformCopyInitialization( InitializedEntity::InitializeBlock(Var->getLocation(), CaptureType, false), Loc, DeclRef); // Build a full-expression copy expression if initialization // succeeded and used a non-trivial constructor. Recover from // errors by pretending that the copy isn't necessary. if (!Result.isInvalid() && !cast<CXXConstructExpr>(Result.get())->getConstructor() ->isTrivial()) { Result = S.MaybeCreateExprWithCleanups(Result); CopyExpr = Result.get(); } } } } // Actually capture the variable. 
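// For illustration (hypothetical blocks code): given
//   S s; ^{ use(s); };
// the copy path above built CopyExpr through S's const copy constructor,
// while __block and reference-typed variables took the ByRef path instead.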
if (BuildAndDiagnose) BSI->addCapture(Var, HasBlocksAttr, ByRef, Nested, Loc, SourceLocation(), CaptureType, CopyExpr); return true; } /// \brief Capture the given variable in the captured region. static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI, VarDecl *Var, SourceLocation Loc, const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const bool RefersToCapturedVariable, Sema &S) { // By default, capture variables by reference. bool ByRef = true; // Using an LValue reference type is consistent with Lambdas (see below). if (S.getLangOpts().OpenMP && S.IsOpenMPCapturedVar(Var)) DeclRefType = DeclRefType.getUnqualifiedType(); CaptureType = S.Context.getLValueReferenceType(DeclRefType); Expr *CopyExpr = nullptr; if (BuildAndDiagnose) { // The current implementation assumes that all variables are captured // by references. Since there is no capture by copy, no expression // evaluation will be needed. RecordDecl *RD = RSI->TheRecordDecl; FieldDecl *Field = FieldDecl::Create(S.Context, RD, Loc, Loc, nullptr, CaptureType, S.Context.getTrivialTypeSourceInfo(CaptureType, Loc), nullptr, false, ICIS_NoInit); Field->setImplicit(true); Field->setAccess(AS_private); RD->addDecl(Field); CopyExpr = new (S.Context) DeclRefExpr(Var, RefersToCapturedVariable, DeclRefType, VK_LValue, Loc); Var->setReferenced(true); Var->markUsed(S.Context); } // Actually capture the variable. if (BuildAndDiagnose) RSI->addCapture(Var, /*isBlock*/false, ByRef, RefersToCapturedVariable, Loc, SourceLocation(), CaptureType, CopyExpr); return true; } /// \brief Create a field within the lambda class for the variable /// being captured. static void addAsFieldToClosureType(Sema &S, LambdaScopeInfo *LSI, VarDecl *Var, QualType FieldType, QualType DeclRefType, SourceLocation Loc, bool RefersToCapturedVariable) { CXXRecordDecl *Lambda = LSI->Lambda; // Build the non-static data member. FieldDecl *Field = FieldDecl::Create(S.Context, Lambda, Loc, Loc, nullptr, FieldType, S.Context.getTrivialTypeSourceInfo(FieldType, Loc), nullptr, false, ICIS_NoInit); Field->setImplicit(true); Field->setAccess(AS_private); Lambda->addDecl(Field); } /// \brief Capture the given variable in the lambda. static bool captureInLambda(LambdaScopeInfo *LSI, VarDecl *Var, SourceLocation Loc, const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const bool RefersToCapturedVariable, const Sema::TryCaptureKind Kind, SourceLocation EllipsisLoc, const bool IsTopScope, Sema &S) { // Determine whether we are capturing by reference or by value. bool ByRef = false; if (IsTopScope && Kind != Sema::TryCapture_Implicit) { ByRef = (Kind == Sema::TryCapture_ExplicitByRef); } else { ByRef = (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByref); } // Compute the type of the field that will capture this variable. if (ByRef) { // C++11 [expr.prim.lambda]p15: // An entity is captured by reference if it is implicitly or // explicitly captured but not captured by copy. It is // unspecified whether additional unnamed non-static data // members are declared in the closure type for entities // captured by reference. // // FIXME: It is not clear whether we want to build an lvalue reference // to the DeclRefType or to CaptureType.getNonReferenceType(). GCC appears // to do the former, while EDG does the latter. Core issue 1249 will // clarify, but for now we follow GCC because it's a more permissive and // easily defensible position. 
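// Illustrative example (hypothetical): for
//   int x; auto l = [&x] { return x; };
// any unnamed member added for 'x' has type 'int &', built from DeclRefType
// as described above.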
CaptureType = S.Context.getLValueReferenceType(DeclRefType); } else { // C++11 [expr.prim.lambda]p14: // For each entity captured by copy, an unnamed non-static // data member is declared in the closure type. The // declaration order of these members is unspecified. The type // of such a data member is the type of the corresponding // captured entity if the entity is not a reference to an // object, or the referenced type otherwise. [Note: If the // captured entity is a reference to a function, the // corresponding data member is also a reference to a // function. - end note ] if (const ReferenceType *RefType = CaptureType->getAs<ReferenceType>()){ if (!RefType->getPointeeType()->isFunctionType()) CaptureType = RefType->getPointeeType(); } // Forbid the lambda copy-capture of autoreleasing variables. if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) { if (BuildAndDiagnose) { S.Diag(Loc, diag::err_arc_autoreleasing_capture) << /*lambda*/ 1; S.Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); } return false; } // Make sure that by-copy captures are of a complete and non-abstract type. if (BuildAndDiagnose) { if (!CaptureType->isDependentType() && S.RequireCompleteType(Loc, CaptureType, diag::err_capture_of_incomplete_type, Var->getDeclName())) return false; if (S.RequireNonAbstractType(Loc, CaptureType, diag::err_capture_of_abstract_type)) return false; } } // Capture this variable in the lambda. if (BuildAndDiagnose) addAsFieldToClosureType(S, LSI, Var, CaptureType, DeclRefType, Loc, RefersToCapturedVariable); // Compute the type of a reference to this captured variable. if (ByRef) DeclRefType = CaptureType.getNonReferenceType(); else { // C++ [expr.prim.lambda]p5: // The closure type for a lambda-expression has a public inline // function call operator [...]. This function call operator is // declared const (9.3.1) if and only if the lambda-expression’s // parameter-declaration-clause is not followed by mutable. DeclRefType = CaptureType.getNonReferenceType(); if (!LSI->Mutable && !CaptureType->isReferenceType()) DeclRefType.addConst(); } // Add the capture. if (BuildAndDiagnose) LSI->addCapture(Var, /*IsBlock=*/false, ByRef, RefersToCapturedVariable, Loc, EllipsisLoc, CaptureType, /*CopyExpr=*/nullptr); return true; } bool Sema::tryCaptureVariable( VarDecl *Var, SourceLocation ExprLoc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt) { // An init-capture is notionally from the context surrounding its // declaration, but its parent DC is the lambda class. DeclContext *VarDC = Var->getDeclContext(); if (Var->isInitCapture()) VarDC = VarDC->getParent(); DeclContext *DC = CurContext; const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt ? *FunctionScopeIndexToStopAt : FunctionScopes.size() - 1; // We need to sync up the Declaration Context with the // FunctionScopeIndexToStopAt if (FunctionScopeIndexToStopAt) { unsigned FSIndex = FunctionScopes.size() - 1; while (FSIndex != MaxFunctionScopesIndex) { DC = getLambdaAwareParentOfDeclContext(DC); --FSIndex; } } // If the variable is declared in the current context, there is no need to // capture it. if (VarDC == DC) return true; // Capture global variables if it is required to use private copy of this // variable. 
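// Hypothetical OpenMP example of such a global (a variable with static
// storage duration named in a privatization clause):
//   int g;
//   #pragma omp parallel private(g)   // 'g' needs a private copy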
bool IsGlobal = !Var->hasLocalStorage(); if (IsGlobal && !(LangOpts.OpenMP && IsOpenMPCapturedVar(Var))) return true; // Walk up the stack to determine whether we can capture the variable, // performing the "simple" checks that don't depend on type. We stop when // we've either hit the declared scope of the variable or find an existing // capture of that variable. We start from the innermost capturing-entity // (the DC) and ensure that all intervening capturing-entities // (blocks/lambdas etc.) between the innermost capturer and the variable`s // declcontext can either capture the variable or have already captured // the variable. CaptureType = Var->getType(); DeclRefType = CaptureType.getNonReferenceType(); bool Nested = false; bool Explicit = (Kind != TryCapture_Implicit); unsigned FunctionScopesIndex = MaxFunctionScopesIndex; unsigned OpenMPLevel = 0; do { // Only block literals, captured statements, and lambda expressions can // capture; other scopes don't work. DeclContext *ParentDC = getParentOfCapturingContextOrNull(DC, Var, ExprLoc, BuildAndDiagnose, *this); // We need to check for the parent *first* because, if we *have* // private-captured a global variable, we need to recursively capture it in // intermediate blocks, lambdas, etc. if (!ParentDC) { if (IsGlobal) { FunctionScopesIndex = MaxFunctionScopesIndex - 1; break; } return true; } FunctionScopeInfo *FSI = FunctionScopes[FunctionScopesIndex]; CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FSI); // Check whether we've already captured it. if (isVariableAlreadyCapturedInScopeInfo(CSI, Var, Nested, CaptureType, DeclRefType)) break; if (getLangOpts().OpenMP) { if (auto *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) { // OpenMP private variables should not be captured in outer scope, so // just break here. if (RSI->CapRegionKind == CR_OpenMP) { if (isOpenMPPrivateVar(Var, OpenMPLevel)) { Nested = true; DeclRefType = DeclRefType.getUnqualifiedType(); CaptureType = Context.getLValueReferenceType(DeclRefType); break; } ++OpenMPLevel; } } } // If we are instantiating a generic lambda call operator body, // we do not want to capture new variables. What was captured // during either a lambdas transformation or initial parsing // should be used. if (isGenericLambdaCallOperatorSpecialization(DC)) { if (BuildAndDiagnose) { LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI); if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None) { Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName(); Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); Diag(LSI->Lambda->getLocStart(), diag::note_lambda_decl); } else diagnoseUncapturableValueReference(*this, ExprLoc, Var, DC); } return true; } // Certain capturing entities (lambdas, blocks etc.) are not allowed to capture // certain types of variables (unnamed, variably modified types etc.) // so check for eligibility. if (!isVariableCapturable(CSI, Var, ExprLoc, BuildAndDiagnose, *this)) return true; // Try to capture variable-length arrays types. if (Var->getType()->isVariablyModifiedType()) { // We're going to walk down into the type and look for VLA // expressions. 
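// For instance (hypothetical code), a parameter of variably modified type:
//   void f(int n, int (*p)[n]) { ... }
// Capturing 'p' requires walking its type (below) and capturing the bound
// 'n' so the size expression stays evaluable in the capturing entity.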
QualType QTy = Var->getType(); if (ParmVarDecl *PVD = dyn_cast_or_null<ParmVarDecl>(Var)) QTy = PVD->getOriginalType(); do { const Type *Ty = QTy.getTypePtr(); switch (Ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) #include "clang/AST/TypeNodes.def" QTy = QualType(); break; // These types are never variably-modified. case Type::Builtin: case Type::Complex: case Type::Vector: case Type::ExtVector: case Type::Record: case Type::Enum: case Type::Elaborated: case Type::TemplateSpecialization: case Type::ObjCObject: case Type::ObjCInterface: case Type::ObjCObjectPointer: llvm_unreachable("type class is never variably-modified!"); case Type::Adjusted: QTy = cast<AdjustedType>(Ty)->getOriginalType(); break; case Type::Decayed: QTy = cast<DecayedType>(Ty)->getPointeeType(); break; case Type::Pointer: QTy = cast<PointerType>(Ty)->getPointeeType(); break; case Type::BlockPointer: QTy = cast<BlockPointerType>(Ty)->getPointeeType(); break; case Type::LValueReference: case Type::RValueReference: QTy = cast<ReferenceType>(Ty)->getPointeeType(); break; case Type::MemberPointer: QTy = cast<MemberPointerType>(Ty)->getPointeeType(); break; case Type::ConstantArray: case Type::IncompleteArray: // Losing element qualification here is fine. QTy = cast<ArrayType>(Ty)->getElementType(); break; case Type::VariableArray: { // Losing element qualification here is fine. const VariableArrayType *VAT = cast<VariableArrayType>(Ty); // Unknown size indication requires no size computation. // Otherwise, evaluate and record it. if (auto Size = VAT->getSizeExpr()) { if (!CSI->isVLATypeCaptured(VAT)) { RecordDecl *CapRecord = nullptr; if (auto LSI = dyn_cast<LambdaScopeInfo>(CSI)) { CapRecord = LSI->Lambda; } else if (auto CRSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) { CapRecord = CRSI->TheRecordDecl; } if (CapRecord) { auto ExprLoc = Size->getExprLoc(); auto SizeType = Context.getSizeType(); // Build the non-static data member. auto Field = FieldDecl::Create( Context, CapRecord, ExprLoc, ExprLoc, /*Id*/ nullptr, SizeType, /*TInfo*/ nullptr, /*BW*/ nullptr, /*Mutable*/ false, /*InitStyle*/ ICIS_NoInit); Field->setImplicit(true); Field->setAccess(AS_private); Field->setCapturedVLAType(VAT); CapRecord->addDecl(Field); CSI->addVLATypeCapture(ExprLoc, SizeType); } } } QTy = VAT->getElementType(); break; } case Type::FunctionProto: case Type::FunctionNoProto: QTy = cast<FunctionType>(Ty)->getReturnType(); break; case Type::Paren: case Type::TypeOf: case Type::UnaryTransform: case Type::Attributed: case Type::SubstTemplateTypeParm: case Type::PackExpansion: // Keep walking after single level desugaring. QTy = QTy.getSingleStepDesugaredType(getASTContext()); break; case Type::Typedef: QTy = cast<TypedefType>(Ty)->desugar(); break; case Type::Decltype: QTy = cast<DecltypeType>(Ty)->desugar(); break; case Type::Auto: QTy = cast<AutoType>(Ty)->getDeducedType(); break; case Type::TypeOfExpr: QTy = cast<TypeOfExprType>(Ty)->getUnderlyingExpr()->getType(); break; case Type::Atomic: QTy = cast<AtomicType>(Ty)->getValueType(); break; } } while (!QTy.isNull() && QTy->isVariablyModifiedType()); } if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None && !Explicit) { // No capture-default, and this is not an explicit capture // so cannot capture this variable. 
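// Illustrative example (hypothetical):
//   int i = 0;
//   auto l = [] { return i; };   // error: 'i' cannot be implicitly captured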
if (BuildAndDiagnose) { Diag(ExprLoc, diag::err_lambda_impcap) << Var->getDeclName(); Diag(Var->getLocation(), diag::note_previous_decl) << Var->getDeclName(); Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getLocStart(), diag::note_lambda_decl); // FIXME: If we error out because an outer lambda can not implicitly // capture a variable that an inner lambda explicitly captures, we // should have the inner lambda do the explicit capture - because // it makes for cleaner diagnostics later. This would purely be done // so that the diagnostic does not misleadingly claim that a variable // can not be captured by a lambda implicitly even though it is captured // explicitly. Suggestion: // - create const bool VariableCaptureWasInitiallyExplicit = Explicit // at the function head // - cache the StartingDeclContext - this must be a lambda // - captureInLambda in the innermost lambda the variable. } return true; } FunctionScopesIndex--; DC = ParentDC; Explicit = false; } while (!VarDC->Equals(DC)); // Walk back down the scope stack, (e.g. from outer lambda to inner lambda) // computing the type of the capture at each step, checking type-specific // requirements, and adding captures if requested. // If the variable had already been captured previously, we start capturing // at the lambda nested within that one. for (unsigned I = ++FunctionScopesIndex, N = MaxFunctionScopesIndex + 1; I != N; ++I) { CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[I]); if (BlockScopeInfo *BSI = dyn_cast<BlockScopeInfo>(CSI)) { if (!captureInBlock(BSI, Var, ExprLoc, BuildAndDiagnose, CaptureType, DeclRefType, Nested, *this)) return true; Nested = true; } else if (CapturedRegionScopeInfo *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) { if (!captureInCapturedRegion(RSI, Var, ExprLoc, BuildAndDiagnose, CaptureType, DeclRefType, Nested, *this)) return true; Nested = true; } else { LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI); if (!captureInLambda(LSI, Var, ExprLoc, BuildAndDiagnose, CaptureType, DeclRefType, Nested, Kind, EllipsisLoc, /*IsTopScope*/I == N - 1, *this)) return true; Nested = true; } } return false; } bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc) { QualType CaptureType; QualType DeclRefType; return tryCaptureVariable(Var, Loc, Kind, EllipsisLoc, /*BuildAndDiagnose=*/true, CaptureType, DeclRefType, nullptr); } bool Sema::NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc) { QualType CaptureType; QualType DeclRefType; return !tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(), /*BuildAndDiagnose=*/false, CaptureType, DeclRefType, nullptr); } QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) { QualType CaptureType; QualType DeclRefType; // Determine whether we can capture this variable. if (tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(), /*BuildAndDiagnose=*/false, CaptureType, DeclRefType, nullptr)) return QualType(); return DeclRefType; } // If either the type of the variable or the initializer is dependent, // return false. Otherwise, determine whether the variable is a constant // expression. Use this if you need to know if a variable that might or // might not be dependent is truly a constant expression. 
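// For example (hypothetical), given
//   constexpr int N = 4;   // non-dependent, and a constant expression
//   int m = 4;             // not a constant expression
// this predicate is true for a reference to 'N' but false for 'm'.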
static inline bool IsVariableNonDependentAndAConstantExpression(VarDecl *Var, ASTContext &Context) { if (Var->getType()->isDependentType()) return false; const VarDecl *DefVD = nullptr; Var->getAnyInitializer(DefVD); if (!DefVD) return false; EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt(); Expr *Init = cast<Expr>(Eval->Value); if (Init->isValueDependent()) return false; return IsVariableAConstantExpression(Var, Context); } void Sema::UpdateMarkingForLValueToRValue(Expr *E) { // Per C++11 [basic.def.odr], a variable is odr-used "unless it is // an object that satisfies the requirements for appearing in a // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1) // is immediately applied." This function handles the lvalue-to-rvalue // conversion part. MaybeODRUseExprs.erase(E->IgnoreParens()); // If we are in a lambda, check if this DeclRefExpr or MemberExpr refers // to a variable that is a constant expression, and if so, identify it as // a reference to a variable that does not involve an odr-use of that // variable. if (LambdaScopeInfo *LSI = getCurLambda()) { Expr *SansParensExpr = E->IgnoreParens(); VarDecl *Var = nullptr; if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(SansParensExpr)) Var = dyn_cast<VarDecl>(DRE->getFoundDecl()); else if (MemberExpr *ME = dyn_cast<MemberExpr>(SansParensExpr)) Var = dyn_cast<VarDecl>(ME->getMemberDecl()); if (Var && IsVariableNonDependentAndAConstantExpression(Var, Context)) LSI->markVariableExprAsNonODRUsed(SansParensExpr); } } ExprResult Sema::ActOnConstantExpression(ExprResult Res) { Res = CorrectDelayedTyposInExpr(Res); if (!Res.isUsable()) return Res; // If a constant-expression is a reference to a variable where we delay // deciding whether it is an odr-use, just assume we will apply the // lvalue-to-rvalue conversion. In the one case where this doesn't happen // (a non-type template argument), we have special handling anyway. UpdateMarkingForLValueToRValue(Res.get()); return Res; } void Sema::CleanupVarDeclMarking() { for (llvm::SmallPtrSetIterator<Expr*> i = MaybeODRUseExprs.begin(), e = MaybeODRUseExprs.end(); i != e; ++i) { VarDecl *Var; SourceLocation Loc; if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*i)) { Var = cast<VarDecl>(DRE->getDecl()); Loc = DRE->getLocation(); } else if (MemberExpr *ME = dyn_cast<MemberExpr>(*i)) { Var = cast<VarDecl>(ME->getMemberDecl()); Loc = ME->getMemberLoc(); } else { llvm_unreachable("Unexpected expression"); } MarkVarDeclODRUsed(Var, Loc, *this, /*MaxFunctionScopeIndex Pointer*/ nullptr); } MaybeODRUseExprs.clear(); } static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc, VarDecl *Var, Expr *E) { assert((!E || isa<DeclRefExpr>(E) || isa<MemberExpr>(E)) && "Invalid Expr argument to DoMarkVarDeclReferenced"); Var->setReferenced(); TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind(); bool MarkODRUsed = true; // If the context is not potentially evaluated, this is not an odr-use and // does not trigger instantiation. if (!IsPotentiallyEvaluatedContext(SemaRef)) { if (SemaRef.isUnevaluatedContext()) return; // If we don't yet know whether this context is going to end up being an // evaluated context, and we're referencing a variable from an enclosing // scope, add a potential capture. // // FIXME: Is this necessary? These contexts are only used for default // arguments, where local variables can't be used. 
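// Sketch of the deferral this enables (hypothetical):
//   const int n = 4;
//   [=] { return n; };   // 'n' may end up neither captured nor odr-used
//                        // once the lvalue-to-rvalue conversion is applied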
const bool RefersToEnclosingScope = (SemaRef.CurContext != Var->getDeclContext() && Var->getDeclContext()->isFunctionOrMethod() && Var->hasLocalStorage()); if (RefersToEnclosingScope) { if (LambdaScopeInfo *const LSI = SemaRef.getCurLambda()) { // If a variable could potentially be odr-used, defer marking it so // until we finish analyzing the full expression for any // lvalue-to-rvalue // or discarded value conversions that would obviate odr-use. // Add it to the list of potential captures that will be analyzed // later (ActOnFinishFullExpr) for eventual capture and odr-use marking // unless the variable is a reference that was initialized by a constant // expression (this will never need to be captured or odr-used). assert(E && "Capture variable should be used in an expression."); if (!Var->getType()->isReferenceType() || !IsVariableNonDependentAndAConstantExpression(Var, SemaRef.Context)) LSI->addPotentialCapture(E->IgnoreParens()); } } if (!isTemplateInstantiation(TSK)) return; // Instantiate, but do not mark as odr-used, variable templates. MarkODRUsed = false; } VarTemplateSpecializationDecl *VarSpec = dyn_cast<VarTemplateSpecializationDecl>(Var); assert(!isa<VarTemplatePartialSpecializationDecl>(Var) && "Can't instantiate a partial template specialization."); // Perform implicit instantiation of static data members, static data member // templates of class templates, and variable template specializations. Delay // instantiations of variable templates, except for those that could be used // in a constant expression. if (isTemplateInstantiation(TSK)) { bool TryInstantiating = TSK == TSK_ImplicitInstantiation; if (TryInstantiating && !isa<VarTemplateSpecializationDecl>(Var)) { if (Var->getPointOfInstantiation().isInvalid()) { // This is a modification of an existing AST node. Notify listeners. if (ASTMutationListener *L = SemaRef.getASTMutationListener()) L->StaticDataMemberInstantiated(Var); } else if (!Var->isUsableInConstantExpressions(SemaRef.Context)) // Don't bother trying to instantiate it again, unless we might need // its initializer before we get to the end of the TU. TryInstantiating = false; } if (Var->getPointOfInstantiation().isInvalid()) Var->setTemplateSpecializationKind(TSK, Loc); if (TryInstantiating) { SourceLocation PointOfInstantiation = Var->getPointOfInstantiation(); bool InstantiationDependent = false; bool IsNonDependent = VarSpec ? !TemplateSpecializationType::anyDependentTemplateArguments( VarSpec->getTemplateArgsInfo(), InstantiationDependent) : true; // Do not instantiate specializations that are still type-dependent. if (IsNonDependent) { if (Var->isUsableInConstantExpressions(SemaRef.Context)) { // Do not defer instantiations of variables which could be used in a // constant expression. SemaRef.InstantiateVariableDefinition(PointOfInstantiation, Var); } else { SemaRef.PendingInstantiations .push_back(std::make_pair(Var, PointOfInstantiation)); } } } } if(!MarkODRUsed) return; // HLSL Change Begin -External variable is in cbuffer, cannot use as immediate. // Mark used for referenced external variable. if (SemaRef.getLangOpts().HLSL && Var->hasExternalFormalLinkage() && !isa<EnumConstantDecl>(Var)) MarkVarDeclODRUsed(Var, Loc, SemaRef, /*MaxFunctionScopeIndex ptr*/ nullptr); // HLSL Change End. // Per C++11 [basic.def.odr], a variable is odr-used "unless it satisfies // the requirements for appearing in a constant expression (5.19) and, if // it is an object, the lvalue-to-rvalue conversion (4.1) // is immediately applied." 
We check the first part here, and // Sema::UpdateMarkingForLValueToRValue deals with the second part. // Note that we use the C++11 definition everywhere because nothing in // C++03 depends on whether we get the C++03 version correct. The second // part does not apply to references, since they are not objects. if (E && IsVariableAConstantExpression(Var, SemaRef.Context)) { // A reference initialized by a constant expression can never be // odr-used, so simply ignore it. if (!Var->getType()->isReferenceType()) SemaRef.MaybeODRUseExprs.insert(E); } else MarkVarDeclODRUsed(Var, Loc, SemaRef, /*MaxFunctionScopeIndex ptr*/ nullptr); } /// \brief Mark a variable referenced, and check whether it is odr-used /// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be /// used directly for normal expressions referring to VarDecl. void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) { DoMarkVarDeclReferenced(*this, Loc, Var, nullptr); } static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E, bool OdrUse) { if (VarDecl *Var = dyn_cast<VarDecl>(D)) { DoMarkVarDeclReferenced(SemaRef, Loc, Var, E); return; } SemaRef.MarkAnyDeclReferenced(Loc, D, OdrUse); // If this is a call to a method via a cast, also mark the method in the // derived class used in case codegen can devirtualize the call. const MemberExpr *ME = dyn_cast<MemberExpr>(E); if (!ME) return; CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ME->getMemberDecl()); if (!MD) return; // Only attempt to devirtualize if this is truly a virtual call. bool IsVirtualCall = MD->isVirtual() && !ME->hasQualifier(); if (!IsVirtualCall) return; const Expr *Base = ME->getBase(); const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType(); if (!MostDerivedClassDecl) return; CXXMethodDecl *DM = MD->getCorrespondingMethodInClass(MostDerivedClassDecl); if (!DM || DM->isPure()) return; SemaRef.MarkAnyDeclReferenced(Loc, DM, OdrUse); } /// \brief Perform reference-marking and odr-use handling for a DeclRefExpr. void Sema::MarkDeclRefReferenced(DeclRefExpr *E) { // TODO: update this with DR# once a defect report is filed. // C++11 defect. The address of a pure member should not be an ODR use, even // if it's a qualified reference. bool OdrUse = true; if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(E->getDecl())) if (Method->isVirtual()) OdrUse = false; MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse); } /// \brief Perform reference-marking and odr-use handling for a MemberExpr. void Sema::MarkMemberReferenced(MemberExpr *E) { // C++11 [basic.def.odr]p2: // A non-overloaded function whose name appears as a potentially-evaluated // expression or a member of a set of candidate functions, if selected by // overload resolution when referred to from a potentially-evaluated // expression, is odr-used, unless it is a pure virtual function and its // name is not explicitly qualified. bool OdrUse = true; if (!E->hasQualifier()) { if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) if (Method->isPure()) OdrUse = false; } SourceLocation Loc = E->getMemberLoc().isValid() ? E->getMemberLoc() : E->getLocStart(); MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, OdrUse); } /// \brief Perform marking for a reference to an arbitrary declaration. It /// marks the declaration referenced, and performs odr-use checking for /// functions and variables. This method should not be used when building a /// normal expression which refers to a variable. 
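/// For example (illustrative), a declaration named by a template argument,
/// such as 'D' in 'Array<&D>', is marked through this entry point rather
/// than through a normal DeclRefExpr (see MarkReferencedDecls below).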
void Sema::MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse) { if (OdrUse) { if (auto *VD = dyn_cast<VarDecl>(D)) { MarkVariableReferenced(Loc, VD); return; } } if (auto *FD = dyn_cast<FunctionDecl>(D)) { MarkFunctionReferenced(Loc, FD, OdrUse); return; } D->setReferenced(); } namespace { // Mark all of the declarations referenced // FIXME: Not fully implemented yet! We need to have a better understanding // of when we're entering class MarkReferencedDecls : public RecursiveASTVisitor<MarkReferencedDecls> { Sema &S; SourceLocation Loc; public: typedef RecursiveASTVisitor<MarkReferencedDecls> Inherited; MarkReferencedDecls(Sema &S, SourceLocation Loc) : S(S), Loc(Loc) { } bool TraverseTemplateArgument(const TemplateArgument &Arg); bool TraverseRecordType(RecordType *T); }; } bool MarkReferencedDecls::TraverseTemplateArgument( const TemplateArgument &Arg) { if (Arg.getKind() == TemplateArgument::Declaration) { if (Decl *D = Arg.getAsDecl()) S.MarkAnyDeclReferenced(Loc, D, true); } return Inherited::TraverseTemplateArgument(Arg); } bool MarkReferencedDecls::TraverseRecordType(RecordType *T) { if (ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl())) { const TemplateArgumentList &Args = Spec->getTemplateArgs(); return TraverseTemplateArguments(Args.data(), Args.size()); } return true; } void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) { MarkReferencedDecls Marker(*this, Loc); Marker.TraverseType(Context.getCanonicalType(T)); } namespace { /// \brief Helper class that marks all of the declarations referenced by /// potentially-evaluated subexpressions as "referenced". class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> { Sema &S; bool SkipLocalVariables; public: typedef EvaluatedExprVisitor<EvaluatedExprMarker> Inherited; EvaluatedExprMarker(Sema &S, bool SkipLocalVariables) : Inherited(S.Context), S(S), SkipLocalVariables(SkipLocalVariables) { } void VisitDeclRefExpr(DeclRefExpr *E) { // If we were asked not to visit local variables, don't. 
if (SkipLocalVariables) { if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) if (VD->hasLocalStorage()) return; } S.MarkDeclRefReferenced(E); } void VisitMemberExpr(MemberExpr *E) { S.MarkMemberReferenced(E); Inherited::VisitMemberExpr(E); } void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) { S.MarkFunctionReferenced(E->getLocStart(), const_cast<CXXDestructorDecl*>(E->getTemporary()->getDestructor())); Visit(E->getSubExpr()); } void VisitCXXNewExpr(CXXNewExpr *E) { if (E->getOperatorNew()) S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorNew()); if (E->getOperatorDelete()) S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete()); Inherited::VisitCXXNewExpr(E); } void VisitCXXDeleteExpr(CXXDeleteExpr *E) { if (E->getOperatorDelete()) S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete()); QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType()); if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) { CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl()); S.MarkFunctionReferenced(E->getLocStart(), S.LookupDestructor(Record)); } Inherited::VisitCXXDeleteExpr(E); } void VisitCXXConstructExpr(CXXConstructExpr *E) { S.MarkFunctionReferenced(E->getLocStart(), E->getConstructor()); Inherited::VisitCXXConstructExpr(E); } void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) { Visit(E->getExpr()); } void VisitImplicitCastExpr(ImplicitCastExpr *E) { Inherited::VisitImplicitCastExpr(E); if (E->getCastKind() == CK_LValueToRValue) S.UpdateMarkingForLValueToRValue(E->getSubExpr()); } }; } /// \brief Mark any declarations that appear within this expression or any /// potentially-evaluated subexpressions as "referenced". /// /// \param SkipLocalVariables If true, don't mark local variables as /// 'referenced'. void Sema::MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables) { EvaluatedExprMarker(*this, SkipLocalVariables).Visit(E); } /// \brief Emit a diagnostic that describes an effect on the run-time behavior /// of the program being compiled. /// /// This routine emits the given diagnostic when the code currently being /// type-checked is "potentially evaluated", meaning that there is a /// possibility that the code will actually be executable. Code in sizeof() /// expressions, code used only during overload resolution, etc., are not /// potentially evaluated. This routine will suppress such diagnostics or, /// in the absolutely nutty case of potentially potentially evaluated /// expressions (C++ typeid), queue the diagnostic to potentially emit it /// later. /// /// This routine should be used for all diagnostics that describe the run-time /// behavior of a program, such as passing a non-POD value through an ellipsis. /// Failure to do so will likely result in spurious diagnostics or failures /// during overload resolution or within sizeof/alignof/typeof/typeid. bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD) { switch (ExprEvalContexts.back().Context) { case Unevaluated: case UnevaluatedAbstract: // The argument will never be evaluated, so don't complain. break; case ConstantEvaluated: // Relevant diagnostics should be produced by constant evaluation. break; case PotentiallyEvaluated: case PotentiallyEvaluatedIfUsed: if (Statement && getCurFunctionOrMethodDecl()) { FunctionScopes.back()->PossiblyUnreachableDiags. 
push_back(sema::PossiblyUnreachableDiag(PD, Loc, Statement)); } else Diag(Loc, PD); return true; } return false; } bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD) { if (ReturnType->isVoidType() || !ReturnType->isIncompleteType()) return false; // If we're inside a decltype's expression, don't check for a valid return // type or construct temporaries until we know whether this is the last call. if (ExprEvalContexts.back().IsDecltype) { ExprEvalContexts.back().DelayedDecltypeCalls.push_back(CE); return false; } class CallReturnIncompleteDiagnoser : public TypeDiagnoser { FunctionDecl *FD; CallExpr *CE; public: CallReturnIncompleteDiagnoser(FunctionDecl *FD, CallExpr *CE) : FD(FD), CE(CE) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (!FD) { S.Diag(Loc, diag::err_call_incomplete_return) << T << CE->getSourceRange(); return; } S.Diag(Loc, diag::err_call_function_incomplete_return) << CE->getSourceRange() << FD->getDeclName() << T; S.Diag(FD->getLocation(), diag::note_entity_declared_at) << FD->getDeclName(); } } Diagnoser(FD, CE); if (RequireCompleteType(Loc, ReturnType, Diagnoser)) return true; return false; } // Diagnose the s/=/==/ and s/\|=/!=/ typos. Note that adding parentheses // will prevent this condition from triggering, which is what we want. void Sema::DiagnoseAssignmentAsCondition(Expr *E) { SourceLocation Loc; unsigned diagnostic = diag::warn_condition_is_assignment; bool IsOrAssign = false; if (BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) { if (Op->getOpcode() != BO_Assign && Op->getOpcode() != BO_OrAssign) return; IsOrAssign = Op->getOpcode() == BO_OrAssign; // Greylist some idioms by putting them into a warning subcategory. if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(Op->getRHS()->IgnoreParenCasts())) { Selector Sel = ME->getSelector(); // self = [<foo> init...] if (isSelfExpr(Op->getLHS()) && ME->getMethodFamily() == OMF_init) diagnostic = diag::warn_condition_is_idiomatic_assignment; // <foo> = [<bar> nextObject] else if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "nextObject") diagnostic = diag::warn_condition_is_idiomatic_assignment; } Loc = Op->getOperatorLoc(); } else if (CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) { if (Op->getOperator() != OO_Equal && Op->getOperator() != OO_PipeEqual) return; IsOrAssign = Op->getOperator() == OO_PipeEqual; Loc = Op->getOperatorLoc(); } else if (PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) return DiagnoseAssignmentAsCondition(POE->getSyntacticForm()); else { // Not an assignment. return; } Diag(Loc, diagnostic) << E->getSourceRange(); SourceLocation Open = E->getLocStart(); SourceLocation Close = PP.getLocForEndOfToken(E->getSourceRange().getEnd()); Diag(Loc, diag::note_condition_assign_silence) << FixItHint::CreateInsertion(Open, "(") << FixItHint::CreateInsertion(Close, ")"); if (IsOrAssign) Diag(Loc, diag::note_condition_or_assign_to_comparison) << FixItHint::CreateReplacement(Loc, "!="); else Diag(Loc, diag::note_condition_assign_to_comparison) << FixItHint::CreateReplacement(Loc, "=="); } /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) { // Don't warn if the parens came from a macro. SourceLocation parenLoc = ParenE->getLocStart(); if (parenLoc.isInvalid() || parenLoc.isMacroID()) return; // Don't warn for dependent expressions. 
if (ParenE->isTypeDependent()) return; Expr *E = ParenE->IgnoreParens(); if (BinaryOperator *opE = dyn_cast<BinaryOperator>(E)) if (opE->getOpcode() == BO_EQ && opE->getLHS()->IgnoreParenImpCasts()->isModifiableLvalue(Context) == Expr::MLV_Valid) { SourceLocation Loc = opE->getOperatorLoc(); Diag(Loc, diag::warn_equality_with_extra_parens) << E->getSourceRange(); SourceRange ParenERange = ParenE->getSourceRange(); Diag(Loc, diag::note_equality_comparison_silence) << FixItHint::CreateRemoval(ParenERange.getBegin()) << FixItHint::CreateRemoval(ParenERange.getEnd()); Diag(Loc, diag::note_equality_comparison_to_assign) << FixItHint::CreateReplacement(Loc, "="); } } ExprResult Sema::CheckBooleanCondition(Expr *E, SourceLocation Loc) { DiagnoseAssignmentAsCondition(E); if (ParenExpr *parenE = dyn_cast<ParenExpr>(E)) DiagnoseEqualityWithExtraParens(parenE); ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return ExprError(); E = result.get(); if (!E->isTypeDependent()) { if (getLangOpts().CPlusPlus) return CheckCXXBooleanCondition(E); // C++ 6.4p4 ExprResult ERes = DefaultFunctionArrayLvalueConversion(E); if (ERes.isInvalid()) return ExprError(); E = ERes.get(); QualType T = E->getType(); if (!T->isScalarType()) { // C99 6.8.4.1p1 Diag(Loc, diag::err_typecheck_statement_requires_scalar) << T << E->getSourceRange(); return ExprError(); } CheckBoolLikeConversion(E, Loc); } return E; } ExprResult Sema::ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr) { if (!SubExpr) return ExprError(); return CheckBooleanCondition(SubExpr, Loc); } namespace { /// A visitor for rebuilding a call to an __unknown_any expression /// to have an appropriate type. struct RebuildUnknownAnyFunction : StmtVisitor<RebuildUnknownAnyFunction, ExprResult> { Sema &S; RebuildUnknownAnyFunction(Sema &S) : S(S) {} ExprResult VisitStmt(Stmt *S) { llvm_unreachable("unexpected statement!"); } ExprResult VisitExpr(Expr *E) { S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_call) << E->getSourceRange(); return ExprError(); } /// Rebuild an expression which simply semantically wraps another /// expression which it shares the type and value kind of. 
template <class T> ExprResult rebuildSugarExpr(T *E) { ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); Expr *SubExpr = SubResult.get(); E->setSubExpr(SubExpr); E->setType(SubExpr->getType()); E->setValueKind(SubExpr->getValueKind()); assert(E->getObjectKind() == OK_Ordinary); return E; } ExprResult VisitParenExpr(ParenExpr *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryExtension(UnaryOperator *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryAddrOf(UnaryOperator *E) { ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); Expr *SubExpr = SubResult.get(); E->setSubExpr(SubExpr); E->setType(S.Context.getPointerType(SubExpr->getType())); assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); return E; } ExprResult resolveDecl(Expr *E, ValueDecl *VD) { if (!isa<FunctionDecl>(VD)) return VisitExpr(E); E->setType(VD->getType()); assert(E->getValueKind() == VK_RValue); if (S.getLangOpts().CPlusPlus && !(isa<CXXMethodDecl>(VD) && cast<CXXMethodDecl>(VD)->isInstance())) E->setValueKind(VK_LValue); return E; } ExprResult VisitMemberExpr(MemberExpr *E) { return resolveDecl(E, E->getMemberDecl()); } ExprResult VisitDeclRefExpr(DeclRefExpr *E) { return resolveDecl(E, E->getDecl()); } }; } /// Given a function expression of unknown-any type, try to rebuild it /// to have a function type. static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *FunctionExpr) { ExprResult Result = RebuildUnknownAnyFunction(S).Visit(FunctionExpr); if (Result.isInvalid()) return ExprError(); return S.DefaultFunctionArrayConversion(Result.get()); } namespace { /// A visitor for rebuilding an expression of type __unknown_anytype /// into one which resolves the type directly on the referring /// expression. Strict preservation of the original source /// structure is not a goal. struct RebuildUnknownAnyExpr : StmtVisitor<RebuildUnknownAnyExpr, ExprResult> { Sema &S; /// The current destination type. QualType DestType; RebuildUnknownAnyExpr(Sema &S, QualType CastType) : S(S), DestType(CastType) {} ExprResult VisitStmt(Stmt *S) { llvm_unreachable("unexpected statement!"); } ExprResult VisitExpr(Expr *E) { S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr) << E->getSourceRange(); return ExprError(); } ExprResult VisitCallExpr(CallExpr *E); ExprResult VisitObjCMessageExpr(ObjCMessageExpr *E); /// Rebuild an expression which simply semantically wraps another /// expression which it shares the type and value kind of. template <class T> ExprResult rebuildSugarExpr(T *E) { ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); Expr *SubExpr = SubResult.get(); E->setSubExpr(SubExpr); E->setType(SubExpr->getType()); E->setValueKind(SubExpr->getValueKind()); assert(E->getObjectKind() == OK_Ordinary); return E; } ExprResult VisitParenExpr(ParenExpr *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryExtension(UnaryOperator *E) { return rebuildSugarExpr(E); } ExprResult VisitUnaryAddrOf(UnaryOperator *E) { const PointerType *Ptr = DestType->getAs<PointerType>(); if (!Ptr) { S.Diag(E->getOperatorLoc(), diag::err_unknown_any_addrof) << E->getSourceRange(); return ExprError(); } assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); E->setType(DestType); // Build the sub-expression as if it were an object of the pointee type. 
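      // (Illustrative: for '(int *)&x' where 'x' has __unknown_anytype, the
      // cast makes DestType 'int *', so 'x' itself is rebuilt below as 'int'.)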
DestType = Ptr->getPointeeType(); ExprResult SubResult = Visit(E->getSubExpr()); if (SubResult.isInvalid()) return ExprError(); E->setSubExpr(SubResult.get()); return E; } ExprResult VisitImplicitCastExpr(ImplicitCastExpr *E); ExprResult resolveDecl(Expr *E, ValueDecl *VD); ExprResult VisitMemberExpr(MemberExpr *E) { return resolveDecl(E, E->getMemberDecl()); } ExprResult VisitDeclRefExpr(DeclRefExpr *E) { return resolveDecl(E, E->getDecl()); } }; } /// Rebuilds a call expression which yielded __unknown_anytype. ExprResult RebuildUnknownAnyExpr::VisitCallExpr(CallExpr *E) { Expr *CalleeExpr = E->getCallee(); enum FnKind { FK_MemberFunction, FK_FunctionPointer, FK_BlockPointer }; FnKind Kind; QualType CalleeType = CalleeExpr->getType(); if (CalleeType == S.Context.BoundMemberTy) { assert(isa<CXXMemberCallExpr>(E) || isa<CXXOperatorCallExpr>(E)); Kind = FK_MemberFunction; CalleeType = Expr::findBoundMemberType(CalleeExpr); } else if (const PointerType *Ptr = CalleeType->getAs<PointerType>()) { CalleeType = Ptr->getPointeeType(); Kind = FK_FunctionPointer; } else { CalleeType = CalleeType->castAs<BlockPointerType>()->getPointeeType(); Kind = FK_BlockPointer; } const FunctionType *FnType = CalleeType->castAs<FunctionType>(); // Verify that this is a legal result type of a function. if (DestType->isArrayType() || DestType->isFunctionType()) { unsigned diagID = diag::err_func_returning_array_function; if (Kind == FK_BlockPointer) diagID = diag::err_block_returning_array_function; S.Diag(E->getExprLoc(), diagID) << DestType->isFunctionType() << DestType; return ExprError(); } // Otherwise, go ahead and set DestType as the call's result. E->setType(DestType.getNonLValueExprType(S.Context)); E->setValueKind(Expr::getValueKindForType(DestType)); assert(E->getObjectKind() == OK_Ordinary); // Rebuild the function type, replacing the result type with DestType. const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FnType); if (Proto) { // __unknown_anytype(...) is a special case used by the debugger when // it has no idea what a function's signature is. // // We want to build this call essentially under the K&R // unprototyped rules, but making a FunctionNoProtoType in C++ // would foul up all sorts of assumptions. However, we cannot // simply pass all arguments as variadic arguments, nor can we // portably just call the function under a non-variadic type; see // the comment on IR-gen's TargetInfo::isNoProtoCallVariadic. // However, it turns out that in practice it is generally safe to // call a function declared as "A foo(B,C,D);" under the prototype // "A foo(B,C,D,...);". The only known exception is with the // Windows ABI, where any variadic function is implicitly cdecl // regardless of its normal CC. Therefore we change the parameter // types to match the types of the arguments. // // This is a hack, but it is far superior to moving the // corresponding target-specific code from IR-gen to Sema/AST. 
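      // Illustrative example (hypothetical names): given the debugger-style
      // declaration '__unknown_anytype bar(...)', a use '(double)bar(1, 2.0f)'
      // is rebuilt below under the prototype 'double bar(int, float)', with
      // the parameter types taken directly from the argument types.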
ArrayRef<QualType> ParamTypes = Proto->getParamTypes(); SmallVector<QualType, 8> ArgTypes; if (ParamTypes.empty() && Proto->isVariadic()) { // the special case ArgTypes.reserve(E->getNumArgs()); for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { Expr *Arg = E->getArg(i); QualType ArgType = Arg->getType(); if (E->isLValue()) { ArgType = S.Context.getLValueReferenceType(ArgType); } else if (E->isXValue()) { ArgType = S.Context.getRValueReferenceType(ArgType); } ArgTypes.push_back(ArgType); } ParamTypes = ArgTypes; } DestType = S.Context.getFunctionType(DestType, ParamTypes, Proto->getExtProtoInfo(), Proto->getParamMods()); // HLSL Change - assume args are preserved } else { DestType = S.Context.getFunctionNoProtoType(DestType, FnType->getExtInfo()); } // Rebuild the appropriate pointer-to-function type. switch (Kind) { case FK_MemberFunction: // Nothing to do. break; case FK_FunctionPointer: DestType = S.Context.getPointerType(DestType); break; case FK_BlockPointer: DestType = S.Context.getBlockPointerType(DestType); break; } // Finally, we can recurse. ExprResult CalleeResult = Visit(CalleeExpr); if (!CalleeResult.isUsable()) return ExprError(); E->setCallee(CalleeResult.get()); // Bind a temporary if necessary. return S.MaybeBindToTemporary(E); } ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *E) { // Verify that this is a legal result type of a call. if (DestType->isArrayType() || DestType->isFunctionType()) { S.Diag(E->getExprLoc(), diag::err_func_returning_array_function) << DestType->isFunctionType() << DestType; return ExprError(); } // Rewrite the method result type if available. if (ObjCMethodDecl *Method = E->getMethodDecl()) { assert(Method->getReturnType() == S.Context.UnknownAnyTy); Method->setReturnType(DestType); } // Change the type of the message. E->setType(DestType.getNonReferenceType()); E->setValueKind(Expr::getValueKindForType(DestType)); return S.MaybeBindToTemporary(E); } ExprResult RebuildUnknownAnyExpr::VisitImplicitCastExpr(ImplicitCastExpr *E) { // The only case we should ever see here is a function-to-pointer decay. if (E->getCastKind() == CK_FunctionToPointerDecay) { assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); E->setType(DestType); // Rebuild the sub-expression as the pointee (function) type. DestType = DestType->castAs<PointerType>()->getPointeeType(); ExprResult Result = Visit(E->getSubExpr()); if (!Result.isUsable()) return ExprError(); E->setSubExpr(Result.get()); return E; } else if (E->getCastKind() == CK_LValueToRValue) { assert(E->getValueKind() == VK_RValue); assert(E->getObjectKind() == OK_Ordinary); assert(isa<BlockPointerType>(E->getType())); E->setType(DestType); // The sub-expression has to be a lvalue reference, so rebuild it as such. 
DestType = S.Context.getLValueReferenceType(DestType); ExprResult Result = Visit(E->getSubExpr()); if (!Result.isUsable()) return ExprError(); E->setSubExpr(Result.get()); return E; } else { llvm_unreachable("Unhandled cast type!"); } } ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) { ExprValueKind ValueKind = VK_LValue; QualType Type = DestType; // We know how to make this work for certain kinds of decls: // - functions if (FunctionDecl *FD = dyn_cast<FunctionDecl>(VD)) { if (const PointerType *Ptr = Type->getAs<PointerType>()) { DestType = Ptr->getPointeeType(); ExprResult Result = resolveDecl(E, VD); if (Result.isInvalid()) return ExprError(); return S.ImpCastExprToType(Result.get(), Type, CK_FunctionToPointerDecay, VK_RValue); } if (!Type->isFunctionType()) { S.Diag(E->getExprLoc(), diag::err_unknown_any_function) << VD << E->getSourceRange(); return ExprError(); } if (const FunctionProtoType *FT = Type->getAs<FunctionProtoType>()) { // We must match the FunctionDecl's type to the hack introduced in // RebuildUnknownAnyExpr::VisitCallExpr to vararg functions of unknown // type. See the lengthy commentary in that routine. QualType FDT = FD->getType(); const FunctionType *FnType = FDT->castAs<FunctionType>(); const FunctionProtoType *Proto = dyn_cast_or_null<FunctionProtoType>(FnType); DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E); if (DRE && Proto && Proto->getParamTypes().empty() && Proto->isVariadic()) { SourceLocation Loc = FD->getLocation(); FunctionDecl *NewFD = FunctionDecl::Create(FD->getASTContext(), FD->getDeclContext(), Loc, Loc, FD->getNameInfo().getName(), DestType, FD->getTypeSourceInfo(), SC_None, false/*isInlineSpecified*/, FD->hasPrototype(), false/*isConstexprSpecified*/); if (FD->getQualifier()) NewFD->setQualifierInfo(FD->getQualifierLoc()); SmallVector<ParmVarDecl*, 16> Params; for (const auto &AI : FT->param_types()) { ParmVarDecl *Param = S.BuildParmVarDeclForTypedef(FD, Loc, AI); Param->setScopeInfo(0, Params.size()); Params.push_back(Param); } NewFD->setParams(Params); DRE->setDecl(NewFD); VD = DRE->getDecl(); } } if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) if (MD->isInstance()) { ValueKind = VK_RValue; Type = S.Context.BoundMemberTy; } // Function references aren't l-values in C. if (!S.getLangOpts().CPlusPlus) ValueKind = VK_RValue; // - variables } else if (isa<VarDecl>(VD)) { if (const ReferenceType *RefTy = Type->getAs<ReferenceType>()) { Type = RefTy->getPointeeType(); } else if (Type->isFunctionType()) { S.Diag(E->getExprLoc(), diag::err_unknown_any_var_function_type) << VD << E->getSourceRange(); return ExprError(); } // - nothing else } else { S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_decl) << VD << E->getSourceRange(); return ExprError(); } // Modifying the declaration like this is friendly to IR-gen but // also really dangerous. VD->setType(DestType); E->setType(Type); E->setValueKind(ValueKind); return E; } /// Check a cast of an unknown-any type. We intentionally only /// trigger this for C-style casts. ExprResult Sema::checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path) { // Rewrite the casted expression from scratch. 
ExprResult result = RebuildUnknownAnyExpr(*this, CastType).Visit(CastExpr); if (!result.isUsable()) return ExprError(); CastExpr = result.get(); VK = CastExpr->getValueKind(); CastKind = CK_NoOp; return CastExpr; } ExprResult Sema::forceUnknownAnyToType(Expr *E, QualType ToType) { return RebuildUnknownAnyExpr(*this, ToType).Visit(E); } ExprResult Sema::checkUnknownAnyArg(SourceLocation callLoc, Expr *arg, QualType &paramType) { // If the syntactic form of the argument is not an explicit cast of // any sort, just do default argument promotion. ExplicitCastExpr *castArg = dyn_cast<ExplicitCastExpr>(arg->IgnoreParens()); if (!castArg) { ExprResult result = DefaultArgumentPromotion(arg); if (result.isInvalid()) return ExprError(); paramType = result.get()->getType(); return result; } // Otherwise, use the type that was written in the explicit cast. assert(!arg->hasPlaceholderType()); paramType = castArg->getTypeAsWritten(); // Copy-initialize a parameter of that type. InitializedEntity entity = InitializedEntity::InitializeParameter(Context, paramType, /*consumed*/ false); return PerformCopyInitialization(entity, callLoc, arg); } static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) { Expr *orig = E; unsigned diagID = diag::err_uncasted_use_of_unknown_any; while (true) { E = E->IgnoreParenImpCasts(); if (CallExpr *call = dyn_cast<CallExpr>(E)) { E = call->getCallee(); diagID = diag::err_uncasted_call_of_unknown_any; } else { break; } } SourceLocation loc; NamedDecl *d; if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(E)) { loc = ref->getLocation(); d = ref->getDecl(); } else if (MemberExpr *mem = dyn_cast<MemberExpr>(E)) { loc = mem->getMemberLoc(); d = mem->getMemberDecl(); } else if (ObjCMessageExpr *msg = dyn_cast<ObjCMessageExpr>(E)) { diagID = diag::err_uncasted_call_of_unknown_any; loc = msg->getSelectorStartLoc(); d = msg->getMethodDecl(); if (!d) { S.Diag(loc, diag::err_uncasted_send_to_unknown_any_method) << static_cast<unsigned>(msg->isClassMessage()) << msg->getSelector() << orig->getSourceRange(); return ExprError(); } } else { S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr) << E->getSourceRange(); return ExprError(); } S.Diag(loc, diagID) << d << orig->getSourceRange(); // Never recoverable. return ExprError(); } /// Check for operands with placeholder types and complain if found. /// Returns true if there was an error and no recovery was possible. ExprResult Sema::CheckPlaceholderExpr(Expr *E) { if (!getLangOpts().CPlusPlus) { // C cannot handle TypoExpr nodes on either side of a binop because it // doesn't handle dependent types properly, so make sure any TypoExprs have // been dealt with before checking the operands. ExprResult Result = CorrectDelayedTyposInExpr(E); if (!Result.isUsable()) return ExprError(); E = Result.get(); } const BuiltinType *placeholderType = E->getType()->getAsPlaceholderType(); if (!placeholderType) return E; switch (placeholderType->getKind()) { // Overloaded expressions. case BuiltinType::Overload: { // Try to resolve a single function template specialization. // This is obligatory. ExprResult result = E; if (ResolveAndFixSingleFunctionTemplateSpecialization(result, false)) { return result; // If that failed, try to recover with a call. } else { tryToRecoverWithCall(result, PDiag(diag::err_ovl_unresolvable), /*complain*/ true); return result; } } // Bound member functions. 
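  // e.g. 'obj.memfn' or 'ptr->~T' written without an argument list: the
  // expression names a member function bound to an object without calling
  // it, and the recovery below may suggest appending '()'.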
case BuiltinType::BoundMember: { ExprResult result = E; const Expr *BME = E->IgnoreParens(); PartialDiagnostic PD = PDiag(diag::err_bound_member_function); // Try to give a nicer diagnostic if it is a bound member that we recognize. if (isa<CXXPseudoDestructorExpr>(BME)) { PD = PDiag(diag::err_dtor_expr_without_call) << /*pseudo-destructor*/ 1; } else if (const auto *ME = dyn_cast<MemberExpr>(BME)) { if (ME->getMemberNameInfo().getName().getNameKind() == DeclarationName::CXXDestructorName) PD = PDiag(diag::err_dtor_expr_without_call) << /*destructor*/ 0; } tryToRecoverWithCall(result, PD, /*complain*/ true); return result; } // ARC unbridged casts. case BuiltinType::ARCUnbridgedCast: { Expr *realCast = stripARCUnbridgedCast(E); diagnoseARCUnbridgedCast(realCast); return realCast; } // Expressions of unknown type. case BuiltinType::UnknownAny: return diagnoseUnknownAnyExpr(*this, E); // Pseudo-objects. case BuiltinType::PseudoObject: return checkPseudoObjectRValue(E); case BuiltinType::BuiltinFn: { // Accept __noop without parens by implicitly converting it to a call expr. auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()); if (DRE) { auto *FD = cast<FunctionDecl>(DRE->getDecl()); if (FD->getBuiltinID() == Builtin::BI__noop) { E = ImpCastExprToType(E, Context.getPointerType(FD->getType()), CK_BuiltinFnToFnPtr).get(); return new (Context) CallExpr(Context, E, None, Context.IntTy, VK_RValue, SourceLocation()); } } Diag(E->getLocStart(), diag::err_builtin_fn_use); return ExprError(); } // Everything else should be impossible. #define BUILTIN_TYPE(Id, SingletonId) \ case BuiltinType::Id: #define PLACEHOLDER_TYPE(Id, SingletonId) #include "clang/AST/BuiltinTypes.def" break; } llvm_unreachable("invalid placeholder type!"); } bool Sema::CheckCaseExpression(Expr *E) { if (E->isTypeDependent()) return true; if (E->isValueDependent() || E->isIntegerConstantExpr(Context)) return E->getType()->isIntegralOrEnumerationType(); return false; } /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) { assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) && "Unknown Objective-C Boolean value!"); QualType BoolT = Context.ObjCBuiltinBoolTy; if (!Context.getBOOLDecl()) { LookupResult Result(*this, &Context.Idents.get("BOOL"), OpLoc, Sema::LookupOrdinaryName); if (LookupName(Result, getCurScope()) && Result.isSingleResult()) { NamedDecl *ND = Result.getFoundDecl(); if (TypedefDecl *TD = dyn_cast<TypedefDecl>(ND)) Context.setBOOLDecl(TD); } } if (Context.getBOOLDecl()) BoolT = Context.getBOOLType(); return new (Context) ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes, BoolT, OpLoc); }
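// Note on the BOOL lookup above (illustrative): when the translation unit
// declares 'typedef signed char BOOL;', __objc_yes and __objc_no are given
// that 'BOOL' type rather than the builtin Objective-C boolean type.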
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaOpenMP.cpp
//===--- SemaOpenMP.cpp - Semantic Analysis for OpenMP constructs ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file implements semantic analysis for OpenMP directives and
/// clauses.
///
//===----------------------------------------------------------------------===//

#include "TreeTransform.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
using namespace clang;

// HLSL Change Starts
// No OpenMP Sema support, so simply skip all of this compilation.
// Here are enough stubs to link the current targets.
#if 1

//===----------------------------------------------------------------------===//
// Stack of data-sharing attributes for variables
//===----------------------------------------------------------------------===//

void Sema::InitDataSharingAttributesStack() {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

bool Sema::IsOpenMPCapturedVar(VarDecl *VD) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

bool Sema::isOpenMPPrivateVar(VarDecl *VD, unsigned Level) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

void Sema::DestroyDataSharingAttributesStack() {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind,
                               const DeclarationNameInfo &DirName,
                               Scope *CurScope, SourceLocation Loc) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

void Sema::StartOpenMPClause(OpenMPClauseKind K) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

void Sema::EndOpenMPClause() {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope,
                                         CXXScopeSpec &ScopeSpec,
                                         const DeclarationNameInfo &Id) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

Sema::DeclGroupPtrTy
Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                        ArrayRef<Expr *> VarList) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

OMPThreadPrivateDecl *
Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
                                      ArrayRef<OMPClause *> Clauses) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}

StmtResult Sema::ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
  llvm_unreachable("HLSL does not support OpenMP constructs");
}
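// (None of these stubs should ever execute: the HLSL front end does not
// enable OpenMP parsing, so the entry points above and below exist purely to
// satisfy the linker, as noted at the top of the file.)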
StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPTaskgroupDirective(Stmt *AStmt, 
SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion) { llvm_unreachable("HLSL does not support OpenMP constructs"); } StmtResult Sema::ActOnOpenMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc, Expr *Op) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E, OpenMPClauseKind CKind) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPSimpleClause( OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindKwLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation 
KindKwLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPScheduleClause( OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind, SourceLocation DepLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause 
*Sema::ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPAlignedClause( ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } OMPClause * Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { llvm_unreachable("HLSL does not support OpenMP constructs"); } #else // HLSL Change Ends //===----------------------------------------------------------------------===// // Stack of data-sharing attributes for variables //===----------------------------------------------------------------------===// namespace { /// \brief Default data sharing attributes, which can be applied to directive. enum DefaultDataSharingAttributes { DSA_unspecified = 0, /// \brief Data sharing attribute not specified. DSA_none = 1 << 0, /// \brief Default data sharing attribute 'none'. DSA_shared = 1 << 1 /// \brief Default data sharing attribute 'shared'. }; template <class T> struct MatchesAny { explicit MatchesAny(ArrayRef<T> Arr) : Arr(std::move(Arr)) {} bool operator()(T Kind) { for (auto KindEl : Arr) if (KindEl == Kind) return true; return false; } private: ArrayRef<T> Arr; }; struct MatchesAlways { MatchesAlways() {} template <class T> bool operator()(T) { return true; } }; typedef MatchesAny<OpenMPClauseKind> MatchesAnyClause; typedef MatchesAny<OpenMPDirectiveKind> MatchesAnyDirective; /// \brief Stack for tracking declarations used in OpenMP directives and /// clauses and their data-sharing attributes. 
class DSAStackTy { public: struct DSAVarData { OpenMPDirectiveKind DKind; OpenMPClauseKind CKind; DeclRefExpr *RefExpr; SourceLocation ImplicitDSALoc; DSAVarData() : DKind(OMPD_unknown), CKind(OMPC_unknown), RefExpr(nullptr), ImplicitDSALoc() {} }; private: struct DSAInfo { OpenMPClauseKind Attributes; DeclRefExpr *RefExpr; }; typedef llvm::SmallDenseMap<VarDecl *, DSAInfo, 64> DeclSAMapTy; typedef llvm::SmallDenseMap<VarDecl *, DeclRefExpr *, 64> AlignedMapTy; typedef llvm::DenseSet<VarDecl *> LoopControlVariablesSetTy; struct SharingMapTy { DeclSAMapTy SharingMap; AlignedMapTy AlignedMap; LoopControlVariablesSetTy LCVSet; DefaultDataSharingAttributes DefaultAttr; SourceLocation DefaultAttrLoc; OpenMPDirectiveKind Directive; DeclarationNameInfo DirectiveName; Scope *CurScope; SourceLocation ConstructLoc; bool OrderedRegion; bool NowaitRegion; unsigned CollapseNumber; SourceLocation InnerTeamsRegionLoc; SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name, Scope *CurScope, SourceLocation Loc) : SharingMap(), AlignedMap(), LCVSet(), DefaultAttr(DSA_unspecified), Directive(DKind), DirectiveName(std::move(Name)), CurScope(CurScope), ConstructLoc(Loc), OrderedRegion(false), NowaitRegion(false), CollapseNumber(1), InnerTeamsRegionLoc() {} SharingMapTy() : SharingMap(), AlignedMap(), LCVSet(), DefaultAttr(DSA_unspecified), Directive(OMPD_unknown), DirectiveName(), CurScope(nullptr), ConstructLoc(), OrderedRegion(false), NowaitRegion(false), CollapseNumber(1), InnerTeamsRegionLoc() {} }; typedef SmallVector<SharingMapTy, 64> StackTy; /// \brief Stack of used declaration and their data-sharing attributes. StackTy Stack; /// \brief true, if check for DSA must be from parent directive, false, if /// from current directive. OpenMPClauseKind ClauseKindMode; Sema &SemaRef; bool ForceCapturing; typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator; DSAVarData getDSA(StackTy::reverse_iterator Iter, VarDecl *D); /// \brief Checks if the variable is a local for OpenMP region. bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter); public: explicit DSAStackTy(Sema &S) : Stack(1), ClauseKindMode(OMPC_unknown), SemaRef(S), ForceCapturing(false) {} bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; } void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; } bool isForceVarCapturing() const { return ForceCapturing; } void setForceVarCapturing(bool V) { ForceCapturing = V; } void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc) { Stack.push_back(SharingMapTy(DKind, DirName, CurScope, Loc)); Stack.back().DefaultAttrLoc = Loc; } void pop() { assert(Stack.size() > 1 && "Data-sharing attributes stack is empty!"); Stack.pop_back(); } /// \brief If 'aligned' declaration for given variable \a D was not seen yet, /// add it and return NULL; otherwise return previous occurrence's expression /// for diagnostics. DeclRefExpr *addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE); /// \brief Register specified variable as loop control variable. void addLoopControlVariable(VarDecl *D); /// \brief Check if the specified variable is a loop control variable for /// current region. bool isLoopControlVariable(VarDecl *D); /// \brief Adds explicit data sharing attribute to the specified declaration. void addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A); /// \brief Returns data sharing attributes from top of the stack for the /// specified declaration. 
DSAVarData getTopDSA(VarDecl *D, bool FromParent); /// \brief Returns data-sharing attributes for the specified declaration. DSAVarData getImplicitDSA(VarDecl *D, bool FromParent); /// \brief Checks if the specified variables has data-sharing attributes which /// match specified \a CPred predicate in any directive which matches \a DPred /// predicate. template <class ClausesPredicate, class DirectivesPredicate> DSAVarData hasDSA(VarDecl *D, ClausesPredicate CPred, DirectivesPredicate DPred, bool FromParent); /// \brief Checks if the specified variables has data-sharing attributes which /// match specified \a CPred predicate in any innermost directive which /// matches \a DPred predicate. template <class ClausesPredicate, class DirectivesPredicate> DSAVarData hasInnermostDSA(VarDecl *D, ClausesPredicate CPred, DirectivesPredicate DPred, bool FromParent); /// \brief Checks if the specified variables has explicit data-sharing /// attributes which match specified \a CPred predicate at the specified /// OpenMP region. bool hasExplicitDSA(VarDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred, unsigned Level); /// \brief Finds a directive which matches specified \a DPred predicate. template <class NamedDirectivesPredicate> bool hasDirective(NamedDirectivesPredicate DPred, bool FromParent); /// \brief Returns currently analyzed directive. OpenMPDirectiveKind getCurrentDirective() const { return Stack.back().Directive; } /// \brief Returns parent directive. OpenMPDirectiveKind getParentDirective() const { if (Stack.size() > 2) return Stack[Stack.size() - 2].Directive; return OMPD_unknown; } /// \brief Set default data sharing attribute to none. void setDefaultDSANone(SourceLocation Loc) { Stack.back().DefaultAttr = DSA_none; Stack.back().DefaultAttrLoc = Loc; } /// \brief Set default data sharing attribute to shared. void setDefaultDSAShared(SourceLocation Loc) { Stack.back().DefaultAttr = DSA_shared; Stack.back().DefaultAttrLoc = Loc; } DefaultDataSharingAttributes getDefaultDSA() const { return Stack.back().DefaultAttr; } SourceLocation getDefaultDSALocation() const { return Stack.back().DefaultAttrLoc; } /// \brief Checks if the specified variable is a threadprivate. bool isThreadPrivate(VarDecl *D) { DSAVarData DVar = getTopDSA(D, false); return isOpenMPThreadPrivate(DVar.CKind); } /// \brief Marks current region as ordered (it has an 'ordered' clause). void setOrderedRegion(bool IsOrdered = true) { Stack.back().OrderedRegion = IsOrdered; } /// \brief Returns true, if parent region is ordered (has associated /// 'ordered' clause), false - otherwise. bool isParentOrderedRegion() const { if (Stack.size() > 2) return Stack[Stack.size() - 2].OrderedRegion; return false; } /// \brief Marks current region as nowait (it has a 'nowait' clause). void setNowaitRegion(bool IsNowait = true) { Stack.back().NowaitRegion = IsNowait; } /// \brief Returns true, if parent region is nowait (has associated /// 'nowait' clause), false - otherwise. bool isParentNowaitRegion() const { if (Stack.size() > 2) return Stack[Stack.size() - 2].NowaitRegion; return false; } /// \brief Set collapse value for the region. void setCollapseNumber(unsigned Val) { Stack.back().CollapseNumber = Val; } /// \brief Return collapse value for region. unsigned getCollapseNumber() const { return Stack.back().CollapseNumber; } /// \brief Marks current target region as one with closely nested teams /// region. 
void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) { if (Stack.size() > 2) Stack[Stack.size() - 2].InnerTeamsRegionLoc = TeamsRegionLoc; } /// \brief Returns true, if current region has closely nested teams region. bool hasInnerTeamsRegion() const { return getInnerTeamsRegionLoc().isValid(); } /// \brief Returns location of the nested teams region (if any). SourceLocation getInnerTeamsRegionLoc() const { if (Stack.size() > 1) return Stack.back().InnerTeamsRegionLoc; return SourceLocation(); } Scope *getCurScope() const { return Stack.back().CurScope; } Scope *getCurScope() { return Stack.back().CurScope; } SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; } }; bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) { return isOpenMPParallelDirective(DKind) || DKind == OMPD_task || isOpenMPTeamsDirective(DKind) || DKind == OMPD_unknown; } } // namespace DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator Iter, VarDecl *D) { D = D->getCanonicalDecl(); DSAVarData DVar; if (Iter == std::prev(Stack.rend())) { // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a region but not in construct] // File-scope or namespace-scope variables referenced in called routines // in the region are shared unless they appear in a threadprivate // directive. if (!D->isFunctionOrMethodVarDecl() && !isa<ParmVarDecl>(D)) DVar.CKind = OMPC_shared; // OpenMP [2.9.1.2, Data-sharing Attribute Rules for Variables Referenced // in a region but not in construct] // Variables with static storage duration that are declared in called // routines in the region are shared. if (D->hasGlobalStorage()) DVar.CKind = OMPC_shared; return DVar; } DVar.DKind = Iter->Directive; // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.1] // Variables with automatic storage duration that are declared in a scope // inside the construct are private. if (isOpenMPLocal(D, Iter) && D->isLocalVarDecl() && (D->getStorageClass() == SC_Auto || D->getStorageClass() == SC_None)) { DVar.CKind = OMPC_private; return DVar; } // Explicitly specified attributes and local variables with predetermined // attributes. if (Iter->SharingMap.count(D)) { DVar.RefExpr = Iter->SharingMap[D].RefExpr; DVar.CKind = Iter->SharingMap[D].Attributes; DVar.ImplicitDSALoc = Iter->DefaultAttrLoc; return DVar; } // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, implicitly determined, p.1] // In a parallel or task construct, the data-sharing attributes of these // variables are determined by the default clause, if present. switch (Iter->DefaultAttr) { case DSA_shared: DVar.CKind = OMPC_shared; DVar.ImplicitDSALoc = Iter->DefaultAttrLoc; return DVar; case DSA_none: return DVar; case DSA_unspecified: // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, implicitly determined, p.2] // In a parallel construct, if no default clause is present, these // variables are shared. DVar.ImplicitDSALoc = Iter->DefaultAttrLoc; if (isOpenMPParallelDirective(DVar.DKind) || isOpenMPTeamsDirective(DVar.DKind)) { DVar.CKind = OMPC_shared; return DVar; } // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, implicitly determined, p.4] // In a task construct, if no default clause is present, a variable that in // the enclosing context is determined to be shared by all implicit tasks // bound to the current team is shared. 
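      // Illustrative example of this rule (not from the spec text):
      //   int a;
      //   #pragma omp parallel shared(a)
      //   #pragma omp task   // no default clause: 'a' remains shared here,
      //   a = 1;             // since it is shared by all implicit tasks
      //                      // bound to the current team.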
    if (DVar.DKind == OMPD_task) {
      DSAVarData DVarTemp;
      for (StackTy::reverse_iterator I = std::next(Iter), EE = Stack.rend();
           I != EE; ++I) {
        // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
        // Referenced in a Construct, implicitly determined, p.6]
        // In a task construct, if no default clause is present, a variable
        // whose data-sharing attribute is not determined by the rules above
        // is firstprivate.
        DVarTemp = getDSA(I, D);
        if (DVarTemp.CKind != OMPC_shared) {
          DVar.RefExpr = nullptr;
          DVar.DKind = OMPD_task;
          DVar.CKind = OMPC_firstprivate;
          return DVar;
        }
        if (isParallelOrTaskRegion(I->Directive))
          break;
      }
      DVar.DKind = OMPD_task;
      DVar.CKind =
          (DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
      return DVar;
    }
  }
  // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
  // in a Construct, implicitly determined, p.3]
  // For constructs other than task, if no default clause is present, these
  // variables inherit their data-sharing attributes from the enclosing
  // context.
  return getDSA(std::next(Iter), D);
}

DeclRefExpr *DSAStackTy::addUniqueAligned(VarDecl *D, DeclRefExpr *NewDE) {
  assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
  D = D->getCanonicalDecl();
  auto It = Stack.back().AlignedMap.find(D);
  if (It == Stack.back().AlignedMap.end()) {
    assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
    Stack.back().AlignedMap[D] = NewDE;
    return nullptr;
  }
  // Both branches return, so the old trailing 'return nullptr;' was
  // unreachable and has been dropped.
  assert(It->second && "Unexpected nullptr expr in the aligned map");
  return It->second;
}

void DSAStackTy::addLoopControlVariable(VarDecl *D) {
  assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
  D = D->getCanonicalDecl();
  Stack.back().LCVSet.insert(D);
}

bool DSAStackTy::isLoopControlVariable(VarDecl *D) {
  assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
  D = D->getCanonicalDecl();
  return Stack.back().LCVSet.count(D) > 0;
}

void DSAStackTy::addDSA(VarDecl *D, DeclRefExpr *E, OpenMPClauseKind A) {
  D = D->getCanonicalDecl();
  if (A == OMPC_threadprivate) {
    Stack[0].SharingMap[D].Attributes = A;
    Stack[0].SharingMap[D].RefExpr = E;
  } else {
    assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
    Stack.back().SharingMap[D].Attributes = A;
    Stack.back().SharingMap[D].RefExpr = E;
  }
}

bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
  D = D->getCanonicalDecl();
  if (Stack.size() > 2) {
    reverse_iterator I = Iter, E = std::prev(Stack.rend());
    Scope *TopScope = nullptr;
    while (I != E && !isParallelOrTaskRegion(I->Directive)) {
      ++I;
    }
    if (I == E)
      return false;
    TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
    Scope *CurScope = getCurScope();
    while (CurScope != TopScope && !CurScope->isDeclScope(D)) {
      CurScope = CurScope->getParent();
    }
    return CurScope != TopScope;
  }
  return false;
}

/// \brief Build a variable declaration for OpenMP loop iteration variable.
static VarDecl *buildVarDecl(Sema &SemaRef, SourceLocation Loc, QualType Type, StringRef Name) { DeclContext *DC = SemaRef.CurContext; IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name); TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc); VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type, TInfo, SC_None); Decl->setImplicit(); return Decl; } static DeclRefExpr *buildDeclRefExpr(Sema &S, VarDecl *D, QualType Ty, SourceLocation Loc, bool RefersToCapture = false) { D->setReferenced(); D->markUsed(S.Context); return DeclRefExpr::Create(S.getASTContext(), NestedNameSpecifierLoc(), SourceLocation(), D, RefersToCapture, Loc, Ty, VK_LValue); } DSAStackTy::DSAVarData DSAStackTy::getTopDSA(VarDecl *D, bool FromParent) { D = D->getCanonicalDecl(); DSAVarData DVar; // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.1] // Variables appearing in threadprivate directives are threadprivate. if ((D->getTLSKind() != VarDecl::TLS_None && !(D->hasAttr<OMPThreadPrivateDeclAttr>() && SemaRef.getLangOpts().OpenMPUseTLS && SemaRef.getASTContext().getTargetInfo().isTLSSupported())) || (D->getStorageClass() == SC_Register && D->hasAttr<AsmLabelAttr>() && !D->isLocalVarDecl())) { addDSA(D, buildDeclRefExpr(SemaRef, D, D->getType().getNonReferenceType(), D->getLocation()), OMPC_threadprivate); } if (Stack[0].SharingMap.count(D)) { DVar.RefExpr = Stack[0].SharingMap[D].RefExpr; DVar.CKind = OMPC_threadprivate; return DVar; } // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.1] // Variables with automatic storage duration that are declared in a scope // inside the construct are private. OpenMPDirectiveKind Kind = FromParent ? getParentDirective() : getCurrentDirective(); auto StartI = std::next(Stack.rbegin()); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { StartI = std::next(StartI); } if (!isParallelOrTaskRegion(Kind)) { if (isOpenMPLocal(D, StartI) && ((D->isLocalVarDecl() && (D->getStorageClass() == SC_Auto || D->getStorageClass() == SC_None)) || isa<ParmVarDecl>(D))) { DVar.CKind = OMPC_private; return DVar; } // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.4] // Static data members are shared. // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.7] // Variables with static storage duration that are declared in a scope // inside the construct are shared. if (D->isStaticDataMember() || D->isStaticLocal()) { DSAVarData DVarTemp = hasDSA(D, isOpenMPPrivate, MatchesAlways(), FromParent); if (DVarTemp.CKind != OMPC_unknown && DVarTemp.RefExpr) return DVar; DVar.CKind = OMPC_shared; return DVar; } } QualType Type = D->getType().getNonReferenceType().getCanonicalType(); bool IsConstant = Type.isConstant(SemaRef.getASTContext()); Type = SemaRef.getASTContext().getBaseElementType(Type); // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct, C/C++, predetermined, p.6] // Variables with const qualified type having no mutable member are // shared. CXXRecordDecl *RD = SemaRef.getLangOpts().CPlusPlus ? 
Type->getAsCXXRecordDecl() : nullptr; if (IsConstant && !(SemaRef.getLangOpts().CPlusPlus && RD && RD->hasMutableFields())) { // Variables with const-qualified type having no mutable member may be // listed in a firstprivate clause, even if they are static data members. DSAVarData DVarTemp = hasDSA(D, MatchesAnyClause(OMPC_firstprivate), MatchesAlways(), FromParent); if (DVarTemp.CKind == OMPC_firstprivate && DVarTemp.RefExpr) return DVar; DVar.CKind = OMPC_shared; return DVar; } // Explicitly specified attributes and local variables with predetermined // attributes. auto I = std::prev(StartI); if (I->SharingMap.count(D)) { DVar.RefExpr = I->SharingMap[D].RefExpr; DVar.CKind = I->SharingMap[D].Attributes; DVar.ImplicitDSALoc = I->DefaultAttrLoc; } return DVar; } DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(VarDecl *D, bool FromParent) { D = D->getCanonicalDecl(); auto StartI = Stack.rbegin(); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { StartI = std::next(StartI); } return getDSA(StartI, D); } template <class ClausesPredicate, class DirectivesPredicate> DSAStackTy::DSAVarData DSAStackTy::hasDSA(VarDecl *D, ClausesPredicate CPred, DirectivesPredicate DPred, bool FromParent) { D = D->getCanonicalDecl(); auto StartI = std::next(Stack.rbegin()); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { StartI = std::next(StartI); } for (auto I = StartI, EE = EndI; I != EE; ++I) { if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive)) continue; DSAVarData DVar = getDSA(I, D); if (CPred(DVar.CKind)) return DVar; } return DSAVarData(); } template <class ClausesPredicate, class DirectivesPredicate> DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(VarDecl *D, ClausesPredicate CPred, DirectivesPredicate DPred, bool FromParent) { D = D->getCanonicalDecl(); auto StartI = std::next(Stack.rbegin()); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { StartI = std::next(StartI); } for (auto I = StartI, EE = EndI; I != EE; ++I) { if (!DPred(I->Directive)) break; DSAVarData DVar = getDSA(I, D); if (CPred(DVar.CKind)) return DVar; return DSAVarData(); } return DSAVarData(); } bool DSAStackTy::hasExplicitDSA( VarDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred, unsigned Level) { if (CPred(ClauseKindMode)) return true; if (isClauseParsingMode()) ++Level; D = D->getCanonicalDecl(); auto StartI = Stack.rbegin(); auto EndI = std::prev(Stack.rend()); if (std::distance(StartI, EndI) <= (int)Level) return false; std::advance(StartI, Level); return (StartI->SharingMap.count(D) > 0) && StartI->SharingMap[D].RefExpr && CPred(StartI->SharingMap[D].Attributes); } template <class NamedDirectivesPredicate> bool DSAStackTy::hasDirective(NamedDirectivesPredicate DPred, bool FromParent) { auto StartI = std::next(Stack.rbegin()); auto EndI = std::prev(Stack.rend()); if (FromParent && StartI != EndI) { StartI = std::next(StartI); } for (auto I = StartI, EE = EndI; I != EE; ++I) { if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc)) return true; } return false; } void Sema::InitDataSharingAttributesStack() { VarDataSharingAttributesStack = new DSAStackTy(*this); } #define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack) bool Sema::IsOpenMPCapturedVar(VarDecl *VD) { assert(LangOpts.OpenMP && "OpenMP is not allowed"); VD = VD->getCanonicalDecl(); if (DSAStack->getCurrentDirective() != OMPD_unknown && (!DSAStack->isClauseParsingMode() || DSAStack->getParentDirective() != OMPD_unknown)) { if 
(DSAStack->isLoopControlVariable(VD) || (VD->hasLocalStorage() && isParallelOrTaskRegion(DSAStack->getCurrentDirective())) || DSAStack->isForceVarCapturing()) return true; auto DVarPrivate = DSAStack->getTopDSA(VD, DSAStack->isClauseParsingMode()); if (DVarPrivate.CKind != OMPC_unknown && isOpenMPPrivate(DVarPrivate.CKind)) return true; DVarPrivate = DSAStack->hasDSA(VD, isOpenMPPrivate, MatchesAlways(), DSAStack->isClauseParsingMode()); return DVarPrivate.CKind != OMPC_unknown; } return false; } bool Sema::isOpenMPPrivateVar(VarDecl *VD, unsigned Level) { assert(LangOpts.OpenMP && "OpenMP is not allowed"); return DSAStack->hasExplicitDSA( VD, [](OpenMPClauseKind K) -> bool { return K == OMPC_private; }, Level); } void Sema::DestroyDataSharingAttributesStack() { delete DSAStack; } void Sema::StartOpenMPDSABlock(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc) { DSAStack->push(DKind, DirName, CurScope, Loc); PushExpressionEvaluationContext(PotentiallyEvaluated); } void Sema::StartOpenMPClause(OpenMPClauseKind K) { DSAStack->setClauseParsingMode(K); } void Sema::EndOpenMPClause() { DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown); } void Sema::EndOpenMPDSABlock(Stmt *CurDirective) { // OpenMP [2.14.3.5, Restrictions, C/C++, p.1] // A variable of class type (or array thereof) that appears in a lastprivate // clause requires an accessible, unambiguous default constructor for the // class type, unless the list item is also specified in a firstprivate // clause. if (auto D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) { for (auto *C : D->clauses()) { if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) { SmallVector<Expr *, 8> PrivateCopies; for (auto *DE : Clause->varlists()) { if (DE->isValueDependent() || DE->isTypeDependent()) { PrivateCopies.push_back(nullptr); continue; } auto *VD = cast<VarDecl>(cast<DeclRefExpr>(DE)->getDecl()); QualType Type = VD->getType(); auto DVar = DSAStack->getTopDSA(VD, false); if (DVar.CKind == OMPC_lastprivate) { // Generate helper private variable and initialize it with the // default value. The address of the original variable is replaced // by the address of the new private variable in CodeGen. This new // variable is not added to IdResolver, so the code in the OpenMP // region uses original variable for proper diagnostics. auto *VDPrivate = buildVarDecl(*this, DE->getExprLoc(), Type.getUnqualifiedType(), VD->getName()); ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false); if (VDPrivate->isInvalidDecl()) continue; PrivateCopies.push_back(buildDeclRefExpr( *this, VDPrivate, DE->getType(), DE->getExprLoc())); } else { // The variable is also a firstprivate, so initialization sequence // for private copy is generated already. PrivateCopies.push_back(nullptr); } } // Set initializers to private copies if no errors were found. 
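          // E.g. (illustrative): for '#pragma omp for lastprivate(s)' where
          // 's' has class type, the loop above built an implicit
          // default-constructed copy of 's'; CodeGen substitutes its address
          // inside the region and copies the value back after the last
          // iteration.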
if (PrivateCopies.size() == Clause->varlist_size()) { Clause->setPrivateCopies(PrivateCopies); } } } } DSAStack->pop(); DiscardCleanupsInEvaluationContext(); PopExpressionEvaluationContext(); } static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, Expr *NumIterations, Sema &SemaRef, Scope *S); namespace { class VarDeclFilterCCC : public CorrectionCandidateCallback { private: Sema &SemaRef; public: explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {} bool ValidateCandidate(const TypoCorrection &Candidate) override { NamedDecl *ND = Candidate.getCorrectionDecl(); if (VarDecl *VD = dyn_cast_or_null<VarDecl>(ND)) { return VD->hasGlobalStorage() && SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(), SemaRef.getCurScope()); } return false; } }; } // namespace ExprResult Sema::ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id) { LookupResult Lookup(*this, Id, LookupOrdinaryName); LookupParsedName(Lookup, CurScope, &ScopeSpec, true); if (Lookup.isAmbiguous()) return ExprError(); VarDecl *VD; if (!Lookup.isSingleResult()) { if (TypoCorrection Corrected = CorrectTypo( Id, LookupOrdinaryName, CurScope, nullptr, llvm::make_unique<VarDeclFilterCCC>(*this), CTK_ErrorRecovery)) { diagnoseTypo(Corrected, PDiag(Lookup.empty() ? diag::err_undeclared_var_use_suggest : diag::err_omp_expected_var_arg_suggest) << Id.getName()); VD = Corrected.getCorrectionDeclAs<VarDecl>(); } else { Diag(Id.getLoc(), Lookup.empty() ? diag::err_undeclared_var_use : diag::err_omp_expected_var_arg) << Id.getName(); return ExprError(); } } else { if (!(VD = Lookup.getAsSingle<VarDecl>())) { Diag(Id.getLoc(), diag::err_omp_expected_var_arg) << Id.getName(); Diag(Lookup.getFoundDecl()->getLocation(), diag::note_declared_at); return ExprError(); } } Lookup.suppressDiagnostics(); // OpenMP [2.9.2, Syntax, C/C++] // Variables must be file-scope, namespace-scope, or static block-scope. if (!VD->hasGlobalStorage()) { Diag(Id.getLoc(), diag::err_omp_global_var_arg) << getOpenMPDirectiveName(OMPD_threadprivate) << !VD->isStaticLocal(); bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; return ExprError(); } VarDecl *CanonicalVD = VD->getCanonicalDecl(); NamedDecl *ND = cast<NamedDecl>(CanonicalVD); // OpenMP [2.9.2, Restrictions, C/C++, p.2] // A threadprivate directive for file-scope variables must appear outside // any definition or declaration. if (CanonicalVD->getDeclContext()->isTranslationUnit() && !getCurLexicalContext()->isTranslationUnit()) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(OMPD_threadprivate) << VD; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; return ExprError(); } // OpenMP [2.9.2, Restrictions, C/C++, p.3] // A threadprivate directive for static class member variables must appear // in the class definition, in the same scope in which the member // variables are declared. if (CanonicalVD->isStaticDataMember() && !CanonicalVD->getDeclContext()->Equals(getCurLexicalContext())) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(OMPD_threadprivate) << VD; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? 
diag::note_previous_decl : diag::note_defined_here) << VD; return ExprError(); } // OpenMP [2.9.2, Restrictions, C/C++, p.4] // A threadprivate directive for namespace-scope variables must appear // outside any definition or declaration other than the namespace // definition itself. if (CanonicalVD->getDeclContext()->isNamespace() && (!getCurLexicalContext()->isFileContext() || !getCurLexicalContext()->Encloses(CanonicalVD->getDeclContext()))) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(OMPD_threadprivate) << VD; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; return ExprError(); } // OpenMP [2.9.2, Restrictions, C/C++, p.6] // A threadprivate directive for static block-scope variables must appear // in the scope of the variable and not in a nested scope. if (CanonicalVD->isStaticLocal() && CurScope && !isDeclInScope(ND, getCurLexicalContext(), CurScope)) { Diag(Id.getLoc(), diag::err_omp_var_scope) << getOpenMPDirectiveName(OMPD_threadprivate) << VD; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; return ExprError(); } // OpenMP [2.9.2, Restrictions, C/C++, p.2-6] // A threadprivate directive must lexically precede all references to any // of the variables in its list. if (VD->isUsed() && !DSAStack->isThreadPrivate(VD)) { Diag(Id.getLoc(), diag::err_omp_var_used) << getOpenMPDirectiveName(OMPD_threadprivate) << VD; return ExprError(); } QualType ExprType = VD->getType().getNonReferenceType(); ExprResult DE = buildDeclRefExpr(*this, VD, ExprType, Id.getLoc()); return DE; } Sema::DeclGroupPtrTy Sema::ActOnOpenMPThreadprivateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList) { if (OMPThreadPrivateDecl *D = CheckOMPThreadPrivateDecl(Loc, VarList)) { CurContext->addDecl(D); return DeclGroupPtrTy::make(DeclGroupRef(D)); } return DeclGroupPtrTy(); } namespace { class LocalVarRefChecker : public ConstStmtVisitor<LocalVarRefChecker, bool> { Sema &SemaRef; public: bool VisitDeclRefExpr(const DeclRefExpr *E) { if (auto VD = dyn_cast<VarDecl>(E->getDecl())) { if (VD->hasLocalStorage()) { SemaRef.Diag(E->getLocStart(), diag::err_omp_local_var_in_threadprivate_init) << E->getSourceRange(); SemaRef.Diag(VD->getLocation(), diag::note_defined_here) << VD << VD->getSourceRange(); return true; } } return false; } bool VisitStmt(const Stmt *S) { for (auto Child : S->children()) { if (Child && Visit(Child)) return true; } return false; } explicit LocalVarRefChecker(Sema &SemaRef) : SemaRef(SemaRef) {} }; } // namespace OMPThreadPrivateDecl * Sema::CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList) { SmallVector<Expr *, 8> Vars; for (auto &RefExpr : VarList) { DeclRefExpr *DE = cast<DeclRefExpr>(RefExpr); VarDecl *VD = cast<VarDecl>(DE->getDecl()); SourceLocation ILoc = DE->getExprLoc(); QualType QType = VD->getType(); if (QType->isDependentType() || QType->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); continue; } // OpenMP [2.9.2, Restrictions, C/C++, p.10] // A threadprivate variable must not have an incomplete type. if (RequireCompleteType(ILoc, VD->getType(), diag::err_omp_threadprivate_incomplete_type)) { continue; } // OpenMP [2.9.2, Restrictions, C/C++, p.10] // A threadprivate variable must not have a reference type. 
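    // E.g. (illustrative), rejected by the check below:
    //   int v;
    //   int &r = v;
    //   #pragma omp threadprivate(r)  // error: variable of reference type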
    if (VD->getType()->isReferenceType()) {
      Diag(ILoc, diag::err_omp_ref_type_arg)
          << getOpenMPDirectiveName(OMPD_threadprivate) << VD->getType();
      bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
                    VarDecl::DeclarationOnly;
      Diag(VD->getLocation(),
           IsDecl ? diag::note_previous_decl : diag::note_defined_here)
          << VD;
      continue;
    }

    // Check if this is a TLS variable. If TLS is not being supported,
    // produce the corresponding diagnostic.
    if ((VD->getTLSKind() != VarDecl::TLS_None &&
         !(VD->hasAttr<OMPThreadPrivateDeclAttr>() &&
           getLangOpts().OpenMPUseTLS &&
           getASTContext().getTargetInfo().isTLSSupported())) ||
        (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() &&
         !VD->isLocalVarDecl())) {
      Diag(ILoc, diag::err_omp_var_thread_local)
          << VD << ((VD->getTLSKind() != VarDecl::TLS_None) ? 0 : 1);
      bool IsDecl = VD->isThisDeclarationADefinition(Context) ==
                    VarDecl::DeclarationOnly;
      Diag(VD->getLocation(),
           IsDecl ? diag::note_previous_decl : diag::note_defined_here)
          << VD;
      continue;
    }

    // Check if the initial value of the threadprivate variable references a
    // variable with local storage (not supported by the runtime).
    if (auto Init = VD->getAnyInitializer()) {
      LocalVarRefChecker Checker(*this);
      if (Checker.Visit(Init))
        continue;
    }

    Vars.push_back(RefExpr);
    DSAStack->addDSA(VD, DE, OMPC_threadprivate);
    VD->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
        Context, SourceRange(Loc, Loc)));
    if (auto *ML = Context.getASTMutationListener())
      ML->DeclarationMarkedOpenMPThreadPrivate(VD);
  }
  OMPThreadPrivateDecl *D = nullptr;
  if (!Vars.empty()) {
    D = OMPThreadPrivateDecl::Create(Context, getCurLexicalContext(), Loc,
                                     Vars);
    D->setAccess(AS_public);
  }
  return D;
}

static void ReportOriginalDSA(Sema &SemaRef, DSAStackTy *Stack,
                              const VarDecl *VD, DSAStackTy::DSAVarData DVar,
                              bool IsLoopIterVar = false) {
  if (DVar.RefExpr) {
    SemaRef.Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_explicit_dsa)
        << getOpenMPClauseName(DVar.CKind);
    return;
  }
  enum {
    PDSA_StaticMemberShared,
    PDSA_StaticLocalVarShared,
    PDSA_LoopIterVarPrivate,
    PDSA_LoopIterVarLinear,
    PDSA_LoopIterVarLastprivate,
    PDSA_ConstVarShared,
    PDSA_GlobalVarShared,
    PDSA_TaskVarFirstprivate,
    PDSA_LocalVarPrivate,
    PDSA_Implicit
  } Reason = PDSA_Implicit;
  bool ReportHint = false;
  auto ReportLoc = VD->getLocation();
  if (IsLoopIterVar) {
    if (DVar.CKind == OMPC_private)
      Reason = PDSA_LoopIterVarPrivate;
    else if (DVar.CKind == OMPC_lastprivate)
      Reason = PDSA_LoopIterVarLastprivate;
    else
      Reason = PDSA_LoopIterVarLinear;
  } else if (DVar.DKind == OMPD_task && DVar.CKind == OMPC_firstprivate) {
    Reason = PDSA_TaskVarFirstprivate;
    ReportLoc = DVar.ImplicitDSALoc;
  } else if (VD->isStaticLocal())
    Reason = PDSA_StaticLocalVarShared;
  else if (VD->isStaticDataMember())
    Reason = PDSA_StaticMemberShared;
  else if (VD->isFileVarDecl())
    Reason = PDSA_GlobalVarShared;
  else if (VD->getType().isConstant(SemaRef.getASTContext()))
    Reason = PDSA_ConstVarShared;
  else if (VD->isLocalVarDecl() && DVar.CKind == OMPC_private) {
    ReportHint = true;
    Reason = PDSA_LocalVarPrivate;
  }
  if (Reason != PDSA_Implicit) {
    SemaRef.Diag(ReportLoc, diag::note_omp_predetermined_dsa)
        << Reason << ReportHint
        << getOpenMPDirectiveName(Stack->getCurrentDirective());
  } else if (DVar.ImplicitDSALoc.isValid()) {
    SemaRef.Diag(DVar.ImplicitDSALoc, diag::note_omp_implicit_dsa)
        << getOpenMPClauseName(DVar.CKind);
  }
}

namespace {
class DSAAttrChecker : public StmtVisitor<DSAAttrChecker, void> {
  DSAStackTy *Stack;
  Sema &SemaRef;
  bool ErrorFound;
  CapturedStmt *CS;
  llvm::SmallVector<Expr *, 8> ImplicitFirstprivate;
  llvm::DenseMap<VarDecl *, Expr *> VarsWithInheritedDSA;

public:
  void VisitDeclRefExpr(DeclRefExpr *E) {
    if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
      // Skip internally declared variables.
      if (VD->isLocalVarDecl() && !CS->capturesVariable(VD))
        return;
      auto DVar = Stack->getTopDSA(VD, false);
      // Check if the variable has an explicit DSA set, and stop the analysis
      // if so.
      if (DVar.RefExpr)
        return;
      auto ELoc = E->getExprLoc();
      auto DKind = Stack->getCurrentDirective();
      // The default(none) clause requires that each variable that is
      // referenced in the construct, and does not have a predetermined
      // data-sharing attribute, must have its data-sharing attribute
      // explicitly determined by being listed in a data-sharing attribute
      // clause.
      if (DVar.CKind == OMPC_unknown && Stack->getDefaultDSA() == DSA_none &&
          isParallelOrTaskRegion(DKind) &&
          VarsWithInheritedDSA.count(VD) == 0) {
        VarsWithInheritedDSA[VD] = E;
        return;
      }
      // OpenMP [2.9.3.6, Restrictions, p.2]
      // A list item that appears in a reduction clause of the innermost
      // enclosing worksharing or parallel construct may not be accessed in
      // an explicit task.
      DVar = Stack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_reduction),
                                    [](OpenMPDirectiveKind K) -> bool {
                                      return isOpenMPParallelDirective(K) ||
                                             isOpenMPWorksharingDirective(K) ||
                                             isOpenMPTeamsDirective(K);
                                    },
                                    false);
      if (DKind == OMPD_task && DVar.CKind == OMPC_reduction) {
        ErrorFound = true;
        SemaRef.Diag(ELoc, diag::err_omp_reduction_in_task);
        ReportOriginalDSA(SemaRef, Stack, VD, DVar);
        return;
      }
      // Define implicit data-sharing attributes for task.
      DVar = Stack->getImplicitDSA(VD, false);
      if (DKind == OMPD_task && DVar.CKind != OMPC_shared)
        ImplicitFirstprivate.push_back(E);
    }
  }
  void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
    for (auto *C : S->clauses()) {
      // Skip analysis of arguments of implicitly defined firstprivate clause
      // for task directives.
if (C && (!isa<OMPFirstprivateClause>(C) || C->getLocStart().isValid())) for (auto *CC : C->children()) { if (CC) Visit(CC); } } } void VisitStmt(Stmt *S) { for (auto *C : S->children()) { if (C && !isa<OMPExecutableDirective>(C)) Visit(C); } } bool isErrorFound() { return ErrorFound; } ArrayRef<Expr *> getImplicitFirstprivate() { return ImplicitFirstprivate; } llvm::DenseMap<VarDecl *, Expr *> &getVarsWithInheritedDSA() { return VarsWithInheritedDSA; } DSAAttrChecker(DSAStackTy *S, Sema &SemaRef, CapturedStmt *CS) : Stack(S), SemaRef(SemaRef), ErrorFound(false), CS(CS) {} }; } // namespace void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) { switch (DKind) { case OMPD_parallel: { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty); Sema::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_simd: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_for: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_for_simd: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_sections: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_section: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_single: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_master: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_critical: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_parallel_for: { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty); Sema::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_parallel_for_simd: { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty); 
Sema::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_parallel_sections: { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty); Sema::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_task: { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); QualType Args[] = {Context.VoidPtrTy.withConst().withRestrict()}; FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = true; QualType CopyFnType = Context.getFunctionType(Context.VoidTy, Args, EPI); Sema::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32Ty), std::make_pair(".part_id.", KmpInt32Ty), std::make_pair(".privates.", Context.VoidPtrTy.withConst().withRestrict()), std::make_pair( ".copy_fn.", Context.getPointerType(CopyFnType).withConst().withRestrict()), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); // Mark this captured region as inlined, because we don't use outlined // function directly. getCurCapturedRegion()->TheCapturedDecl->addAttr( AlwaysInlineAttr::CreateImplicit( Context, AlwaysInlineAttr::Keyword_forceinline, SourceRange())); break; } case OMPD_ordered: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_atomic: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_target: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_teams: { QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1); QualType KmpInt32PtrTy = Context.getPointerType(KmpInt32Ty); Sema::CapturedParamNameType Params[] = { std::make_pair(".global_tid.", KmpInt32PtrTy), std::make_pair(".bound_tid.", KmpInt32PtrTy), std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_taskgroup: { Sema::CapturedParamNameType Params[] = { std::make_pair(StringRef(), QualType()) // __context with shared vars }; ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP, Params); break; } case OMPD_threadprivate: case OMPD_taskyield: case OMPD_barrier: case OMPD_taskwait: case OMPD_cancellation_point: case OMPD_cancel: case OMPD_flush: llvm_unreachable("OpenMP Directive is not allowed"); case OMPD_unknown: llvm_unreachable("Unknown OpenMP directive"); } } StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses) { if (!S.isUsable()) { ActOnCapturedRegionError(); return StmtError(); } // This is required for proper codegen. 
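  // E.g. (illustrative): in '#pragma omp parallel private(x)', 'x' need not
  // appear in the region body at all, yet it must still be captured so the
  // outlined function can materialize the private copy; the loop below
  // forces those captures.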
for (auto *Clause : Clauses) { if (isOpenMPPrivate(Clause->getClauseKind()) || Clause->getClauseKind() == OMPC_copyprivate || (getLangOpts().OpenMPUseTLS && getASTContext().getTargetInfo().isTLSSupported() && Clause->getClauseKind() == OMPC_copyin)) { DSAStack->setForceVarCapturing(Clause->getClauseKind() == OMPC_copyin); // Mark all variables in private list clauses as used in inner region. for (auto *VarRef : Clause->children()) { if (auto *E = cast_or_null<Expr>(VarRef)) { MarkDeclarationsReferencedInExpr(E); } } DSAStack->setForceVarCapturing(/*V=*/false); } else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective()) && Clause->getClauseKind() == OMPC_schedule) { // Mark all variables in private list clauses as used in inner region. // Required for proper codegen of combined directives. // TODO: add processing for other clauses. if (auto *E = cast_or_null<Expr>( cast<OMPScheduleClause>(Clause)->getHelperChunkSize())) { MarkDeclarationsReferencedInExpr(E); } } } return ActOnCapturedRegionEnd(S.get()); } static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack, OpenMPDirectiveKind CurrentRegion, const DeclarationNameInfo &CurrentName, OpenMPDirectiveKind CancelRegion, SourceLocation StartLoc) { // Allowed nesting of constructs // +------------------+-----------------+------------------------------------+ // | Parent directive | Child directive | Closely (!), No-Closely(+), Both(*)| // +------------------+-----------------+------------------------------------+ // | parallel | parallel | * | // | parallel | for | * | // | parallel | for simd | * | // | parallel | master | * | // | parallel | critical | * | // | parallel | simd | * | // | parallel | sections | * | // | parallel | section | + | // | parallel | single | * | // | parallel | parallel for | * | // | parallel |parallel for simd| * | // | parallel |parallel sections| * | // | parallel | task | * | // | parallel | taskyield | * | // | parallel | barrier | * | // | parallel | taskwait | * | // | parallel | taskgroup | * | // | parallel | flush | * | // | parallel | ordered | + | // | parallel | atomic | * | // | parallel | target | * | // | parallel | teams | + | // | parallel | cancellation | | // | | point | ! | // | parallel | cancel | ! | // +------------------+-----------------+------------------------------------+ // | for | parallel | * | // | for | for | + | // | for | for simd | + | // | for | master | + | // | for | critical | * | // | for | simd | * | // | for | sections | + | // | for | section | + | // | for | single | + | // | for | parallel for | * | // | for |parallel for simd| * | // | for |parallel sections| * | // | for | task | * | // | for | taskyield | * | // | for | barrier | + | // | for | taskwait | * | // | for | taskgroup | * | // | for | flush | * | // | for | ordered | * (if construct is ordered) | // | for | atomic | * | // | for | target | * | // | for | teams | + | // | for | cancellation | | // | | point | ! | // | for | cancel | ! 
| // +------------------+-----------------+------------------------------------+ // | master | parallel | * | // | master | for | + | // | master | for simd | + | // | master | master | * | // | master | critical | * | // | master | simd | * | // | master | sections | + | // | master | section | + | // | master | single | + | // | master | parallel for | * | // | master |parallel for simd| * | // | master |parallel sections| * | // | master | task | * | // | master | taskyield | * | // | master | barrier | + | // | master | taskwait | * | // | master | taskgroup | * | // | master | flush | * | // | master | ordered | + | // | master | atomic | * | // | master | target | * | // | master | teams | + | // | master | cancellation | | // | | point | | // | master | cancel | | // +------------------+-----------------+------------------------------------+ // | critical | parallel | * | // | critical | for | + | // | critical | for simd | + | // | critical | master | * | // | critical | critical | * (should have different names) | // | critical | simd | * | // | critical | sections | + | // | critical | section | + | // | critical | single | + | // | critical | parallel for | * | // | critical |parallel for simd| * | // | critical |parallel sections| * | // | critical | task | * | // | critical | taskyield | * | // | critical | barrier | + | // | critical | taskwait | * | // | critical | taskgroup | * | // | critical | ordered | + | // | critical | atomic | * | // | critical | target | * | // | critical | teams | + | // | critical | cancellation | | // | | point | | // | critical | cancel | | // +------------------+-----------------+------------------------------------+ // | simd | parallel | | // | simd | for | | // | simd | for simd | | // | simd | master | | // | simd | critical | | // | simd | simd | | // | simd | sections | | // | simd | section | | // | simd | single | | // | simd | parallel for | | // | simd |parallel for simd| | // | simd |parallel sections| | // | simd | task | | // | simd | taskyield | | // | simd | barrier | | // | simd | taskwait | | // | simd | taskgroup | | // | simd | flush | | // | simd | ordered | | // | simd | atomic | | // | simd | target | | // | simd | teams | | // | simd | cancellation | | // | | point | | // | simd | cancel | | // +------------------+-----------------+------------------------------------+ // | for simd | parallel | | // | for simd | for | | // | for simd | for simd | | // | for simd | master | | // | for simd | critical | | // | for simd | simd | | // | for simd | sections | | // | for simd | section | | // | for simd | single | | // | for simd | parallel for | | // | for simd |parallel for simd| | // | for simd |parallel sections| | // | for simd | task | | // | for simd | taskyield | | // | for simd | barrier | | // | for simd | taskwait | | // | for simd | taskgroup | | // | for simd | flush | | // | for simd | ordered | | // | for simd | atomic | | // | for simd | target | | // | for simd | teams | | // | for simd | cancellation | | // | | point | | // | for simd | cancel | | // +------------------+-----------------+------------------------------------+ // | parallel for simd| parallel | | // | parallel for simd| for | | // | parallel for simd| for simd | | // | parallel for simd| master | | // | parallel for simd| critical | | // | parallel for simd| simd | | // | parallel for simd| sections | | // | parallel for simd| section | | // | parallel for simd| single | | // | parallel for simd| parallel for | | // | parallel for simd|parallel 
for simd| | // | parallel for simd|parallel sections| | // | parallel for simd| task | | // | parallel for simd| taskyield | | // | parallel for simd| barrier | | // | parallel for simd| taskwait | | // | parallel for simd| taskgroup | | // | parallel for simd| flush | | // | parallel for simd| ordered | | // | parallel for simd| atomic | | // | parallel for simd| target | | // | parallel for simd| teams | | // | parallel for simd| cancellation | | // | | point | | // | parallel for simd| cancel | | // +------------------+-----------------+------------------------------------+ // | sections | parallel | * | // | sections | for | + | // | sections | for simd | + | // | sections | master | + | // | sections | critical | * | // | sections | simd | * | // | sections | sections | + | // | sections | section | * | // | sections | single | + | // | sections | parallel for | * | // | sections |parallel for simd| * | // | sections |parallel sections| * | // | sections | task | * | // | sections | taskyield | * | // | sections | barrier | + | // | sections | taskwait | * | // | sections | taskgroup | * | // | sections | flush | * | // | sections | ordered | + | // | sections | atomic | * | // | sections | target | * | // | sections | teams | + | // | sections | cancellation | | // | | point | ! | // | sections | cancel | ! | // +------------------+-----------------+------------------------------------+ // | section | parallel | * | // | section | for | + | // | section | for simd | + | // | section | master | + | // | section | critical | * | // | section | simd | * | // | section | sections | + | // | section | section | + | // | section | single | + | // | section | parallel for | * | // | section |parallel for simd| * | // | section |parallel sections| * | // | section | task | * | // | section | taskyield | * | // | section | barrier | + | // | section | taskwait | * | // | section | taskgroup | * | // | section | flush | * | // | section | ordered | + | // | section | atomic | * | // | section | target | * | // | section | teams | + | // | section | cancellation | | // | | point | ! | // | section | cancel | ! 
| // +------------------+-----------------+------------------------------------+ // | single | parallel | * | // | single | for | + | // | single | for simd | + | // | single | master | + | // | single | critical | * | // | single | simd | * | // | single | sections | + | // | single | section | + | // | single | single | + | // | single | parallel for | * | // | single |parallel for simd| * | // | single |parallel sections| * | // | single | task | * | // | single | taskyield | * | // | single | barrier | + | // | single | taskwait | * | // | single | taskgroup | * | // | single | flush | * | // | single | ordered | + | // | single | atomic | * | // | single | target | * | // | single | teams | + | // | single | cancellation | | // | | point | | // | single | cancel | | // +------------------+-----------------+------------------------------------+ // | parallel for | parallel | * | // | parallel for | for | + | // | parallel for | for simd | + | // | parallel for | master | + | // | parallel for | critical | * | // | parallel for | simd | * | // | parallel for | sections | + | // | parallel for | section | + | // | parallel for | single | + | // | parallel for | parallel for | * | // | parallel for |parallel for simd| * | // | parallel for |parallel sections| * | // | parallel for | task | * | // | parallel for | taskyield | * | // | parallel for | barrier | + | // | parallel for | taskwait | * | // | parallel for | taskgroup | * | // | parallel for | flush | * | // | parallel for | ordered | * (if construct is ordered) | // | parallel for | atomic | * | // | parallel for | target | * | // | parallel for | teams | + | // | parallel for | cancellation | | // | | point | ! | // | parallel for | cancel | ! | // +------------------+-----------------+------------------------------------+ // | parallel sections| parallel | * | // | parallel sections| for | + | // | parallel sections| for simd | + | // | parallel sections| master | + | // | parallel sections| critical | + | // | parallel sections| simd | * | // | parallel sections| sections | + | // | parallel sections| section | * | // | parallel sections| single | + | // | parallel sections| parallel for | * | // | parallel sections|parallel for simd| * | // | parallel sections|parallel sections| * | // | parallel sections| task | * | // | parallel sections| taskyield | * | // | parallel sections| barrier | + | // | parallel sections| taskwait | * | // | parallel sections| taskgroup | * | // | parallel sections| flush | * | // | parallel sections| ordered | + | // | parallel sections| atomic | * | // | parallel sections| target | * | // | parallel sections| teams | + | // | parallel sections| cancellation | | // | | point | ! | // | parallel sections| cancel | ! | // +------------------+-----------------+------------------------------------+ // | task | parallel | * | // | task | for | + | // | task | for simd | + | // | task | master | + | // | task | critical | * | // | task | simd | * | // | task | sections | + | // | task | section | + | // | task | single | + | // | task | parallel for | * | // | task |parallel for simd| * | // | task |parallel sections| * | // | task | task | * | // | task | taskyield | * | // | task | barrier | + | // | task | taskwait | * | // | task | taskgroup | * | // | task | flush | * | // | task | ordered | + | // | task | atomic | * | // | task | target | * | // | task | teams | + | // | task | cancellation | | // | | point | ! | // | task | cancel | ! 
| // +------------------+-----------------+------------------------------------+ // | ordered | parallel | * | // | ordered | for | + | // | ordered | for simd | + | // | ordered | master | * | // | ordered | critical | * | // | ordered | simd | * | // | ordered | sections | + | // | ordered | section | + | // | ordered | single | + | // | ordered | parallel for | * | // | ordered |parallel for simd| * | // | ordered |parallel sections| * | // | ordered | task | * | // | ordered | taskyield | * | // | ordered | barrier | + | // | ordered | taskwait | * | // | ordered | taskgroup | * | // | ordered | flush | * | // | ordered | ordered | + | // | ordered | atomic | * | // | ordered | target | * | // | ordered | teams | + | // | ordered | cancellation | | // | | point | | // | ordered | cancel | | // +------------------+-----------------+------------------------------------+ // | atomic | parallel | | // | atomic | for | | // | atomic | for simd | | // | atomic | master | | // | atomic | critical | | // | atomic | simd | | // | atomic | sections | | // | atomic | section | | // | atomic | single | | // | atomic | parallel for | | // | atomic |parallel for simd| | // | atomic |parallel sections| | // | atomic | task | | // | atomic | taskyield | | // | atomic | barrier | | // | atomic | taskwait | | // | atomic | taskgroup | | // | atomic | flush | | // | atomic | ordered | | // | atomic | atomic | | // | atomic | target | | // | atomic | teams | | // | atomic | cancellation | | // | | point | | // | atomic | cancel | | // +------------------+-----------------+------------------------------------+ // | target | parallel | * | // | target | for | * | // | target | for simd | * | // | target | master | * | // | target | critical | * | // | target | simd | * | // | target | sections | * | // | target | section | * | // | target | single | * | // | target | parallel for | * | // | target |parallel for simd| * | // | target |parallel sections| * | // | target | task | * | // | target | taskyield | * | // | target | barrier | * | // | target | taskwait | * | // | target | taskgroup | * | // | target | flush | * | // | target | ordered | * | // | target | atomic | * | // | target | target | * | // | target | teams | * | // | target | cancellation | | // | | point | | // | target | cancel | | // +------------------+-----------------+------------------------------------+ // | teams | parallel | * | // | teams | for | + | // | teams | for simd | + | // | teams | master | + | // | teams | critical | + | // | teams | simd | + | // | teams | sections | + | // | teams | section | + | // | teams | single | + | // | teams | parallel for | * | // | teams |parallel for simd| * | // | teams |parallel sections| * | // | teams | task | + | // | teams | taskyield | + | // | teams | barrier | + | // | teams | taskwait | + | // | teams | taskgroup | + | // | teams | flush | + | // | teams | ordered | + | // | teams | atomic | + | // | teams | target | + | // | teams | teams | + | // | teams | cancellation | | // | | point | | // | teams | cancel | | // +------------------+-----------------+------------------------------------+ if (Stack->getCurScope()) { auto ParentRegion = Stack->getParentDirective(); bool NestingProhibited = false; bool CloseNesting = true; enum { NoRecommend, ShouldBeInParallelRegion, ShouldBeInOrderedRegion, ShouldBeInTargetRegion } Recommend = NoRecommend; if (isOpenMPSimdDirective(ParentRegion)) { // OpenMP [2.16, Nesting of Regions] // OpenMP constructs may not be nested inside a simd region. 
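      // E.g. (illustrative), rejected here:
      //   #pragma omp simd
      //   for (int i = 0; i < N; ++i) {
      //   #pragma omp parallel  // error: region cannot be nested in simd
      //     { /* ... */ }
      //   }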
SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_simd); return true; } if (ParentRegion == OMPD_atomic) { // OpenMP [2.16, Nesting of Regions] // OpenMP constructs may not be nested inside an atomic region. SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_atomic); return true; } if (CurrentRegion == OMPD_section) { // OpenMP [2.7.2, sections Construct, Restrictions] // Orphaned section directives are prohibited. That is, the section // directives must appear within the sections construct and must not be // encountered elsewhere in the sections region. if (ParentRegion != OMPD_sections && ParentRegion != OMPD_parallel_sections) { SemaRef.Diag(StartLoc, diag::err_omp_orphaned_section_directive) << (ParentRegion != OMPD_unknown) << getOpenMPDirectiveName(ParentRegion); return true; } return false; } // Allow some constructs to be orphaned (they could be used in functions, // called from OpenMP regions with the required preconditions). if (ParentRegion == OMPD_unknown) return false; if (CurrentRegion == OMPD_cancellation_point || CurrentRegion == OMPD_cancel) { // OpenMP [2.16, Nesting of Regions] // A cancellation point construct for which construct-type-clause is // taskgroup must be nested inside a task construct. A cancellation // point construct for which construct-type-clause is not taskgroup must // be closely nested inside an OpenMP construct that matches the type // specified in construct-type-clause. // A cancel construct for which construct-type-clause is taskgroup must be // nested inside a task construct. A cancel construct for which // construct-type-clause is not taskgroup must be closely nested inside an // OpenMP construct that matches the type specified in // construct-type-clause. NestingProhibited = !((CancelRegion == OMPD_parallel && ParentRegion == OMPD_parallel) || (CancelRegion == OMPD_for && ParentRegion == OMPD_for) || (CancelRegion == OMPD_taskgroup && ParentRegion == OMPD_task) || (CancelRegion == OMPD_sections && (ParentRegion == OMPD_section || ParentRegion == OMPD_sections))); } else if (CurrentRegion == OMPD_master) { // OpenMP [2.16, Nesting of Regions] // A master region may not be closely nested inside a worksharing, // atomic, or explicit task region. NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) || ParentRegion == OMPD_task; } else if (CurrentRegion == OMPD_critical && CurrentName.getName()) { // OpenMP [2.16, Nesting of Regions] // A critical region may not be nested (closely or otherwise) inside a // critical region with the same name. Note that this restriction is not // sufficient to prevent deadlock. SourceLocation PreviousCriticalLoc; bool DeadLock = Stack->hasDirective([CurrentName, &PreviousCriticalLoc]( OpenMPDirectiveKind K, const DeclarationNameInfo &DNI, SourceLocation Loc) ->bool { if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) { PreviousCriticalLoc = Loc; return true; } else return false; }, false /* skip top directive */); if (DeadLock) { SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region_critical_same_name) << CurrentName.getName(); if (PreviousCriticalLoc.isValid()) SemaRef.Diag(PreviousCriticalLoc, diag::note_omp_previous_critical_region); return true; } } else if (CurrentRegion == OMPD_barrier) { // OpenMP [2.16, Nesting of Regions] // A barrier region may not be closely nested inside a worksharing, // explicit task, critical, ordered, atomic, or master region. 
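      // E.g. (illustrative), rejected here:
      //   #pragma omp for
      //   for (int i = 0; i < N; ++i) {
      //   #pragma omp barrier  // error: barrier inside a worksharing region
      //   }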
NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) || ParentRegion == OMPD_task || ParentRegion == OMPD_master || ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered; } else if (isOpenMPWorksharingDirective(CurrentRegion) && !isOpenMPParallelDirective(CurrentRegion)) { // OpenMP [2.16, Nesting of Regions] // A worksharing region may not be closely nested inside a worksharing, // explicit task, critical, ordered, atomic, or master region. NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) || ParentRegion == OMPD_task || ParentRegion == OMPD_master || ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered; Recommend = ShouldBeInParallelRegion; } else if (CurrentRegion == OMPD_ordered) { // OpenMP [2.16, Nesting of Regions] // An ordered region may not be closely nested inside a critical, // atomic, or explicit task region. // An ordered region must be closely nested inside a loop region (or // parallel loop region) with an ordered clause. NestingProhibited = ParentRegion == OMPD_critical || ParentRegion == OMPD_task || !Stack->isParentOrderedRegion(); Recommend = ShouldBeInOrderedRegion; } else if (isOpenMPTeamsDirective(CurrentRegion)) { // OpenMP [2.16, Nesting of Regions] // If specified, a teams construct must be contained within a target // construct. NestingProhibited = ParentRegion != OMPD_target; Recommend = ShouldBeInTargetRegion; Stack->setParentTeamsRegionLoc(Stack->getConstructLoc()); } if (!NestingProhibited && isOpenMPTeamsDirective(ParentRegion)) { // OpenMP [2.16, Nesting of Regions] // distribute, parallel, parallel sections, parallel workshare, and the // parallel loop and parallel loop SIMD constructs are the only OpenMP // constructs that can be closely nested in the teams region. // TODO: add distribute directive. NestingProhibited = !isOpenMPParallelDirective(CurrentRegion); Recommend = ShouldBeInParallelRegion; } if (NestingProhibited) { SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region) << CloseNesting << getOpenMPDirectiveName(ParentRegion) << Recommend << getOpenMPDirectiveName(CurrentRegion); return true; } } return false; } StmtResult Sema::ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { StmtResult Res = StmtError(); if (CheckNestingOfRegions(*this, DSAStack, Kind, DirName, CancelRegion, StartLoc)) return StmtError(); llvm::SmallVector<OMPClause *, 8> ClausesWithImplicit; llvm::DenseMap<VarDecl *, Expr *> VarsWithInheritedDSA; bool ErrorFound = false; ClausesWithImplicit.append(Clauses.begin(), Clauses.end()); if (AStmt) { assert(isa<CapturedStmt>(AStmt) && "Captured statement expected"); // Check default data sharing attributes for referenced variables. DSAAttrChecker DSAChecker(DSAStack, *this, cast<CapturedStmt>(AStmt)); DSAChecker.Visit(cast<CapturedStmt>(AStmt)->getCapturedStmt()); if (DSAChecker.isErrorFound()) return StmtError(); // Generate list of implicitly defined firstprivate variables. 
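    // E.g. (illustrative): given 'void f(int n) { #pragma omp task use(n); }',
    // 'n' is not shared in the enclosing context, so an implicit firstprivate
    // clause for 'n' is synthesized below.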
VarsWithInheritedDSA = DSAChecker.getVarsWithInheritedDSA(); if (!DSAChecker.getImplicitFirstprivate().empty()) { if (OMPClause *Implicit = ActOnOpenMPFirstprivateClause( DSAChecker.getImplicitFirstprivate(), SourceLocation(), SourceLocation(), SourceLocation())) { ClausesWithImplicit.push_back(Implicit); ErrorFound = cast<OMPFirstprivateClause>(Implicit)->varlist_size() != DSAChecker.getImplicitFirstprivate().size(); } else ErrorFound = true; } } switch (Kind) { case OMPD_parallel: Res = ActOnOpenMPParallelDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_simd: Res = ActOnOpenMPSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); break; case OMPD_for: Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); break; case OMPD_for_simd: Res = ActOnOpenMPForSimdDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); break; case OMPD_sections: Res = ActOnOpenMPSectionsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_section: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp section' directive"); Res = ActOnOpenMPSectionDirective(AStmt, StartLoc, EndLoc); break; case OMPD_single: Res = ActOnOpenMPSingleDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_master: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp master' directive"); Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc); break; case OMPD_critical: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp critical' directive"); Res = ActOnOpenMPCriticalDirective(DirName, AStmt, StartLoc, EndLoc); break; case OMPD_parallel_for: Res = ActOnOpenMPParallelForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); break; case OMPD_parallel_for_simd: Res = ActOnOpenMPParallelForSimdDirective( ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA); break; case OMPD_parallel_sections: Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_task: Res = ActOnOpenMPTaskDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_taskyield: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp taskyield' directive"); assert(AStmt == nullptr && "No associated statement allowed for 'omp taskyield' directive"); Res = ActOnOpenMPTaskyieldDirective(StartLoc, EndLoc); break; case OMPD_barrier: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp barrier' directive"); assert(AStmt == nullptr && "No associated statement allowed for 'omp barrier' directive"); Res = ActOnOpenMPBarrierDirective(StartLoc, EndLoc); break; case OMPD_taskwait: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp taskwait' directive"); assert(AStmt == nullptr && "No associated statement allowed for 'omp taskwait' directive"); Res = ActOnOpenMPTaskwaitDirective(StartLoc, EndLoc); break; case OMPD_taskgroup: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp taskgroup' directive"); Res = ActOnOpenMPTaskgroupDirective(AStmt, StartLoc, EndLoc); break; case OMPD_flush: assert(AStmt == nullptr && "No associated statement allowed for 'omp flush' directive"); Res = ActOnOpenMPFlushDirective(ClausesWithImplicit, StartLoc, EndLoc); break; case OMPD_ordered: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp ordered' directive"); Res = ActOnOpenMPOrderedDirective(AStmt, 
StartLoc, EndLoc); break; case OMPD_atomic: Res = ActOnOpenMPAtomicDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_teams: Res = ActOnOpenMPTeamsDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_target: Res = ActOnOpenMPTargetDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc); break; case OMPD_cancellation_point: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp cancellation point' directive"); assert(AStmt == nullptr && "No associated statement allowed for 'omp " "cancellation point' directive"); Res = ActOnOpenMPCancellationPointDirective(StartLoc, EndLoc, CancelRegion); break; case OMPD_cancel: assert(ClausesWithImplicit.empty() && "No clauses are allowed for 'omp cancel' directive"); assert(AStmt == nullptr && "No associated statement allowed for 'omp cancel' directive"); Res = ActOnOpenMPCancelDirective(StartLoc, EndLoc, CancelRegion); break; case OMPD_threadprivate: llvm_unreachable("OpenMP Directive is not allowed"); case OMPD_unknown: llvm_unreachable("Unknown OpenMP directive"); } for (auto P : VarsWithInheritedDSA) { Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable) << P.first << P.second->getSourceRange(); } if (!VarsWithInheritedDSA.empty()) return StmtError(); if (ErrorFound) return StmtError(); return Res; } StmtResult Sema::ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); CapturedStmt *CS = cast<CapturedStmt>(AStmt); // 1.2.2 OpenMP Language Terminology // Structured block - An executable statement with a single entry at the // top and a single exit at the bottom. // The point of exit cannot be a branch out of the structured block. // longjmp() and throw() must not violate the entry/exit criteria. CS->getCapturedDecl()->setNothrow(); getCurFunction()->setHasBranchProtectedScope(); return OMPParallelDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); } namespace { /// \brief Helper class for checking canonical form of the OpenMP loops and /// extracting the iteration space of each loop in the loop nest, which will be /// used for IR generation. class OpenMPIterationSpaceChecker { /// \brief Reference to Sema. Sema &SemaRef; /// \brief A location for diagnostics (when there is no better location). SourceLocation DefaultLoc; /// \brief A location for diagnostics (when the increment is not compatible). SourceLocation ConditionLoc; /// \brief A source range for referring to the loop init later. SourceRange InitSrcRange; /// \brief A source range for referring to the condition later. SourceRange ConditionSrcRange; /// \brief A source range for referring to the increment later. SourceRange IncrementSrcRange; /// \brief Loop variable. VarDecl *Var; /// \brief Reference to the loop variable. DeclRefExpr *VarRef; /// \brief Lower bound (initializer for the var). Expr *LB; /// \brief Upper bound. Expr *UB; /// \brief Loop step (increment). Expr *Step; /// \brief This flag is true when the condition is one of: /// Var < UB /// Var <= UB /// UB > Var /// UB >= Var bool TestIsLessOp; /// \brief This flag is true when the condition is strict ( < or > ). bool TestIsStrictOp; /// \brief This flag is true when the step is subtracted on each iteration. 
bool SubtractStep; public: OpenMPIterationSpaceChecker(Sema &SemaRef, SourceLocation DefaultLoc) : SemaRef(SemaRef), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc), InitSrcRange(SourceRange()), ConditionSrcRange(SourceRange()), IncrementSrcRange(SourceRange()), Var(nullptr), VarRef(nullptr), LB(nullptr), UB(nullptr), Step(nullptr), TestIsLessOp(false), TestIsStrictOp(false), SubtractStep(false) {} /// \brief Check init-expr for canonical loop form and save loop counter /// variable - #Var and its initialization value - #LB. bool CheckInit(Stmt *S, bool EmitDiags = true); /// \brief Check test-expr for canonical form, save upper-bound (#UB), flags /// for less/greater and for strict/non-strict comparison. bool CheckCond(Expr *S); /// \brief Check incr-expr for canonical loop form and return true if it /// does not conform, otherwise save loop step (#Step). bool CheckInc(Expr *S); /// \brief Return the loop counter variable. VarDecl *GetLoopVar() const { return Var; } /// \brief Return the reference expression to the loop counter variable. DeclRefExpr *GetLoopVarRefExpr() const { return VarRef; } /// \brief Source range of the loop init. SourceRange GetInitSrcRange() const { return InitSrcRange; } /// \brief Source range of the loop condition. SourceRange GetConditionSrcRange() const { return ConditionSrcRange; } /// \brief Source range of the loop increment. SourceRange GetIncrementSrcRange() const { return IncrementSrcRange; } /// \brief True if the step should be subtracted. bool ShouldSubtractStep() const { return SubtractStep; } /// \brief Build the expression to calculate the number of iterations. Expr *BuildNumIterations(Scope *S, const bool LimitedType) const; /// \brief Build the precondition expression for the loops. Expr *BuildPreCond(Scope *S, Expr *Cond) const; /// \brief Build the reference expression to the counter to be used for codegen. Expr *BuildCounterVar() const; /// \brief Build the initialization of the counter to be used for codegen. Expr *BuildCounterInit() const; /// \brief Build the step of the counter to be used for codegen. Expr *BuildCounterStep() const; /// \brief Return true if any expression is dependent. bool Dependent() const; private: /// \brief Check the right-hand side of an assignment in the increment /// expression. bool CheckIncRHS(Expr *RHS); /// \brief Helper to set loop counter variable and its initializer. bool SetVarAndLB(VarDecl *NewVar, DeclRefExpr *NewVarRefExpr, Expr *NewLB); /// \brief Helper to set upper bound. bool SetUB(Expr *NewUB, bool LessOp, bool StrictOp, const SourceRange &SR, const SourceLocation &SL); /// \brief Helper to set loop increment. bool SetStep(Expr *NewStep, bool Subtract); }; bool OpenMPIterationSpaceChecker::Dependent() const { if (!Var) { assert(!LB && !UB && !Step); return false; } return Var->getType()->isDependentType() || (LB && LB->isValueDependent()) || (UB && UB->isValueDependent()) || (Step && Step->isValueDependent()); } template <typename T> static T *getExprAsWritten(T *E) { if (auto *ExprTemp = dyn_cast<ExprWithCleanups>(E)) E = ExprTemp->getSubExpr(); if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) E = MTE->GetTemporaryExpr(); while (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E)) E = Binder->getSubExpr(); if (auto *ICE = dyn_cast<ImplicitCastExpr>(E)) E = ICE->getSubExprAsWritten(); return E->IgnoreParens(); } bool OpenMPIterationSpaceChecker::SetVarAndLB(VarDecl *NewVar, DeclRefExpr *NewVarRefExpr, Expr *NewLB) { // State consistency checking to ensure correct usage. 
assert(Var == nullptr && LB == nullptr && VarRef == nullptr && UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp); if (!NewVar || !NewLB) return true; Var = NewVar; VarRef = NewVarRefExpr; if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(NewLB)) if (const CXXConstructorDecl *Ctor = CE->getConstructor()) if ((Ctor->isCopyOrMoveConstructor() || Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) && CE->getNumArgs() > 0 && CE->getArg(0) != nullptr) NewLB = CE->getArg(0)->IgnoreParenImpCasts(); LB = NewLB; return false; } bool OpenMPIterationSpaceChecker::SetUB(Expr *NewUB, bool LessOp, bool StrictOp, const SourceRange &SR, const SourceLocation &SL) { // State consistency checking to ensure correct usage. assert(Var != nullptr && LB != nullptr && UB == nullptr && Step == nullptr && !TestIsLessOp && !TestIsStrictOp); if (!NewUB) return true; UB = NewUB; TestIsLessOp = LessOp; TestIsStrictOp = StrictOp; ConditionSrcRange = SR; ConditionLoc = SL; return false; } bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) { // State consistency checking to ensure correct usage. assert(Var != nullptr && LB != nullptr && Step == nullptr); if (!NewStep) return true; if (!NewStep->isValueDependent()) { // Check that the step is integer expression. SourceLocation StepLoc = NewStep->getLocStart(); ExprResult Val = SemaRef.PerformOpenMPImplicitIntegerConversion(StepLoc, NewStep); if (Val.isInvalid()) return true; NewStep = Val.get(); // OpenMP [2.6, Canonical Loop Form, Restrictions] // If test-expr is of form var relational-op b and relational-op is < or // <= then incr-expr must cause var to increase on each iteration of the // loop. If test-expr is of form var relational-op b and relational-op is // > or >= then incr-expr must cause var to decrease on each iteration of // the loop. // If test-expr is of form b relational-op var and relational-op is < or // <= then incr-expr must cause var to decrease on each iteration of the // loop. If test-expr is of form b relational-op var and relational-op is // > or >= then incr-expr must cause var to increase on each iteration of // the loop. llvm::APSInt Result; bool IsConstant = NewStep->isIntegerConstantExpr(Result, SemaRef.Context); bool IsUnsigned = !NewStep->getType()->hasSignedIntegerRepresentation(); bool IsConstNeg = IsConstant && Result.isSigned() && (Subtract != Result.isNegative()); bool IsConstPos = IsConstant && Result.isSigned() && (Subtract == Result.isNegative()); bool IsConstZero = IsConstant && !Result.getBoolValue(); if (UB && (IsConstZero || (TestIsLessOp ? (IsConstNeg || (IsUnsigned && Subtract)) : (IsConstPos || (IsUnsigned && !Subtract))))) { SemaRef.Diag(NewStep->getExprLoc(), diag::err_omp_loop_incr_not_compatible) << Var << TestIsLessOp << NewStep->getSourceRange(); SemaRef.Diag(ConditionLoc, diag::note_omp_loop_cond_requres_compatible_incr) << TestIsLessOp << ConditionSrcRange; return true; } if (TestIsLessOp == Subtract) { NewStep = SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep).get(); Subtract = !Subtract; } } Step = NewStep; SubtractStep = Subtract; return false; } bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) { // Check init-expr for canonical loop form and save loop counter // variable - #Var and its initialization value - #LB. // OpenMP [2.6] Canonical loop form. 
init-expr may be one of the following: // var = lb // integer-type var = lb // random-access-iterator-type var = lb // pointer-type var = lb // if (!S) { if (EmitDiags) { SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_init); } return true; } InitSrcRange = S->getSourceRange(); if (Expr *E = dyn_cast<Expr>(S)) S = E->IgnoreParens(); if (auto BO = dyn_cast<BinaryOperator>(S)) { if (BO->getOpcode() == BO_Assign) if (auto DRE = dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), DRE, BO->getRHS()); } else if (auto DS = dyn_cast<DeclStmt>(S)) { if (DS->isSingleDecl()) { if (auto Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) { if (Var->hasInit()) { // Accept non-canonical init form here but emit ext. warning. if (Var->getInitStyle() != VarDecl::CInit && EmitDiags) SemaRef.Diag(S->getLocStart(), diag::ext_omp_loop_not_canonical_init) << S->getSourceRange(); return SetVarAndLB(Var, nullptr, Var->getInit()); } } } } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) if (CE->getOperator() == OO_Equal) if (auto DRE = dyn_cast<DeclRefExpr>(CE->getArg(0))) return SetVarAndLB(dyn_cast<VarDecl>(DRE->getDecl()), DRE, CE->getArg(1)); if (EmitDiags) { SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_init) << S->getSourceRange(); } return true; } /// \brief Ignore parentheses, implicit casts and copy constructors, and return /// the variable (which may be the loop variable) if possible. static const VarDecl *GetInitVarDecl(const Expr *E) { if (!E) return nullptr; E = getExprAsWritten(E); if (auto *CE = dyn_cast_or_null<CXXConstructExpr>(E)) if (const CXXConstructorDecl *Ctor = CE->getConstructor()) if ((Ctor->isCopyOrMoveConstructor() || Ctor->isConvertingConstructor(/*AllowExplicit=*/false)) && CE->getNumArgs() > 0 && CE->getArg(0) != nullptr) E = CE->getArg(0)->IgnoreParenImpCasts(); auto DRE = dyn_cast_or_null<DeclRefExpr>(E); if (!DRE) return nullptr; return dyn_cast<VarDecl>(DRE->getDecl()); } bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) { // Check test-expr for canonical form, save upper-bound UB, flags for // less/greater and for strict/non-strict comparison. // OpenMP [2.6] Canonical loop form. 
Test-expr may be one of the following: // var relational-op b // b relational-op var // if (!S) { SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_cond) << Var; return true; } S = getExprAsWritten(S); SourceLocation CondLoc = S->getLocStart(); if (auto BO = dyn_cast<BinaryOperator>(S)) { if (BO->isRelationalOp()) { if (GetInitVarDecl(BO->getLHS()) == Var) return SetUB(BO->getRHS(), (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE), (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT), BO->getSourceRange(), BO->getOperatorLoc()); if (GetInitVarDecl(BO->getRHS()) == Var) return SetUB(BO->getLHS(), (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE), (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT), BO->getSourceRange(), BO->getOperatorLoc()); } } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) { if (CE->getNumArgs() == 2) { auto Op = CE->getOperator(); switch (Op) { case OO_Greater: case OO_GreaterEqual: case OO_Less: case OO_LessEqual: if (GetInitVarDecl(CE->getArg(0)) == Var) return SetUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual, Op == OO_Less || Op == OO_Greater, CE->getSourceRange(), CE->getOperatorLoc()); if (GetInitVarDecl(CE->getArg(1)) == Var) return SetUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual, Op == OO_Less || Op == OO_Greater, CE->getSourceRange(), CE->getOperatorLoc()); break; default: break; } } } SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond) << S->getSourceRange() << Var; return true; } bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) { // RHS of canonical loop form increment can be: // var + incr // incr + var // var - incr // RHS = RHS->IgnoreParenImpCasts(); if (auto BO = dyn_cast<BinaryOperator>(RHS)) { if (BO->isAdditiveOp()) { bool IsAdd = BO->getOpcode() == BO_Add; if (GetInitVarDecl(BO->getLHS()) == Var) return SetStep(BO->getRHS(), !IsAdd); if (IsAdd && GetInitVarDecl(BO->getRHS()) == Var) return SetStep(BO->getLHS(), false); } } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(RHS)) { bool IsAdd = CE->getOperator() == OO_Plus; if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) { if (GetInitVarDecl(CE->getArg(0)) == Var) return SetStep(CE->getArg(1), !IsAdd); if (IsAdd && GetInitVarDecl(CE->getArg(1)) == Var) return SetStep(CE->getArg(0), false); } } SemaRef.Diag(RHS->getLocStart(), diag::err_omp_loop_not_canonical_incr) << RHS->getSourceRange() << Var; return true; } bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) { // Check incr-expr for canonical loop form and return true if it // does not conform. // OpenMP [2.6] Canonical loop form. incr-expr may be one of the following: // ++var // var++ // --var // var-- // var += incr // var -= incr // var = var + incr // var = incr + var // var = var - incr // if (!S) { SemaRef.Diag(DefaultLoc, diag::err_omp_loop_not_canonical_incr) << Var; return true; } IncrementSrcRange = S->getSourceRange(); S = S->IgnoreParens(); if (auto UO = dyn_cast<UnaryOperator>(S)) { if (UO->isIncrementDecrementOp() && GetInitVarDecl(UO->getSubExpr()) == Var) return SetStep( SemaRef.ActOnIntegerConstant(UO->getLocStart(), (UO->isDecrementOp() ? 
-1 : 1)).get(), false); } else if (auto BO = dyn_cast<BinaryOperator>(S)) { switch (BO->getOpcode()) { case BO_AddAssign: case BO_SubAssign: if (GetInitVarDecl(BO->getLHS()) == Var) return SetStep(BO->getRHS(), BO->getOpcode() == BO_SubAssign); break; case BO_Assign: if (GetInitVarDecl(BO->getLHS()) == Var) return CheckIncRHS(BO->getRHS()); break; default: break; } } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) { switch (CE->getOperator()) { case OO_PlusPlus: case OO_MinusMinus: if (GetInitVarDecl(CE->getArg(0)) == Var) return SetStep( SemaRef.ActOnIntegerConstant( CE->getLocStart(), ((CE->getOperator() == OO_MinusMinus) ? -1 : 1)).get(), false); break; case OO_PlusEqual: case OO_MinusEqual: if (GetInitVarDecl(CE->getArg(0)) == Var) return SetStep(CE->getArg(1), CE->getOperator() == OO_MinusEqual); break; case OO_Equal: if (GetInitVarDecl(CE->getArg(0)) == Var) return CheckIncRHS(CE->getArg(1)); break; default: break; } } SemaRef.Diag(S->getLocStart(), diag::err_omp_loop_not_canonical_incr) << S->getSourceRange() << Var; return true; } namespace { // Transform variables declared in GNU statement expressions to new ones to // avoid a crash during codegen. class TransformToNewDefs : public TreeTransform<TransformToNewDefs> { typedef TreeTransform<TransformToNewDefs> BaseTransform; public: TransformToNewDefs(Sema &SemaRef) : BaseTransform(SemaRef) {} Decl *TransformDefinition(SourceLocation Loc, Decl *D) { if (auto *VD = dyn_cast<VarDecl>(D)) if (!isa<ParmVarDecl>(D) && !isa<VarTemplateSpecializationDecl>(D) && !isa<ImplicitParamDecl>(D)) { auto *NewVD = VarDecl::Create( SemaRef.Context, VD->getDeclContext(), VD->getLocStart(), VD->getLocation(), VD->getIdentifier(), VD->getType(), VD->getTypeSourceInfo(), VD->getStorageClass()); NewVD->setTSCSpec(VD->getTSCSpec()); NewVD->setInit(VD->getInit()); NewVD->setInitStyle(VD->getInitStyle()); NewVD->setExceptionVariable(VD->isExceptionVariable()); NewVD->setNRVOVariable(VD->isNRVOVariable()); NewVD->setCXXForRangeDecl(VD->isCXXForRangeDecl()); NewVD->setConstexpr(VD->isConstexpr()); NewVD->setInitCapture(VD->isInitCapture()); NewVD->setPreviousDeclInSameBlockScope( VD->isPreviousDeclInSameBlockScope()); VD->getDeclContext()->addHiddenDecl(NewVD); transformedLocalDecl(VD, NewVD); return NewVD; } return BaseTransform::TransformDefinition(Loc, D); } ExprResult TransformDeclRefExpr(DeclRefExpr *E) { if (auto *NewD = TransformDecl(E->getExprLoc(), E->getDecl())) if (E->getDecl() != NewD) { NewD->setReferenced(); NewD->markUsed(SemaRef.Context); return DeclRefExpr::Create( SemaRef.Context, E->getQualifierLoc(), E->getTemplateKeywordLoc(), cast<ValueDecl>(NewD), E->refersToEnclosingVariableOrCapture(), E->getNameInfo(), E->getType(), E->getValueKind()); } return BaseTransform::TransformDeclRefExpr(E); } }; } /// \brief Build the expression to calculate the number of iterations. Expr * OpenMPIterationSpaceChecker::BuildNumIterations(Scope *S, const bool LimitedType) const { TransformToNewDefs Transform(SemaRef); ExprResult Diff; auto VarType = Var->getType().getNonReferenceType(); if (VarType->isIntegerType() || VarType->isPointerType() || SemaRef.getLangOpts().CPlusPlus) { // Upper - Lower auto *UBExpr = TestIsLessOp ? UB : LB; auto *LBExpr = TestIsLessOp ? 
LB : UB; Expr *Upper = Transform.TransformExpr(UBExpr).get(); Expr *Lower = Transform.TransformExpr(LBExpr).get(); if (!Upper || !Lower) return nullptr; Upper = SemaRef.PerformImplicitConversion(Upper, UBExpr->getType(), Sema::AA_Converting, /*AllowExplicit=*/true) .get(); Lower = SemaRef.PerformImplicitConversion(Lower, LBExpr->getType(), Sema::AA_Converting, /*AllowExplicit=*/true) .get(); if (!Upper || !Lower) return nullptr; Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower); if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) { // BuildBinOp already emitted error, this one is to point user to upper // and lower bound, and to tell what is passed to 'operator-'. SemaRef.Diag(Upper->getLocStart(), diag::err_omp_loop_diff_cxx) << Upper->getSourceRange() << Lower->getSourceRange(); return nullptr; } } if (!Diff.isUsable()) return nullptr; // Upper - Lower [- 1] if (TestIsStrictOp) Diff = SemaRef.BuildBinOp( S, DefaultLoc, BO_Sub, Diff.get(), SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get()); if (!Diff.isUsable()) return nullptr; // Upper - Lower [- 1] + Step auto NewStep = Transform.TransformExpr(Step->IgnoreImplicit()); if (NewStep.isInvalid()) return nullptr; NewStep = SemaRef.PerformImplicitConversion( NewStep.get(), Step->IgnoreImplicit()->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); if (NewStep.isInvalid()) return nullptr; Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Add, Diff.get(), NewStep.get()); if (!Diff.isUsable()) return nullptr; // Parentheses (for dumping/debugging purposes only). Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get()); if (!Diff.isUsable()) return nullptr; // (Upper - Lower [- 1] + Step) / Step NewStep = Transform.TransformExpr(Step->IgnoreImplicit()); if (NewStep.isInvalid()) return nullptr; NewStep = SemaRef.PerformImplicitConversion( NewStep.get(), Step->IgnoreImplicit()->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); if (NewStep.isInvalid()) return nullptr; Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get()); if (!Diff.isUsable()) return nullptr; // OpenMP runtime requires 32-bit or 64-bit loop variables. QualType Type = Diff.get()->getType(); auto &C = SemaRef.Context; bool UseVarType = VarType->hasIntegerRepresentation() && C.getTypeSize(Type) > C.getTypeSize(VarType); if (!Type->isIntegerType() || UseVarType) { unsigned NewSize = UseVarType ? C.getTypeSize(VarType) : C.getTypeSize(Type); bool IsSigned = UseVarType ? VarType->hasSignedIntegerRepresentation() : Type->hasSignedIntegerRepresentation(); Type = C.getIntTypeForBitwidth(NewSize, IsSigned); Diff = SemaRef.PerformImplicitConversion( Diff.get(), Type, Sema::AA_Converting, /*AllowExplicit=*/true); if (!Diff.isUsable()) return nullptr; } if (LimitedType) { unsigned NewSize = (C.getTypeSize(Type) > 32) ? 64 : 32; if (NewSize != C.getTypeSize(Type)) { if (NewSize < C.getTypeSize(Type)) { assert(NewSize == 64 && "incorrect loop var size"); SemaRef.Diag(DefaultLoc, diag::warn_omp_loop_64_bit_var) << InitSrcRange << ConditionSrcRange; } QualType NewType = C.getIntTypeForBitwidth( NewSize, Type->hasSignedIntegerRepresentation() || C.getTypeSize(Type) < NewSize); Diff = SemaRef.PerformImplicitConversion(Diff.get(), NewType, Sema::AA_Converting, true); if (!Diff.isUsable()) return nullptr; } } return Diff.get(); } Expr *OpenMPIterationSpaceChecker::BuildPreCond(Scope *S, Expr *Cond) const { // Try to build LB <op> UB, where <op> is <, >, <=, or >=. 
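// Diagnostics are suppressed while this precondition is built: if it cannot be built, the code below simply falls back to evaluating the original loop condition at runtime.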
bool Suppress = SemaRef.getDiagnostics().getSuppressAllDiagnostics(); SemaRef.getDiagnostics().setSuppressAllDiagnostics(/*Val=*/true); TransformToNewDefs Transform(SemaRef); auto NewLB = Transform.TransformExpr(LB); auto NewUB = Transform.TransformExpr(UB); if (NewLB.isInvalid() || NewUB.isInvalid()) return Cond; NewLB = SemaRef.PerformImplicitConversion(NewLB.get(), LB->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); NewUB = SemaRef.PerformImplicitConversion(NewUB.get(), UB->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); if (NewLB.isInvalid() || NewUB.isInvalid()) return Cond; auto CondExpr = SemaRef.BuildBinOp( S, DefaultLoc, TestIsLessOp ? (TestIsStrictOp ? BO_LT : BO_LE) : (TestIsStrictOp ? BO_GT : BO_GE), NewLB.get(), NewUB.get()); if (CondExpr.isUsable()) { CondExpr = SemaRef.PerformImplicitConversion( CondExpr.get(), SemaRef.Context.BoolTy, /*Action=*/Sema::AA_Casting, /*AllowExplicit=*/true); } SemaRef.getDiagnostics().setSuppressAllDiagnostics(Suppress); // Otherwise use the original loop condition and evaluate it at runtime. return CondExpr.isUsable() ? CondExpr.get() : Cond; } /// \brief Build the reference expression to the counter to be used for codegen. Expr *OpenMPIterationSpaceChecker::BuildCounterVar() const { return buildDeclRefExpr(SemaRef, Var, Var->getType(), DefaultLoc); } /// \brief Build the initialization of the counter to be used for codegen. Expr *OpenMPIterationSpaceChecker::BuildCounterInit() const { return LB; } /// \brief Build the step of the counter to be used for codegen. Expr *OpenMPIterationSpaceChecker::BuildCounterStep() const { return Step; } /// \brief Iteration space of a single for loop. struct LoopIterationSpace { /// \brief Condition of the loop. Expr *PreCond; /// \brief This expression calculates the number of iterations in the loop. /// It is always possible to calculate it before starting the loop. Expr *NumIterations; /// \brief The loop counter variable. Expr *CounterVar; /// \brief This is the initializer for the initial value of #CounterVar. Expr *CounterInit; /// \brief This is the step for the #CounterVar used to generate its update: /// #CounterVar = #CounterInit + #CounterStep * CurrentIteration. Expr *CounterStep; /// \brief Should step be subtracted? bool Subtract; /// \brief Source range of the loop init. SourceRange InitSrcRange; /// \brief Source range of the loop condition. SourceRange CondSrcRange; /// \brief Source range of the loop increment. SourceRange IncSrcRange; }; } // namespace void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) { assert(getLangOpts().OpenMP && "OpenMP is not active."); assert(Init && "Expected loop in canonical form."); unsigned CollapseIteration = DSAStack->getCollapseNumber(); if (CollapseIteration > 0 && isOpenMPLoopDirective(DSAStack->getCurrentDirective())) { OpenMPIterationSpaceChecker ISC(*this, ForLoc); if (!ISC.CheckInit(Init, /*EmitDiags=*/false)) { DSAStack->addLoopControlVariable(ISC.GetLoopVar()); } DSAStack->setCollapseNumber(CollapseIteration - 1); } } /// \brief Called on a for stmt to check and extract its iteration space /// for further processing (such as collapsing). 
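/// \return true if an error is found, false otherwise.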
static bool CheckOpenMPIterationSpace( OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA, unsigned CurrentNestedLoopCount, unsigned NestedLoopCount, Expr *NestedLoopCountExpr, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA, LoopIterationSpace &ResultIterSpace) { // OpenMP [2.6, Canonical Loop Form] // for (init-expr; test-expr; incr-expr) structured-block auto For = dyn_cast_or_null<ForStmt>(S); if (!For) { SemaRef.Diag(S->getLocStart(), diag::err_omp_not_for) << (NestedLoopCountExpr != nullptr) << getOpenMPDirectiveName(DKind) << NestedLoopCount << (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount; if (NestedLoopCount > 1) SemaRef.Diag(NestedLoopCountExpr->getExprLoc(), diag::note_omp_collapse_expr) << NestedLoopCountExpr->getSourceRange(); return true; } assert(For->getBody()); OpenMPIterationSpaceChecker ISC(SemaRef, For->getForLoc()); // Check init. auto Init = For->getInit(); if (ISC.CheckInit(Init)) { return true; } bool HasErrors = false; // Check loop variable's type. auto Var = ISC.GetLoopVar(); // OpenMP [2.6, Canonical Loop Form] // Var is one of the following: // A variable of signed or unsigned integer type. // For C++, a variable of a random access iterator type. // For C, a variable of a pointer type. auto VarType = Var->getType(); if (!VarType->isDependentType() && !VarType->isIntegerType() && !VarType->isPointerType() && !(SemaRef.getLangOpts().CPlusPlus && VarType->isOverloadableType())) { SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_variable_type) << SemaRef.getLangOpts().CPlusPlus; HasErrors = true; } // OpenMP, 2.14.1.1 Data-sharing Attribute Rules for Variables Referenced in a // Construct // The loop iteration variable(s) in the associated for-loop(s) of a for or // parallel for construct is (are) private. // The loop iteration variable in the associated for-loop of a simd construct // with just one associated for-loop is linear with a constant-linear-step // that is the increment of the associated for-loop. // Exclude loop var from the list of variables with implicitly defined data // sharing attributes. VarsWithImplicitDSA.erase(Var); // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced in // a Construct, C/C++]. // The loop iteration variable in the associated for-loop of a simd construct // with just one associated for-loop may be listed in a linear clause with a // constant-linear-step that is the increment of the associated for-loop. // The loop iteration variable(s) in the associated for-loop(s) of a for or // parallel for construct may be listed in a private or lastprivate clause. DSAStackTy::DSAVarData DVar = DSA.getTopDSA(Var, false); auto LoopVarRefExpr = ISC.GetLoopVarRefExpr(); // If LoopVarRefExpr is nullptr it means the corresponding loop variable is // declared in the loop and it is predetermined as a private. auto PredeterminedCKind = isOpenMPSimdDirective(DKind) ? ((NestedLoopCount == 1) ? 
OMPC_linear : OMPC_lastprivate) : OMPC_private; if (((isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_threadprivate && DVar.CKind != PredeterminedCKind) || (isOpenMPWorksharingDirective(DKind) && !isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate && DVar.CKind != OMPC_threadprivate)) && ((DVar.CKind != OMPC_private && DVar.CKind != OMPC_threadprivate) || DVar.RefExpr != nullptr)) { SemaRef.Diag(Init->getLocStart(), diag::err_omp_loop_var_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(PredeterminedCKind); if (DVar.RefExpr == nullptr) DVar.CKind = PredeterminedCKind; ReportOriginalDSA(SemaRef, &DSA, Var, DVar, /*IsLoopIterVar=*/true); HasErrors = true; } else if (LoopVarRefExpr != nullptr) { // Make the loop iteration variable private (for worksharing constructs), // linear (for simd directives with only one associated loop) or // lastprivate (for simd directives with several collapsed loops). if (DVar.CKind == OMPC_unknown) DVar = DSA.hasDSA(Var, isOpenMPPrivate, MatchesAlways(), /*FromParent=*/false); DSA.addDSA(Var, LoopVarRefExpr, PredeterminedCKind); } assert(isOpenMPLoopDirective(DKind) && "DSA for non-loop vars"); // Check test-expr. HasErrors |= ISC.CheckCond(For->getCond()); // Check incr-expr. HasErrors |= ISC.CheckInc(For->getInc()); if (ISC.Dependent() || SemaRef.CurContext->isDependentContext() || HasErrors) return HasErrors; // Build the loop's iteration space representation. ResultIterSpace.PreCond = ISC.BuildPreCond(DSA.getCurScope(), For->getCond()); ResultIterSpace.NumIterations = ISC.BuildNumIterations( DSA.getCurScope(), /* LimitedType */ isOpenMPWorksharingDirective(DKind)); ResultIterSpace.CounterVar = ISC.BuildCounterVar(); ResultIterSpace.CounterInit = ISC.BuildCounterInit(); ResultIterSpace.CounterStep = ISC.BuildCounterStep(); ResultIterSpace.InitSrcRange = ISC.GetInitSrcRange(); ResultIterSpace.CondSrcRange = ISC.GetConditionSrcRange(); ResultIterSpace.IncSrcRange = ISC.GetIncrementSrcRange(); ResultIterSpace.Subtract = ISC.ShouldSubtractStep(); HasErrors |= (ResultIterSpace.PreCond == nullptr || ResultIterSpace.NumIterations == nullptr || ResultIterSpace.CounterVar == nullptr || ResultIterSpace.CounterInit == nullptr || ResultIterSpace.CounterStep == nullptr); return HasErrors; } /// \brief Build 'VarRef = Start'. static ExprResult BuildCounterInit(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef, ExprResult Start) { TransformToNewDefs Transform(SemaRef); // Build 'VarRef = Start'. auto NewStart = Transform.TransformExpr(Start.get()->IgnoreImplicit()); if (NewStart.isInvalid()) return ExprError(); NewStart = SemaRef.PerformImplicitConversion( NewStart.get(), Start.get()->IgnoreImplicit()->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); if (NewStart.isInvalid()) return ExprError(); NewStart = SemaRef.PerformImplicitConversion( NewStart.get(), VarRef.get()->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); if (!NewStart.isUsable()) return ExprError(); auto Init = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), NewStart.get()); return Init; } /// \brief Build 'VarRef = Start + Iter * Step'. static ExprResult BuildCounterUpdate(Sema &SemaRef, Scope *S, SourceLocation Loc, ExprResult VarRef, ExprResult Start, ExprResult Iter, ExprResult Step, bool Subtract) { // Add parentheses (for debugging purposes only). 
Iter = SemaRef.ActOnParenExpr(Loc, Loc, Iter.get()); if (!VarRef.isUsable() || !Start.isUsable() || !Iter.isUsable() || !Step.isUsable()) return ExprError(); TransformToNewDefs Transform(SemaRef); auto NewStep = Transform.TransformExpr(Step.get()->IgnoreImplicit()); if (NewStep.isInvalid()) return ExprError(); NewStep = SemaRef.PerformImplicitConversion( NewStep.get(), Step.get()->IgnoreImplicit()->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); if (NewStep.isInvalid()) return ExprError(); ExprResult Update = SemaRef.BuildBinOp(S, Loc, BO_Mul, Iter.get(), NewStep.get()); if (!Update.isUsable()) return ExprError(); // Build 'VarRef = Start + Iter * Step'. auto NewStart = Transform.TransformExpr(Start.get()->IgnoreImplicit()); if (NewStart.isInvalid()) return ExprError(); NewStart = SemaRef.PerformImplicitConversion( NewStart.get(), Start.get()->IgnoreImplicit()->getType(), Sema::AA_Converting, /*AllowExplicit=*/true); if (NewStart.isInvalid()) return ExprError(); Update = SemaRef.BuildBinOp(S, Loc, (Subtract ? BO_Sub : BO_Add), NewStart.get(), Update.get()); if (!Update.isUsable()) return ExprError(); Update = SemaRef.PerformImplicitConversion( Update.get(), VarRef.get()->getType(), Sema::AA_Converting, true); if (!Update.isUsable()) return ExprError(); Update = SemaRef.BuildBinOp(S, Loc, BO_Assign, VarRef.get(), Update.get()); return Update; } /// \brief Convert integer expression \a E to make it have at least \a Bits /// bits. static ExprResult WidenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) { if (E == nullptr) return ExprError(); auto &C = SemaRef.Context; QualType OldType = E->getType(); unsigned HasBits = C.getTypeSize(OldType); if (HasBits >= Bits) return ExprResult(E); // OK to convert to signed, because new type has more bits than old. QualType NewType = C.getIntTypeForBitwidth(Bits, /* Signed */ true); return SemaRef.PerformImplicitConversion(E, NewType, Sema::AA_Converting, true); } /// \brief Check if the given expression \a E is a constant integer that fits /// into \a Bits bits. static bool FitsInto(unsigned Bits, bool Signed, Expr *E, Sema &SemaRef) { if (E == nullptr) return false; llvm::APSInt Result; if (E->isIntegerConstantExpr(Result, SemaRef.Context)) return Signed ? Result.isSignedIntN(Bits) : Result.isIntN(Bits); return false; } /// \brief Called on a for stmt to check itself and nested loops (if any). /// \return Returns 0 if one of the collapsed stmts is not canonical for loop, /// number of collapsed loops otherwise. static unsigned CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *NestedLoopCountExpr, Stmt *AStmt, Sema &SemaRef, DSAStackTy &DSA, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA, OMPLoopDirective::HelperExprs &Built) { unsigned NestedLoopCount = 1; if (NestedLoopCountExpr) { // Found 'collapse' clause - calculate collapse number. llvm::APSInt Result; if (NestedLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext())) NestedLoopCount = Result.getLimitedValue(); } // This is helper routine for loop directives (e.g., 'for', 'simd', // 'for simd', etc.). SmallVector<LoopIterationSpace, 4> IterSpaces; IterSpaces.resize(NestedLoopCount); Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true); for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) { if (CheckOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount, NestedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt])) return 0; // Move on to the next nested for loop, or to the loop body. 
// OpenMP [2.8.1, simd construct, Restrictions] // All loops associated with the construct must be perfectly nested; that // is, there must be no intervening code nor any OpenMP directive between // any two loops. CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers(); } Built.clear(/* size */ NestedLoopCount); if (SemaRef.CurContext->isDependentContext()) return NestedLoopCount; // An example of what is generated for the following code: // // #pragma omp simd collapse(2) // for (i = 0; i < NI; ++i) // for (j = J0; j < NJ; j+=2) { // <loop body> // } // // We generate the code below. // Note: the loop body may be outlined in CodeGen. // Note: some counters may be C++ classes, operator- is used to find number of // iterations and operator+= to calculate counter value. // Note: decltype(NumIterations) must be integer type (in 'omp for', only i32 // or i64 is currently supported). // // #define NumIterations (NI * ((NJ - J0 - 1 + 2) / 2)) // for (int[32|64]_t IV = 0; IV < NumIterations; ++IV ) { // .local.i = IV / ((NJ - J0 - 1 + 2) / 2); // .local.j = J0 + (IV % ((NJ - J0 - 1 + 2) / 2)) * 2; // // similar updates for vars in clauses (e.g. 'linear') // <loop body (using local i and j)> // } // i = NI; // assign final values of counters // j = NJ; // // Last iteration number is (I1 * I2 * ... In) - 1, where I1, I2 ... In are // the iteration counts of the collapsed for loops. // Precondition tests if there is at least one iteration (all conditions are // true). auto PreCond = ExprResult(IterSpaces[0].PreCond); auto N0 = IterSpaces[0].NumIterations; ExprResult LastIteration32 = WidenIterationCount( 32 /* Bits */, SemaRef.PerformImplicitConversion( N0->IgnoreImpCasts(), N0->getType(), Sema::AA_Converting, /*AllowExplicit=*/true) .get(), SemaRef); ExprResult LastIteration64 = WidenIterationCount( 64 /* Bits */, SemaRef.PerformImplicitConversion( N0->IgnoreImpCasts(), N0->getType(), Sema::AA_Converting, /*AllowExplicit=*/true) .get(), SemaRef); if (!LastIteration32.isUsable() || !LastIteration64.isUsable()) return NestedLoopCount; auto &C = SemaRef.Context; bool AllCountsNeedLessThan32Bits = C.getTypeSize(N0->getType()) < 32; Scope *CurScope = DSA.getCurScope(); for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) { if (PreCond.isUsable()) { PreCond = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LAnd, PreCond.get(), IterSpaces[Cnt].PreCond); } auto N = IterSpaces[Cnt].NumIterations; AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32; if (LastIteration32.isUsable()) LastIteration32 = SemaRef.BuildBinOp( CurScope, SourceLocation(), BO_Mul, LastIteration32.get(), SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(), Sema::AA_Converting, /*AllowExplicit=*/true) .get()); if (LastIteration64.isUsable()) LastIteration64 = SemaRef.BuildBinOp( CurScope, SourceLocation(), BO_Mul, LastIteration64.get(), SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(), Sema::AA_Converting, /*AllowExplicit=*/true) .get()); } // Choose either the 32-bit or 64-bit version. ExprResult LastIteration = LastIteration64; if (LastIteration32.isUsable() && C.getTypeSize(LastIteration32.get()->getType()) == 32 && (AllCountsNeedLessThan32Bits || NestedLoopCount == 1 || FitsInto( 32 /* Bits */, LastIteration32.get()->getType()->hasSignedIntegerRepresentation(), LastIteration64.get(), SemaRef))) LastIteration = LastIteration32; if (!LastIteration.isUsable()) return 0; // Save the number of iterations. 
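// Below, LastIteration is rewritten as NumIterations - 1 (the zero-based index of the last iteration), while NumIterations keeps the full trip count.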
ExprResult NumIterations = LastIteration; { LastIteration = SemaRef.BuildBinOp( CurScope, SourceLocation(), BO_Sub, LastIteration.get(), SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get()); if (!LastIteration.isUsable()) return 0; } // Calculate the last iteration number beforehand instead of doing this on // each iteration. Do not do this if the number of iterations may be constant-folded. llvm::APSInt Result; bool IsConstant = LastIteration.get()->isIntegerConstantExpr(Result, SemaRef.Context); ExprResult CalcLastIteration; if (!IsConstant) { SourceLocation SaveLoc; VarDecl *SaveVar = buildVarDecl(SemaRef, SaveLoc, LastIteration.get()->getType(), ".omp.last.iteration"); ExprResult SaveRef = buildDeclRefExpr( SemaRef, SaveVar, LastIteration.get()->getType(), SaveLoc); CalcLastIteration = SemaRef.BuildBinOp(CurScope, SaveLoc, BO_Assign, SaveRef.get(), LastIteration.get()); LastIteration = SaveRef; // Prepare SaveRef + 1. NumIterations = SemaRef.BuildBinOp( CurScope, SaveLoc, BO_Add, SaveRef.get(), SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get()); if (!NumIterations.isUsable()) return 0; } SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin(); QualType VType = LastIteration.get()->getType(); // Build variables passed into the runtime, necessary for worksharing directives. ExprResult LB, UB, IL, ST, EUB; if (isOpenMPWorksharingDirective(DKind)) { // Lower bound variable, initialized with zero. VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb"); LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc); SemaRef.AddInitializerToDecl( LBDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); // Upper bound variable, initialized with last iteration number. VarDecl *UBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.ub"); UB = buildDeclRefExpr(SemaRef, UBDecl, VType, InitLoc); SemaRef.AddInitializerToDecl(UBDecl, LastIteration.get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); // A 32-bit variable-flag where the runtime returns 1 for the last iteration. // This will be used to implement clause 'lastprivate'. QualType Int32Ty = SemaRef.Context.getIntTypeForBitwidth(32, true); VarDecl *ILDecl = buildVarDecl(SemaRef, InitLoc, Int32Ty, ".omp.is_last"); IL = buildDeclRefExpr(SemaRef, ILDecl, Int32Ty, InitLoc); SemaRef.AddInitializerToDecl( ILDecl, SemaRef.ActOnIntegerConstant(InitLoc, 0).get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); // Stride variable returned by the runtime (we initialize it to 1 by default). VarDecl *STDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.stride"); ST = buildDeclRefExpr(SemaRef, STDecl, VType, InitLoc); SemaRef.AddInitializerToDecl( STDecl, SemaRef.ActOnIntegerConstant(InitLoc, 1).get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); // Build expression: UB = min(UB, LastIteration) // It is necessary for CodeGen of directives with static scheduling. ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT, UB.get(), LastIteration.get()); ExprResult CondOp = SemaRef.ActOnConditionalOp( InitLoc, InitLoc, IsUBGreater.get(), LastIteration.get(), UB.get()); EUB = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, UB.get(), CondOp.get()); EUB = SemaRef.ActOnFinishFullExpr(EUB.get()); } // Build the iteration variable and its initialization before the loop. ExprResult IV; ExprResult Init; { VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.iv"); IV = buildDeclRefExpr(SemaRef, IVDecl, VType, InitLoc); Expr *RHS = isOpenMPWorksharingDirective(DKind) ? 
LB.get() : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get(); Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS); Init = SemaRef.ActOnFinishFullExpr(Init.get()); } // Loop condition (IV < NumIterations) or (IV <= UB) for worksharing loops. SourceLocation CondLoc; ExprResult Cond = isOpenMPWorksharingDirective(DKind) ? SemaRef.BuildBinOp(CurScope, CondLoc, BO_LE, IV.get(), UB.get()) : SemaRef.BuildBinOp(CurScope, CondLoc, BO_LT, IV.get(), NumIterations.get()); // Loop increment (IV = IV + 1) SourceLocation IncLoc; ExprResult Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, IV.get(), SemaRef.ActOnIntegerConstant(IncLoc, 1).get()); if (!Inc.isUsable()) return 0; Inc = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, IV.get(), Inc.get()); Inc = SemaRef.ActOnFinishFullExpr(Inc.get()); if (!Inc.isUsable()) return 0; // Increments for worksharing loops (LB = LB + ST; UB = UB + ST). // Used for directives with static scheduling. ExprResult NextLB, NextUB; if (isOpenMPWorksharingDirective(DKind)) { // LB + ST NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get()); if (!NextLB.isUsable()) return 0; // LB = LB + ST NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, LB.get(), NextLB.get()); NextLB = SemaRef.ActOnFinishFullExpr(NextLB.get()); if (!NextLB.isUsable()) return 0; // UB + ST NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, UB.get(), ST.get()); if (!NextUB.isUsable()) return 0; // UB = UB + ST NextUB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Assign, UB.get(), NextUB.get()); NextUB = SemaRef.ActOnFinishFullExpr(NextUB.get()); if (!NextUB.isUsable()) return 0; } // Build updates and final values of the loop counters. bool HasErrors = false; Built.Counters.resize(NestedLoopCount); Built.Inits.resize(NestedLoopCount); Built.Updates.resize(NestedLoopCount); Built.Finals.resize(NestedLoopCount); { ExprResult Div; // Go from inner nested loop to outer. for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) { LoopIterationSpace &IS = IterSpaces[Cnt]; SourceLocation UpdLoc = IS.IncSrcRange.getBegin(); // Build: Iter = (IV / Div) % IS.NumIters // where Div is product of previous iterations' IS.NumIters. 
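// For example, with collapse(2) and an inner trip count of NJ, the inner counter is IV % NJ and the outer counter is IV / NJ.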
ExprResult Iter; if (Div.isUsable()) { Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Div, IV.get(), Div.get()); } else { Iter = IV; assert((Cnt == (int)NestedLoopCount - 1) && "unusable div expected on first iteration only"); } if (Cnt != 0 && Iter.isUsable()) Iter = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Rem, Iter.get(), IS.NumIterations); if (!Iter.isUsable()) { HasErrors = true; break; } // Build update: IS.CounterVar(Private) = IS.Start + Iter * IS.Step auto *CounterVar = buildDeclRefExpr( SemaRef, cast<VarDecl>(cast<DeclRefExpr>(IS.CounterVar)->getDecl()), IS.CounterVar->getType(), IS.CounterVar->getExprLoc(), /*RefersToCapture=*/true); ExprResult Init = BuildCounterInit(SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit); if (!Init.isUsable()) { HasErrors = true; break; } ExprResult Update = BuildCounterUpdate(SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, Iter, IS.CounterStep, IS.Subtract); if (!Update.isUsable()) { HasErrors = true; break; } // Build final: IS.CounterVar = IS.Start + IS.NumIters * IS.Step ExprResult Final = BuildCounterUpdate( SemaRef, CurScope, UpdLoc, CounterVar, IS.CounterInit, IS.NumIterations, IS.CounterStep, IS.Subtract); if (!Final.isUsable()) { HasErrors = true; break; } // Build Div for the next iteration: Div <- Div * IS.NumIters if (Cnt != 0) { if (Div.isUnset()) Div = IS.NumIterations; else Div = SemaRef.BuildBinOp(CurScope, UpdLoc, BO_Mul, Div.get(), IS.NumIterations); // Add parentheses (for debugging purposes only). if (Div.isUsable()) Div = SemaRef.ActOnParenExpr(UpdLoc, UpdLoc, Div.get()); if (!Div.isUsable()) { HasErrors = true; break; } } if (!Update.isUsable() || !Final.isUsable()) { HasErrors = true; break; } // Save results Built.Counters[Cnt] = IS.CounterVar; Built.Inits[Cnt] = Init.get(); Built.Updates[Cnt] = Update.get(); Built.Finals[Cnt] = Final.get(); } } if (HasErrors) return 0; // Save results Built.IterationVarRef = IV.get(); Built.LastIteration = LastIteration.get(); Built.NumIterations = NumIterations.get(); Built.CalcLastIteration = SemaRef.ActOnFinishFullExpr(CalcLastIteration.get()).get(); Built.PreCond = PreCond.get(); Built.Cond = Cond.get(); Built.Init = Init.get(); Built.Inc = Inc.get(); Built.LB = LB.get(); Built.UB = UB.get(); Built.IL = IL.get(); Built.ST = ST.get(); Built.EUB = EUB.get(); Built.NLB = NextLB.get(); Built.NUB = NextUB.get(); return NestedLoopCount; } static Expr *GetCollapseNumberExpr(ArrayRef<OMPClause *> Clauses) { auto &&CollapseFilter = [](const OMPClause *C) -> bool { return C->getClauseKind() == OMPC_collapse; }; OMPExecutableDirective::filtered_clause_iterator<decltype(CollapseFilter)> I( Clauses, std::move(CollapseFilter)); if (I) return cast<OMPCollapseClause>(*I)->getNumForLoops(); return nullptr; } StmtResult Sema::ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { OMPLoopDirective::HelperExprs B; // In presence of clause 'collapse', it will define the nested loops number. unsigned NestedLoopCount = CheckOpenMPLoop(OMPD_simd, GetCollapseNumberExpr(Clauses), AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); assert((CurContext->isDependentContext() || B.builtAll()) && "omp simd loop exprs were not built"); if (!CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. 
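// Currently this applies to 'linear' clauses, which need the iteration variable and the number of iterations computed above.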
for (auto C : Clauses) { if (auto LC = dyn_cast<OMPLinearClause>(C)) if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef), B.NumIterations, *this, CurScope)) return StmtError(); } } getCurFunction()->setHasBranchProtectedScope(); return OMPSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } StmtResult Sema::ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { OMPLoopDirective::HelperExprs B; // In presence of clause 'collapse', it will define the nested loops number. unsigned NestedLoopCount = CheckOpenMPLoop(OMPD_for, GetCollapseNumberExpr(Clauses), AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); assert((CurContext->isDependentContext() || B.builtAll()) && "omp for loop exprs were not built"); getCurFunction()->setHasBranchProtectedScope(); return OMPForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } StmtResult Sema::ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { OMPLoopDirective::HelperExprs B; // In presence of clause 'collapse', it will define the nested loops number. unsigned NestedLoopCount = CheckOpenMPLoop(OMPD_for_simd, GetCollapseNumberExpr(Clauses), AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); assert((CurContext->isDependentContext() || B.builtAll()) && "omp for simd loop exprs were not built"); if (!CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (auto C : Clauses) { if (auto LC = dyn_cast<OMPLinearClause>(C)) if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef), B.NumIterations, *this, CurScope)) return StmtError(); } } getCurFunction()->setHasBranchProtectedScope(); return OMPForSimdDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); auto BaseStmt = AStmt; while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt)) BaseStmt = CS->getCapturedStmt(); if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) { auto S = C->children(); if (!S) return StmtError(); // All associated statements must be '#pragma omp section' except for // the first one. 
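// For example: // #pragma omp sections // { // { ... } // first section, no directive required // #pragma omp section // { ... } // }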
for (Stmt *SectionStmt : ++S) { if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) { if (SectionStmt) Diag(SectionStmt->getLocStart(), diag::err_omp_sections_substmt_not_section); return StmtError(); } } } else { Diag(AStmt->getLocStart(), diag::err_omp_sections_not_compound_stmt); return StmtError(); } getCurFunction()->setHasBranchProtectedScope(); return OMPSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); } StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); getCurFunction()->setHasBranchProtectedScope(); return OMPSectionDirective::Create(Context, StartLoc, EndLoc, AStmt); } StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); getCurFunction()->setHasBranchProtectedScope(); // OpenMP [2.7.3, single Construct, Restrictions] // The copyprivate clause must not be used with the nowait clause. OMPClause *Nowait = nullptr; OMPClause *Copyprivate = nullptr; for (auto *Clause : Clauses) { if (Clause->getClauseKind() == OMPC_nowait) Nowait = Clause; else if (Clause->getClauseKind() == OMPC_copyprivate) Copyprivate = Clause; if (Copyprivate && Nowait) { Diag(Copyprivate->getLocStart(), diag::err_omp_single_copyprivate_with_nowait); Diag(Nowait->getLocStart(), diag::note_omp_nowait_clause_here); return StmtError(); } } return OMPSingleDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); } StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); getCurFunction()->setHasBranchProtectedScope(); return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt); } StmtResult Sema::ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); getCurFunction()->setHasBranchProtectedScope(); return OMPCriticalDirective::Create(Context, DirName, StartLoc, EndLoc, AStmt); } StmtResult Sema::ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); CapturedStmt *CS = cast<CapturedStmt>(AStmt); // 1.2.2 OpenMP Language Terminology // Structured block - An executable statement with a single entry at the // top and a single exit at the bottom. // The point of exit cannot be a branch out of the structured block. // longjmp() and throw() must not violate the entry/exit criteria. CS->getCapturedDecl()->setNothrow(); OMPLoopDirective::HelperExprs B; // In presence of clause 'collapse', it will define the nested loops number. 
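// For example, 'collapse(2)' merges the two outermost associated loops into a single iteration space.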
unsigned NestedLoopCount = CheckOpenMPLoop(OMPD_parallel_for, GetCollapseNumberExpr(Clauses), AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); assert((CurContext->isDependentContext() || B.builtAll()) && "omp parallel for loop exprs were not built"); getCurFunction()->setHasBranchProtectedScope(); return OMPParallelForDirective::Create(Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } StmtResult Sema::ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); CapturedStmt *CS = cast<CapturedStmt>(AStmt); // 1.2.2 OpenMP Language Terminology // Structured block - An executable statement with a single entry at the // top and a single exit at the bottom. // The point of exit cannot be a branch out of the structured block. // longjmp() and throw() must not violate the entry/exit criteria. CS->getCapturedDecl()->setNothrow(); OMPLoopDirective::HelperExprs B; // In presence of clause 'collapse', it will define the nested loops number. unsigned NestedLoopCount = CheckOpenMPLoop(OMPD_parallel_for_simd, GetCollapseNumberExpr(Clauses), AStmt, *this, *DSAStack, VarsWithImplicitDSA, B); if (NestedLoopCount == 0) return StmtError(); if (!CurContext->isDependentContext()) { // Finalize the clauses that need pre-built expressions for CodeGen. for (auto C : Clauses) { if (auto LC = dyn_cast<OMPLinearClause>(C)) if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef), B.NumIterations, *this, CurScope)) return StmtError(); } } getCurFunction()->setHasBranchProtectedScope(); return OMPParallelForSimdDirective::Create( Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B); } StmtResult Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); auto BaseStmt = AStmt; while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt)) BaseStmt = CS->getCapturedStmt(); if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) { auto S = C->children(); if (!S) return StmtError(); // All associated statements must be '#pragma omp section' except for // the first one. for (Stmt *SectionStmt : ++S) { if (!SectionStmt || !isa<OMPSectionDirective>(SectionStmt)) { if (SectionStmt) Diag(SectionStmt->getLocStart(), diag::err_omp_parallel_sections_substmt_not_section); return StmtError(); } } } else { Diag(AStmt->getLocStart(), diag::err_omp_parallel_sections_not_compound_stmt); return StmtError(); } getCurFunction()->setHasBranchProtectedScope(); return OMPParallelSectionsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt); } StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) { assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected"); CapturedStmt *CS = cast<CapturedStmt>(AStmt); // 1.2.2 OpenMP Language Terminology // Structured block - An executable statement with a single entry at the // top and a single exit at the bottom. // The point of exit cannot be a branch out of the structured block. // longjmp() and throw() must not violate the entry/exit criteria. 
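// Marking the captured declaration as nothrow encodes the requirement that exceptions may not propagate out of the structured block.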
  CS->getCapturedDecl()->setNothrow();

  getCurFunction()->setHasBranchProtectedScope();

  return OMPTaskDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
}

StmtResult Sema::ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc) {
  return OMPTaskyieldDirective::Create(Context, StartLoc, EndLoc);
}

StmtResult Sema::ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc) {
  return OMPBarrierDirective::Create(Context, StartLoc, EndLoc);
}

StmtResult Sema::ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                              SourceLocation EndLoc) {
  return OMPTaskwaitDirective::Create(Context, StartLoc, EndLoc);
}

StmtResult Sema::ActOnOpenMPTaskgroupDirective(Stmt *AStmt,
                                               SourceLocation StartLoc,
                                               SourceLocation EndLoc) {
  assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");

  getCurFunction()->setHasBranchProtectedScope();

  return OMPTaskgroupDirective::Create(Context, StartLoc, EndLoc, AStmt);
}

StmtResult Sema::ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc) {
  assert(Clauses.size() <= 1 && "Extra clauses in flush directive");
  return OMPFlushDirective::Create(Context, StartLoc, EndLoc, Clauses);
}

StmtResult Sema::ActOnOpenMPOrderedDirective(Stmt *AStmt,
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc) {
  assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");

  getCurFunction()->setHasBranchProtectedScope();

  return OMPOrderedDirective::Create(Context, StartLoc, EndLoc, AStmt);
}

namespace {
/// \brief Helper class for checking expression in 'omp atomic [update]'
/// construct.
class OpenMPAtomicUpdateChecker {
  /// \brief Error results for atomic update expressions.
  enum ExprAnalysisErrorCode {
    /// \brief A statement is not an expression statement.
    NotAnExpression,
    /// \brief Expression is not builtin binary or unary operation.
    NotABinaryOrUnaryExpression,
    /// \brief Unary operation is not post-/pre- increment/decrement operation.
    NotAnUnaryIncDecExpression,
    /// \brief An expression is not of scalar type.
    NotAScalarType,
    /// \brief A binary operation is not an assignment operation.
    NotAnAssignmentOp,
    /// \brief RHS part of the binary operation is not a binary expression.
    NotABinaryExpression,
    /// \brief RHS part is not additive/multiplicative/shift/bitwise binary
    /// expression.
    NotABinaryOperator,
    /// \brief RHS binary operation does not have reference to the updated LHS
    /// part.
    NotAnUpdateExpression,
    /// \brief No error is found.
    NoError
  };
  /// \brief Reference to Sema.
  Sema &SemaRef;
  /// \brief A location for note diagnostics (when error is found).
  SourceLocation NoteLoc;
  /// \brief 'x' lvalue part of the source atomic expression.
  Expr *X;
  /// \brief 'expr' rvalue part of the source atomic expression.
  Expr *E;
  /// \brief Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *UpdateExpr;
  /// \brief Is 'x' a LHS in a RHS part of full update expression. It is
  /// important for non-associative operations.
  bool IsXLHSInRHSPart;
  BinaryOperatorKind Op;
  SourceLocation OpLoc;
  /// \brief true if the source expression is a postfix unary operation, false
  /// if it is a prefix unary operation.
  bool IsPostfixUpdate;

public:
  OpenMPAtomicUpdateChecker(Sema &SemaRef)
      : SemaRef(SemaRef), X(nullptr), E(nullptr), UpdateExpr(nullptr),
        IsXLHSInRHSPart(false), Op(BO_PtrMemD), IsPostfixUpdate(false) {}
  /// \brief Check that the specified statement is suitable for 'atomic
  /// update' constructs and extract 'x', 'expr' and the operation from the
  /// original expression. If DiagId and NoteId == 0, then only the check is
  /// performed, without error notification.
  /// \param DiagId Diagnostic which should be emitted if error is found.
  /// \param NoteId Diagnostic note for the main error message.
  /// \return true if the statement is not an update expression, false
  /// otherwise.
  bool checkStatement(Stmt *S, unsigned DiagId = 0, unsigned NoteId = 0);
  /// \brief Return the 'x' lvalue part of the source atomic expression.
  Expr *getX() const { return X; }
  /// \brief Return the 'expr' rvalue part of the source atomic expression.
  Expr *getExpr() const { return E; }
  /// \brief Return the update expression used in calculation of the updated
  /// value. Always has form 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)'
  /// or 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  Expr *getUpdateExpr() const { return UpdateExpr; }
  /// \brief Return true if 'x' is LHS in RHS part of full update expression,
  /// false otherwise.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// \brief true if the source expression is a postfix unary operation, false
  /// if it is a prefix unary operation.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }

private:
  bool checkBinaryOperation(BinaryOperator *AtomicBinOp, unsigned DiagId = 0,
                            unsigned NoteId = 0);
};
} // namespace

bool OpenMPAtomicUpdateChecker::checkBinaryOperation(
    BinaryOperator *AtomicBinOp, unsigned DiagId, unsigned NoteId) {
  ExprAnalysisErrorCode ErrorFound = NoError;
  SourceLocation ErrorLoc, NoteLoc;
  SourceRange ErrorRange, NoteRange;
  // Allowed constructs are:
  //  x = x binop expr;
  //  x = expr binop x;
  if (AtomicBinOp->getOpcode() == BO_Assign) {
    X = AtomicBinOp->getLHS();
    if (auto *AtomicInnerBinOp = dyn_cast<BinaryOperator>(
            AtomicBinOp->getRHS()->IgnoreParenImpCasts())) {
      if (AtomicInnerBinOp->isMultiplicativeOp() ||
          AtomicInnerBinOp->isAdditiveOp() || AtomicInnerBinOp->isShiftOp() ||
          AtomicInnerBinOp->isBitwiseOp()) {
        Op = AtomicInnerBinOp->getOpcode();
        OpLoc = AtomicInnerBinOp->getOperatorLoc();
        auto *LHS = AtomicInnerBinOp->getLHS();
        auto *RHS = AtomicInnerBinOp->getRHS();
        llvm::FoldingSetNodeID XId, LHSId, RHSId;
        X->IgnoreParenImpCasts()->Profile(XId, SemaRef.getASTContext(),
                                          /*Canonical=*/true);
        LHS->IgnoreParenImpCasts()->Profile(LHSId, SemaRef.getASTContext(),
                                            /*Canonical=*/true);
        RHS->IgnoreParenImpCasts()->Profile(RHSId, SemaRef.getASTContext(),
                                            /*Canonical=*/true);
        if (XId == LHSId) {
          E = RHS;
          IsXLHSInRHSPart = true;
        } else if (XId == RHSId) {
          E = LHS;
          IsXLHSInRHSPart = false;
        } else {
          ErrorLoc = AtomicInnerBinOp->getExprLoc();
          ErrorRange = AtomicInnerBinOp->getSourceRange();
          NoteLoc = X->getExprLoc();
          NoteRange = X->getSourceRange();
          ErrorFound = NotAnUpdateExpression;
        }
      } else {
        ErrorLoc = AtomicInnerBinOp->getExprLoc();
        ErrorRange = AtomicInnerBinOp->getSourceRange();
        NoteLoc = AtomicInnerBinOp->getOperatorLoc();
        NoteRange = SourceRange(NoteLoc, NoteLoc);
        ErrorFound = NotABinaryOperator;
      }
    } else {
      NoteLoc = ErrorLoc = AtomicBinOp->getRHS()->getExprLoc();
      NoteRange = ErrorRange = AtomicBinOp->getRHS()->getSourceRange();
      ErrorFound = NotABinaryExpression;
    }
  } else {
    ErrorLoc = AtomicBinOp->getExprLoc();
    ErrorRange = AtomicBinOp->getSourceRange();
    NoteLoc = AtomicBinOp->getOperatorLoc();
    NoteRange = SourceRange(NoteLoc, NoteLoc);
    ErrorFound = NotAnAssignmentOp;
  }
  if (ErrorFound != NoError && DiagId != 0 && NoteId != 0) {
    SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange;
    SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
    return true;
  } else if (SemaRef.CurContext->isDependentContext())
    E = X = UpdateExpr = nullptr;
  return ErrorFound != NoError;
}

bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
                                               unsigned NoteId) {
  ExprAnalysisErrorCode ErrorFound = NoError;
  SourceLocation ErrorLoc, NoteLoc;
  SourceRange ErrorRange, NoteRange;
  // Allowed constructs are:
  //  x++;
  //  x--;
  //  ++x;
  //  --x;
  //  x binop= expr;
  //  x = x binop expr;
  //  x = expr binop x;
  if (auto *AtomicBody = dyn_cast<Expr>(S)) {
    AtomicBody = AtomicBody->IgnoreParenImpCasts();
    if (AtomicBody->getType()->isScalarType() ||
        AtomicBody->isInstantiationDependent()) {
      if (auto *AtomicCompAssignOp = dyn_cast<CompoundAssignOperator>(
              AtomicBody->IgnoreParenImpCasts())) {
        // Check for Compound Assignment Operation
        Op = BinaryOperator::getOpForCompoundAssignment(
            AtomicCompAssignOp->getOpcode());
        OpLoc = AtomicCompAssignOp->getOperatorLoc();
        E = AtomicCompAssignOp->getRHS();
        X = AtomicCompAssignOp->getLHS();
        IsXLHSInRHSPart = true;
      } else if (auto *AtomicBinOp = dyn_cast<BinaryOperator>(
                     AtomicBody->IgnoreParenImpCasts())) {
        // Check for Binary Operation
        if (checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
          return true;
      } else if (auto *AtomicUnaryOp = dyn_cast<UnaryOperator>(
                     AtomicBody->IgnoreParenImpCasts())) {
        // Check for Unary Operation
        if (AtomicUnaryOp->isIncrementDecrementOp()) {
          IsPostfixUpdate = AtomicUnaryOp->isPostfix();
          Op = AtomicUnaryOp->isIncrementOp() ? BO_Add : BO_Sub;
          OpLoc = AtomicUnaryOp->getOperatorLoc();
          X = AtomicUnaryOp->getSubExpr();
          E = SemaRef.ActOnIntegerConstant(OpLoc, /*uint64_t Val=*/1).get();
          IsXLHSInRHSPart = true;
        } else {
          ErrorFound = NotAnUnaryIncDecExpression;
          ErrorLoc = AtomicUnaryOp->getExprLoc();
          ErrorRange = AtomicUnaryOp->getSourceRange();
          NoteLoc = AtomicUnaryOp->getOperatorLoc();
          NoteRange = SourceRange(NoteLoc, NoteLoc);
        }
      } else {
        ErrorFound = NotABinaryOrUnaryExpression;
        NoteLoc = ErrorLoc = AtomicBody->getExprLoc();
        NoteRange = ErrorRange = AtomicBody->getSourceRange();
      }
    } else {
      ErrorFound = NotAScalarType;
      NoteLoc = ErrorLoc = AtomicBody->getLocStart();
      NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
    }
  } else {
    ErrorFound = NotAnExpression;
    NoteLoc = ErrorLoc = S->getLocStart();
    NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
  }
  if (ErrorFound != NoError && DiagId != 0 && NoteId != 0) {
    SemaRef.Diag(ErrorLoc, DiagId) << ErrorRange;
    SemaRef.Diag(NoteLoc, NoteId) << ErrorFound << NoteRange;
    return true;
  } else if (SemaRef.CurContext->isDependentContext())
    E = X = UpdateExpr = nullptr;
  if (ErrorFound == NoError && E && X) {
    // Build an update expression of form 'OpaqueValueExpr(x) binop
    // OpaqueValueExpr(expr)' or 'OpaqueValueExpr(expr) binop
    // OpaqueValueExpr(x)' and then cast it to the type of the 'x' expression.
    auto *OVEX = new (SemaRef.getASTContext())
        OpaqueValueExpr(X->getExprLoc(), X->getType(), VK_RValue);
    auto *OVEExpr = new (SemaRef.getASTContext())
        OpaqueValueExpr(E->getExprLoc(), E->getType(), VK_RValue);
    auto Update = SemaRef.CreateBuiltinBinOp(OpLoc, Op,
                                             IsXLHSInRHSPart ? OVEX : OVEExpr,
                                             IsXLHSInRHSPart ?
                                                 OVEExpr : OVEX);
    if (Update.isInvalid())
      return true;
    Update = SemaRef.PerformImplicitConversion(Update.get(), X->getType(),
                                               Sema::AA_Casting);
    if (Update.isInvalid())
      return true;
    UpdateExpr = Update.get();
  }
  return ErrorFound != NoError;
}

StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc) {
  assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
  auto CS = cast<CapturedStmt>(AStmt);
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
  OpenMPClauseKind AtomicKind = OMPC_unknown;
  SourceLocation AtomicKindLoc;
  for (auto *C : Clauses) {
    if (C->getClauseKind() == OMPC_read || C->getClauseKind() == OMPC_write ||
        C->getClauseKind() == OMPC_update ||
        C->getClauseKind() == OMPC_capture) {
      if (AtomicKind != OMPC_unknown) {
        Diag(C->getLocStart(), diag::err_omp_atomic_several_clauses)
            << SourceRange(C->getLocStart(), C->getLocEnd());
        Diag(AtomicKindLoc, diag::note_omp_atomic_previous_clause)
            << getOpenMPClauseName(AtomicKind);
      } else {
        AtomicKind = C->getClauseKind();
        AtomicKindLoc = C->getLocStart();
      }
    }
  }

  auto Body = CS->getCapturedStmt();
  if (auto *EWC = dyn_cast<ExprWithCleanups>(Body))
    Body = EWC->getSubExpr();

  Expr *X = nullptr;
  Expr *V = nullptr;
  Expr *E = nullptr;
  Expr *UE = nullptr;
  bool IsXLHSInRHSPart = false;
  bool IsPostfixUpdate = false;
  // OpenMP [2.12.6, atomic Construct]
  // In the next expressions:
  // * x and v (as applicable) are both l-value expressions with scalar type.
  // * During the execution of an atomic region, multiple syntactic
  // occurrences of x must designate the same storage location.
  // * Neither of v and expr (as applicable) may access the storage location
  // designated by x.
  // * Neither of x and expr (as applicable) may access the storage location
  // designated by v.
  // * expr is an expression with scalar type.
  // * binop is one of +, *, -, /, &, ^, |, <<, or >>.
  // * binop, binop=, ++, and -- are not overloaded operators.
  // * The expression x binop expr must be numerically equivalent to x binop
  // (expr). This requirement is satisfied if the operators in expr have
  // precedence greater than binop, or by using parentheses around expr or
  // subexpressions of expr.
  // * The expression expr binop x must be numerically equivalent to (expr)
  // binop x. This requirement is satisfied if the operators in expr have
  // precedence equal to or greater than binop, or by using parentheses around
  // expr or subexpressions of expr.
  // * For forms that allow multiple occurrences of x, the number of times
  // that x is evaluated is unspecified.
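  // e.g. (illustrative): under '#pragma omp atomic update', 'x = x + expr;'
  // and 'x = expr * x;' are accepted, while 'v = x + expr;' is not, because
  // the right-hand side must reference the updated lvalue 'x' itself.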
  if (AtomicKind == OMPC_read) {
    enum { NotAnExpression, NotAnAssignmentOp, NotAScalarType, NotAnLValue,
           NoError } ErrorFound = NoError;
    SourceLocation ErrorLoc, NoteLoc;
    SourceRange ErrorRange, NoteRange;
    // If clause is read:
    //  v = x;
    if (auto AtomicBody = dyn_cast<Expr>(Body)) {
      auto AtomicBinOp =
          dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
      if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
        X = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
        V = AtomicBinOp->getLHS()->IgnoreParenImpCasts();
        if ((X->isInstantiationDependent() || X->getType()->isScalarType()) &&
            (V->isInstantiationDependent() || V->getType()->isScalarType())) {
          if (!X->isLValue() || !V->isLValue()) {
            auto NotLValueExpr = X->isLValue() ? V : X;
            ErrorFound = NotAnLValue;
            ErrorLoc = AtomicBinOp->getExprLoc();
            ErrorRange = AtomicBinOp->getSourceRange();
            NoteLoc = NotLValueExpr->getExprLoc();
            NoteRange = NotLValueExpr->getSourceRange();
          }
        } else if (!X->isInstantiationDependent() ||
                   !V->isInstantiationDependent()) {
          auto NotScalarExpr =
              (X->isInstantiationDependent() || X->getType()->isScalarType())
                  ? V
                  : X;
          ErrorFound = NotAScalarType;
          ErrorLoc = AtomicBinOp->getExprLoc();
          ErrorRange = AtomicBinOp->getSourceRange();
          NoteLoc = NotScalarExpr->getExprLoc();
          NoteRange = NotScalarExpr->getSourceRange();
        }
      } else {
        ErrorFound = NotAnAssignmentOp;
        ErrorLoc = AtomicBody->getExprLoc();
        ErrorRange = AtomicBody->getSourceRange();
        NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
                              : AtomicBody->getExprLoc();
        NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange()
                                : AtomicBody->getSourceRange();
      }
    } else {
      ErrorFound = NotAnExpression;
      NoteLoc = ErrorLoc = Body->getLocStart();
      NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
    }
    if (ErrorFound != NoError) {
      Diag(ErrorLoc, diag::err_omp_atomic_read_not_expression_statement)
          << ErrorRange;
      Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
                                                      << NoteRange;
      return StmtError();
    } else if (CurContext->isDependentContext())
      V = X = nullptr;
  } else if (AtomicKind == OMPC_write) {
    enum { NotAnExpression, NotAnAssignmentOp, NotAScalarType, NotAnLValue,
           NoError } ErrorFound = NoError;
    SourceLocation ErrorLoc, NoteLoc;
    SourceRange ErrorRange, NoteRange;
    // If clause is write:
    //  x = expr;
    if (auto AtomicBody = dyn_cast<Expr>(Body)) {
      auto AtomicBinOp =
          dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
      if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
        X = AtomicBinOp->getLHS();
        E = AtomicBinOp->getRHS();
        if ((X->isInstantiationDependent() || X->getType()->isScalarType()) &&
            (E->isInstantiationDependent() || E->getType()->isScalarType())) {
          if (!X->isLValue()) {
            ErrorFound = NotAnLValue;
            ErrorLoc = AtomicBinOp->getExprLoc();
            ErrorRange = AtomicBinOp->getSourceRange();
            NoteLoc = X->getExprLoc();
            NoteRange = X->getSourceRange();
          }
        } else if (!X->isInstantiationDependent() ||
                   !E->isInstantiationDependent()) {
          auto NotScalarExpr =
              (X->isInstantiationDependent() || X->getType()->isScalarType())
                  ? E
                  : X;
          ErrorFound = NotAScalarType;
          ErrorLoc = AtomicBinOp->getExprLoc();
          ErrorRange = AtomicBinOp->getSourceRange();
          NoteLoc = NotScalarExpr->getExprLoc();
          NoteRange = NotScalarExpr->getSourceRange();
        }
      } else {
        ErrorFound = NotAnAssignmentOp;
        ErrorLoc = AtomicBody->getExprLoc();
        ErrorRange = AtomicBody->getSourceRange();
        NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
                              : AtomicBody->getExprLoc();
        NoteRange = AtomicBinOp ?
            AtomicBinOp->getSourceRange() : AtomicBody->getSourceRange();
      }
    } else {
      ErrorFound = NotAnExpression;
      NoteLoc = ErrorLoc = Body->getLocStart();
      NoteRange = ErrorRange = SourceRange(NoteLoc, NoteLoc);
    }
    if (ErrorFound != NoError) {
      Diag(ErrorLoc, diag::err_omp_atomic_write_not_expression_statement)
          << ErrorRange;
      Diag(NoteLoc, diag::note_omp_atomic_read_write) << ErrorFound
                                                      << NoteRange;
      return StmtError();
    } else if (CurContext->isDependentContext())
      E = X = nullptr;
  } else if (AtomicKind == OMPC_update || AtomicKind == OMPC_unknown) {
    // If clause is update:
    //  x++;
    //  x--;
    //  ++x;
    //  --x;
    //  x binop= expr;
    //  x = x binop expr;
    //  x = expr binop x;
    OpenMPAtomicUpdateChecker Checker(*this);
    if (Checker.checkStatement(
            Body, (AtomicKind == OMPC_update)
                      ? diag::err_omp_atomic_update_not_expression_statement
                      : diag::err_omp_atomic_not_expression_statement,
            diag::note_omp_atomic_update))
      return StmtError();
    if (!CurContext->isDependentContext()) {
      E = Checker.getExpr();
      X = Checker.getX();
      UE = Checker.getUpdateExpr();
      IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
    }
  } else if (AtomicKind == OMPC_capture) {
    enum { NotAnAssignmentOp, NotACompoundStatement, NotTwoSubstatements,
           NotASpecificExpression, NoError } ErrorFound = NoError;
    SourceLocation ErrorLoc, NoteLoc;
    SourceRange ErrorRange, NoteRange;
    if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
      // If clause is a capture:
      //  v = x++;
      //  v = x--;
      //  v = ++x;
      //  v = --x;
      //  v = x binop= expr;
      //  v = x = x binop expr;
      //  v = x = expr binop x;
      auto *AtomicBinOp =
          dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
      if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
        V = AtomicBinOp->getLHS();
        Body = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
        OpenMPAtomicUpdateChecker Checker(*this);
        if (Checker.checkStatement(
                Body, diag::err_omp_atomic_capture_not_expression_statement,
                diag::note_omp_atomic_update))
          return StmtError();
        E = Checker.getExpr();
        X = Checker.getX();
        UE = Checker.getUpdateExpr();
        IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
        IsPostfixUpdate = Checker.isPostfixUpdate();
      } else {
        ErrorLoc = AtomicBody->getExprLoc();
        ErrorRange = AtomicBody->getSourceRange();
        NoteLoc = AtomicBinOp ? AtomicBinOp->getOperatorLoc()
                              : AtomicBody->getExprLoc();
        NoteRange = AtomicBinOp ? AtomicBinOp->getSourceRange()
                                : AtomicBody->getSourceRange();
        ErrorFound = NotAnAssignmentOp;
      }
      if (ErrorFound != NoError) {
        Diag(ErrorLoc, diag::err_omp_atomic_capture_not_expression_statement)
            << ErrorRange;
        Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound
                                                     << NoteRange;
        return StmtError();
      } else if (CurContext->isDependentContext()) {
        UE = V = E = X = nullptr;
      }
    } else {
      // If clause is a capture:
      //  { v = x; x = expr; }
      //  { v = x; x++; }
      //  { v = x; x--; }
      //  { v = x; ++x; }
      //  { v = x; --x; }
      //  { v = x; x binop= expr; }
      //  { v = x; x = x binop expr; }
      //  { v = x; x = expr binop x; }
      //  { x++; v = x; }
      //  { x--; v = x; }
      //  { ++x; v = x; }
      //  { --x; v = x; }
      //  { x binop= expr; v = x; }
      //  { x = x binop expr; v = x; }
      //  { x = expr binop x; v = x; }
      if (auto *CS = dyn_cast<CompoundStmt>(Body)) {
        // Check that this is { expr1; expr2; }
        if (CS->size() == 2) {
          auto *First = CS->body_front();
          auto *Second = CS->body_back();
          if (auto *EWC = dyn_cast<ExprWithCleanups>(First))
            First = EWC->getSubExpr()->IgnoreParenImpCasts();
          if (auto *EWC = dyn_cast<ExprWithCleanups>(Second))
            Second = EWC->getSubExpr()->IgnoreParenImpCasts();
          // Need to find what subexpression is 'v' and what is 'x'.
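          // e.g. (illustrative): in '{ v = x; x += expr; }' the second
          // substatement is the update, so 'v = x' captures the old value;
          // in '{ x += expr; v = x; }' it captures the new one.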
          OpenMPAtomicUpdateChecker Checker(*this);
          bool IsUpdateExprFound = !Checker.checkStatement(Second);
          BinaryOperator *BinOp = nullptr;
          if (IsUpdateExprFound) {
            BinOp = dyn_cast<BinaryOperator>(First);
            IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign;
          }
          if (IsUpdateExprFound && !CurContext->isDependentContext()) {
            //  { v = x; x++; }
            //  { v = x; x--; }
            //  { v = x; ++x; }
            //  { v = x; --x; }
            //  { v = x; x binop= expr; }
            //  { v = x; x = x binop expr; }
            //  { v = x; x = expr binop x; }
            // Check that the first expression has form v = x.
            auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
            llvm::FoldingSetNodeID XId, PossibleXId;
            Checker.getX()->Profile(XId, Context, /*Canonical=*/true);
            PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true);
            IsUpdateExprFound = XId == PossibleXId;
            if (IsUpdateExprFound) {
              V = BinOp->getLHS();
              X = Checker.getX();
              E = Checker.getExpr();
              UE = Checker.getUpdateExpr();
              IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
              IsPostfixUpdate = true;
            }
          }
          if (!IsUpdateExprFound) {
            IsUpdateExprFound = !Checker.checkStatement(First);
            BinOp = nullptr;
            if (IsUpdateExprFound) {
              BinOp = dyn_cast<BinaryOperator>(Second);
              IsUpdateExprFound = BinOp && BinOp->getOpcode() == BO_Assign;
            }
            if (IsUpdateExprFound && !CurContext->isDependentContext()) {
              //  { x++; v = x; }
              //  { x--; v = x; }
              //  { ++x; v = x; }
              //  { --x; v = x; }
              //  { x binop= expr; v = x; }
              //  { x = x binop expr; v = x; }
              //  { x = expr binop x; v = x; }
              // Check that the second expression has form v = x.
              auto *PossibleX = BinOp->getRHS()->IgnoreParenImpCasts();
              llvm::FoldingSetNodeID XId, PossibleXId;
              Checker.getX()->Profile(XId, Context, /*Canonical=*/true);
              PossibleX->Profile(PossibleXId, Context, /*Canonical=*/true);
              IsUpdateExprFound = XId == PossibleXId;
              if (IsUpdateExprFound) {
                V = BinOp->getLHS();
                X = Checker.getX();
                E = Checker.getExpr();
                UE = Checker.getUpdateExpr();
                IsXLHSInRHSPart = Checker.isXLHSInRHSPart();
                IsPostfixUpdate = false;
              }
            }
          }
          if (!IsUpdateExprFound) {
            //  { v = x; x = expr; }
            auto *FirstBinOp = dyn_cast<BinaryOperator>(First);
            if (!FirstBinOp || FirstBinOp->getOpcode() != BO_Assign) {
              ErrorFound = NotAnAssignmentOp;
              NoteLoc = ErrorLoc = FirstBinOp ? FirstBinOp->getOperatorLoc()
                                              : First->getLocStart();
              NoteRange = ErrorRange =
                  FirstBinOp ? FirstBinOp->getSourceRange()
                             : SourceRange(ErrorLoc, ErrorLoc);
            } else {
              auto *SecondBinOp = dyn_cast<BinaryOperator>(Second);
              if (!SecondBinOp || SecondBinOp->getOpcode() != BO_Assign) {
                ErrorFound = NotAnAssignmentOp;
                NoteLoc = ErrorLoc = SecondBinOp
                                         ? SecondBinOp->getOperatorLoc()
                                         : Second->getLocStart();
                NoteRange = ErrorRange = SecondBinOp ?
                    SecondBinOp->getSourceRange()
                                        : SourceRange(ErrorLoc, ErrorLoc);
              } else {
                auto *PossibleXRHSInFirst =
                    FirstBinOp->getRHS()->IgnoreParenImpCasts();
                auto *PossibleXLHSInSecond =
                    SecondBinOp->getLHS()->IgnoreParenImpCasts();
                llvm::FoldingSetNodeID X1Id, X2Id;
                PossibleXRHSInFirst->Profile(X1Id, Context,
                                             /*Canonical=*/true);
                PossibleXLHSInSecond->Profile(X2Id, Context,
                                              /*Canonical=*/true);
                IsUpdateExprFound = X1Id == X2Id;
                if (IsUpdateExprFound) {
                  V = FirstBinOp->getLHS();
                  X = SecondBinOp->getLHS();
                  E = SecondBinOp->getRHS();
                  UE = nullptr;
                  IsXLHSInRHSPart = false;
                  IsPostfixUpdate = true;
                } else {
                  ErrorFound = NotASpecificExpression;
                  ErrorLoc = FirstBinOp->getExprLoc();
                  ErrorRange = FirstBinOp->getSourceRange();
                  NoteLoc = SecondBinOp->getLHS()->getExprLoc();
                  NoteRange = SecondBinOp->getRHS()->getSourceRange();
                }
              }
            }
          }
        } else {
          NoteLoc = ErrorLoc = Body->getLocStart();
          NoteRange = ErrorRange =
              SourceRange(Body->getLocStart(), Body->getLocStart());
          ErrorFound = NotTwoSubstatements;
        }
      } else {
        NoteLoc = ErrorLoc = Body->getLocStart();
        NoteRange = ErrorRange =
            SourceRange(Body->getLocStart(), Body->getLocStart());
        ErrorFound = NotACompoundStatement;
      }
      if (ErrorFound != NoError) {
        Diag(ErrorLoc, diag::err_omp_atomic_capture_not_compound_statement)
            << ErrorRange;
        Diag(NoteLoc, diag::note_omp_atomic_capture) << ErrorFound
                                                     << NoteRange;
        return StmtError();
      } else if (CurContext->isDependentContext()) {
        UE = V = E = X = nullptr;
      }
    }
  }

  getCurFunction()->setHasBranchProtectedScope();

  return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
                                    X, V, E, UE, IsXLHSInRHSPart,
                                    IsPostfixUpdate);
}

StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc) {
  assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");

  // OpenMP [2.16, Nesting of Regions]
  // If specified, a teams construct must be contained within a target
  // construct. That target construct must contain no statements or directives
  // outside of the teams construct.
  if (DSAStack->hasInnerTeamsRegion()) {
    auto S = AStmt->IgnoreContainers(/*IgnoreCaptured*/ true);
    bool OMPTeamsFound = true;
    if (auto *CS = dyn_cast<CompoundStmt>(S)) {
      auto I = CS->body_begin();
      while (I != CS->body_end()) {
        auto OED = dyn_cast<OMPExecutableDirective>(*I);
        if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind())) {
          OMPTeamsFound = false;
          break;
        }
        ++I;
      }
      assert(I != CS->body_end() && "Not found statement");
      S = *I;
    }
    if (!OMPTeamsFound) {
      Diag(StartLoc, diag::err_omp_target_contains_not_only_teams);
      Diag(DSAStack->getInnerTeamsRegionLoc(),
           diag::note_omp_nested_teams_construct_here);
      Diag(S->getLocStart(), diag::note_omp_nested_statement_here)
          << isa<OMPExecutableDirective>(S);
      return StmtError();
    }
  }

  getCurFunction()->setHasBranchProtectedScope();

  return OMPTargetDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
}

StmtResult Sema::ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt, SourceLocation StartLoc,
                                           SourceLocation EndLoc) {
  assert(AStmt && isa<CapturedStmt>(AStmt) && "Captured statement expected");
  CapturedStmt *CS = cast<CapturedStmt>(AStmt);
  // 1.2.2 OpenMP Language Terminology
  // Structured block - An executable statement with a single entry at the
  // top and a single exit at the bottom.
  // The point of exit cannot be a branch out of the structured block.
  // longjmp() and throw() must not violate the entry/exit criteria.
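  // e.g. (illustrative): a teams directive is accepted when it is the sole
  // statement of an enclosing target region, as checked in
  // ActOnOpenMPTargetDirective above:
  //   #pragma omp target
  //   #pragma omp teams
  //   { ... }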
  CS->getCapturedDecl()->setNothrow();

  getCurFunction()->setHasBranchProtectedScope();

  return OMPTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
}

StmtResult
Sema::ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                            SourceLocation EndLoc,
                                            OpenMPDirectiveKind CancelRegion) {
  if (CancelRegion != OMPD_parallel && CancelRegion != OMPD_for &&
      CancelRegion != OMPD_sections && CancelRegion != OMPD_taskgroup) {
    Diag(StartLoc, diag::err_omp_wrong_cancel_region)
        << getOpenMPDirectiveName(CancelRegion);
    return StmtError();
  }
  if (DSAStack->isParentNowaitRegion()) {
    Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 0;
    return StmtError();
  }
  if (DSAStack->isParentOrderedRegion()) {
    Diag(StartLoc, diag::err_omp_parent_cancel_region_ordered) << 0;
    return StmtError();
  }
  return OMPCancellationPointDirective::Create(Context, StartLoc, EndLoc,
                                               CancelRegion);
}

StmtResult Sema::ActOnOpenMPCancelDirective(SourceLocation StartLoc,
                                            SourceLocation EndLoc,
                                            OpenMPDirectiveKind CancelRegion) {
  if (CancelRegion != OMPD_parallel && CancelRegion != OMPD_for &&
      CancelRegion != OMPD_sections && CancelRegion != OMPD_taskgroup) {
    Diag(StartLoc, diag::err_omp_wrong_cancel_region)
        << getOpenMPDirectiveName(CancelRegion);
    return StmtError();
  }
  if (DSAStack->isParentNowaitRegion()) {
    Diag(StartLoc, diag::err_omp_parent_cancel_region_nowait) << 1;
    return StmtError();
  }
  if (DSAStack->isParentOrderedRegion()) {
    Diag(StartLoc, diag::err_omp_parent_cancel_region_ordered) << 1;
    return StmtError();
  }
  return OMPCancelDirective::Create(Context, StartLoc, EndLoc, CancelRegion);
}

OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                             SourceLocation StartLoc,
                                             SourceLocation LParenLoc,
                                             SourceLocation EndLoc) {
  OMPClause *Res = nullptr;
  switch (Kind) {
  case OMPC_if:
    Res = ActOnOpenMPIfClause(Expr, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_final:
    Res = ActOnOpenMPFinalClause(Expr, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_num_threads:
    Res = ActOnOpenMPNumThreadsClause(Expr, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_safelen:
    Res = ActOnOpenMPSafelenClause(Expr, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_collapse:
    Res = ActOnOpenMPCollapseClause(Expr, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_default:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_shared:
  case OMPC_reduction:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_mergeable:
  case OMPC_threadprivate:
  case OMPC_flush:
  case OMPC_read:
  case OMPC_write:
  case OMPC_update:
  case OMPC_capture:
  case OMPC_seq_cst:
  case OMPC_depend:
  case OMPC_unknown:
    llvm_unreachable("Clause is not allowed.");
  }
  return Res;
}

OMPClause *Sema::ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc) {
  Expr *ValExpr = Condition;
  if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
      !Condition->isInstantiationDependent() &&
      !Condition->containsUnexpandedParameterPack()) {
    ExprResult Val = ActOnBooleanCondition(DSAStack->getCurScope(),
                                           Condition->getExprLoc(), Condition);
    if (Val.isInvalid())
      return nullptr;

    ValExpr = Val.get();
  }

  return new (Context) OMPIfClause(ValExpr, StartLoc, LParenLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPFinalClause(Expr *Condition,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc) {
  Expr *ValExpr = Condition;
  if (!Condition->isValueDependent() &&
      !Condition->isTypeDependent() &&
      !Condition->isInstantiationDependent() &&
      !Condition->containsUnexpandedParameterPack()) {
    ExprResult Val = ActOnBooleanCondition(DSAStack->getCurScope(),
                                           Condition->getExprLoc(), Condition);
    if (Val.isInvalid())
      return nullptr;

    ValExpr = Val.get();
  }

  return new (Context) OMPFinalClause(ValExpr, StartLoc, LParenLoc, EndLoc);
}

ExprResult Sema::PerformOpenMPImplicitIntegerConversion(SourceLocation Loc,
                                                        Expr *Op) {
  if (!Op)
    return ExprError();

  class IntConvertDiagnoser : public ICEConvertDiagnoser {
  public:
    IntConvertDiagnoser()
        : ICEConvertDiagnoser(/*AllowScopedEnumerations*/ false, false, true) {}
    SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                         QualType T) override {
      return S.Diag(Loc, diag::err_omp_not_integral) << T;
    }
    SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                             QualType T) override {
      return S.Diag(Loc, diag::err_omp_incomplete_type) << T;
    }
    SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc,
                                               QualType T,
                                               QualType ConvTy) override {
      return S.Diag(Loc, diag::err_omp_explicit_conversion) << T << ConvTy;
    }
    SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv,
                                           QualType ConvTy) override {
      return S.Diag(Conv->getLocation(), diag::note_omp_conversion_here)
             << ConvTy->isEnumeralType() << ConvTy;
    }
    SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                            QualType T) override {
      return S.Diag(Loc, diag::err_omp_ambiguous_conversion) << T;
    }
    SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv,
                                        QualType ConvTy) override {
      return S.Diag(Conv->getLocation(), diag::note_omp_conversion_here)
             << ConvTy->isEnumeralType() << ConvTy;
    }
    SemaDiagnosticBuilder diagnoseConversion(Sema &, SourceLocation, QualType,
                                             QualType) override {
      llvm_unreachable("conversion functions are permitted");
    }
  } ConvertDiagnoser;
  return PerformContextualImplicitConversion(Loc, Op, ConvertDiagnoser);
}

OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                             SourceLocation StartLoc,
                                             SourceLocation LParenLoc,
                                             SourceLocation EndLoc) {
  Expr *ValExpr = NumThreads;
  if (!NumThreads->isValueDependent() && !NumThreads->isTypeDependent() &&
      !NumThreads->containsUnexpandedParameterPack()) {
    SourceLocation NumThreadsLoc = NumThreads->getLocStart();
    ExprResult Val =
        PerformOpenMPImplicitIntegerConversion(NumThreadsLoc, NumThreads);
    if (Val.isInvalid())
      return nullptr;

    ValExpr = Val.get();

    // OpenMP [2.5, Restrictions]
    //  The num_threads expression must evaluate to a positive integer value.
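    // e.g. (illustrative): 'num_threads(4)' is accepted; 'num_threads(-4)'
    // folds to a non-positive signed constant and is diagnosed below.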
    llvm::APSInt Result;
    if (ValExpr->isIntegerConstantExpr(Result, Context) && Result.isSigned() &&
        !Result.isStrictlyPositive()) {
      Diag(NumThreadsLoc, diag::err_omp_negative_expression_in_clause)
          << "num_threads" << NumThreads->getSourceRange();
      return nullptr;
    }
  }

  return new (Context)
      OMPNumThreadsClause(ValExpr, StartLoc, LParenLoc, EndLoc);
}

ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
                                                       OpenMPClauseKind CKind) {
  if (!E)
    return ExprError();
  if (E->isValueDependent() || E->isTypeDependent() ||
      E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
    return E;
  llvm::APSInt Result;
  ExprResult ICE = VerifyIntegerConstantExpression(E, &Result);
  if (ICE.isInvalid())
    return ExprError();
  if (!Result.isStrictlyPositive()) {
    Diag(E->getExprLoc(), diag::err_omp_negative_expression_in_clause)
        << getOpenMPClauseName(CKind) << E->getSourceRange();
    return ExprError();
  }
  if (CKind == OMPC_aligned && !Result.isPowerOf2()) {
    Diag(E->getExprLoc(), diag::warn_omp_alignment_not_power_of_two)
        << E->getSourceRange();
    return ExprError();
  }
  if (CKind == OMPC_collapse) {
    DSAStack->setCollapseNumber(Result.getExtValue());
  }
  return ICE;
}

OMPClause *Sema::ActOnOpenMPSafelenClause(Expr *Len, SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc) {
  // OpenMP [2.8.1, simd construct, Description]
  // The parameter of the safelen clause must be a constant
  // positive integer expression.
  ExprResult Safelen = VerifyPositiveIntegerConstantInClause(Len, OMPC_safelen);
  if (Safelen.isInvalid())
    return nullptr;
  return new (Context)
      OMPSafelenClause(Safelen.get(), StartLoc, LParenLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc) {
  // OpenMP [2.7.1, loop construct, Description]
  // OpenMP [2.8.1, simd construct, Description]
  // OpenMP [2.9.6, distribute construct, Description]
  // The parameter of the collapse clause must be a constant
  // positive integer expression.
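  // e.g. (illustrative): 'collapse(2)' is accepted, while 'collapse(0)' or a
  // non-constant argument is rejected by the shared helper below.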
  ExprResult NumForLoopsResult =
      VerifyPositiveIntegerConstantInClause(NumForLoops, OMPC_collapse);
  if (NumForLoopsResult.isInvalid())
    return nullptr;
  return new (Context)
      OMPCollapseClause(NumForLoopsResult.get(), StartLoc, LParenLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPSimpleClause(
    OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
  OMPClause *Res = nullptr;
  switch (Kind) {
  case OMPC_default:
    Res =
        ActOnOpenMPDefaultClause(static_cast<OpenMPDefaultClauseKind>(Argument),
                                 ArgumentLoc, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_proc_bind:
    Res = ActOnOpenMPProcBindClause(
        static_cast<OpenMPProcBindClauseKind>(Argument), ArgumentLoc, StartLoc,
        LParenLoc, EndLoc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_safelen:
  case OMPC_collapse:
  case OMPC_schedule:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_shared:
  case OMPC_reduction:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_mergeable:
  case OMPC_threadprivate:
  case OMPC_flush:
  case OMPC_read:
  case OMPC_write:
  case OMPC_update:
  case OMPC_capture:
  case OMPC_seq_cst:
  case OMPC_depend:
  case OMPC_unknown:
    llvm_unreachable("Clause is not allowed.");
  }
  return Res;
}

OMPClause *Sema::ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                          SourceLocation KindKwLoc,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc) {
  if (Kind == OMPC_DEFAULT_unknown) {
    std::string Values;
    static_assert(OMPC_DEFAULT_unknown > 0,
                  "OMPC_DEFAULT_unknown not greater than 0");
    std::string Sep(", ");
    for (unsigned i = 0; i < OMPC_DEFAULT_unknown; ++i) {
      Values += "'";
      Values += getOpenMPSimpleClauseTypeName(OMPC_default, i);
      Values += "'";
      switch (i) {
      case OMPC_DEFAULT_unknown - 2:
        Values += " or ";
        break;
      case OMPC_DEFAULT_unknown - 1:
        break;
      default:
        Values += Sep;
        break;
      }
    }
    Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
        << Values << getOpenMPClauseName(OMPC_default);
    return nullptr;
  }
  switch (Kind) {
  case OMPC_DEFAULT_none:
    DSAStack->setDefaultDSANone(KindKwLoc);
    break;
  case OMPC_DEFAULT_shared:
    DSAStack->setDefaultDSAShared(KindKwLoc);
    break;
  case OMPC_DEFAULT_unknown:
    llvm_unreachable("Clause kind is not allowed.");
    break;
  }
  return new (Context)
      OMPDefaultClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                           SourceLocation KindKwLoc,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc) {
  if (Kind == OMPC_PROC_BIND_unknown) {
    std::string Values;
    std::string Sep(", ");
    for (unsigned i = 0; i < OMPC_PROC_BIND_unknown; ++i) {
      Values += "'";
      Values += getOpenMPSimpleClauseTypeName(OMPC_proc_bind, i);
      Values += "'";
      switch (i) {
      case OMPC_PROC_BIND_unknown - 2:
        Values += " or ";
        break;
      case OMPC_PROC_BIND_unknown - 1:
        break;
      default:
        Values += Sep;
        break;
      }
    }
    Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
        << Values << getOpenMPClauseName(OMPC_proc_bind);
    return nullptr;
  }
  return new (Context)
      OMPProcBindClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
    OpenMPClauseKind Kind, unsigned Argument, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation ArgumentLoc, SourceLocation CommaLoc,
    SourceLocation EndLoc) {
  OMPClause *Res = nullptr;
  switch (Kind) {
  case OMPC_schedule:
    Res = ActOnOpenMPScheduleClause(
        static_cast<OpenMPScheduleClauseKind>(Argument), Expr, StartLoc,
        LParenLoc, ArgumentLoc, CommaLoc, EndLoc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_safelen:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_proc_bind:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_shared:
  case OMPC_reduction:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_mergeable:
  case OMPC_threadprivate:
  case OMPC_flush:
  case OMPC_read:
  case OMPC_write:
  case OMPC_update:
  case OMPC_capture:
  case OMPC_seq_cst:
  case OMPC_depend:
  case OMPC_unknown:
    llvm_unreachable("Clause is not allowed.");
  }
  return Res;
}

OMPClause *Sema::ActOnOpenMPScheduleClause(
    OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc,
    SourceLocation EndLoc) {
  if (Kind == OMPC_SCHEDULE_unknown) {
    std::string Values;
    std::string Sep(", ");
    for (unsigned i = 0; i < OMPC_SCHEDULE_unknown; ++i) {
      Values += "'";
      Values += getOpenMPSimpleClauseTypeName(OMPC_schedule, i);
      Values += "'";
      switch (i) {
      case OMPC_SCHEDULE_unknown - 2:
        Values += " or ";
        break;
      case OMPC_SCHEDULE_unknown - 1:
        break;
      default:
        Values += Sep;
        break;
      }
    }
    Diag(KindLoc, diag::err_omp_unexpected_clause_value)
        << Values << getOpenMPClauseName(OMPC_schedule);
    return nullptr;
  }
  Expr *ValExpr = ChunkSize;
  Expr *HelperValExpr = nullptr;
  if (ChunkSize) {
    if (!ChunkSize->isValueDependent() && !ChunkSize->isTypeDependent() &&
        !ChunkSize->isInstantiationDependent() &&
        !ChunkSize->containsUnexpandedParameterPack()) {
      SourceLocation ChunkSizeLoc = ChunkSize->getLocStart();
      ExprResult Val =
          PerformOpenMPImplicitIntegerConversion(ChunkSizeLoc, ChunkSize);
      if (Val.isInvalid())
        return nullptr;

      ValExpr = Val.get();

      // OpenMP [2.7.1, Restrictions]
      //  chunk_size must be a loop invariant integer expression with a
      //  positive value.
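      // e.g. (illustrative): 'schedule(dynamic, 4)' passes the constant
      // check below; 'schedule(static, n)' with a non-constant 'n' inside a
      // parallel or task region gets a captured helper variable instead.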
      llvm::APSInt Result;
      if (ValExpr->isIntegerConstantExpr(Result, Context)) {
        if (Result.isSigned() && !Result.isStrictlyPositive()) {
          Diag(ChunkSizeLoc, diag::err_omp_negative_expression_in_clause)
              << "schedule" << ChunkSize->getSourceRange();
          return nullptr;
        }
      } else if (isParallelOrTaskRegion(DSAStack->getCurrentDirective())) {
        auto *ImpVar = buildVarDecl(*this, ChunkSize->getExprLoc(),
                                    ChunkSize->getType(), ".chunk.");
        auto *ImpVarRef = buildDeclRefExpr(*this, ImpVar, ChunkSize->getType(),
                                           ChunkSize->getExprLoc(),
                                           /*RefersToCapture=*/true);
        HelperValExpr = ImpVarRef;
      }
    }
  }

  return new (Context) OMPScheduleClause(StartLoc, LParenLoc, KindLoc,
                                         CommaLoc, EndLoc, Kind, ValExpr,
                                         HelperValExpr);
}

OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc) {
  OMPClause *Res = nullptr;
  switch (Kind) {
  case OMPC_ordered:
    Res = ActOnOpenMPOrderedClause(StartLoc, EndLoc);
    break;
  case OMPC_nowait:
    Res = ActOnOpenMPNowaitClause(StartLoc, EndLoc);
    break;
  case OMPC_untied:
    Res = ActOnOpenMPUntiedClause(StartLoc, EndLoc);
    break;
  case OMPC_mergeable:
    Res = ActOnOpenMPMergeableClause(StartLoc, EndLoc);
    break;
  case OMPC_read:
    Res = ActOnOpenMPReadClause(StartLoc, EndLoc);
    break;
  case OMPC_write:
    Res = ActOnOpenMPWriteClause(StartLoc, EndLoc);
    break;
  case OMPC_update:
    Res = ActOnOpenMPUpdateClause(StartLoc, EndLoc);
    break;
  case OMPC_capture:
    Res = ActOnOpenMPCaptureClause(StartLoc, EndLoc);
    break;
  case OMPC_seq_cst:
    Res = ActOnOpenMPSeqCstClause(StartLoc, EndLoc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_safelen:
  case OMPC_collapse:
  case OMPC_schedule:
  case OMPC_private:
  case OMPC_firstprivate:
  case OMPC_lastprivate:
  case OMPC_shared:
  case OMPC_reduction:
  case OMPC_linear:
  case OMPC_aligned:
  case OMPC_copyin:
  case OMPC_copyprivate:
  case OMPC_default:
  case OMPC_proc_bind:
  case OMPC_threadprivate:
  case OMPC_flush:
  case OMPC_depend:
  case OMPC_unknown:
    llvm_unreachable("Clause is not allowed.");
  }
  return Res;
}

OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
                                          SourceLocation EndLoc) {
  DSAStack->setOrderedRegion();
  return new (Context) OMPOrderedClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                         SourceLocation EndLoc) {
  DSAStack->setNowaitRegion();
  return new (Context) OMPNowaitClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                         SourceLocation EndLoc) {
  return new (Context) OMPUntiedClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                            SourceLocation EndLoc) {
  return new (Context) OMPMergeableClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPReadClause(SourceLocation StartLoc,
                                       SourceLocation EndLoc) {
  return new (Context) OMPReadClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc) {
  return new (Context) OMPWriteClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                         SourceLocation EndLoc) {
  return new (Context) OMPUpdateClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                          SourceLocation EndLoc) {
  return new (Context) OMPCaptureClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                         SourceLocation EndLoc) {
  return new (Context) OMPSeqCstClause(StartLoc, EndLoc);
}

OMPClause *Sema::ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *TailExpr,
    SourceLocation StartLoc, SourceLocation
        LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
    SourceLocation DepLoc) {
  OMPClause *Res = nullptr;
  switch (Kind) {
  case OMPC_private:
    Res = ActOnOpenMPPrivateClause(VarList, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_firstprivate:
    Res = ActOnOpenMPFirstprivateClause(VarList, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_lastprivate:
    Res = ActOnOpenMPLastprivateClause(VarList, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_shared:
    Res = ActOnOpenMPSharedClause(VarList, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_reduction:
    Res = ActOnOpenMPReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
                                     EndLoc, ReductionIdScopeSpec,
                                     ReductionId);
    break;
  case OMPC_linear:
    Res = ActOnOpenMPLinearClause(VarList, TailExpr, StartLoc, LParenLoc,
                                  ColonLoc, EndLoc);
    break;
  case OMPC_aligned:
    Res = ActOnOpenMPAlignedClause(VarList, TailExpr, StartLoc, LParenLoc,
                                   ColonLoc, EndLoc);
    break;
  case OMPC_copyin:
    Res = ActOnOpenMPCopyinClause(VarList, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_copyprivate:
    Res = ActOnOpenMPCopyprivateClause(VarList, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_flush:
    Res = ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc, EndLoc);
    break;
  case OMPC_depend:
    Res = ActOnOpenMPDependClause(DepKind, DepLoc, ColonLoc, VarList, StartLoc,
                                  LParenLoc, EndLoc);
    break;
  case OMPC_if:
  case OMPC_final:
  case OMPC_num_threads:
  case OMPC_safelen:
  case OMPC_collapse:
  case OMPC_default:
  case OMPC_proc_bind:
  case OMPC_schedule:
  case OMPC_ordered:
  case OMPC_nowait:
  case OMPC_untied:
  case OMPC_mergeable:
  case OMPC_threadprivate:
  case OMPC_read:
  case OMPC_write:
  case OMPC_update:
  case OMPC_capture:
  case OMPC_seq_cst:
  case OMPC_unknown:
    llvm_unreachable("Clause is not allowed.");
  }
  return Res;
}

OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc) {
  SmallVector<Expr *, 8> Vars;
  SmallVector<Expr *, 8> PrivateCopies;
  for (auto &RefExpr : VarList) {
    assert(RefExpr && "NULL expr in OpenMP private clause.");
    if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
      // It will be analyzed later.
      Vars.push_back(RefExpr);
      PrivateCopies.push_back(nullptr);
      continue;
    }

    SourceLocation ELoc = RefExpr->getExprLoc();
    // OpenMP [2.1, C/C++]
    //  A list item is a variable name.
    // OpenMP [2.9.3.3, Restrictions, p.1]
    //  A variable that is part of another variable (as an array or
    //  structure element) cannot appear in a private clause.
    DeclRefExpr *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
    if (!DE || !isa<VarDecl>(DE->getDecl())) {
      Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
      continue;
    }
    Decl *D = DE->getDecl();
    VarDecl *VD = cast<VarDecl>(D);

    QualType Type = VD->getType();
    if (Type->isDependentType() || Type->isInstantiationDependentType()) {
      // It will be analyzed later.
      Vars.push_back(DE);
      PrivateCopies.push_back(nullptr);
      continue;
    }

    // OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
    //  A variable that appears in a private clause must not have an incomplete
    //  type or a reference type.
    if (RequireCompleteType(ELoc, Type,
                            diag::err_omp_private_incomplete_type)) {
      continue;
    }
    if (Type->isReferenceType()) {
      Diag(ELoc, diag::err_omp_clause_ref_type_arg)
          << getOpenMPClauseName(OMPC_private) << Type;
      bool IsDecl =
          VD->isThisDeclarationADefinition(Context) ==
          VarDecl::DeclarationOnly;
      Diag(VD->getLocation(), IsDecl ?
                                  diag::note_previous_decl
                                     : diag::note_defined_here)
          << VD;
      continue;
    }

    // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
    // in a Construct]
    //  Variables with the predetermined data-sharing attributes may not be
    //  listed in data-sharing attributes clauses, except for the cases
    //  listed below. For these exceptions only, listing a predetermined
    //  variable in a data-sharing attribute clause is allowed and overrides
    //  the variable's predetermined data-sharing attributes.
    DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
    if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_private) {
      Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind)
                                          << getOpenMPClauseName(OMPC_private);
      ReportOriginalDSA(*this, DSAStack, VD, DVar);
      continue;
    }

    // Variably modified types are not supported for tasks.
    if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() &&
        DSAStack->getCurrentDirective() == OMPD_task) {
      Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
          << getOpenMPClauseName(OMPC_private) << Type
          << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
      bool IsDecl =
          VD->isThisDeclarationADefinition(Context) ==
          VarDecl::DeclarationOnly;
      Diag(VD->getLocation(),
           IsDecl ? diag::note_previous_decl : diag::note_defined_here)
          << VD;
      continue;
    }

    // OpenMP [2.9.3.3, Restrictions, C/C++, p.1]
    //  A variable of class type (or array thereof) that appears in a private
    //  clause requires an accessible, unambiguous default constructor for the
    //  class type.
    // Generate helper private variable and initialize it with the default
    // value. The address of the original variable is replaced by the address
    // of the new private variable in CodeGen. This new variable is not added
    // to IdResolver, so the code in the OpenMP region uses original variable
    // for proper diagnostics.
    Type = Type.getUnqualifiedType();
    auto VDPrivate = buildVarDecl(*this, DE->getExprLoc(), Type,
                                  VD->getName());
    ActOnUninitializedDecl(VDPrivate, /*TypeMayContainAuto=*/false);
    if (VDPrivate->isInvalidDecl())
      continue;
    auto VDPrivateRefExpr = buildDeclRefExpr(
        *this, VDPrivate, DE->getType().getUnqualifiedType(),
        DE->getExprLoc());

    DSAStack->addDSA(VD, DE, OMPC_private);
    Vars.push_back(DE);
    PrivateCopies.push_back(VDPrivateRefExpr);
  }

  if (Vars.empty())
    return nullptr;

  return OMPPrivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars,
                                  PrivateCopies);
}

namespace {
class DiagsUninitializedSeveretyRAII {
private:
  DiagnosticsEngine &Diags;
  SourceLocation SavedLoc;
  bool IsIgnored;

public:
  DiagsUninitializedSeveretyRAII(DiagnosticsEngine &Diags, SourceLocation Loc,
                                 bool IsIgnored)
      : Diags(Diags), SavedLoc(Loc), IsIgnored(IsIgnored) {
    if (!IsIgnored) {
      Diags.setSeverity(/*Diag*/ diag::warn_uninit_self_reference_in_init,
                        /*Map*/ diag::Severity::Ignored, Loc);
    }
  }
  ~DiagsUninitializedSeveretyRAII() {
    if (!IsIgnored)
      Diags.popMappings(SavedLoc);
  }
};
}

OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                               SourceLocation StartLoc,
                                               SourceLocation LParenLoc,
                                               SourceLocation EndLoc) {
  SmallVector<Expr *, 8> Vars;
  SmallVector<Expr *, 8> PrivateCopies;
  SmallVector<Expr *, 8> Inits;
  bool IsImplicitClause =
      StartLoc.isInvalid() && LParenLoc.isInvalid() && EndLoc.isInvalid();
  auto ImplicitClauseLoc = DSAStack->getConstructLoc();

  for (auto &RefExpr : VarList) {
    assert(RefExpr && "NULL expr in OpenMP firstprivate clause.");
    if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
      // It will be analyzed later.
      Vars.push_back(RefExpr);
      PrivateCopies.push_back(nullptr);
      Inits.push_back(nullptr);
      continue;
    }

    SourceLocation ELoc =
        IsImplicitClause ? ImplicitClauseLoc : RefExpr->getExprLoc();
    // OpenMP [2.1, C/C++]
    //  A list item is a variable name.
    // OpenMP [2.9.3.3, Restrictions, p.1]
    //  A variable that is part of another variable (as an array or
    //  structure element) cannot appear in a private clause.
    DeclRefExpr *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
    if (!DE || !isa<VarDecl>(DE->getDecl())) {
      Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
      continue;
    }
    Decl *D = DE->getDecl();
    VarDecl *VD = cast<VarDecl>(D);

    QualType Type = VD->getType();
    if (Type->isDependentType() || Type->isInstantiationDependentType()) {
      // It will be analyzed later.
      Vars.push_back(DE);
      PrivateCopies.push_back(nullptr);
      Inits.push_back(nullptr);
      continue;
    }

    // OpenMP [2.9.3.3, Restrictions, C/C++, p.3]
    //  A variable that appears in a private clause must not have an
    //  incomplete type or a reference type.
    if (RequireCompleteType(ELoc, Type,
                            diag::err_omp_firstprivate_incomplete_type)) {
      continue;
    }
    if (Type->isReferenceType()) {
      if (IsImplicitClause) {
        Diag(ImplicitClauseLoc,
             diag::err_omp_task_predetermined_firstprivate_ref_type_arg)
            << Type;
        Diag(RefExpr->getExprLoc(), diag::note_used_here);
      } else {
        Diag(ELoc, diag::err_omp_clause_ref_type_arg)
            << getOpenMPClauseName(OMPC_firstprivate) << Type;
      }
      bool IsDecl =
          VD->isThisDeclarationADefinition(Context) ==
          VarDecl::DeclarationOnly;
      Diag(VD->getLocation(),
           IsDecl ? diag::note_previous_decl : diag::note_defined_here)
          << VD;
      continue;
    }

    // OpenMP [2.9.3.4, Restrictions, C/C++, p.1]
    //  A variable of class type (or array thereof) that appears in a private
    //  clause requires an accessible, unambiguous copy constructor for the
    //  class type.
    auto ElemType = Context.getBaseElementType(Type).getNonReferenceType();

    // If an implicit firstprivate variable is found, it was checked already.
    if (!IsImplicitClause) {
      DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false);
      bool IsConstant = ElemType.isConstant(Context);
      // OpenMP [2.4.13, Data-sharing Attribute Clauses]
      //  A list item that specifies a given variable may not appear in more
      //  than one clause on the same directive, except that a variable may be
      //  specified in both firstprivate and lastprivate clauses.
      if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_firstprivate &&
          DVar.CKind != OMPC_lastprivate && DVar.RefExpr) {
        Diag(ELoc, diag::err_omp_wrong_dsa)
            << getOpenMPClauseName(DVar.CKind)
            << getOpenMPClauseName(OMPC_firstprivate);
        ReportOriginalDSA(*this, DSAStack, VD, DVar);
        continue;
      }

      // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
      // in a Construct]
      //  Variables with the predetermined data-sharing attributes may not be
      //  listed in data-sharing attributes clauses, except for the cases
      //  listed below. For these exceptions only, listing a predetermined
      //  variable in a data-sharing attribute clause is allowed and overrides
      //  the variable's predetermined data-sharing attributes.
      // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
      // in a Construct, C/C++, p.2]
      //  Variables with const-qualified type having no mutable member may be
      //  listed in a firstprivate clause, even if they are static data
      //  members.
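      // e.g. (illustrative): given 'static const int Mask = 0xff;', the
      // clause 'firstprivate(Mask)' is accepted here even though Mask's
      // predetermined data-sharing attribute is shared.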
      if (!(IsConstant || VD->isStaticDataMember()) && !DVar.RefExpr &&
          DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_shared) {
        Diag(ELoc, diag::err_omp_wrong_dsa)
            << getOpenMPClauseName(DVar.CKind)
            << getOpenMPClauseName(OMPC_firstprivate);
        ReportOriginalDSA(*this, DSAStack, VD, DVar);
        continue;
      }

      OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
      // OpenMP [2.9.3.4, Restrictions, p.2]
      //  A list item that is private within a parallel region must not appear
      //  in a firstprivate clause on a worksharing construct if any of the
      //  worksharing regions arising from the worksharing construct ever bind
      //  to any of the parallel regions arising from the parallel construct.
      if (isOpenMPWorksharingDirective(CurrDir) &&
          !isOpenMPParallelDirective(CurrDir)) {
        DVar = DSAStack->getImplicitDSA(VD, true);
        if (DVar.CKind != OMPC_shared &&
            (isOpenMPParallelDirective(DVar.DKind) ||
             DVar.DKind == OMPD_unknown)) {
          Diag(ELoc, diag::err_omp_required_access)
              << getOpenMPClauseName(OMPC_firstprivate)
              << getOpenMPClauseName(OMPC_shared);
          ReportOriginalDSA(*this, DSAStack, VD, DVar);
          continue;
        }
      }
      // OpenMP [2.9.3.4, Restrictions, p.3]
      //  A list item that appears in a reduction clause of a parallel
      //  construct must not appear in a firstprivate clause on a worksharing
      //  or task construct if any of the worksharing or task regions arising
      //  from the worksharing or task construct ever bind to any of the
      //  parallel regions arising from the parallel construct.
      // OpenMP [2.9.3.4, Restrictions, p.4]
      //  A list item that appears in a reduction clause in worksharing
      //  construct must not appear in a firstprivate clause in a task
      //  construct encountered during execution of any of the worksharing
      //  regions arising from the worksharing construct.
      if (CurrDir == OMPD_task) {
        DVar = DSAStack->hasInnermostDSA(VD, MatchesAnyClause(OMPC_reduction),
                                         [](OpenMPDirectiveKind K) -> bool {
                                           return isOpenMPParallelDirective(
                                                      K) ||
                                                  isOpenMPWorksharingDirective(
                                                      K);
                                         },
                                         false);
        if (DVar.CKind == OMPC_reduction &&
            (isOpenMPParallelDirective(DVar.DKind) ||
             isOpenMPWorksharingDirective(DVar.DKind))) {
          Diag(ELoc, diag::err_omp_parallel_reduction_in_task_firstprivate)
              << getOpenMPDirectiveName(DVar.DKind);
          ReportOriginalDSA(*this, DSAStack, VD, DVar);
          continue;
        }
      }
    }

    // Variably modified types are not supported for tasks.
    if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() &&
        DSAStack->getCurrentDirective() == OMPD_task) {
      Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
          << getOpenMPClauseName(OMPC_firstprivate) << Type
          << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
      bool IsDecl =
          VD->isThisDeclarationADefinition(Context) ==
          VarDecl::DeclarationOnly;
      Diag(VD->getLocation(),
           IsDecl ? diag::note_previous_decl : diag::note_defined_here)
          << VD;
      continue;
    }

    Type = Type.getUnqualifiedType();
    auto VDPrivate = buildVarDecl(*this, ELoc, Type, VD->getName());
    // Generate helper private variable and initialize it with the value of
    // the original variable. The address of the original variable is replaced
    // by the address of the new private variable in the CodeGen. This new
    // variable is not added to IdResolver, so the code in the OpenMP region
    // uses original variable for proper diagnostics and variable capturing.
    Expr *VDInitRefExpr = nullptr;
    // For arrays generate initializer for single element and replace it by
    // the original array element in CodeGen.
    if (Type->isArrayType()) {
      auto VDInit = buildVarDecl(*this, DE->getExprLoc(), ElemType,
                                 VD->getName());
      VDInitRefExpr = buildDeclRefExpr(*this, VDInit, ElemType, ELoc);
      auto Init = DefaultLvalueConversion(VDInitRefExpr).get();
      ElemType = ElemType.getUnqualifiedType();
      auto *VDInitTemp = buildVarDecl(*this, DE->getLocStart(), ElemType,
                                      ".firstprivate.temp");
      InitializedEntity Entity =
          InitializedEntity::InitializeVariable(VDInitTemp);
      InitializationKind Kind = InitializationKind::CreateCopy(ELoc, ELoc);

      InitializationSequence InitSeq(*this, Entity, Kind, Init);
      ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Init);
      if (Result.isInvalid())
        VDPrivate->setInvalidDecl();
      else
        VDPrivate->setInit(Result.getAs<Expr>());
    } else {
      auto *VDInit =
          buildVarDecl(*this, DE->getLocStart(), Type, ".firstprivate.temp");
      VDInitRefExpr =
          buildDeclRefExpr(*this, VDInit, DE->getType(), DE->getExprLoc());
      AddInitializerToDecl(VDPrivate,
                           DefaultLvalueConversion(VDInitRefExpr).get(),
                           /*DirectInit=*/false, /*TypeMayContainAuto=*/false);
    }
    if (VDPrivate->isInvalidDecl()) {
      if (IsImplicitClause) {
        Diag(DE->getExprLoc(),
             diag::note_omp_task_predetermined_firstprivate_here);
      }
      continue;
    }
    CurContext->addDecl(VDPrivate);
    auto VDPrivateRefExpr = buildDeclRefExpr(
        *this, VDPrivate, DE->getType().getUnqualifiedType(),
        DE->getExprLoc());
    DSAStack->addDSA(VD, DE, OMPC_firstprivate);
    Vars.push_back(DE);
    PrivateCopies.push_back(VDPrivateRefExpr);
    Inits.push_back(VDInitRefExpr);
  }

  if (Vars.empty())
    return nullptr;

  return OMPFirstprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc,
                                       Vars, PrivateCopies, Inits);
}

OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                              SourceLocation StartLoc,
                                              SourceLocation LParenLoc,
                                              SourceLocation EndLoc) {
  SmallVector<Expr *, 8> Vars;
  SmallVector<Expr *, 8> SrcExprs;
  SmallVector<Expr *, 8> DstExprs;
  SmallVector<Expr *, 8> AssignmentOps;
  for (auto &RefExpr : VarList) {
    assert(RefExpr && "NULL expr in OpenMP lastprivate clause.");
    if (isa<DependentScopeDeclRefExpr>(RefExpr)) {
      // It will be analyzed later.
      Vars.push_back(RefExpr);
      SrcExprs.push_back(nullptr);
      DstExprs.push_back(nullptr);
      AssignmentOps.push_back(nullptr);
      continue;
    }

    SourceLocation ELoc = RefExpr->getExprLoc();
    // OpenMP [2.1, C/C++]
    //  A list item is a variable name.
    // OpenMP [2.14.3.5, Restrictions, p.1]
    //  A variable that is part of another variable (as an array or structure
    //  element) cannot appear in a lastprivate clause.
    DeclRefExpr *DE = dyn_cast_or_null<DeclRefExpr>(RefExpr);
    if (!DE || !isa<VarDecl>(DE->getDecl())) {
      Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange();
      continue;
    }
    Decl *D = DE->getDecl();
    VarDecl *VD = cast<VarDecl>(D);

    QualType Type = VD->getType();
    if (Type->isDependentType() || Type->isInstantiationDependentType()) {
      // It will be analyzed later.
      Vars.push_back(DE);
      SrcExprs.push_back(nullptr);
      DstExprs.push_back(nullptr);
      AssignmentOps.push_back(nullptr);
      continue;
    }

    // OpenMP [2.14.3.5, Restrictions, C/C++, p.2]
    //  A variable that appears in a lastprivate clause must not have an
    //  incomplete type or a reference type.
    if (RequireCompleteType(ELoc, Type,
                            diag::err_omp_lastprivate_incomplete_type)) {
      continue;
    }
    if (Type->isReferenceType()) {
      Diag(ELoc, diag::err_omp_clause_ref_type_arg)
          << getOpenMPClauseName(OMPC_lastprivate) << Type;
      bool IsDecl =
          VD->isThisDeclarationADefinition(Context) ==
          VarDecl::DeclarationOnly;
      Diag(VD->getLocation(), IsDecl ?
diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct] // Variables with the predetermined data-sharing attributes may not be // listed in data-sharing attributes clauses, except for the cases // listed below. DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false); if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_lastprivate && DVar.CKind != OMPC_firstprivate && (DVar.CKind != OMPC_private || DVar.RefExpr != nullptr)) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_lastprivate); ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective(); // OpenMP [2.14.3.5, Restrictions, p.2] // A list item that is private within a parallel region, or that appears in // the reduction clause of a parallel construct, must not appear in a // lastprivate clause on a worksharing construct if any of the corresponding // worksharing regions ever binds to any of the corresponding parallel // regions. DSAStackTy::DSAVarData TopDVar = DVar; if (isOpenMPWorksharingDirective(CurrDir) && !isOpenMPParallelDirective(CurrDir)) { DVar = DSAStack->getImplicitDSA(VD, true); if (DVar.CKind != OMPC_shared) { Diag(ELoc, diag::err_omp_required_access) << getOpenMPClauseName(OMPC_lastprivate) << getOpenMPClauseName(OMPC_shared); ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } } // OpenMP [2.14.3.5, Restrictions, C++, p.1,2] // A variable of class type (or array thereof) that appears in a // lastprivate clause requires an accessible, unambiguous default // constructor for the class type, unless the list item is also specified // in a firstprivate clause. // A variable of class type (or array thereof) that appears in a // lastprivate clause requires an accessible, unambiguous copy assignment // operator for the class type. Type = Context.getBaseElementType(Type).getNonReferenceType(); auto *SrcVD = buildVarDecl(*this, DE->getLocStart(), Type.getUnqualifiedType(), ".lastprivate.src"); auto *PseudoSrcExpr = buildDeclRefExpr( *this, SrcVD, Type.getUnqualifiedType(), DE->getExprLoc()); auto *DstVD = buildVarDecl(*this, DE->getLocStart(), Type, ".lastprivate.dst"); auto *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc()); // For arrays generate assignment operation for single element and replace // it by the original array element in CodeGen. auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, PseudoDstExpr, PseudoSrcExpr); if (AssignmentOp.isInvalid()) continue; AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), /*DiscardedValue=*/true); if (AssignmentOp.isInvalid()) continue; if (TopDVar.CKind != OMPC_firstprivate) DSAStack->addDSA(VD, DE, OMPC_lastprivate); Vars.push_back(DE); SrcExprs.push_back(PseudoSrcExpr); DstExprs.push_back(PseudoDstExpr); AssignmentOps.push_back(AssignmentOp.get()); } if (Vars.empty()) return nullptr; return OMPLastprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars, SrcExprs, DstExprs, AssignmentOps); } OMPClause *Sema::ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP shared clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. 
Vars.push_back(RefExpr); continue; } SourceLocation ELoc = RefExpr->getExprLoc(); // OpenMP [2.1, C/C++] // A list item is a variable name. // OpenMP [2.14.3.2, Restrictions, p.1] // A variable that is part of another variable (as an array or structure // element) cannot appear in a shared unless it is a static data member // of a C++ class. DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr); if (!DE || !isa<VarDecl>(DE->getDecl())) { Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange(); continue; } Decl *D = DE->getDecl(); VarDecl *VD = cast<VarDecl>(D); QualType Type = VD->getType(); if (Type->isDependentType() || Type->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); continue; } // OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct] // Variables with the predetermined data-sharing attributes may not be // listed in data-sharing attributes clauses, except for the cases // listed below. For these exceptions only, listing a predetermined // variable in a data-sharing attribute clause is allowed and overrides // the variable's predetermined data-sharing attributes. DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false); if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_shared && DVar.RefExpr) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_shared); ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } DSAStack->addDSA(VD, DE, OMPC_shared); Vars.push_back(DE); } if (Vars.empty()) return nullptr; return OMPSharedClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars); } namespace { class DSARefChecker : public StmtVisitor<DSARefChecker, bool> { DSAStackTy *Stack; public: bool VisitDeclRefExpr(DeclRefExpr *E) { if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl())) { DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, false); if (DVar.CKind == OMPC_shared && !DVar.RefExpr) return false; if (DVar.CKind != OMPC_unknown) return true; DSAStackTy::DSAVarData DVarPrivate = Stack->hasDSA(VD, isOpenMPPrivate, MatchesAlways(), false); if (DVarPrivate.CKind != OMPC_unknown) return true; return false; } return false; } bool VisitStmt(Stmt *S) { for (auto Child : S->children()) { if (Child && Visit(Child)) return true; } return false; } explicit DSARefChecker(DSAStackTy *S) : Stack(S) {} }; } // namespace OMPClause *Sema::ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId) { // TODO: Allow scope specification search when 'declare reduction' is // supported. assert(ReductionIdScopeSpec.isEmpty() && "No support for scoped reduction identifiers yet."); auto DN = ReductionId.getName(); auto OOK = DN.getCXXOverloadedOperator(); BinaryOperatorKind BOK = BO_Comma; // OpenMP [2.14.3.6, reduction clause] // C // reduction-identifier is either an identifier or one of the following // operators: +, -, *, &, |, ^, && and || // C++ // reduction-identifier is either an id-expression or one of the following // operators: +, -, *, &, |, ^, && and || // FIXME: Only 'min' and 'max' identifiers are supported for now. 
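    // Illustrative aside: OOK is the overloaded-operator spelling of the
    // reduction identifier, so a clause such as 'reduction(+ : s)' reaches
    // this point as OO_Plus and is mapped to BO_Add by the switch below,
    // while the plain identifiers 'max' and 'min' take the OO_None path and
    // map to BO_GT / BO_LT.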
switch (OOK) { case OO_Plus: case OO_Minus: BOK = BO_Add; break; case OO_Star: BOK = BO_Mul; break; case OO_Amp: BOK = BO_And; break; case OO_Pipe: BOK = BO_Or; break; case OO_Caret: BOK = BO_Xor; break; case OO_AmpAmp: BOK = BO_LAnd; break; case OO_PipePipe: BOK = BO_LOr; break; case OO_New: case OO_Delete: case OO_Array_New: case OO_Array_Delete: case OO_Slash: case OO_Percent: case OO_Tilde: case OO_Exclaim: case OO_Equal: case OO_Less: case OO_Greater: case OO_LessEqual: case OO_GreaterEqual: case OO_PlusEqual: case OO_MinusEqual: case OO_StarEqual: case OO_SlashEqual: case OO_PercentEqual: case OO_CaretEqual: case OO_AmpEqual: case OO_PipeEqual: case OO_LessLess: case OO_GreaterGreater: case OO_LessLessEqual: case OO_GreaterGreaterEqual: case OO_EqualEqual: case OO_ExclaimEqual: case OO_PlusPlus: case OO_MinusMinus: case OO_Comma: case OO_ArrowStar: case OO_Arrow: case OO_Call: case OO_Subscript: case OO_Conditional: case NUM_OVERLOADED_OPERATORS: llvm_unreachable("Unexpected reduction identifier"); case OO_None: if (auto II = DN.getAsIdentifierInfo()) { if (II->isStr("max")) BOK = BO_GT; else if (II->isStr("min")) BOK = BO_LT; } break; } SourceRange ReductionIdRange; if (ReductionIdScopeSpec.isValid()) { ReductionIdRange.setBegin(ReductionIdScopeSpec.getBeginLoc()); } ReductionIdRange.setEnd(ReductionId.getEndLoc()); if (BOK == BO_Comma) { // Not allowed reduction identifier is found. Diag(ReductionId.getLocStart(), diag::err_omp_unknown_reduction_identifier) << ReductionIdRange; return nullptr; } SmallVector<Expr *, 8> Vars; SmallVector<Expr *, 8> LHSs; SmallVector<Expr *, 8> RHSs; SmallVector<Expr *, 8> ReductionOps; for (auto RefExpr : VarList) { assert(RefExpr && "nullptr expr in OpenMP reduction clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); LHSs.push_back(nullptr); RHSs.push_back(nullptr); ReductionOps.push_back(nullptr); continue; } if (RefExpr->isTypeDependent() || RefExpr->isValueDependent() || RefExpr->isInstantiationDependent() || RefExpr->containsUnexpandedParameterPack()) { // It will be analyzed later. Vars.push_back(RefExpr); LHSs.push_back(nullptr); RHSs.push_back(nullptr); ReductionOps.push_back(nullptr); continue; } auto ELoc = RefExpr->getExprLoc(); auto ERange = RefExpr->getSourceRange(); // OpenMP [2.1, C/C++] // A list item is a variable or array section, subject to the restrictions // specified in Section 2.4 on page 42 and in each of the sections // describing clauses and directives for which a list appears. // OpenMP [2.14.3.3, Restrictions, p.1] // A variable that is part of another variable (as an array or // structure element) cannot appear in a private clause. auto DE = dyn_cast<DeclRefExpr>(RefExpr); if (!DE || !isa<VarDecl>(DE->getDecl())) { Diag(ELoc, diag::err_omp_expected_var_name) << ERange; continue; } auto D = DE->getDecl(); auto VD = cast<VarDecl>(D); auto Type = VD->getType(); // OpenMP [2.9.3.3, Restrictions, C/C++, p.3] // A variable that appears in a private clause must not have an incomplete // type or a reference type. if (RequireCompleteType(ELoc, Type, diag::err_omp_reduction_incomplete_type)) continue; // OpenMP [2.14.3.6, reduction clause, Restrictions] // Arrays may not appear in a reduction clause. if (Type.getNonReferenceType()->isArrayType()) { Diag(ELoc, diag::err_omp_reduction_type_array) << Type << ERange; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? 
diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // OpenMP [2.14.3.6, reduction clause, Restrictions] // A list item that appears in a reduction clause must not be // const-qualified. if (Type.getNonReferenceType().isConstant(Context)) { Diag(ELoc, diag::err_omp_const_variable) << getOpenMPClauseName(OMPC_reduction) << Type << ERange; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // OpenMP [2.9.3.6, Restrictions, C/C++, p.4] // If a list-item is a reference type then it must bind to the same object // for all threads of the team. VarDecl *VDDef = VD->getDefinition(); if (Type->isReferenceType() && VDDef) { DSARefChecker Check(DSAStack); if (Check.Visit(VDDef->getInit())) { Diag(ELoc, diag::err_omp_reduction_ref_type_arg) << ERange; Diag(VDDef->getLocation(), diag::note_defined_here) << VDDef; continue; } } // OpenMP [2.14.3.6, reduction clause, Restrictions] // The type of a list item that appears in a reduction clause must be valid // for the reduction-identifier. For a max or min reduction in C, the type // of the list item must be an allowed arithmetic data type: char, int, // float, double, or _Bool, possibly modified with long, short, signed, or // unsigned. For a max or min reduction in C++, the type of the list item // must be an allowed arithmetic data type: char, wchar_t, int, float, // double, or bool, possibly modified with long, short, signed, or unsigned. if ((BOK == BO_GT || BOK == BO_LT) && !(Type->isScalarType() || (getLangOpts().CPlusPlus && Type->isArithmeticType()))) { Diag(ELoc, diag::err_omp_clause_not_arithmetic_type_arg) << getLangOpts().CPlusPlus; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } if ((BOK == BO_OrAssign || BOK == BO_AndAssign || BOK == BO_XorAssign) && !getLangOpts().CPlusPlus && Type->isFloatingType()) { Diag(ELoc, diag::err_omp_clause_floating_type_arg); bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // OpenMP [2.14.1.1, Data-sharing Attribute Rules for Variables Referenced // in a Construct] // Variables with the predetermined data-sharing attributes may not be // listed in data-sharing attributes clauses, except for the cases // listed below. For these exceptions only, listing a predetermined // variable in a data-sharing attribute clause is allowed and overrides // the variable's predetermined data-sharing attributes. // OpenMP [2.14.3.6, Restrictions, p.3] // Any number of reduction clauses can be specified on the directive, // but a list item can appear only once in the reduction clauses for that // directive. 
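    // Illustrative (hypothetical) example: naming the same variable twice,
    //   #pragma omp parallel reduction(+ : s) reduction(* : s)
    // is caught just below by the DVar.CKind == OMPC_reduction check and
    // reported with err_omp_once_referenced.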
DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false); if (DVar.CKind == OMPC_reduction) { Diag(ELoc, diag::err_omp_once_referenced) << getOpenMPClauseName(OMPC_reduction); if (DVar.RefExpr) { Diag(DVar.RefExpr->getExprLoc(), diag::note_omp_referenced); } } else if (DVar.CKind != OMPC_unknown) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_reduction); ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } // OpenMP [2.14.3.6, Restrictions, p.1] // A list item that appears in a reduction clause of a worksharing // construct must be shared in the parallel regions to which any of the // worksharing regions arising from the worksharing construct bind. OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective(); if (isOpenMPWorksharingDirective(CurrDir) && !isOpenMPParallelDirective(CurrDir)) { DVar = DSAStack->getImplicitDSA(VD, true); if (DVar.CKind != OMPC_shared) { Diag(ELoc, diag::err_omp_required_access) << getOpenMPClauseName(OMPC_reduction) << getOpenMPClauseName(OMPC_shared); ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } } Type = Type.getNonLValueExprType(Context).getUnqualifiedType(); auto *LHSVD = buildVarDecl(*this, ELoc, Type, ".reduction.lhs"); auto *RHSVD = buildVarDecl(*this, ELoc, Type, VD->getName()); // Add initializer for private variable. Expr *Init = nullptr; switch (BOK) { case BO_Add: case BO_Xor: case BO_Or: case BO_LOr: // '+', '-', '^', '|', '||' reduction ops - initializer is '0'. if (Type->isScalarType() || Type->isAnyComplexType()) { Init = ActOnIntegerConstant(ELoc, /*Val=*/0).get(); } break; case BO_Mul: case BO_LAnd: if (Type->isScalarType() || Type->isAnyComplexType()) { // '*' and '&&' reduction ops - initializer is '1'. Init = ActOnIntegerConstant(ELoc, /*Val=*/1).get(); } break; case BO_And: { // '&' reduction op - initializer is '~0'. QualType OrigType = Type; if (auto *ComplexTy = OrigType->getAs<ComplexType>()) { Type = ComplexTy->getElementType(); } if (Type->isRealFloatingType()) { llvm::APFloat InitValue = llvm::APFloat::getAllOnesValue(Context.getTypeSize(Type), /*isIEEE=*/true); Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true, Type, ELoc); } else if (Type->isScalarType()) { auto Size = Context.getTypeSize(Type); QualType IntTy = Context.getIntTypeForBitwidth(Size, /*Signed=*/0); llvm::APInt InitValue = llvm::APInt::getAllOnesValue(Size); Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc); } if (Init && OrigType->isAnyComplexType()) { // Init = 0xFFFF + 0xFFFFi; auto *Im = new (Context) ImaginaryLiteral(Init, OrigType); Init = CreateBuiltinBinOp(ELoc, BO_Add, Init, Im).get(); } Type = OrigType; break; } case BO_LT: case BO_GT: { // 'min' reduction op - initializer is 'Largest representable number in // the reduction list item type'. // 'max' reduction op - initializer is 'Least representable number in // the reduction list item type'. if (Type->isIntegerType() || Type->isPointerType()) { bool IsSigned = Type->hasSignedIntegerRepresentation(); auto Size = Context.getTypeSize(Type); QualType IntTy = Context.getIntTypeForBitwidth(Size, /*Signed=*/IsSigned); llvm::APInt InitValue = (BOK != BO_LT) ? IsSigned ? llvm::APInt::getSignedMinValue(Size) : llvm::APInt::getMinValue(Size) : IsSigned ? llvm::APInt::getSignedMaxValue(Size) : llvm::APInt::getMaxValue(Size); Init = IntegerLiteral::Create(Context, InitValue, IntTy, ELoc); if (Type->isPointerType()) { // Cast to pointer type. 
auto CastExpr = BuildCStyleCastExpr( SourceLocation(), Context.getTrivialTypeSourceInfo(Type, ELoc), SourceLocation(), Init); if (CastExpr.isInvalid()) continue; Init = CastExpr.get(); } } else if (Type->isRealFloatingType()) { llvm::APFloat InitValue = llvm::APFloat::getLargest( Context.getFloatTypeSemantics(Type), BOK != BO_LT); Init = FloatingLiteral::Create(Context, InitValue, /*isexact=*/true, Type, ELoc); } break; } case BO_PtrMemD: case BO_PtrMemI: case BO_MulAssign: case BO_Div: case BO_Rem: case BO_Sub: case BO_Shl: case BO_Shr: case BO_LE: case BO_GE: case BO_EQ: case BO_NE: case BO_AndAssign: case BO_XorAssign: case BO_OrAssign: case BO_Assign: case BO_AddAssign: case BO_SubAssign: case BO_DivAssign: case BO_RemAssign: case BO_ShlAssign: case BO_ShrAssign: case BO_Comma: llvm_unreachable("Unexpected reduction operation"); } if (Init) { AddInitializerToDecl(RHSVD, Init, /*DirectInit=*/false, /*TypeMayContainAuto=*/false); } else { ActOnUninitializedDecl(RHSVD, /*TypeMayContainAuto=*/false); } if (!RHSVD->hasInit()) { Diag(ELoc, diag::err_omp_reduction_id_not_compatible) << Type << ReductionIdRange; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } auto *LHSDRE = buildDeclRefExpr(*this, LHSVD, Type, ELoc); auto *RHSDRE = buildDeclRefExpr(*this, RHSVD, Type, ELoc); ExprResult ReductionOp = BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), BOK, LHSDRE, RHSDRE); if (ReductionOp.isUsable()) { if (BOK != BO_LT && BOK != BO_GT) { ReductionOp = BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), BO_Assign, LHSDRE, ReductionOp.get()); } else { auto *ConditionalOp = new (Context) ConditionalOperator( ReductionOp.get(), SourceLocation(), LHSDRE, SourceLocation(), RHSDRE, Type, VK_LValue, OK_Ordinary); ReductionOp = BuildBinOp(DSAStack->getCurScope(), ReductionId.getLocStart(), BO_Assign, LHSDRE, ConditionalOp); } if (ReductionOp.isUsable()) { ReductionOp = ActOnFinishFullExpr(ReductionOp.get()); } } if (ReductionOp.isInvalid()) continue; DSAStack->addDSA(VD, DE, OMPC_reduction); Vars.push_back(DE); LHSs.push_back(LHSDRE); RHSs.push_back(RHSDRE); ReductionOps.push_back(ReductionOp.get()); } if (Vars.empty()) return nullptr; return OMPReductionClause::Create( Context, StartLoc, LParenLoc, ColonLoc, EndLoc, Vars, ReductionIdScopeSpec.getWithLocInContext(Context), ReductionId, LHSs, RHSs, ReductionOps); } OMPClause *Sema::ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; SmallVector<Expr *, 8> Inits; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP linear clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); Inits.push_back(nullptr); continue; } // OpenMP [2.14.3.7, linear clause] // A list item that appears in a linear clause is subject to the private // clause semantics described in Section 2.14.3.3 on page 159 except as // noted. In addition, the value of the new list item on each iteration // of the associated loop(s) corresponds to the value of the original // list item before entering the construct plus the logical number of // the iteration times linear-step. SourceLocation ELoc = RefExpr->getExprLoc(); // OpenMP [2.1, C/C++] // A list item is a variable name. 
// OpenMP [2.14.3.3, Restrictions, p.1] // A variable that is part of another variable (as an array or // structure element) cannot appear in a private clause. DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr); if (!DE || !isa<VarDecl>(DE->getDecl())) { Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange(); continue; } VarDecl *VD = cast<VarDecl>(DE->getDecl()); // OpenMP [2.14.3.7, linear clause] // A list-item cannot appear in more than one linear clause. // A list-item that appears in a linear clause cannot appear in any // other data-sharing attribute clause. DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(VD, false); if (DVar.RefExpr) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_linear); ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } QualType QType = VD->getType(); if (QType->isDependentType() || QType->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); Inits.push_back(nullptr); continue; } // A variable must not have an incomplete type or a reference type. if (RequireCompleteType(ELoc, QType, diag::err_omp_linear_incomplete_type)) { continue; } if (QType->isReferenceType()) { Diag(ELoc, diag::err_omp_clause_ref_type_arg) << getOpenMPClauseName(OMPC_linear) << QType; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // A list item must not be const-qualified. if (QType.isConstant(Context)) { Diag(ELoc, diag::err_omp_const_variable) << getOpenMPClauseName(OMPC_linear); bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // A list item must be of integral or pointer type. QType = QType.getUnqualifiedType().getCanonicalType(); const Type *Ty = QType.getTypePtrOrNull(); if (!Ty || (!Ty->isDependentType() && !Ty->isIntegralType(Context) && !Ty->isPointerType())) { Diag(ELoc, diag::err_omp_linear_expected_int_or_ptr) << QType; bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // Build var to save initial value. VarDecl *Init = buildVarDecl(*this, ELoc, QType, ".linear.start"); AddInitializerToDecl(Init, DefaultLvalueConversion(DE).get(), /*DirectInit*/ false, /*TypeMayContainAuto*/ false); auto InitRef = buildDeclRefExpr( *this, Init, DE->getType().getUnqualifiedType(), DE->getExprLoc()); DSAStack->addDSA(VD, DE, OMPC_linear); Vars.push_back(DE); Inits.push_back(InitRef); } if (Vars.empty()) return nullptr; Expr *StepExpr = Step; Expr *CalcStepExpr = nullptr; if (Step && !Step->isValueDependent() && !Step->isTypeDependent() && !Step->isInstantiationDependent() && !Step->containsUnexpandedParameterPack()) { SourceLocation StepLoc = Step->getLocStart(); ExprResult Val = PerformOpenMPImplicitIntegerConversion(StepLoc, Step); if (Val.isInvalid()) return nullptr; StepExpr = Val.get(); // Build var to save the step value. 
VarDecl *SaveVar = buildVarDecl(*this, StepLoc, StepExpr->getType(), ".linear.step"); ExprResult SaveRef = buildDeclRefExpr(*this, SaveVar, StepExpr->getType(), StepLoc); ExprResult CalcStep = BuildBinOp(CurScope, StepLoc, BO_Assign, SaveRef.get(), StepExpr); // Warn about zero linear step (it would be probably better specified as // making corresponding variables 'const'). llvm::APSInt Result; bool IsConstant = StepExpr->isIntegerConstantExpr(Result, Context); if (IsConstant && !Result.isNegative() && !Result.isStrictlyPositive()) Diag(StepLoc, diag::warn_omp_linear_step_zero) << Vars[0] << (Vars.size() > 1); if (!IsConstant && CalcStep.isUsable()) { // Calculate the step beforehand instead of doing this on each iteration. // (This is not used if the number of iterations may be kfold-ed). CalcStepExpr = CalcStep.get(); } } return OMPLinearClause::Create(Context, StartLoc, LParenLoc, ColonLoc, EndLoc, Vars, Inits, StepExpr, CalcStepExpr); } static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV, Expr *NumIterations, Sema &SemaRef, Scope *S) { // Walk the vars and build update/final expressions for the CodeGen. SmallVector<Expr *, 8> Updates; SmallVector<Expr *, 8> Finals; Expr *Step = Clause.getStep(); Expr *CalcStep = Clause.getCalcStep(); // OpenMP [2.14.3.7, linear clause] // If linear-step is not specified it is assumed to be 1. if (Step == nullptr) Step = SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get(); else if (CalcStep) Step = cast<BinaryOperator>(CalcStep)->getLHS(); bool HasErrors = false; auto CurInit = Clause.inits().begin(); for (auto &RefExpr : Clause.varlists()) { Expr *InitExpr = *CurInit; // Build privatized reference to the current linear var. auto DE = cast<DeclRefExpr>(RefExpr); auto PrivateRef = buildDeclRefExpr(SemaRef, cast<VarDecl>(DE->getDecl()), DE->getType().getUnqualifiedType(), DE->getExprLoc(), /*RefersToCapture=*/true); // Build update: Var = InitExpr + IV * Step ExprResult Update = BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), PrivateRef, InitExpr, IV, Step, /* Subtract */ false); Update = SemaRef.ActOnFinishFullExpr(Update.get()); // Build final: Var = InitExpr + NumIterations * Step ExprResult Final = BuildCounterUpdate(SemaRef, S, RefExpr->getExprLoc(), PrivateRef, InitExpr, NumIterations, Step, /* Subtract */ false); Final = SemaRef.ActOnFinishFullExpr(Final.get()); if (!Update.isUsable() || !Final.isUsable()) { Updates.push_back(nullptr); Finals.push_back(nullptr); HasErrors = true; } else { Updates.push_back(Update.get()); Finals.push_back(Final.get()); } ++CurInit; } Clause.setUpdates(Updates); Clause.setFinals(Finals); return HasErrors; } OMPClause *Sema::ActOnOpenMPAlignedClause( ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP aligned clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); continue; } SourceLocation ELoc = RefExpr->getExprLoc(); // OpenMP [2.1, C/C++] // A list item is a variable name. 
DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr); if (!DE || !isa<VarDecl>(DE->getDecl())) { Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange(); continue; } VarDecl *VD = cast<VarDecl>(DE->getDecl()); // OpenMP [2.8.1, simd construct, Restrictions] // The type of list items appearing in the aligned clause must be // array, pointer, reference to array, or reference to pointer. QualType QType = VD->getType(); QType = QType.getNonReferenceType().getUnqualifiedType().getCanonicalType(); const Type *Ty = QType.getTypePtrOrNull(); if (!Ty || (!Ty->isDependentType() && !Ty->isArrayType() && !Ty->isPointerType())) { Diag(ELoc, diag::err_omp_aligned_expected_array_or_ptr) << QType << getLangOpts().CPlusPlus << RefExpr->getSourceRange(); bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // OpenMP [2.8.1, simd construct, Restrictions] // A list-item cannot appear in more than one aligned clause. if (DeclRefExpr *PrevRef = DSAStack->addUniqueAligned(VD, DE)) { Diag(ELoc, diag::err_omp_aligned_twice) << RefExpr->getSourceRange(); Diag(PrevRef->getExprLoc(), diag::note_omp_explicit_dsa) << getOpenMPClauseName(OMPC_aligned); continue; } Vars.push_back(DE); } // OpenMP [2.8.1, simd construct, Description] // The parameter of the aligned clause, alignment, must be a constant // positive integer expression. // If no optional parameter is specified, implementation-defined default // alignments for SIMD instructions on the target platforms are assumed. if (Alignment != nullptr) { ExprResult AlignResult = VerifyPositiveIntegerConstantInClause(Alignment, OMPC_aligned); if (AlignResult.isInvalid()) return nullptr; Alignment = AlignResult.get(); } if (Vars.empty()) return nullptr; return OMPAlignedClause::Create(Context, StartLoc, LParenLoc, ColonLoc, EndLoc, Vars, Alignment); } OMPClause *Sema::ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; SmallVector<Expr *, 8> SrcExprs; SmallVector<Expr *, 8> DstExprs; SmallVector<Expr *, 8> AssignmentOps; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP copyin clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); SrcExprs.push_back(nullptr); DstExprs.push_back(nullptr); AssignmentOps.push_back(nullptr); continue; } SourceLocation ELoc = RefExpr->getExprLoc(); // OpenMP [2.1, C/C++] // A list item is a variable name. // OpenMP [2.14.4.1, Restrictions, p.1] // A list item that appears in a copyin clause must be threadprivate. DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr); if (!DE || !isa<VarDecl>(DE->getDecl())) { Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange(); continue; } Decl *D = DE->getDecl(); VarDecl *VD = cast<VarDecl>(D); QualType Type = VD->getType(); if (Type->isDependentType() || Type->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); SrcExprs.push_back(nullptr); DstExprs.push_back(nullptr); AssignmentOps.push_back(nullptr); continue; } // OpenMP [2.14.4.1, Restrictions, C/C++, p.1] // A list item that appears in a copyin clause must be threadprivate. 
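    // Illustrative (hypothetical) example: a global declared with
    //   #pragma omp threadprivate(gv)
    // passes the check below, while a plain global named in 'copyin(gv)' is
    // rejected with err_omp_required_access.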
if (!DSAStack->isThreadPrivate(VD)) { Diag(ELoc, diag::err_omp_required_access) << getOpenMPClauseName(OMPC_copyin) << getOpenMPDirectiveName(OMPD_threadprivate); continue; } // OpenMP [2.14.4.1, Restrictions, C/C++, p.2] // A variable of class type (or array thereof) that appears in a // copyin clause requires an accessible, unambiguous copy assignment // operator for the class type. auto ElemType = Context.getBaseElementType(Type).getNonReferenceType(); auto *SrcVD = buildVarDecl(*this, DE->getLocStart(), ElemType.getUnqualifiedType(), ".copyin.src"); auto *PseudoSrcExpr = buildDeclRefExpr( *this, SrcVD, ElemType.getUnqualifiedType(), DE->getExprLoc()); auto *DstVD = buildVarDecl(*this, DE->getLocStart(), ElemType, ".copyin.dst"); auto *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, ElemType, DE->getExprLoc()); // For arrays generate assignment operation for single element and replace // it by the original array element in CodeGen. auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, PseudoDstExpr, PseudoSrcExpr); if (AssignmentOp.isInvalid()) continue; AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), /*DiscardedValue=*/true); if (AssignmentOp.isInvalid()) continue; DSAStack->addDSA(VD, DE, OMPC_copyin); Vars.push_back(DE); SrcExprs.push_back(PseudoSrcExpr); DstExprs.push_back(PseudoDstExpr); AssignmentOps.push_back(AssignmentOp.get()); } if (Vars.empty()) return nullptr; return OMPCopyinClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars, SrcExprs, DstExprs, AssignmentOps); } OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { SmallVector<Expr *, 8> Vars; SmallVector<Expr *, 8> SrcExprs; SmallVector<Expr *, 8> DstExprs; SmallVector<Expr *, 8> AssignmentOps; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP copyprivate clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); SrcExprs.push_back(nullptr); DstExprs.push_back(nullptr); AssignmentOps.push_back(nullptr); continue; } SourceLocation ELoc = RefExpr->getExprLoc(); // OpenMP [2.1, C/C++] // A list item is a variable name. // OpenMP [2.14.4.1, Restrictions, p.1] // A list item that appears in a copyin clause must be threadprivate. DeclRefExpr *DE = dyn_cast<DeclRefExpr>(RefExpr); if (!DE || !isa<VarDecl>(DE->getDecl())) { Diag(ELoc, diag::err_omp_expected_var_name) << RefExpr->getSourceRange(); continue; } Decl *D = DE->getDecl(); VarDecl *VD = cast<VarDecl>(D); QualType Type = VD->getType(); if (Type->isDependentType() || Type->isInstantiationDependentType()) { // It will be analyzed later. Vars.push_back(DE); SrcExprs.push_back(nullptr); DstExprs.push_back(nullptr); AssignmentOps.push_back(nullptr); continue; } // OpenMP [2.14.4.2, Restrictions, p.2] // A list item that appears in a copyprivate clause may not appear in a // private or firstprivate clause on the single construct. if (!DSAStack->isThreadPrivate(VD)) { auto DVar = DSAStack->getTopDSA(VD, false); if (DVar.CKind != OMPC_unknown && DVar.CKind != OMPC_copyprivate && DVar.RefExpr) { Diag(ELoc, diag::err_omp_wrong_dsa) << getOpenMPClauseName(DVar.CKind) << getOpenMPClauseName(OMPC_copyprivate); ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } // OpenMP [2.11.4.2, Restrictions, p.1] // All list items that appear in a copyprivate clause must be either // threadprivate or private in the enclosing context. 
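      // Illustrative (hypothetical) example: a global 'x' named in
      //   #pragma omp single copyprivate(x)
      // inside a parallel region is implicitly shared in the enclosing
      // context, so the getImplicitDSA check below emits
      // err_omp_required_access.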
if (DVar.CKind == OMPC_unknown) { DVar = DSAStack->getImplicitDSA(VD, false); if (DVar.CKind == OMPC_shared) { Diag(ELoc, diag::err_omp_required_access) << getOpenMPClauseName(OMPC_copyprivate) << "threadprivate or private in the enclosing context"; ReportOriginalDSA(*this, DSAStack, VD, DVar); continue; } } } // Variably modified types are not supported. if (!Type->isAnyPointerType() && Type->isVariablyModifiedType()) { Diag(ELoc, diag::err_omp_variably_modified_type_not_supported) << getOpenMPClauseName(OMPC_copyprivate) << Type << getOpenMPDirectiveName(DSAStack->getCurrentDirective()); bool IsDecl = VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly; Diag(VD->getLocation(), IsDecl ? diag::note_previous_decl : diag::note_defined_here) << VD; continue; } // OpenMP [2.14.4.1, Restrictions, C/C++, p.2] // A variable of class type (or array thereof) that appears in a // copyin clause requires an accessible, unambiguous copy assignment // operator for the class type. Type = Context.getBaseElementType(Type).getUnqualifiedType(); auto *SrcVD = buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.src"); auto *PseudoSrcExpr = buildDeclRefExpr(*this, SrcVD, Type, DE->getExprLoc()); auto *DstVD = buildVarDecl(*this, DE->getLocStart(), Type, ".copyprivate.dst"); auto *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, DE->getExprLoc()); auto AssignmentOp = BuildBinOp(/*S=*/nullptr, DE->getExprLoc(), BO_Assign, PseudoDstExpr, PseudoSrcExpr); if (AssignmentOp.isInvalid()) continue; AssignmentOp = ActOnFinishFullExpr(AssignmentOp.get(), DE->getExprLoc(), /*DiscardedValue=*/true); if (AssignmentOp.isInvalid()) continue; // No need to mark vars as copyprivate, they are already threadprivate or // implicitly private. Vars.push_back(DE); SrcExprs.push_back(PseudoSrcExpr); DstExprs.push_back(PseudoDstExpr); AssignmentOps.push_back(AssignmentOp.get()); } if (Vars.empty()) return nullptr; return OMPCopyprivateClause::Create(Context, StartLoc, LParenLoc, EndLoc, Vars, SrcExprs, DstExprs, AssignmentOps); } OMPClause *Sema::ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { if (VarList.empty()) return nullptr; return OMPFlushClause::Create(Context, StartLoc, LParenLoc, EndLoc, VarList); } OMPClause * Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) { if (DepKind == OMPC_DEPEND_unknown) { std::string Values; std::string Sep(", "); for (unsigned i = 0; i < OMPC_DEPEND_unknown; ++i) { Values += "'"; Values += getOpenMPSimpleClauseTypeName(OMPC_depend, i); Values += "'"; switch (i) { case OMPC_DEPEND_unknown - 2: Values += " or "; break; case OMPC_DEPEND_unknown - 1: break; default: Values += Sep; break; } } Diag(DepLoc, diag::err_omp_unexpected_clause_value) << Values << getOpenMPClauseName(OMPC_depend); return nullptr; } SmallVector<Expr *, 8> Vars; for (auto &RefExpr : VarList) { assert(RefExpr && "NULL expr in OpenMP shared clause."); if (isa<DependentScopeDeclRefExpr>(RefExpr)) { // It will be analyzed later. Vars.push_back(RefExpr); continue; } SourceLocation ELoc = RefExpr->getExprLoc(); // OpenMP [2.11.1.1, Restrictions, p.3] // A variable that is part of another variable (such as a field of a // structure) but is not an array element or an array section cannot appear // in a depend clause. 
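    // Illustrative (hypothetical) example: 'depend(in : s.f)' names a
    // structure member and is rejected by the checks below, while
    // 'depend(out : a[i])' is accepted because an array subscript whose
    // base is an array or pointer is a valid list item here.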
auto *SimpleExpr = RefExpr->IgnoreParenCasts(); DeclRefExpr *DE = dyn_cast<DeclRefExpr>(SimpleExpr); ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(SimpleExpr); if (!RefExpr->IgnoreParenImpCasts()->isLValue() || (!ASE && !DE) || (DE && !isa<VarDecl>(DE->getDecl())) || (ASE && !ASE->getBase()->getType()->isAnyPointerType() && !ASE->getBase()->getType()->isArrayType())) { Diag(ELoc, diag::err_omp_expected_var_name_or_array_item) << RefExpr->getSourceRange(); continue; } Vars.push_back(RefExpr->IgnoreParenImpCasts()); } if (Vars.empty()) return nullptr; return OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc, DepKind, DepLoc, ColonLoc, Vars); } #endif // HLSL Change
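
// Illustrative summary (a sketch, not part of the upstream file): the clause
// handlers above share one shape -- validate each list item, build helper
// VarDecls that CodeGen later substitutes for the original variable, record
// the data-sharing attribute on DSAStack, and create the OMPClause node.
// For 'reduction(+ : sum)', for instance, the private copy is
// zero-initialized (the BO_Add case) and the combiner is the
// 'lhs = lhs + rhs' assignment built via BuildBinOp and ActOnFinishFullExpr.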
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
//===--- CodeCompleteConsumer.cpp - Code Completion Interface ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the CodeCompleteConsumer class. // //===----------------------------------------------------------------------===// #include "clang/Sema/CodeCompleteConsumer.h" #include "clang-c/Index.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Twine.h" #include "llvm/Support/raw_ostream.h" #include <algorithm> #include <cstring> #include <functional> // // /////////////////////////////////////////////////////////////////////////////// using namespace clang; //===----------------------------------------------------------------------===// // Code completion context implementation //===----------------------------------------------------------------------===// bool CodeCompletionContext::wantConstructorResults() const { switch (Kind) { case CCC_Recovery: case CCC_Statement: case CCC_Expression: case CCC_ObjCMessageReceiver: case CCC_ParenthesizedExpression: return true; case CCC_TopLevel: case CCC_ObjCInterface: case CCC_ObjCImplementation: case CCC_ObjCIvarList: case CCC_ClassStructUnion: case CCC_DotMemberAccess: case CCC_ArrowMemberAccess: case CCC_ObjCPropertyAccess: case CCC_EnumTag: case CCC_UnionTag: case CCC_ClassOrStructTag: case CCC_ObjCProtocolName: case CCC_Namespace: case CCC_Type: case CCC_Name: case CCC_PotentiallyQualifiedName: case CCC_MacroName: case CCC_MacroNameUse: case CCC_PreprocessorExpression: case CCC_PreprocessorDirective: case CCC_NaturalLanguage: case CCC_SelectorName: case CCC_TypeQualifiers: case CCC_Other: case CCC_OtherWithMacros: case CCC_ObjCInstanceMessage: case CCC_ObjCClassMessage: case CCC_ObjCInterfaceName: case CCC_ObjCCategoryName: return false; } llvm_unreachable("Invalid CodeCompletionContext::Kind!"); } //===----------------------------------------------------------------------===// // Code completion string implementation //===----------------------------------------------------------------------===// CodeCompletionString::Chunk::Chunk(ChunkKind Kind, const char *Text) : Kind(Kind), Text("") { switch (Kind) { case CK_TypedText: case CK_Text: case CK_Placeholder: case CK_Informative: case CK_ResultType: case CK_CurrentParameter: this->Text = Text; break; case CK_Optional: llvm_unreachable("Optional strings cannot be created from text"); case CK_LeftParen: this->Text = "("; break; case CK_RightParen: this->Text = ")"; break; case CK_LeftBracket: this->Text = "["; break; case CK_RightBracket: this->Text = "]"; break; case CK_LeftBrace: this->Text = "{"; break; case CK_RightBrace: this->Text = "}"; break; case CK_LeftAngle: this->Text = "<"; break; case CK_RightAngle: this->Text = ">"; break; case CK_Comma: this->Text = ", "; break; case CK_Colon: this->Text = ":"; break; case CK_SemiColon: this->Text = ";"; break; case CK_Equal: this->Text = " = "; break; case CK_HorizontalSpace: this->Text = " "; break; case CK_VerticalSpace: this->Text = "\n"; break; } } CodeCompletionString::Chunk CodeCompletionString::Chunk::CreateText(const char *Text) { return Chunk(CK_Text, Text); } CodeCompletionString::Chunk 
CodeCompletionString::Chunk::CreateOptional(CodeCompletionString *Optional) { Chunk Result; Result.Kind = CK_Optional; Result.Optional = Optional; return Result; } CodeCompletionString::Chunk CodeCompletionString::Chunk::CreatePlaceholder(const char *Placeholder) { return Chunk(CK_Placeholder, Placeholder); } CodeCompletionString::Chunk CodeCompletionString::Chunk::CreateInformative(const char *Informative) { return Chunk(CK_Informative, Informative); } CodeCompletionString::Chunk CodeCompletionString::Chunk::CreateResultType(const char *ResultType) { return Chunk(CK_ResultType, ResultType); } CodeCompletionString::Chunk CodeCompletionString::Chunk::CreateCurrentParameter( const char *CurrentParameter) { return Chunk(CK_CurrentParameter, CurrentParameter); } CodeCompletionString::CodeCompletionString(const Chunk *Chunks, unsigned NumChunks, unsigned Priority, CXAvailabilityKind Availability, const char **Annotations, unsigned NumAnnotations, StringRef ParentName, const char *BriefComment) : NumChunks(NumChunks), NumAnnotations(NumAnnotations), Priority(Priority), Availability(Availability), ParentName(ParentName), BriefComment(BriefComment) { assert(NumChunks <= 0xffff); assert(NumAnnotations <= 0xffff); Chunk *StoredChunks = reinterpret_cast<Chunk *>(this + 1); for (unsigned I = 0; I != NumChunks; ++I) StoredChunks[I] = Chunks[I]; const char **StoredAnnotations = reinterpret_cast<const char **>(StoredChunks + NumChunks); for (unsigned I = 0; I != NumAnnotations; ++I) StoredAnnotations[I] = Annotations[I]; } unsigned CodeCompletionString::getAnnotationCount() const { return NumAnnotations; } const char *CodeCompletionString::getAnnotation(unsigned AnnotationNr) const { if (AnnotationNr < NumAnnotations) return reinterpret_cast<const char * const*>(end())[AnnotationNr]; else return nullptr; } std::string CodeCompletionString::getAsString() const { std::string Result; llvm::raw_string_ostream OS(Result); for (iterator C = begin(), CEnd = end(); C != CEnd; ++C) { switch (C->Kind) { case CK_Optional: OS << "{#" << C->Optional->getAsString() << "#}"; break; case CK_Placeholder: OS << "<#" << C->Text << "#>"; break; case CK_Informative: case CK_ResultType: OS << "[#" << C->Text << "#]"; break; case CK_CurrentParameter: OS << "<#" << C->Text << "#>"; break; default: OS << C->Text; break; } } return OS.str(); } const char *CodeCompletionString::getTypedText() const { for (iterator C = begin(), CEnd = end(); C != CEnd; ++C) if (C->Kind == CK_TypedText) return C->Text; return nullptr; } const char *CodeCompletionAllocator::CopyString(const Twine &String) { SmallString<128> Data; StringRef Ref = String.toStringRef(Data); // FIXME: It would be more efficient to teach Twine to tell us its size and // then add a routine there to fill in an allocated char* with the contents // of the string. char *Mem = (char *)Allocate(Ref.size() + 1, 1); std::copy(Ref.begin(), Ref.end(), Mem); Mem[Ref.size()] = 0; return Mem; } StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) { const NamedDecl *ND = dyn_cast<NamedDecl>(DC); if (!ND) return StringRef(); // Check whether we've already cached the parent name. StringRef &CachedParentName = ParentNames[DC]; if (!CachedParentName.empty()) return CachedParentName; // If we already processed this DeclContext and assigned empty to it, the // data pointer will be non-null. if (CachedParentName.data() != nullptr) return StringRef(); // Find the interesting names. 
SmallVector<const DeclContext *, 2> Contexts; while (DC && !DC->isFunctionOrMethod()) { if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC)) { if (ND->getIdentifier()) Contexts.push_back(DC); } DC = DC->getParent(); } { SmallString<128> S; llvm::raw_svector_ostream OS(S); bool First = true; for (unsigned I = Contexts.size(); I != 0; --I) { if (First) First = false; else { OS << "::"; } const DeclContext *CurDC = Contexts[I-1]; if (const ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC)) CurDC = CatImpl->getCategoryDecl(); if (const ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) { const ObjCInterfaceDecl *Interface = Cat->getClassInterface(); if (!Interface) { // Assign an empty StringRef but with non-null data to distinguish // between empty because we didn't process the DeclContext yet. CachedParentName = StringRef((const char *)~(uintptr_t)0U, 0); // HLSL Change return StringRef(); } OS << Interface->getName() << '(' << Cat->getName() << ')'; } else { OS << cast<NamedDecl>(CurDC)->getName(); } } CachedParentName = AllocatorRef->CopyString(OS.str()); } return CachedParentName; } CodeCompletionString *CodeCompletionBuilder::TakeString() { void *Mem = getAllocator().Allocate( sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size() + sizeof(const char *) * Annotations.size(), llvm::alignOf<CodeCompletionString>()); CodeCompletionString *Result = new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(), Priority, Availability, Annotations.data(), Annotations.size(), ParentName, BriefComment); Chunks.clear(); return Result; } void CodeCompletionBuilder::AddTypedTextChunk(const char *Text) { Chunks.push_back(Chunk(CodeCompletionString::CK_TypedText, Text)); } void CodeCompletionBuilder::AddTextChunk(const char *Text) { Chunks.push_back(Chunk::CreateText(Text)); } void CodeCompletionBuilder::AddOptionalChunk(CodeCompletionString *Optional) { Chunks.push_back(Chunk::CreateOptional(Optional)); } void CodeCompletionBuilder::AddPlaceholderChunk(const char *Placeholder) { Chunks.push_back(Chunk::CreatePlaceholder(Placeholder)); } void CodeCompletionBuilder::AddInformativeChunk(const char *Text) { Chunks.push_back(Chunk::CreateInformative(Text)); } void CodeCompletionBuilder::AddResultTypeChunk(const char *ResultType) { Chunks.push_back(Chunk::CreateResultType(ResultType)); } void CodeCompletionBuilder::AddCurrentParameterChunk(const char *CurrentParameter) { Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter)); } void CodeCompletionBuilder::AddChunk(CodeCompletionString::ChunkKind CK, const char *Text) { Chunks.push_back(Chunk(CK, Text)); } void CodeCompletionBuilder::addParentContext(const DeclContext *DC) { if (DC->isTranslationUnit()) { return; } if (DC->isFunctionOrMethod()) return; const NamedDecl *ND = dyn_cast<NamedDecl>(DC); if (!ND) return; ParentName = getCodeCompletionTUInfo().getParentName(DC); } void CodeCompletionBuilder::addBriefComment(StringRef Comment) { BriefComment = Allocator.CopyString(Comment); } //===----------------------------------------------------------------------===// // Code completion overload candidate implementation //===----------------------------------------------------------------------===// FunctionDecl * CodeCompleteConsumer::OverloadCandidate::getFunction() const { if (getKind() == CK_Function) return Function; else if (getKind() == CK_FunctionTemplate) return FunctionTemplate->getTemplatedDecl(); else return nullptr; } const FunctionType * CodeCompleteConsumer::OverloadCandidate::getFunctionType() const { 
switch (Kind) { case CK_Function: return Function->getType()->getAs<FunctionType>(); case CK_FunctionTemplate: return FunctionTemplate->getTemplatedDecl()->getType() ->getAs<FunctionType>(); case CK_FunctionType: return Type; } llvm_unreachable("Invalid CandidateKind!"); } //===----------------------------------------------------------------------===// // Code completion consumer implementation //===----------------------------------------------------------------------===// CodeCompleteConsumer::~CodeCompleteConsumer() { } void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef, CodeCompletionContext Context, CodeCompletionResult *Results, unsigned NumResults) { std::stable_sort(Results, Results + NumResults); // Print the results. for (unsigned I = 0; I != NumResults; ++I) { OS << "COMPLETION: "; switch (Results[I].Kind) { case CodeCompletionResult::RK_Declaration: OS << *Results[I].Declaration; if (Results[I].Hidden) OS << " (Hidden)"; if (CodeCompletionString *CCS = Results[I].CreateCodeCompletionString(SemaRef, Context, getAllocator(), CCTUInfo, includeBriefComments())) { OS << " : " << CCS->getAsString(); if (const char *BriefComment = CCS->getBriefComment()) OS << " : " << BriefComment; } OS << '\n'; break; case CodeCompletionResult::RK_Keyword: OS << Results[I].Keyword << '\n'; break; case CodeCompletionResult::RK_Macro: { OS << Results[I].Macro->getName(); if (CodeCompletionString *CCS = Results[I].CreateCodeCompletionString(SemaRef, Context, getAllocator(), CCTUInfo, includeBriefComments())) { OS << " : " << CCS->getAsString(); } OS << '\n'; break; } case CodeCompletionResult::RK_Pattern: { OS << "Pattern : " << Results[I].Pattern->getAsString() << '\n'; break; } } } } // This function is used solely to preserve the former presentation of overloads // by "clang -cc1 -code-completion-at", since CodeCompletionString::getAsString // needs to be improved for printing the newer and more detailed overload // chunks. static std::string getOverloadAsString(const CodeCompletionString &CCS) { std::string Result; llvm::raw_string_ostream OS(Result); for (auto &C : CCS) { switch (C.Kind) { case CodeCompletionString::CK_Informative: case CodeCompletionString::CK_ResultType: OS << "[#" << C.Text << "#]"; break; case CodeCompletionString::CK_CurrentParameter: OS << "<#" << C.Text << "#>"; break; default: OS << C.Text; break; } } return OS.str(); } void PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef, unsigned CurrentArg, OverloadCandidate *Candidates, unsigned NumCandidates) { for (unsigned I = 0; I != NumCandidates; ++I) { if (CodeCompletionString *CCS = Candidates[I].CreateSignatureString(CurrentArg, SemaRef, getAllocator(), CCTUInfo, includeBriefComments())) { OS << "OVERLOAD: " << getOverloadAsString(*CCS) << "\n"; } } } /// \brief Retrieve the effective availability of the given declaration. static AvailabilityResult getDeclAvailability(const Decl *D) { AvailabilityResult AR = D->getAvailability(); if (isa<EnumConstantDecl>(D)) AR = std::max(AR, cast<Decl>(D->getDeclContext())->getAvailability()); return AR; } void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) { switch (Kind) { case RK_Pattern: if (!Declaration) { // Do nothing: Patterns can come with cursor kinds! break; } LLVM_FALLTHROUGH; // HLSL Change case RK_Declaration: { // Set the availability based on attributes. 
switch (getDeclAvailability(Declaration)) { case AR_Available: case AR_NotYetIntroduced: Availability = CXAvailability_Available; break; case AR_Deprecated: Availability = CXAvailability_Deprecated; break; case AR_Unavailable: Availability = CXAvailability_NotAvailable; break; } if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Declaration)) if (Function->isDeleted()) Availability = CXAvailability_NotAvailable; CursorKind = getCursorKindForDecl(Declaration); if (CursorKind == CXCursor_UnexposedDecl) { // FIXME: Forward declarations of Objective-C classes and protocols // are not directly exposed, but we want code completion to treat them // like a definition. if (isa<ObjCInterfaceDecl>(Declaration)) CursorKind = CXCursor_ObjCInterfaceDecl; else if (isa<ObjCProtocolDecl>(Declaration)) CursorKind = CXCursor_ObjCProtocolDecl; else CursorKind = CXCursor_NotImplemented; } break; } case RK_Macro: case RK_Keyword: llvm_unreachable("Macro and keyword kinds are handled by the constructors"); } if (!Accessible) Availability = CXAvailability_NotAccessible; } /// \brief Retrieve the name that should be used to order a result. /// /// If the name needs to be constructed as a string, that string will be /// saved into Saved and the returned StringRef will refer to it. static StringRef getOrderedName(const CodeCompletionResult &R, std::string &Saved) { switch (R.Kind) { case CodeCompletionResult::RK_Keyword: return R.Keyword; case CodeCompletionResult::RK_Pattern: return R.Pattern->getTypedText(); case CodeCompletionResult::RK_Macro: return R.Macro->getName(); case CodeCompletionResult::RK_Declaration: // Handle declarations below. break; } DeclarationName Name = R.Declaration->getDeclName(); // If the name is a simple identifier (by far the common case), or a // zero-argument selector, just return a reference to that identifier. if (IdentifierInfo *Id = Name.getAsIdentifierInfo()) return Id->getName(); if (Name.isObjCZeroArgSelector()) if (IdentifierInfo *Id = Name.getObjCSelector().getIdentifierInfoForSlot(0)) return Id->getName(); Saved = Name.getAsString(); return Saved; } bool clang::operator<(const CodeCompletionResult &X, const CodeCompletionResult &Y) { std::string XSaved, YSaved; StringRef XStr = getOrderedName(X, XSaved); StringRef YStr = getOrderedName(Y, YSaved); int cmp = XStr.compare_lower(YStr); if (cmp) return cmp < 0; // If case-insensitive comparison fails, try case-sensitive comparison. cmp = XStr.compare(YStr); if (cmp) return cmp < 0; return false; }
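
// Illustrative sketch (hypothetical usage, not part of the upstream file):
// assembling and printing a completion string with the builder defined
// above; 'Alloc' and 'TUInfo' are assumed to be a live
// CodeCompletionAllocator and CodeCompletionTUInfo.
//
//   CodeCompletionBuilder B(Alloc, TUInfo);
//   B.AddResultTypeChunk("void");
//   B.AddTypedTextChunk("f");
//   B.AddChunk(CodeCompletionString::CK_LeftParen);
//   B.AddPlaceholderChunk("int x");
//   B.AddChunk(CodeCompletionString::CK_RightParen);
//   CodeCompletionString *S = B.TakeString();
//   llvm::errs() << S->getAsString();  // prints "[#void#]f(<#int x#>)"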
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
//===------- SemaTemplateDeduction.cpp - Template Argument Deduction ------===/ // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. //===----------------------------------------------------------------------===/ // // This file implements C++ template argument deduction. // //===----------------------------------------------------------------------===/ #include "clang/Sema/TemplateDeduction.h" #include "TreeTransform.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/StmtVisitor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "clang/Sema/Template.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change #include "llvm/ADT/SmallBitVector.h" #include <algorithm> namespace clang { using namespace sema; /// \brief Various flags that control template argument deduction. /// /// These flags can be bitwise-OR'd together. enum TemplateDeductionFlags { /// \brief No template argument deduction flags, which indicates the /// strictest results for template argument deduction (as used for, e.g., /// matching class template partial specializations). TDF_None = 0, /// \brief Within template argument deduction from a function call, we are /// matching with a parameter type for which the original parameter was /// a reference. TDF_ParamWithReferenceType = 0x1, /// \brief Within template argument deduction from a function call, we /// are matching in a case where we ignore cv-qualifiers. TDF_IgnoreQualifiers = 0x02, /// \brief Within template argument deduction from a function call, /// we are matching in a case where we can perform template argument /// deduction from a template-id of a derived class of the argument type. TDF_DerivedClass = 0x04, /// \brief Allow non-dependent types to differ, e.g., when performing /// template argument deduction from a function call where conversions /// may apply. TDF_SkipNonDependent = 0x08, /// \brief Whether we are performing template argument deduction for /// parameters and arguments in a top-level template argument TDF_TopLevelParameterTypeList = 0x10, /// \brief Within template argument deduction from overload resolution per /// C++ [over.over] allow matching function types that are compatible in /// terms of noreturn and default calling convention adjustments. TDF_InOverloadResolution = 0x20 }; } using namespace clang; /// \brief Compare two APSInts, extending and switching the sign as /// necessary to compare their values regardless of underlying type. static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) { if (Y.getBitWidth() > X.getBitWidth()) X = X.extend(Y.getBitWidth()); else if (Y.getBitWidth() < X.getBitWidth()) Y = Y.extend(X.getBitWidth()); // If there is a signedness mismatch, correct it. if (X.isSigned() != Y.isSigned()) { // If the signed value is negative, then the values cannot be the same. 
if ((Y.isSigned() && Y.isNegative()) || (X.isSigned() && X.isNegative())) return false; Y.setIsSigned(true); X.setIsSigned(true); } return X == Y; } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgument &Param, TemplateArgument Arg, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced); static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(Sema &S, TemplateParameterList *TemplateParams, QualType Param, QualType Arg, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> & Deduced, unsigned TDF, bool PartialOrdering = false); static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgument *Params, unsigned NumParams, const TemplateArgument *Args, unsigned NumArgs, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced); /// \brief If the given expression is of a form that permits the deduction /// of a non-type template parameter, return the declaration of that /// non-type template parameter. static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) { // If we are within an alias template, the expression may have undergone // any number of parameter substitutions already. while (1) { if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E)) E = IC->getSubExpr(); else if (SubstNonTypeTemplateParmExpr *Subst = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) E = Subst->getReplacement(); else break; } if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) return dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl()); return nullptr; } /// \brief Determine whether two declaration pointers refer to the same /// declaration. static bool isSameDeclaration(Decl *X, Decl *Y) { if (NamedDecl *NX = dyn_cast<NamedDecl>(X)) X = NX->getUnderlyingDecl(); if (NamedDecl *NY = dyn_cast<NamedDecl>(Y)) Y = NY->getUnderlyingDecl(); return X->getCanonicalDecl() == Y->getCanonicalDecl(); } /// \brief Verify that the given, deduced template arguments are compatible. /// /// \returns The deduced template argument, or a NULL template argument if /// the deduced template arguments were incompatible. static DeducedTemplateArgument checkDeducedTemplateArguments(ASTContext &Context, const DeducedTemplateArgument &X, const DeducedTemplateArgument &Y) { // We have no deduction for one or both of the arguments; they're compatible. if (X.isNull()) return Y; if (Y.isNull()) return X; switch (X.getKind()) { case TemplateArgument::Null: llvm_unreachable("Non-deduced template arguments handled above"); case TemplateArgument::Type: // If two template type arguments have the same type, they're compatible. if (Y.getKind() == TemplateArgument::Type && Context.hasSameType(X.getAsType(), Y.getAsType())) return X; return DeducedTemplateArgument(); case TemplateArgument::Integral: // If we deduced a constant in one case and either a dependent expression or // declaration in another case, keep the integral constant. // If both are integral constants with the same value, keep that value. if (Y.getKind() == TemplateArgument::Expression || Y.getKind() == TemplateArgument::Declaration || (Y.getKind() == TemplateArgument::Integral && hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral()))) return DeducedTemplateArgument(X, X.wasDeducedFromArrayBound() && Y.wasDeducedFromArrayBound()); // All other combinations are incompatible. 
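    // For example, deducing N = 4 from one function argument and N = 8 from
    // another lands here; the null result is reported to the caller as
    // Sema::TDK_Inconsistent.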
    return DeducedTemplateArgument();

  case TemplateArgument::Template:
    if (Y.getKind() == TemplateArgument::Template &&
        Context.hasSameTemplateName(X.getAsTemplate(), Y.getAsTemplate()))
      return X;

    // All other combinations are incompatible.
    return DeducedTemplateArgument();

  case TemplateArgument::TemplateExpansion:
    if (Y.getKind() == TemplateArgument::TemplateExpansion &&
        Context.hasSameTemplateName(X.getAsTemplateOrTemplatePattern(),
                                    Y.getAsTemplateOrTemplatePattern()))
      return X;

    // All other combinations are incompatible.
    return DeducedTemplateArgument();

  case TemplateArgument::Expression:
    // If we deduced a dependent expression in one case and either an integral
    // constant or a declaration in another case, keep the integral constant
    // or declaration.
    if (Y.getKind() == TemplateArgument::Integral ||
        Y.getKind() == TemplateArgument::Declaration)
      return DeducedTemplateArgument(Y, X.wasDeducedFromArrayBound() &&
                                        Y.wasDeducedFromArrayBound());

    if (Y.getKind() == TemplateArgument::Expression) {
      // Compare the expressions for equality
      llvm::FoldingSetNodeID ID1, ID2;
      X.getAsExpr()->Profile(ID1, Context, true);
      Y.getAsExpr()->Profile(ID2, Context, true);
      if (ID1 == ID2)
        return X;
    }

    // All other combinations are incompatible.
    return DeducedTemplateArgument();

  case TemplateArgument::Declaration:
    // If we deduced a declaration and a dependent expression, keep the
    // declaration.
    if (Y.getKind() == TemplateArgument::Expression)
      return X;

    // If we deduced a declaration and an integral constant, keep the
    // integral constant.
    if (Y.getKind() == TemplateArgument::Integral)
      return Y;

    // If we deduced two declarations, make sure they refer to the
    // same declaration.
    if (Y.getKind() == TemplateArgument::Declaration &&
        isSameDeclaration(X.getAsDecl(), Y.getAsDecl()))
      return X;

    // All other combinations are incompatible.
    return DeducedTemplateArgument();

  case TemplateArgument::NullPtr:
    // If we deduced a null pointer and a dependent expression, keep the
    // null pointer.
    if (Y.getKind() == TemplateArgument::Expression)
      return X;

    // If we deduced a null pointer and an integral constant, keep the
    // integral constant.
    if (Y.getKind() == TemplateArgument::Integral)
      return Y;

    // If we deduced two null pointers, make sure they have the same type.
    if (Y.getKind() == TemplateArgument::NullPtr &&
        Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType()))
      return X;

    // All other combinations are incompatible.
    return DeducedTemplateArgument();

  case TemplateArgument::Pack:
    if (Y.getKind() != TemplateArgument::Pack ||
        X.pack_size() != Y.pack_size())
      return DeducedTemplateArgument();

    for (TemplateArgument::pack_iterator XA = X.pack_begin(),
                                      XAEnd = X.pack_end(),
                                         YA = Y.pack_begin();
         XA != XAEnd; ++XA, ++YA) {
      // FIXME: Do we need to merge the results together here?
      if (checkDeducedTemplateArguments(Context,
                DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
                DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()))
            .isNull())
        return DeducedTemplateArgument();
    }

    return X;
  }

  llvm_unreachable("Invalid TemplateArgument Kind!");
}

/// \brief Deduce the value of the given non-type template parameter
/// from the given constant.
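///
/// For example, matching a parameter of type T[N] against an argument of
/// type int[10] reaches this routine with the value 10 deduced from the
/// array bound.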
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(Sema &S, NonTypeTemplateParmDecl *NTTP, llvm::APSInt Value, QualType ValueType, bool DeducedFromArrayBound, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { assert(NTTP->getDepth() == 0 && "Cannot deduce non-type template argument with depth > 0"); DeducedTemplateArgument NewDeduced(S.Context, Value, ValueType, DeducedFromArrayBound); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[NTTP->getIndex()], NewDeduced); if (Result.isNull()) { Info.Param = NTTP; Info.FirstArg = Deduced[NTTP->getIndex()]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[NTTP->getIndex()] = Result; return Sema::TDK_Success; } /// \brief Deduce the value of the given non-type template parameter /// from the given type- or value-dependent expression. /// /// \returns true if deduction succeeded, false otherwise. static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(Sema &S, NonTypeTemplateParmDecl *NTTP, Expr *Value, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { assert(NTTP->getDepth() == 0 && "Cannot deduce non-type template argument with depth > 0"); assert((Value->isTypeDependent() || Value->isValueDependent()) && "Expression template argument must be type- or value-dependent."); DeducedTemplateArgument NewDeduced(Value); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[NTTP->getIndex()], NewDeduced); if (Result.isNull()) { Info.Param = NTTP; Info.FirstArg = Deduced[NTTP->getIndex()]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[NTTP->getIndex()] = Result; return Sema::TDK_Success; } /// \brief Deduce the value of the given non-type template parameter /// from the given declaration. /// /// \returns true if deduction succeeded, false otherwise. static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(Sema &S, NonTypeTemplateParmDecl *NTTP, ValueDecl *D, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { assert(NTTP->getDepth() == 0 && "Cannot deduce non-type template argument with depth > 0"); D = D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr; TemplateArgument New(D, NTTP->getType()); DeducedTemplateArgument NewDeduced(New); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[NTTP->getIndex()], NewDeduced); if (Result.isNull()) { Info.Param = NTTP; Info.FirstArg = Deduced[NTTP->getIndex()]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[NTTP->getIndex()] = Result; return Sema::TDK_Success; } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, TemplateName Param, TemplateName Arg, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { TemplateDecl *ParamDecl = Param.getAsTemplateDecl(); if (!ParamDecl) { // The parameter type is dependent and is not a template template parameter, // so there is nothing that we can deduce. 
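    // For example, a dependent name such as 'typename T::template apply'
    // may name a template, but gives us no template template parameter to
    // bind, so we succeed without recording anything.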
return Sema::TDK_Success; } if (TemplateTemplateParmDecl *TempParam = dyn_cast<TemplateTemplateParmDecl>(ParamDecl)) { DeducedTemplateArgument NewDeduced(S.Context.getCanonicalTemplateName(Arg)); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[TempParam->getIndex()], NewDeduced); if (Result.isNull()) { Info.Param = TempParam; Info.FirstArg = Deduced[TempParam->getIndex()]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[TempParam->getIndex()] = Result; return Sema::TDK_Success; } // Verify that the two template names are equivalent. if (S.Context.hasSameTemplateName(Param, Arg)) return Sema::TDK_Success; // Mismatch of non-dependent template parameter to argument. Info.FirstArg = TemplateArgument(Param); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_NonDeducedMismatch; } /// \brief Deduce the template arguments by comparing the template parameter /// type (which is a template-id) with the template argument type. /// /// \param S the Sema /// /// \param TemplateParams the template parameters that we are deducing /// /// \param Param the parameter type /// /// \param Arg the argument type /// /// \param Info information about the template argument deduction itself /// /// \param Deduced the deduced template arguments /// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateSpecializationType *Param, QualType Arg, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { assert(Arg.isCanonical() && "Argument type must be canonical"); // Check whether the template argument is a dependent template-id. if (const TemplateSpecializationType *SpecArg = dyn_cast<TemplateSpecializationType>(Arg)) { // Perform template argument deduction for the template name. if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Param->getTemplateName(), SpecArg->getTemplateName(), Info, Deduced)) return Result; // Perform template argument deduction on each template // argument. Ignore any missing/extra arguments, since they could be // filled in by default arguments. return DeduceTemplateArguments(S, TemplateParams, Param->getArgs(), Param->getNumArgs(), SpecArg->getArgs(), SpecArg->getNumArgs(), Info, Deduced); } // If the argument type is a class template specialization, we // perform template argument deduction using its template // arguments. const RecordType *RecordArg = dyn_cast<RecordType>(Arg); if (!RecordArg) { Info.FirstArg = TemplateArgument(QualType(Param, 0)); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_NonDeducedMismatch; } ClassTemplateSpecializationDecl *SpecArg = dyn_cast<ClassTemplateSpecializationDecl>(RecordArg->getDecl()); if (!SpecArg) { Info.FirstArg = TemplateArgument(QualType(Param, 0)); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_NonDeducedMismatch; } // Perform template argument deduction for the template name. if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Param->getTemplateName(), TemplateName(SpecArg->getSpecializedTemplate()), Info, Deduced)) return Result; // Perform template argument deduction for the template arguments. 
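  // For example, matching the parameter vector<T> against an argument of
  // type vector<int> compares the specialization's argument list against
  // the parameter's, deducing T = int.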
return DeduceTemplateArguments(S, TemplateParams, Param->getArgs(), Param->getNumArgs(), SpecArg->getTemplateArgs().data(), SpecArg->getTemplateArgs().size(), Info, Deduced); } /// \brief Determines whether the given type is an opaque type that /// might be more qualified when instantiated. static bool IsPossiblyOpaquelyQualifiedType(QualType T) { switch (T->getTypeClass()) { case Type::TypeOfExpr: case Type::TypeOf: case Type::DependentName: case Type::Decltype: case Type::UnresolvedUsing: case Type::TemplateTypeParm: return true; case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: case Type::DependentSizedArray: return IsPossiblyOpaquelyQualifiedType( cast<ArrayType>(T)->getElementType()); default: return false; } } /// \brief Retrieve the depth and index of a template parameter. static std::pair<unsigned, unsigned> getDepthAndIndex(NamedDecl *ND) { if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND)) return std::make_pair(TTP->getDepth(), TTP->getIndex()); if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND)) return std::make_pair(NTTP->getDepth(), NTTP->getIndex()); TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND); return std::make_pair(TTP->getDepth(), TTP->getIndex()); } /// \brief Retrieve the depth and index of an unexpanded parameter pack. static std::pair<unsigned, unsigned> getDepthAndIndex(UnexpandedParameterPack UPP) { if (const TemplateTypeParmType *TTP = UPP.first.dyn_cast<const TemplateTypeParmType *>()) return std::make_pair(TTP->getDepth(), TTP->getIndex()); return getDepthAndIndex(UPP.first.get<NamedDecl *>()); } /// \brief Helper function to build a TemplateParameter when we don't /// know its type statically. static TemplateParameter makeTemplateParameter(Decl *D) { if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(D)) return TemplateParameter(TTP); if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) return TemplateParameter(NTTP); return TemplateParameter(cast<TemplateTemplateParmDecl>(D)); } /// A pack that we're currently deducing. struct clang::DeducedPack { DeducedPack(unsigned Index) : Index(Index), Outer(nullptr) {} // The index of the pack. unsigned Index; // The old value of the pack before we started deducing it. DeducedTemplateArgument Saved; // A deferred value of this pack from an inner deduction, that couldn't be // deduced because this deduction hadn't happened yet. DeducedTemplateArgument DeferredDeduction; // The new value of the pack. SmallVector<DeducedTemplateArgument, 4> New; // The outer deduction for this pack, if any. DeducedPack *Outer; }; namespace { /// A scope in which we're performing pack deduction. class PackDeductionScope { public: PackDeductionScope(Sema &S, TemplateParameterList *TemplateParams, SmallVectorImpl<DeducedTemplateArgument> &Deduced, TemplateDeductionInfo &Info, TemplateArgument Pattern) : S(S), TemplateParams(TemplateParams), Deduced(Deduced), Info(Info) { // Compute the set of template parameter indices that correspond to // parameter packs expanded by the pack expansion. 
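    // For example, for a pack expansion whose pattern is pair<Ts, Us>, the
    // indices of both Ts and Us are recorded so their partial deductions can
    // be saved and restored around this expansion.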
{ llvm::SmallBitVector SawIndices(TemplateParams->size()); SmallVector<UnexpandedParameterPack, 2> Unexpanded; S.collectUnexpandedParameterPacks(Pattern, Unexpanded); for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) { unsigned Depth, Index; std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]); if (Depth == 0 && !SawIndices[Index]) { SawIndices[Index] = true; // Save the deduced template argument for the parameter pack expanded // by this pack expansion, then clear out the deduction. DeducedPack Pack(Index); Pack.Saved = Deduced[Index]; Deduced[Index] = TemplateArgument(); Packs.push_back(Pack); } } } assert(!Packs.empty() && "Pack expansion without unexpanded packs?"); for (auto &Pack : Packs) { if (Info.PendingDeducedPacks.size() > Pack.Index) Pack.Outer = Info.PendingDeducedPacks[Pack.Index]; else Info.PendingDeducedPacks.resize(Pack.Index + 1); Info.PendingDeducedPacks[Pack.Index] = &Pack; if (S.CurrentInstantiationScope) { // If the template argument pack was explicitly specified, add that to // the set of deduced arguments. const TemplateArgument *ExplicitArgs; unsigned NumExplicitArgs; NamedDecl *PartiallySubstitutedPack = S.CurrentInstantiationScope->getPartiallySubstitutedPack( &ExplicitArgs, &NumExplicitArgs); if (PartiallySubstitutedPack && getDepthAndIndex(PartiallySubstitutedPack).second == Pack.Index) Pack.New.append(ExplicitArgs, ExplicitArgs + NumExplicitArgs); } } } ~PackDeductionScope() { for (auto &Pack : Packs) Info.PendingDeducedPacks[Pack.Index] = Pack.Outer; } /// Move to deducing the next element in each pack that is being deduced. void nextPackElement() { // Capture the deduced template arguments for each parameter pack expanded // by this pack expansion, add them to the list of arguments we've deduced // for that pack, then clear out the deduced argument. for (auto &Pack : Packs) { DeducedTemplateArgument &DeducedArg = Deduced[Pack.Index]; if (!DeducedArg.isNull()) { Pack.New.push_back(DeducedArg); DeducedArg = DeducedTemplateArgument(); } } } /// \brief Finish template argument deduction for a set of argument packs, /// producing the argument packs and checking for consistency with prior /// deductions. Sema::TemplateDeductionResult finish(bool HasAnyArguments) { // Build argument packs for each of the parameter packs expanded by this // pack expansion. for (auto &Pack : Packs) { // Put back the old value for this pack. Deduced[Pack.Index] = Pack.Saved; // Build or find a new value for this pack. DeducedTemplateArgument NewPack; if (HasAnyArguments && Pack.New.empty()) { if (Pack.DeferredDeduction.isNull()) { // We were not able to deduce anything for this parameter pack // (because it only appeared in non-deduced contexts), so just // restore the saved argument pack. continue; } NewPack = Pack.DeferredDeduction; Pack.DeferredDeduction = TemplateArgument(); } else if (Pack.New.empty()) { // If we deduced an empty argument pack, create it now. NewPack = DeducedTemplateArgument(TemplateArgument::getEmptyPack()); } else { TemplateArgument *ArgumentPack = new (S.Context) TemplateArgument[Pack.New.size()]; std::copy(Pack.New.begin(), Pack.New.end(), ArgumentPack); NewPack = DeducedTemplateArgument( TemplateArgument(ArgumentPack, Pack.New.size()), Pack.New[0].wasDeducedFromArrayBound()); } // Pick where we're going to put the merged pack. DeducedTemplateArgument *Loc; if (Pack.Outer) { if (Pack.Outer->DeferredDeduction.isNull()) { // Defer checking this pack until we have a complete pack to compare // it against. 
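          // This arises with nested expansions, where an inner expansion
          // finishes while the outer pack it refers to is still being built.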
Pack.Outer->DeferredDeduction = NewPack; continue; } Loc = &Pack.Outer->DeferredDeduction; } else { Loc = &Deduced[Pack.Index]; } // Check the new pack matches any previous value. DeducedTemplateArgument OldPack = *Loc; DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, OldPack, NewPack); // If we deferred a deduction of this pack, check that one now too. if (!Result.isNull() && !Pack.DeferredDeduction.isNull()) { OldPack = Result; NewPack = Pack.DeferredDeduction; Result = checkDeducedTemplateArguments(S.Context, OldPack, NewPack); } if (Result.isNull()) { Info.Param = makeTemplateParameter(TemplateParams->getParam(Pack.Index)); Info.FirstArg = OldPack; Info.SecondArg = NewPack; return Sema::TDK_Inconsistent; } *Loc = Result; } return Sema::TDK_Success; } private: Sema &S; TemplateParameterList *TemplateParams; SmallVectorImpl<DeducedTemplateArgument> &Deduced; TemplateDeductionInfo &Info; SmallVector<DeducedPack, 2> Packs; }; } // namespace /// \brief Deduce the template arguments by comparing the list of parameter /// types to the list of argument types, as in the parameter-type-lists of /// function types (C++ [temp.deduct.type]p10). /// /// \param S The semantic analysis object within which we are deducing /// /// \param TemplateParams The template parameters that we are deducing /// /// \param Params The list of parameter types /// /// \param NumParams The number of types in \c Params /// /// \param Args The list of argument types /// /// \param NumArgs The number of types in \c Args /// /// \param Info information about the template argument deduction itself /// /// \param Deduced the deduced template arguments /// /// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe /// how template argument deduction is performed. /// /// \param PartialOrdering If true, we are performing template argument /// deduction for during partial ordering for a call /// (C++0x [temp.deduct.partial]). /// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const QualType *Params, unsigned NumParams, const QualType *Args, unsigned NumArgs, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF, bool PartialOrdering = false) { // Fast-path check to see if we have too many/too few arguments. if (NumParams != NumArgs && !(NumParams && isa<PackExpansionType>(Params[NumParams - 1])) && !(NumArgs && isa<PackExpansionType>(Args[NumArgs - 1]))) return Sema::TDK_MiscellaneousDeductionFailure; // C++0x [temp.deduct.type]p10: // Similarly, if P has a form that contains (T), then each parameter type // Pi of the respective parameter-type- list of P is compared with the // corresponding parameter type Ai of the corresponding parameter-type-list // of A. [...] unsigned ArgIdx = 0, ParamIdx = 0; for (; ParamIdx != NumParams; ++ParamIdx) { // Check argument types. const PackExpansionType *Expansion = dyn_cast<PackExpansionType>(Params[ParamIdx]); if (!Expansion) { // Simple case: compare the parameter and argument types at this point. // Make sure we have an argument. 
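    // For example, deducing void(T, U) against void(int, float) walks both
    // lists in lockstep, deducing T = int and then U = float.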
if (ArgIdx >= NumArgs) return Sema::TDK_MiscellaneousDeductionFailure; if (isa<PackExpansionType>(Args[ArgIdx])) { // C++0x [temp.deduct.type]p22: // If the original function parameter associated with A is a function // parameter pack and the function parameter associated with P is not // a function parameter pack, then template argument deduction fails. return Sema::TDK_MiscellaneousDeductionFailure; } if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Params[ParamIdx], Args[ArgIdx], Info, Deduced, TDF, PartialOrdering)) return Result; ++ArgIdx; continue; } // C++0x [temp.deduct.type]p5: // The non-deduced contexts are: // - A function parameter pack that does not occur at the end of the // parameter-declaration-clause. if (ParamIdx + 1 < NumParams) return Sema::TDK_Success; // C++0x [temp.deduct.type]p10: // If the parameter-declaration corresponding to Pi is a function // parameter pack, then the type of its declarator- id is compared with // each remaining parameter type in the parameter-type-list of A. Each // comparison deduces template arguments for subsequent positions in the // template parameter packs expanded by the function parameter pack. QualType Pattern = Expansion->getPattern(); PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern); bool HasAnyArguments = false; for (; ArgIdx < NumArgs; ++ArgIdx) { HasAnyArguments = true; // Deduce template arguments from the pattern. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern, Args[ArgIdx], Info, Deduced, TDF, PartialOrdering)) return Result; PackScope.nextPackElement(); } // Build argument packs for each of the parameter packs expanded by this // pack expansion. if (auto Result = PackScope.finish(HasAnyArguments)) return Result; } // Make sure we don't have any extra arguments. if (ArgIdx < NumArgs) return Sema::TDK_MiscellaneousDeductionFailure; return Sema::TDK_Success; } /// \brief Determine whether the parameter has qualifiers that are either /// inconsistent with or a superset of the argument's qualifiers. static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType, QualType ArgType) { Qualifiers ParamQs = ParamType.getQualifiers(); Qualifiers ArgQs = ArgType.getQualifiers(); if (ParamQs == ArgQs) return false; // Mismatched (but not missing) Objective-C GC attributes. if (ParamQs.getObjCGCAttr() != ArgQs.getObjCGCAttr() && ParamQs.hasObjCGCAttr()) return true; // Mismatched (but not missing) address spaces. if (ParamQs.getAddressSpace() != ArgQs.getAddressSpace() && ParamQs.hasAddressSpace()) return true; // Mismatched (but not missing) Objective-C lifetime qualifiers. if (ParamQs.getObjCLifetime() != ArgQs.getObjCLifetime() && ParamQs.hasObjCLifetime()) return true; // CVR qualifier superset. return (ParamQs.getCVRQualifiers() != ArgQs.getCVRQualifiers()) && ((ParamQs.getCVRQualifiers() | ArgQs.getCVRQualifiers()) == ParamQs.getCVRQualifiers()); } /// \brief Compare types for equality with respect to possibly compatible /// function types (noreturn adjustment, implicit calling conventions). If any /// of parameter and argument is not a function, just perform type comparison. /// /// \param Param the template parameter type. /// /// \param Arg the argument type. bool Sema::isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg) { const FunctionType *ParamFunction = Param->getAs<FunctionType>(), *ArgFunction = Arg->getAs<FunctionType>(); // Just compare if not functions. 
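  // For example, a parameter type 'void () __attribute__((noreturn))' is
  // treated as compatible with an argument type 'void ()' via the noreturn
  // adjustment below.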
if (!ParamFunction || !ArgFunction) return Param == Arg; // Noreturn adjustment. QualType AdjustedParam; if (IsNoReturnConversion(Param, Arg, AdjustedParam)) return Arg == Context.getCanonicalType(AdjustedParam); // FIXME: Compatible calling conventions. return Param == Arg; } /// \brief Deduce the template arguments by comparing the parameter type and /// the argument type (C++ [temp.deduct.type]). /// /// \param S the semantic analysis object within which we are deducing /// /// \param TemplateParams the template parameters that we are deducing /// /// \param ParamIn the parameter type /// /// \param ArgIn the argument type /// /// \param Info information about the template argument deduction itself /// /// \param Deduced the deduced template arguments /// /// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe /// how template argument deduction is performed. /// /// \param PartialOrdering Whether we're performing template argument deduction /// in the context of partial ordering (C++0x [temp.deduct.partial]). /// /// \returns the result of template argument deduction so far. Note that a /// "success" result means that template argument deduction has not yet failed, /// but it may still fail, later, for other reasons. static Sema::TemplateDeductionResult DeduceTemplateArgumentsByTypeMatch(Sema &S, TemplateParameterList *TemplateParams, QualType ParamIn, QualType ArgIn, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF, bool PartialOrdering) { // We only want to look at the canonical types, since typedefs and // sugar are not part of template argument deduction. QualType Param = S.Context.getCanonicalType(ParamIn); QualType Arg = S.Context.getCanonicalType(ArgIn); // If the argument type is a pack expansion, look at its pattern. // This isn't explicitly called out if (const PackExpansionType *ArgExpansion = dyn_cast<PackExpansionType>(Arg)) Arg = ArgExpansion->getPattern(); if (PartialOrdering) { // C++11 [temp.deduct.partial]p5: // Before the partial ordering is done, certain transformations are // performed on the types used for partial ordering: // - If P is a reference type, P is replaced by the type referred to. const ReferenceType *ParamRef = Param->getAs<ReferenceType>(); if (ParamRef) Param = ParamRef->getPointeeType(); // - If A is a reference type, A is replaced by the type referred to. const ReferenceType *ArgRef = Arg->getAs<ReferenceType>(); if (ArgRef) Arg = ArgRef->getPointeeType(); if (ParamRef && ArgRef && S.Context.hasSameUnqualifiedType(Param, Arg)) { // C++11 [temp.deduct.partial]p9: // If, for a given type, deduction succeeds in both directions (i.e., // the types are identical after the transformations above) and both // P and A were reference types [...]: // - if [one type] was an lvalue reference and [the other type] was // not, [the other type] is not considered to be at least as // specialized as [the first type] // - if [one type] is more cv-qualified than [the other type], // [the other type] is not considered to be at least as specialized // as [the first type] // Objective-C ARC adds: // - [one type] has non-trivial lifetime, [the other type] has // __unsafe_unretained lifetime, and the types are otherwise // identical // // A is "considered to be at least as specialized" as P iff deduction // succeeds, so we model this as a deduction failure. Note that // [the first type] is P and [the other type] is A here; the standard // gets this backwards. 
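      // For example, with P = const T& and A = U&, P carries a strict
      // superset of A's qualifiers after the reference is stripped, so A is
      // not at least as specialized as P and we report a mismatch.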
Qualifiers ParamQuals = Param.getQualifiers(); Qualifiers ArgQuals = Arg.getQualifiers(); if ((ParamRef->isLValueReferenceType() && !ArgRef->isLValueReferenceType()) || ParamQuals.isStrictSupersetOf(ArgQuals) || (ParamQuals.hasNonTrivialObjCLifetime() && ArgQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone && ParamQuals.withoutObjCLifetime() == ArgQuals.withoutObjCLifetime())) { Info.FirstArg = TemplateArgument(ParamIn); Info.SecondArg = TemplateArgument(ArgIn); return Sema::TDK_NonDeducedMismatch; } } // C++11 [temp.deduct.partial]p7: // Remove any top-level cv-qualifiers: // - If P is a cv-qualified type, P is replaced by the cv-unqualified // version of P. Param = Param.getUnqualifiedType(); // - If A is a cv-qualified type, A is replaced by the cv-unqualified // version of A. Arg = Arg.getUnqualifiedType(); } else { // C++0x [temp.deduct.call]p4 bullet 1: // - If the original P is a reference type, the deduced A (i.e., the type // referred to by the reference) can be more cv-qualified than the // transformed A. if (TDF & TDF_ParamWithReferenceType) { Qualifiers Quals; QualType UnqualParam = S.Context.getUnqualifiedArrayType(Param, Quals); Quals.setCVRQualifiers(Quals.getCVRQualifiers() & Arg.getCVRQualifiers()); Param = S.Context.getQualifiedType(UnqualParam, Quals); } if ((TDF & TDF_TopLevelParameterTypeList) && !Param->isFunctionType()) { // C++0x [temp.deduct.type]p10: // If P and A are function types that originated from deduction when // taking the address of a function template (14.8.2.2) or when deducing // template arguments from a function declaration (14.8.2.6) and Pi and // Ai are parameters of the top-level parameter-type-list of P and A, // respectively, Pi is adjusted if it is an rvalue reference to a // cv-unqualified template parameter and Ai is an lvalue reference, in // which case the type of Pi is changed to be the template parameter // type (i.e., T&& is changed to simply T). [ Note: As a result, when // Pi is T&& and Ai is X&, the adjusted Pi will be T, causing T to be // deduced as X&. - end note ] TDF &= ~TDF_TopLevelParameterTypeList; if (const RValueReferenceType *ParamRef = Param->getAs<RValueReferenceType>()) { if (isa<TemplateTypeParmType>(ParamRef->getPointeeType()) && !ParamRef->getPointeeType().getQualifiers()) if (Arg->isLValueReferenceType()) Param = ParamRef->getPointeeType(); } } } // C++ [temp.deduct.type]p9: // A template type argument T, a template template argument TT or a // template non-type argument i can be deduced if P and A have one of // the following forms: // // T // cv-list T if (const TemplateTypeParmType *TemplateTypeParm = Param->getAs<TemplateTypeParmType>()) { // Just skip any attempts to deduce from a placeholder type. if (Arg->isPlaceholderType()) return Sema::TDK_Success; unsigned Index = TemplateTypeParm->getIndex(); bool RecanonicalizeArg = false; // If the argument type is an array type, move the qualifiers up to the // top level, so they can be matched with the qualifiers on the parameter. if (isa<ArrayType>(Arg)) { Qualifiers Quals; Arg = S.Context.getUnqualifiedArrayType(Arg, Quals); if (Quals) { Arg = S.Context.getQualifiedType(Arg, Quals); RecanonicalizeArg = true; } } // The argument type can not be less qualified than the parameter // type. 
if (!(TDF & TDF_IgnoreQualifiers) && hasInconsistentOrSupersetQualifiersOf(Param, Arg)) { Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index)); Info.FirstArg = TemplateArgument(Param); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_Underqualified; } assert(TemplateTypeParm->getDepth() == 0 && "Can't deduce with depth > 0"); assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function"); QualType DeducedType = Arg; // Remove any qualifiers on the parameter from the deduced type. // We checked the qualifiers for consistency above. Qualifiers DeducedQs = DeducedType.getQualifiers(); Qualifiers ParamQs = Param.getQualifiers(); DeducedQs.removeCVRQualifiers(ParamQs.getCVRQualifiers()); if (ParamQs.hasObjCGCAttr()) DeducedQs.removeObjCGCAttr(); if (ParamQs.hasAddressSpace()) DeducedQs.removeAddressSpace(); if (ParamQs.hasObjCLifetime()) DeducedQs.removeObjCLifetime(); // Objective-C ARC: // If template deduction would produce a lifetime qualifier on a type // that is not a lifetime type, template argument deduction fails. if (ParamQs.hasObjCLifetime() && !DeducedType->isObjCLifetimeType() && !DeducedType->isDependentType()) { Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index)); Info.FirstArg = TemplateArgument(Param); Info.SecondArg = TemplateArgument(Arg); return Sema::TDK_Underqualified; } // Objective-C ARC: // If template deduction would produce an argument type with lifetime type // but no lifetime qualifier, the __strong lifetime qualifier is inferred. if (S.getLangOpts().ObjCAutoRefCount && DeducedType->isObjCLifetimeType() && !DeducedQs.hasObjCLifetime()) DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong); DeducedType = S.Context.getQualifiedType(DeducedType.getUnqualifiedType(), DeducedQs); if (RecanonicalizeArg) DeducedType = S.Context.getCanonicalType(DeducedType); DeducedTemplateArgument NewDeduced(DeducedType); DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context, Deduced[Index], NewDeduced); if (Result.isNull()) { Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index)); Info.FirstArg = Deduced[Index]; Info.SecondArg = NewDeduced; return Sema::TDK_Inconsistent; } Deduced[Index] = Result; return Sema::TDK_Success; } // Set up the template argument deduction information for a failure. Info.FirstArg = TemplateArgument(ParamIn); Info.SecondArg = TemplateArgument(ArgIn); // If the parameter is an already-substituted template parameter // pack, do nothing: we don't know which of its arguments to look // at, so we have to wait until all of the parameter packs in this // expansion have arguments. if (isa<SubstTemplateTypeParmPackType>(Param)) return Sema::TDK_Success; // Check the cv-qualifiers on the parameter and argument types. CanQualType CanParam = S.Context.getCanonicalType(Param); CanQualType CanArg = S.Context.getCanonicalType(Arg); if (!(TDF & TDF_IgnoreQualifiers)) { if (TDF & TDF_ParamWithReferenceType) { if (hasInconsistentOrSupersetQualifiersOf(Param, Arg)) return Sema::TDK_NonDeducedMismatch; } else if (!IsPossiblyOpaquelyQualifiedType(Param)) { if (Param.getCVRQualifiers() != Arg.getCVRQualifiers()) return Sema::TDK_NonDeducedMismatch; } // If the parameter type is not dependent, there is nothing to deduce. if (!Param->isDependentType()) { if (!(TDF & TDF_SkipNonDependent)) { bool NonDeduced = (TDF & TDF_InOverloadResolution)? 
!S.isSameOrCompatibleFunctionType(CanParam, CanArg) : Param != Arg; if (NonDeduced) { return Sema::TDK_NonDeducedMismatch; } } return Sema::TDK_Success; } } else if (!Param->isDependentType()) { CanQualType ParamUnqualType = CanParam.getUnqualifiedType(), ArgUnqualType = CanArg.getUnqualifiedType(); bool Success = (TDF & TDF_InOverloadResolution)? S.isSameOrCompatibleFunctionType(ParamUnqualType, ArgUnqualType) : ParamUnqualType == ArgUnqualType; if (Success) return Sema::TDK_Success; } switch (Param->getTypeClass()) { // Non-canonical types cannot appear here. #define NON_CANONICAL_TYPE(Class, Base) \ case Type::Class: llvm_unreachable("deducing non-canonical type: " #Class); #define TYPE(Class, Base) #include "clang/AST/TypeNodes.def" case Type::TemplateTypeParm: case Type::SubstTemplateTypeParmPack: llvm_unreachable("Type nodes handled above"); // These types cannot be dependent, so simply check whether the types are // the same. case Type::Builtin: case Type::VariableArray: case Type::Vector: case Type::FunctionNoProto: case Type::Record: case Type::Enum: case Type::ObjCObject: case Type::ObjCInterface: case Type::ObjCObjectPointer: { if (TDF & TDF_SkipNonDependent) return Sema::TDK_Success; if (TDF & TDF_IgnoreQualifiers) { Param = Param.getUnqualifiedType(); Arg = Arg.getUnqualifiedType(); } return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch; } // _Complex T [placeholder extension] case Type::Complex: if (const ComplexType *ComplexArg = Arg->getAs<ComplexType>()) return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast<ComplexType>(Param)->getElementType(), ComplexArg->getElementType(), Info, Deduced, TDF); return Sema::TDK_NonDeducedMismatch; // _Atomic T [extension] case Type::Atomic: if (const AtomicType *AtomicArg = Arg->getAs<AtomicType>()) return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast<AtomicType>(Param)->getValueType(), AtomicArg->getValueType(), Info, Deduced, TDF); return Sema::TDK_NonDeducedMismatch; // T * case Type::Pointer: { QualType PointeeType; if (const PointerType *PointerArg = Arg->getAs<PointerType>()) { PointeeType = PointerArg->getPointeeType(); } else if (const ObjCObjectPointerType *PointerArg = Arg->getAs<ObjCObjectPointerType>()) { PointeeType = PointerArg->getPointeeType(); } else { return Sema::TDK_NonDeducedMismatch; } unsigned SubTDF = TDF & (TDF_IgnoreQualifiers | TDF_DerivedClass); return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast<PointerType>(Param)->getPointeeType(), PointeeType, Info, Deduced, SubTDF); } // T & case Type::LValueReference: { const LValueReferenceType *ReferenceArg = Arg->getAs<LValueReferenceType>(); if (!ReferenceArg) return Sema::TDK_NonDeducedMismatch; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast<LValueReferenceType>(Param)->getPointeeType(), ReferenceArg->getPointeeType(), Info, Deduced, 0); } // T && [C++0x] case Type::RValueReference: { const RValueReferenceType *ReferenceArg = Arg->getAs<RValueReferenceType>(); if (!ReferenceArg) return Sema::TDK_NonDeducedMismatch; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, cast<RValueReferenceType>(Param)->getPointeeType(), ReferenceArg->getPointeeType(), Info, Deduced, 0); } // T [] (implied, but not stated explicitly) case Type::IncompleteArray: { const IncompleteArrayType *IncompleteArrayArg = S.Context.getAsIncompleteArrayType(Arg); if (!IncompleteArrayArg) return Sema::TDK_NonDeducedMismatch; unsigned SubTDF = TDF & TDF_IgnoreQualifiers; return DeduceTemplateArgumentsByTypeMatch(S, 
                                    TemplateParams,
                    S.Context.getAsIncompleteArrayType(Param)->getElementType(),
                                              IncompleteArrayArg->getElementType(),
                                              Info, Deduced, SubTDF);
  }

  //     T [integer-constant]
  case Type::ConstantArray: {
    const ConstantArrayType *ConstantArrayArg =
      S.Context.getAsConstantArrayType(Arg);
    if (!ConstantArrayArg)
      return Sema::TDK_NonDeducedMismatch;

    const ConstantArrayType *ConstantArrayParm =
      S.Context.getAsConstantArrayType(Param);
    if (ConstantArrayArg->getSize() != ConstantArrayParm->getSize())
      return Sema::TDK_NonDeducedMismatch;

    unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
    return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
                                         ConstantArrayParm->getElementType(),
                                         ConstantArrayArg->getElementType(),
                                         Info, Deduced, SubTDF);
  }

  //     type [i]
  case Type::DependentSizedArray: {
    const ArrayType *ArrayArg = S.Context.getAsArrayType(Arg);
    if (!ArrayArg)
      return Sema::TDK_NonDeducedMismatch;

    unsigned SubTDF = TDF & TDF_IgnoreQualifiers;

    // Check the element type of the arrays
    const DependentSizedArrayType *DependentArrayParm
      = S.Context.getAsDependentSizedArrayType(Param);
    if (Sema::TemplateDeductionResult Result
          = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
                                        DependentArrayParm->getElementType(),
                                        ArrayArg->getElementType(),
                                        Info, Deduced, SubTDF))
      return Result;

    // Determine whether the array bound is something we can deduce.
    NonTypeTemplateParmDecl *NTTP
      = getDeducedParameterFromExpr(DependentArrayParm->getSizeExpr());
    if (!NTTP)
      return Sema::TDK_Success;

    // We can perform template argument deduction for the given non-type
    // template parameter.
    assert(NTTP->getDepth() == 0 &&
           "Cannot deduce non-type template argument at depth > 0");
    if (const ConstantArrayType *ConstantArrayArg
          = dyn_cast<ConstantArrayType>(ArrayArg)) {
      llvm::APSInt Size(ConstantArrayArg->getSize());
      return DeduceNonTypeTemplateArgument(S, NTTP, Size,
                                           S.Context.getSizeType(),
                                           /*ArrayBound=*/true,
                                           Info, Deduced);
    }
    if (const DependentSizedArrayType *DependentArrayArg
          = dyn_cast<DependentSizedArrayType>(ArrayArg))
      if (DependentArrayArg->getSizeExpr())
        return DeduceNonTypeTemplateArgument(S, NTTP,
                                             DependentArrayArg->getSizeExpr(),
                                             Info, Deduced);

    // Incomplete type does not match a dependently-sized array type
    return Sema::TDK_NonDeducedMismatch;
  }

  //     type(*)(T)
  //     T(*)()
  //     T(*)(T)
  case Type::FunctionProto: {
    unsigned SubTDF = TDF & TDF_TopLevelParameterTypeList;
    const FunctionProtoType *FunctionProtoArg =
      dyn_cast<FunctionProtoType>(Arg);
    if (!FunctionProtoArg)
      return Sema::TDK_NonDeducedMismatch;

    const FunctionProtoType *FunctionProtoParam =
      cast<FunctionProtoType>(Param);

    if (FunctionProtoParam->getTypeQuals()
          != FunctionProtoArg->getTypeQuals() ||
        FunctionProtoParam->getRefQualifier()
          != FunctionProtoArg->getRefQualifier() ||
        FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic())
      return Sema::TDK_NonDeducedMismatch;

    // Check return types.
    if (Sema::TemplateDeductionResult Result =
            DeduceTemplateArgumentsByTypeMatch(
                S, TemplateParams, FunctionProtoParam->getReturnType(),
                FunctionProtoArg->getReturnType(), Info, Deduced, 0))
      return Result;

    return DeduceTemplateArguments(
        S, TemplateParams, FunctionProtoParam->param_type_begin(),
        FunctionProtoParam->getNumParams(),
        FunctionProtoArg->param_type_begin(),
        FunctionProtoArg->getNumParams(), Info, Deduced, SubTDF);
  }

  case Type::InjectedClassName: {
    // Treat a template's injected-class-name as if the template
    // specialization type had been used.
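    // For example, inside 'template<class T> struct S', the bare name 'S'
    // denotes S<T>, so deduction proceeds as if S<T> had been written.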
Param = cast<InjectedClassNameType>(Param) ->getInjectedSpecializationType(); assert(isa<TemplateSpecializationType>(Param) && "injected class name is not a template specialization type"); LLVM_FALLTHROUGH; // HLSL Change } // template-name<T> (where template-name refers to a class template) // template-name<i> // TT<T> // TT<i> // TT<> case Type::TemplateSpecialization: { const TemplateSpecializationType *SpecParam = cast<TemplateSpecializationType>(Param); // Try to deduce template arguments from the template-id. Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, SpecParam, Arg, Info, Deduced); if (Result && (TDF & TDF_DerivedClass)) { // C++ [temp.deduct.call]p3b3: // If P is a class, and P has the form template-id, then A can be a // derived class of the deduced A. Likewise, if P is a pointer to a // class of the form template-id, A can be a pointer to a derived // class pointed to by the deduced A. // // More importantly: // These alternatives are considered only if type deduction would // otherwise fail. if (const RecordType *RecordT = Arg->getAs<RecordType>()) { // We cannot inspect base classes as part of deduction when the type // is incomplete, so either instantiate any templates necessary to // complete the type, or skip over it if it cannot be completed. if (S.RequireCompleteType(Info.getLocation(), Arg, 0)) return Result; // Use data recursion to crawl through the list of base classes. // Visited contains the set of nodes we have already visited, while // ToVisit is our stack of records that we still need to visit. llvm::SmallPtrSet<const RecordType *, 8> Visited; SmallVector<const RecordType *, 8> ToVisit; ToVisit.push_back(RecordT); bool Successful = false; SmallVector<DeducedTemplateArgument, 8> DeducedOrig(Deduced.begin(), Deduced.end()); while (!ToVisit.empty()) { // Retrieve the next class in the inheritance hierarchy. const RecordType *NextT = ToVisit.pop_back_val(); // If we have already seen this type, skip it. if (!Visited.insert(NextT).second) continue; // If this is a base class, try to perform template argument // deduction from it. if (NextT != RecordT) { TemplateDeductionInfo BaseInfo(Info.getLocation()); Sema::TemplateDeductionResult BaseResult = DeduceTemplateArguments(S, TemplateParams, SpecParam, QualType(NextT, 0), BaseInfo, Deduced); // If template argument deduction for this base was successful, // note that we had some success. Otherwise, ignore any deductions // from this base class. 
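          // For example, with P = Base<T>* and an argument of type
          // Derived<int>*, where Derived<int> derives from Base<int>,
          // visiting the base classes deduces T = int.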
if (BaseResult == Sema::TDK_Success) { Successful = true; DeducedOrig.clear(); DeducedOrig.append(Deduced.begin(), Deduced.end()); Info.Param = BaseInfo.Param; Info.FirstArg = BaseInfo.FirstArg; Info.SecondArg = BaseInfo.SecondArg; } else Deduced = DeducedOrig; } // Visit base classes CXXRecordDecl *Next = cast<CXXRecordDecl>(NextT->getDecl()); for (const auto &Base : Next->bases()) { assert(Base.getType()->isRecordType() && "Base class that isn't a record?"); ToVisit.push_back(Base.getType()->getAs<RecordType>()); } } if (Successful) return Sema::TDK_Success; } } return Result; } // T type::* // T T::* // T (type::*)() // type (T::*)() // type (type::*)(T) // type (T::*)(T) // T (type::*)(T) // T (T::*)() // T (T::*)(T) case Type::MemberPointer: { const MemberPointerType *MemPtrParam = cast<MemberPointerType>(Param); const MemberPointerType *MemPtrArg = dyn_cast<MemberPointerType>(Arg); if (!MemPtrArg) return Sema::TDK_NonDeducedMismatch; if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, MemPtrParam->getPointeeType(), MemPtrArg->getPointeeType(), Info, Deduced, TDF & TDF_IgnoreQualifiers)) return Result; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, QualType(MemPtrParam->getClass(), 0), QualType(MemPtrArg->getClass(), 0), Info, Deduced, TDF & TDF_IgnoreQualifiers); } // (clang extension) // // type(^)(T) // T(^)() // T(^)(T) case Type::BlockPointer: { const BlockPointerType *BlockPtrParam = cast<BlockPointerType>(Param); const BlockPointerType *BlockPtrArg = dyn_cast<BlockPointerType>(Arg); if (!BlockPtrArg) return Sema::TDK_NonDeducedMismatch; return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, BlockPtrParam->getPointeeType(), BlockPtrArg->getPointeeType(), Info, Deduced, 0); } // (clang extension) // // T __attribute__(((ext_vector_type(<integral constant>)))) case Type::ExtVector: { const ExtVectorType *VectorParam = cast<ExtVectorType>(Param); if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) { // Make sure that the vectors have the same number of elements. if (VectorParam->getNumElements() != VectorArg->getNumElements()) return Sema::TDK_NonDeducedMismatch; // Perform deduction on the element types. return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF); } if (const DependentSizedExtVectorType *VectorArg = dyn_cast<DependentSizedExtVectorType>(Arg)) { // We can't check the number of elements, since the argument has a // dependent number of elements. This can only occur during partial // ordering. // Perform deduction on the element types. return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF); } return Sema::TDK_NonDeducedMismatch; } // (clang extension) // // T __attribute__(((ext_vector_type(N)))) case Type::DependentSizedExtVector: { const DependentSizedExtVectorType *VectorParam = cast<DependentSizedExtVectorType>(Param); if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) { // Perform deduction on the element types. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF)) return Result; // Perform deduction on the vector size, if we can. 
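      // For example, matching 'T __attribute__((ext_vector_type(N)))'
      // against 'float __attribute__((ext_vector_type(4)))' deduces
      // T = float above and N = 4 here.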
NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(VectorParam->getSizeExpr()); if (!NTTP) return Sema::TDK_Success; llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false); ArgSize = VectorArg->getNumElements(); return DeduceNonTypeTemplateArgument(S, NTTP, ArgSize, S.Context.IntTy, false, Info, Deduced); } if (const DependentSizedExtVectorType *VectorArg = dyn_cast<DependentSizedExtVectorType>(Arg)) { // Perform deduction on the element types. if (Sema::TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, VectorParam->getElementType(), VectorArg->getElementType(), Info, Deduced, TDF)) return Result; // Perform deduction on the vector size, if we can. NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(VectorParam->getSizeExpr()); if (!NTTP) return Sema::TDK_Success; return DeduceNonTypeTemplateArgument(S, NTTP, VectorArg->getSizeExpr(), Info, Deduced); } return Sema::TDK_NonDeducedMismatch; } case Type::TypeOfExpr: case Type::TypeOf: case Type::DependentName: case Type::UnresolvedUsing: case Type::Decltype: case Type::UnaryTransform: case Type::Auto: case Type::DependentTemplateSpecialization: case Type::PackExpansion: // No template argument deduction for these types return Sema::TDK_Success; } llvm_unreachable("Invalid Type Class!"); } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgument &Param, TemplateArgument Arg, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { // If the template argument is a pack expansion, perform template argument // deduction against the pattern of that expansion. This only occurs during // partial ordering. if (Arg.isPackExpansion()) Arg = Arg.getPackExpansionPattern(); switch (Param.getKind()) { case TemplateArgument::Null: llvm_unreachable("Null template argument in parameter list"); case TemplateArgument::Type: if (Arg.getKind() == TemplateArgument::Type) return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Param.getAsType(), Arg.getAsType(), Info, Deduced, 0); Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::Template: if (Arg.getKind() == TemplateArgument::Template) return DeduceTemplateArguments(S, TemplateParams, Param.getAsTemplate(), Arg.getAsTemplate(), Info, Deduced); Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::TemplateExpansion: llvm_unreachable("caller should handle pack expansions"); case TemplateArgument::Declaration: if (Arg.getKind() == TemplateArgument::Declaration && isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl())) return Sema::TDK_Success; Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::NullPtr: if (Arg.getKind() == TemplateArgument::NullPtr && S.Context.hasSameType(Param.getNullPtrType(), Arg.getNullPtrType())) return Sema::TDK_Success; Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; case TemplateArgument::Integral: if (Arg.getKind() == TemplateArgument::Integral) { if (hasSameExtendedValue(Param.getAsIntegral(), Arg.getAsIntegral())) return Sema::TDK_Success; Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; } if (Arg.getKind() == TemplateArgument::Expression) { Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; } Info.FirstArg = Param; Info.SecondArg = Arg; return 
Sema::TDK_NonDeducedMismatch; case TemplateArgument::Expression: { if (NonTypeTemplateParmDecl *NTTP = getDeducedParameterFromExpr(Param.getAsExpr())) { if (Arg.getKind() == TemplateArgument::Integral) return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsIntegral(), Arg.getIntegralType(), /*ArrayBound=*/false, Info, Deduced); if (Arg.getKind() == TemplateArgument::Expression) return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsExpr(), Info, Deduced); if (Arg.getKind() == TemplateArgument::Declaration) return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsDecl(), Info, Deduced); Info.FirstArg = Param; Info.SecondArg = Arg; return Sema::TDK_NonDeducedMismatch; } // Can't deduce anything, but that's okay. return Sema::TDK_Success; } case TemplateArgument::Pack: llvm_unreachable("Argument packs should be expanded by the caller!"); } llvm_unreachable("Invalid TemplateArgument Kind!"); } /// \brief Determine whether there is a template argument to be used for /// deduction. /// /// This routine "expands" argument packs in-place, overriding its input /// parameters so that \c Args[ArgIdx] will be the available template argument. /// /// \returns true if there is another template argument (which will be at /// \c Args[ArgIdx]), false otherwise. static bool hasTemplateArgumentForDeduction(const TemplateArgument *&Args, unsigned &ArgIdx, unsigned &NumArgs) { if (ArgIdx == NumArgs) return false; const TemplateArgument &Arg = Args[ArgIdx]; if (Arg.getKind() != TemplateArgument::Pack) return true; assert(ArgIdx == NumArgs - 1 && "Pack not at the end of argument list?"); Args = Arg.pack_begin(); NumArgs = Arg.pack_size(); ArgIdx = 0; return ArgIdx < NumArgs; } /// \brief Determine whether the given set of template arguments has a pack /// expansion that is not the last template argument. static bool hasPackExpansionBeforeEnd(const TemplateArgument *Args, unsigned NumArgs) { unsigned ArgIdx = 0; while (ArgIdx < NumArgs) { const TemplateArgument &Arg = Args[ArgIdx]; // Unwrap argument packs. if (Args[ArgIdx].getKind() == TemplateArgument::Pack) { Args = Arg.pack_begin(); NumArgs = Arg.pack_size(); ArgIdx = 0; continue; } ++ArgIdx; if (ArgIdx == NumArgs) return false; if (Arg.isPackExpansion()) return true; } return false; } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgument *Params, unsigned NumParams, const TemplateArgument *Args, unsigned NumArgs, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { // C++0x [temp.deduct.type]p9: // If the template argument list of P contains a pack expansion that is not // the last template argument, the entire template argument list is a // non-deduced context. if (hasPackExpansionBeforeEnd(Params, NumParams)) return Sema::TDK_Success; // C++0x [temp.deduct.type]p9: // If P has a form that contains <T> or <i>, then each argument Pi of the // respective template argument list P is compared with the corresponding // argument Ai of the corresponding template argument list of A. unsigned ArgIdx = 0, ParamIdx = 0; for (; hasTemplateArgumentForDeduction(Params, ParamIdx, NumParams); ++ParamIdx) { if (!Params[ParamIdx].isPackExpansion()) { // The simple case: deduce template arguments by matching Pi and Ai. // Check whether we have enough arguments. 
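      // For example, matching the argument list <T, 5> against <int, 5>
      // compares T with int and 5 with 5 pairwise.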
if (!hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs)) return Sema::TDK_Success; if (Args[ArgIdx].isPackExpansion()) { // FIXME: We follow the logic of C++0x [temp.deduct.type]p22 here, // but applied to pack expansions that are template arguments. return Sema::TDK_MiscellaneousDeductionFailure; } // Perform deduction for this Pi/Ai pair. if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Params[ParamIdx], Args[ArgIdx], Info, Deduced)) return Result; // Move to the next argument. ++ArgIdx; continue; } // The parameter is a pack expansion. // C++0x [temp.deduct.type]p9: // If Pi is a pack expansion, then the pattern of Pi is compared with // each remaining argument in the template argument list of A. Each // comparison deduces template arguments for subsequent positions in the // template parameter packs expanded by Pi. TemplateArgument Pattern = Params[ParamIdx].getPackExpansionPattern(); // FIXME: If there are no remaining arguments, we can bail out early // and set any deduced parameter packs to an empty argument pack. // The latter part of this is a (minor) correctness issue. // Prepare to deduce the packs within the pattern. PackDeductionScope PackScope(S, TemplateParams, Deduced, Info, Pattern); // Keep track of the deduced template arguments for each parameter pack // expanded by this pack expansion (the outer index) and for each // template argument (the inner SmallVectors). bool HasAnyArguments = false; for (; hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs); ++ArgIdx) { HasAnyArguments = true; // Deduce template arguments from the pattern. if (Sema::TemplateDeductionResult Result = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx], Info, Deduced)) return Result; PackScope.nextPackElement(); } // Build argument packs for each of the parameter packs expanded by this // pack expansion. if (auto Result = PackScope.finish(HasAnyArguments)) return Result; } return Sema::TDK_Success; } static Sema::TemplateDeductionResult DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams, const TemplateArgumentList &ParamList, const TemplateArgumentList &ArgList, TemplateDeductionInfo &Info, SmallVectorImpl<DeducedTemplateArgument> &Deduced) { return DeduceTemplateArguments(S, TemplateParams, ParamList.data(), ParamList.size(), ArgList.data(), ArgList.size(), Info, Deduced); } /// \brief Determine whether two template arguments are the same. 
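///
/// Type arguments are compared by canonical type, so, for example, 'int'
/// and a typedef of 'int' count as the same template argument here.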
static bool isSameTemplateArg(ASTContext &Context,
                              const TemplateArgument &X,
                              const TemplateArgument &Y) {
  if (X.getKind() != Y.getKind())
    return false;

  switch (X.getKind()) {
    case TemplateArgument::Null:
      llvm_unreachable("Comparing NULL template argument");

    case TemplateArgument::Type:
      return Context.getCanonicalType(X.getAsType()) ==
             Context.getCanonicalType(Y.getAsType());

    case TemplateArgument::Declaration:
      return isSameDeclaration(X.getAsDecl(), Y.getAsDecl());

    case TemplateArgument::NullPtr:
      return Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType());

    case TemplateArgument::Template:
    case TemplateArgument::TemplateExpansion:
      return Context.getCanonicalTemplateName(
                    X.getAsTemplateOrTemplatePattern()).getAsVoidPointer() ==
             Context.getCanonicalTemplateName(
                    Y.getAsTemplateOrTemplatePattern()).getAsVoidPointer();

    case TemplateArgument::Integral:
      return X.getAsIntegral() == Y.getAsIntegral();

    case TemplateArgument::Expression: {
      llvm::FoldingSetNodeID XID, YID;
      X.getAsExpr()->Profile(XID, Context, true);
      Y.getAsExpr()->Profile(YID, Context, true);
      return XID == YID;
    }

    case TemplateArgument::Pack:
      if (X.pack_size() != Y.pack_size())
        return false;

      for (TemplateArgument::pack_iterator XP = X.pack_begin(),
                                        XPEnd = X.pack_end(),
                                           YP = Y.pack_begin();
           XP != XPEnd; ++XP, ++YP)
        if (!isSameTemplateArg(Context, *XP, *YP))
          return false;

      return true;
  }

  llvm_unreachable("Invalid TemplateArgument Kind!");
}

/// \brief Allocate a TemplateArgumentLoc where all locations have
/// been initialized to the given location.
///
/// \param S The semantic analysis object.
///
/// \param Arg The template argument we are producing template argument
/// location information for.
///
/// \param NTTPType For a declaration template argument, the type of
/// the non-type template parameter that corresponds to this template
/// argument.
///
/// \param Loc The source location to use for the resulting template
/// argument.
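///
/// \returns A TemplateArgumentLoc whose source-location information is
/// entirely synthesized from \p Loc, since a deduced argument was never
/// written in the source.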
static TemplateArgumentLoc
getTrivialTemplateArgumentLoc(Sema &S,
                              const TemplateArgument &Arg,
                              QualType NTTPType,
                              SourceLocation Loc) {
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    llvm_unreachable("Can't get a NULL template argument here");

  case TemplateArgument::Type:
    return TemplateArgumentLoc(Arg,
                     S.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));

  case TemplateArgument::Declaration: {
    Expr *E
      = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
          .getAs<Expr>();
    return TemplateArgumentLoc(TemplateArgument(E), E);
  }

  case TemplateArgument::NullPtr: {
    Expr *E
      = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
          .getAs<Expr>();
    return TemplateArgumentLoc(TemplateArgument(NTTPType, /*isNullPtr*/true),
                               E);
  }

  case TemplateArgument::Integral: {
    Expr *E
      = S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).getAs<Expr>();
    return TemplateArgumentLoc(TemplateArgument(E), E);
  }

  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion: {
    NestedNameSpecifierLocBuilder Builder;
    TemplateName Template = Arg.getAsTemplate();
    if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
      Builder.MakeTrivial(S.Context, DTN->getQualifier(), Loc);
    else if (QualifiedTemplateName *QTN =
                 Template.getAsQualifiedTemplateName())
      Builder.MakeTrivial(S.Context, QTN->getQualifier(), Loc);

    if (Arg.getKind() == TemplateArgument::Template)
      return TemplateArgumentLoc(Arg,
                                 Builder.getWithLocInContext(S.Context),
                                 Loc);

    return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(S.Context),
                               Loc, Loc);
  }

  case TemplateArgument::Expression:
    return TemplateArgumentLoc(Arg, Arg.getAsExpr());

  case TemplateArgument::Pack:
    return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
  }

  llvm_unreachable("Invalid TemplateArgument Kind!");
}

/// \brief Convert the given deduced template argument and add it to the set
/// of fully-converted template arguments.
static bool
ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
                               DeducedTemplateArgument Arg,
                               NamedDecl *Template,
                               QualType NTTPType,
                               unsigned ArgumentPackIndex,
                               TemplateDeductionInfo &Info,
                               bool InFunctionTemplate,
                               SmallVectorImpl<TemplateArgument> &Output) {
  if (Arg.getKind() == TemplateArgument::Pack) {
    // This is a template argument pack, so check each of its arguments
    // against the template parameter.
    SmallVector<TemplateArgument, 2> PackedArgsBuilder;
    for (const auto &P : Arg.pack_elements()) {
      // When converting the deduced template argument, append it to the
      // general output list. We need to do this so that the template argument
      // checking logic has all of the prior template arguments available.
      DeducedTemplateArgument InnerArg(P);
      InnerArg.setDeducedFromArrayBound(Arg.wasDeducedFromArrayBound());
      if (ConvertDeducedTemplateArgument(S, Param, InnerArg, Template,
                                         NTTPType, PackedArgsBuilder.size(),
                                         Info, InFunctionTemplate, Output))
        return true;

      // Move the converted template argument into our argument pack.
      PackedArgsBuilder.push_back(Output.pop_back_val());
    }

    // Create the resulting argument pack.
    Output.push_back(TemplateArgument::CreatePackCopy(S.Context,
                                                      PackedArgsBuilder.data(),
                                                     PackedArgsBuilder.size()));
    return false;
  }

  // Convert the deduced template argument into a template
  // argument that we can check, almost as if the user had written
  // the template argument explicitly.
  TemplateArgumentLoc ArgLoc = getTrivialTemplateArgumentLoc(S, Arg, NTTPType,
                                                           Info.getLocation());

  // Check the template argument, converting it as necessary.
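  // For function templates, flag the argument as deduced (possibly only from
  // an array bound) so the deduction-specific checking rules apply; for class
  // template partial specializations, check it as if written explicitly.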
  return S.CheckTemplateArgument(Param, ArgLoc,
                                 Template,
                                 Template->getLocation(),
                                 Template->getSourceRange().getEnd(),
                                 ArgumentPackIndex,
                                 Output,
                                 InFunctionTemplate
                                  ? (Arg.wasDeducedFromArrayBound()
                                       ? Sema::CTAK_DeducedFromArrayBound
                                       : Sema::CTAK_Deduced)
                                  : Sema::CTAK_Specified);
}

/// Complete template argument deduction for a class template partial
/// specialization.
static Sema::TemplateDeductionResult
FinishTemplateArgumentDeduction(Sema &S,
                                ClassTemplatePartialSpecializationDecl *Partial,
                                const TemplateArgumentList &TemplateArgs,
                                SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                                TemplateDeductionInfo &Info) {
  // Unevaluated SFINAE context.
  EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
  Sema::SFINAETrap Trap(S);

  Sema::ContextRAII SavedContext(S, Partial);

  // C++ [temp.deduct.type]p2:
  //   [...] or if any template argument remains neither deduced nor
  //   explicitly specified, template argument deduction fails.
  SmallVector<TemplateArgument, 4> Builder;
  TemplateParameterList *PartialParams = Partial->getTemplateParameters();
  for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
    NamedDecl *Param = PartialParams->getParam(I);
    if (Deduced[I].isNull()) {
      Info.Param = makeTemplateParameter(Param);
      return Sema::TDK_Incomplete;
    }

    // We have deduced this argument, so it still needs to be
    // checked and converted.

    // First, for a non-type template parameter type that is
    // initialized by a declaration, we need the type of the
    // corresponding non-type template parameter.
    QualType NTTPType;
    if (NonTypeTemplateParmDecl *NTTP
          = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
      NTTPType = NTTP->getType();
      if (NTTPType->isDependentType()) {
        TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
                                          Builder.data(), Builder.size());
        NTTPType = S.SubstType(NTTPType,
                               MultiLevelTemplateArgumentList(TemplateArgs),
                               NTTP->getLocation(),
                               NTTP->getDeclName());
        if (NTTPType.isNull()) {
          Info.Param = makeTemplateParameter(Param);
          // FIXME: These template arguments are temporary. Free them!
          Info.reset(TemplateArgumentList::CreateCopy(S.Context,
                                                      Builder.data(),
                                                      Builder.size()));
          return Sema::TDK_SubstitutionFailure;
        }
      }
    }

    if (ConvertDeducedTemplateArgument(S, Param, Deduced[I],
                                       Partial, NTTPType, 0, Info, false,
                                       Builder)) {
      Info.Param = makeTemplateParameter(Param);
      // FIXME: These template arguments are temporary. Free them!
      Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
                                                  Builder.size()));
      return Sema::TDK_SubstitutionFailure;
    }
  }

  // Form the template argument list from the deduced template arguments.
  TemplateArgumentList *DeducedArgumentList
    = TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
                                       Builder.size());

  Info.reset(DeducedArgumentList);

  // Substitute the deduced template arguments into the template
  // arguments of the class template partial specialization, and
  // verify that the instantiated template arguments are both valid
  // and are equivalent to the template arguments originally provided
  // to the class template.
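  // For example, matching 'A<float*, int>' against the partial
  // specialization 'template<class T> struct A<T*, int>' deduces T=float;
  // substituting that back into '<T*, int>' must reproduce '<float*, int>'
  // exactly for the partial specialization to match.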
  LocalInstantiationScope InstScope(S);
  ClassTemplateDecl *ClassTemplate = Partial->getSpecializedTemplate();
  const ASTTemplateArgumentListInfo *PartialTemplArgInfo
    = Partial->getTemplateArgsAsWritten();
  const TemplateArgumentLoc *PartialTemplateArgs
    = PartialTemplArgInfo->getTemplateArgs();

  TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc,
                                    PartialTemplArgInfo->RAngleLoc);

  if (S.Subst(PartialTemplateArgs, PartialTemplArgInfo->NumTemplateArgs,
              InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
    unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
    if (ParamIdx >= Partial->getTemplateParameters()->size())
      ParamIdx = Partial->getTemplateParameters()->size() - 1;

    Decl *Param = const_cast<NamedDecl *>(
        Partial->getTemplateParameters()->getParam(ParamIdx));
    Info.Param = makeTemplateParameter(Param);
    Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
    return Sema::TDK_SubstitutionFailure;
  }

  SmallVector<TemplateArgument, 4> ConvertedInstArgs;
  if (S.CheckTemplateArgumentList(ClassTemplate, Partial->getLocation(),
                                  InstArgs, false, ConvertedInstArgs))
    return Sema::TDK_SubstitutionFailure;

  TemplateParameterList *TemplateParams
    = ClassTemplate->getTemplateParameters();
  for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
    TemplateArgument InstArg = ConvertedInstArgs.data()[I];
    if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
      Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
      Info.FirstArg = TemplateArgs[I];
      Info.SecondArg = InstArg;
      return Sema::TDK_NonDeducedMismatch;
    }
  }

  if (Trap.hasErrorOccurred())
    return Sema::TDK_SubstitutionFailure;

  return Sema::TDK_Success;
}

/// \brief Perform template argument deduction to determine whether
/// the given template arguments match the given class template
/// partial specialization per C++ [temp.class.spec.match].
Sema::TemplateDeductionResult
Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                              const TemplateArgumentList &TemplateArgs,
                              TemplateDeductionInfo &Info) {
  if (Partial->isInvalidDecl())
    return TDK_Invalid;

  // C++ [temp.class.spec.match]p2:
  //   A partial specialization matches a given actual template
  //   argument list if the template arguments of the partial
  //   specialization can be deduced from the actual template argument
  //   list (14.8.2).

  // Unevaluated SFINAE context.
  EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
  SFINAETrap Trap(*this);

  SmallVector<DeducedTemplateArgument, 4> Deduced;
  Deduced.resize(Partial->getTemplateParameters()->size());
  if (TemplateDeductionResult Result
        = ::DeduceTemplateArguments(*this,
                                    Partial->getTemplateParameters(),
                                    Partial->getTemplateArgs(),
                                    TemplateArgs, Info, Deduced))
    return Result;

  SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
  InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
                             Info);
  if (Inst.isInvalid())
    return TDK_InstantiationDepth;

  if (Trap.hasErrorOccurred())
    return Sema::TDK_SubstitutionFailure;

  return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
                                           Deduced, Info);
}

/// Complete template argument deduction for a variable template partial
/// specialization.
/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
///       May require unifying ClassTemplate(Partial)SpecializationDecl and
///       VarTemplate(Partial)SpecializationDecl with a new data
///       structure Template(Partial)SpecializationDecl, and
///       using Template(Partial)SpecializationDecl as input type.
static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
    Sema &S, VarTemplatePartialSpecializationDecl *Partial,
    const TemplateArgumentList &TemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    TemplateDeductionInfo &Info) {
  // Unevaluated SFINAE context.
  EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
  Sema::SFINAETrap Trap(S);

  // C++ [temp.deduct.type]p2:
  //   [...] or if any template argument remains neither deduced nor
  //   explicitly specified, template argument deduction fails.
  SmallVector<TemplateArgument, 4> Builder;
  TemplateParameterList *PartialParams = Partial->getTemplateParameters();
  for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
    NamedDecl *Param = PartialParams->getParam(I);
    if (Deduced[I].isNull()) {
      Info.Param = makeTemplateParameter(Param);
      return Sema::TDK_Incomplete;
    }

    // We have deduced this argument, so it still needs to be
    // checked and converted.

    // First, for a non-type template parameter type that is
    // initialized by a declaration, we need the type of the
    // corresponding non-type template parameter.
    QualType NTTPType;
    if (NonTypeTemplateParmDecl *NTTP =
            dyn_cast<NonTypeTemplateParmDecl>(Param)) {
      NTTPType = NTTP->getType();
      if (NTTPType->isDependentType()) {
        TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
                                          Builder.data(), Builder.size());
        NTTPType =
            S.SubstType(NTTPType, MultiLevelTemplateArgumentList(TemplateArgs),
                        NTTP->getLocation(), NTTP->getDeclName());
        if (NTTPType.isNull()) {
          Info.Param = makeTemplateParameter(Param);
          // FIXME: These template arguments are temporary. Free them!
          Info.reset(TemplateArgumentList::CreateCopy(S.Context,
                                                      Builder.data(),
                                                      Builder.size()));
          return Sema::TDK_SubstitutionFailure;
        }
      }
    }

    if (ConvertDeducedTemplateArgument(S, Param, Deduced[I], Partial, NTTPType,
                                       0, Info, false, Builder)) {
      Info.Param = makeTemplateParameter(Param);
      // FIXME: These template arguments are temporary. Free them!
      Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
                                                  Builder.size()));
      return Sema::TDK_SubstitutionFailure;
    }
  }

  // Form the template argument list from the deduced template arguments.
  TemplateArgumentList *DeducedArgumentList = TemplateArgumentList::CreateCopy(
      S.Context, Builder.data(), Builder.size());

  Info.reset(DeducedArgumentList);

  // Substitute the deduced template arguments into the template
  // arguments of the variable template partial specialization, and
  // verify that the instantiated template arguments are both valid
  // and are equivalent to the template arguments originally provided
  // to the variable template.
  LocalInstantiationScope InstScope(S);
  VarTemplateDecl *VarTemplate = Partial->getSpecializedTemplate();
  const ASTTemplateArgumentListInfo *PartialTemplArgInfo
    = Partial->getTemplateArgsAsWritten();
  const TemplateArgumentLoc *PartialTemplateArgs
    = PartialTemplArgInfo->getTemplateArgs();

  TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc,
                                    PartialTemplArgInfo->RAngleLoc);

  if (S.Subst(PartialTemplateArgs, PartialTemplArgInfo->NumTemplateArgs,
              InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
    unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
    if (ParamIdx >= Partial->getTemplateParameters()->size())
      ParamIdx = Partial->getTemplateParameters()->size() - 1;

    Decl *Param = const_cast<NamedDecl *>(
        Partial->getTemplateParameters()->getParam(ParamIdx));
    Info.Param = makeTemplateParameter(Param);
    Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
    return Sema::TDK_SubstitutionFailure;
  }

  SmallVector<TemplateArgument, 4> ConvertedInstArgs;
  if (S.CheckTemplateArgumentList(VarTemplate, Partial->getLocation(),
                                  InstArgs, false, ConvertedInstArgs))
    return Sema::TDK_SubstitutionFailure;

  TemplateParameterList *TemplateParams = VarTemplate->getTemplateParameters();
  for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
    TemplateArgument InstArg = ConvertedInstArgs.data()[I];
    if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
      Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
      Info.FirstArg = TemplateArgs[I];
      Info.SecondArg = InstArg;
      return Sema::TDK_NonDeducedMismatch;
    }
  }

  if (Trap.hasErrorOccurred())
    return Sema::TDK_SubstitutionFailure;

  return Sema::TDK_Success;
}

/// \brief Perform template argument deduction to determine whether
/// the given template arguments match the given variable template
/// partial specialization per C++ [temp.class.spec.match].
/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
///       May require unifying ClassTemplate(Partial)SpecializationDecl and
///       VarTemplate(Partial)SpecializationDecl with a new data
///       structure Template(Partial)SpecializationDecl, and
///       using Template(Partial)SpecializationDecl as input type.
Sema::TemplateDeductionResult
Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                              const TemplateArgumentList &TemplateArgs,
                              TemplateDeductionInfo &Info) {
  if (Partial->isInvalidDecl())
    return TDK_Invalid;

  // C++ [temp.class.spec.match]p2:
  //   A partial specialization matches a given actual template
  //   argument list if the template arguments of the partial
  //   specialization can be deduced from the actual template argument
  //   list (14.8.2).

  // Unevaluated SFINAE context.
  EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
  SFINAETrap Trap(*this);

  SmallVector<DeducedTemplateArgument, 4> Deduced;
  Deduced.resize(Partial->getTemplateParameters()->size());
  if (TemplateDeductionResult Result = ::DeduceTemplateArguments(
          *this, Partial->getTemplateParameters(), Partial->getTemplateArgs(),
          TemplateArgs, Info, Deduced))
    return Result;

  SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
  InstantiatingTemplate Inst(*this, Info.getLocation(), Partial, DeducedArgs,
                             Info);
  if (Inst.isInvalid())
    return TDK_InstantiationDepth;

  if (Trap.hasErrorOccurred())
    return Sema::TDK_SubstitutionFailure;

  return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
                                           Deduced, Info);
}

/// \brief Determine whether the given type T is a simple-template-id type.
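/// (That is, a specialization written directly against a named template,
/// e.g. \c X<int>, rather than a dependent template name.)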
static bool isSimpleTemplateIdType(QualType T) {
  if (const TemplateSpecializationType *Spec
        = T->getAs<TemplateSpecializationType>())
    return Spec->getTemplateName().getAsTemplateDecl() != nullptr;

  return false;
}

/// \brief Substitute the explicitly-provided template arguments into the
/// given function template according to C++ [temp.arg.explicit].
///
/// \param FunctionTemplate the function template into which the explicit
/// template arguments will be substituted.
///
/// \param ExplicitTemplateArgs the explicitly-specified template
/// arguments.
///
/// \param Deduced the deduced template arguments, which will be populated
/// with the converted and checked explicit template arguments.
///
/// \param ParamTypes will be populated with the instantiated function
/// parameters.
///
/// \param FunctionType if non-NULL, the result type of the function template
/// will also be instantiated and the pointed-to value will be updated with
/// the instantiated function type.
///
/// \param Info if substitution fails for any reason, this object will be
/// populated with more information about the failure.
///
/// \returns TDK_Success if substitution was successful, or some failure
/// condition.
Sema::TemplateDeductionResult Sema::SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    TemplateDeductionInfo &Info) {
  FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
  TemplateParameterList *TemplateParams
    = FunctionTemplate->getTemplateParameters();

  if (ExplicitTemplateArgs.size() == 0) {
    // No arguments to substitute; just copy over the parameter types and
    // fill in the function type.
    for (auto P : Function->params())
      ParamTypes.push_back(P->getType());

    if (FunctionType)
      *FunctionType = Function->getType();
    return TDK_Success;
  }

  // Unevaluated SFINAE context.
  EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
  SFINAETrap Trap(*this);

  // C++ [temp.arg.explicit]p3:
  //   Template arguments that are present shall be specified in the
  //   declaration order of their corresponding template-parameters. The
  //   template argument list shall not specify more template-arguments than
  //   there are corresponding template-parameters.
  SmallVector<TemplateArgument, 4> Builder;

  // Enter a new template instantiation context where we check the
  // explicitly-specified template arguments against this function template,
  // and then substitute them into the function parameter types.
  SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
  InstantiatingTemplate Inst(*this, Info.getLocation(), FunctionTemplate,
                             DeducedArgs,
            ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution,
                             Info);
  if (Inst.isInvalid())
    return TDK_InstantiationDepth;

  if (CheckTemplateArgumentList(FunctionTemplate,
                                SourceLocation(),
                                ExplicitTemplateArgs,
                                true,
                                Builder) || Trap.hasErrorOccurred()) {
    unsigned Index = Builder.size();
    if (Index >= TemplateParams->size())
      Index = TemplateParams->size() - 1;
    Info.Param = makeTemplateParameter(TemplateParams->getParam(Index));
    return TDK_InvalidExplicitArguments;
  }

  // Form the template argument list from the explicitly-specified
  // template arguments.
  TemplateArgumentList *ExplicitArgumentList
    = TemplateArgumentList::CreateCopy(Context, Builder.data(),
                                       Builder.size());
  Info.reset(ExplicitArgumentList);

  // Template argument deduction and the final substitution should be
  // done in the context of the templated declaration. Explicit
  // argument substitution, on the other hand, needs to happen in the
  // calling context.
  ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());

  // If we deduced template arguments for a template parameter pack,
  // note that the template argument pack is partially substituted and record
  // the explicit template arguments. They'll be used as part of deduction
  // for this template parameter pack.
  for (unsigned I = 0, N = Builder.size(); I != N; ++I) {
    const TemplateArgument &Arg = Builder[I];
    if (Arg.getKind() == TemplateArgument::Pack) {
      CurrentInstantiationScope->SetPartiallySubstitutedPack(
                                                  TemplateParams->getParam(I),
                                                  Arg.pack_begin(),
                                                  Arg.pack_size());
      break;
    }
  }

  const FunctionProtoType *Proto
    = Function->getType()->getAs<FunctionProtoType>();
  assert(Proto && "Function template does not have a prototype?");

  // Isolate our substituted parameters from our caller.
  LocalInstantiationScope InstScope(*this, /*MergeWithOuterScope*/true);

  // Instantiate the types of each of the function parameters given the
  // explicitly-specified template arguments. If the function has a trailing
  // return type, substitute it after the arguments to ensure we substitute
  // in lexical order.
  if (Proto->hasTrailingReturn()) {
    if (SubstParmTypes(Function->getLocation(),
                       Function->param_begin(), Function->getNumParams(),
                       MultiLevelTemplateArgumentList(*ExplicitArgumentList),
                       ParamTypes))
      return TDK_SubstitutionFailure;
  }

  // Instantiate the return type.
  QualType ResultType;
  {
    // C++11 [expr.prim.general]p3:
    //   If a declaration declares a member function or member function
    //   template of a class X, the expression this is a prvalue of type
    //   "pointer to cv-qualifier-seq X" between the optional cv-qualifier-seq
    //   and the end of the function-definition, member-declarator, or
    //   declarator.
    unsigned ThisTypeQuals = 0;
    CXXRecordDecl *ThisContext = nullptr;
    if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
      ThisContext = Method->getParent();
      ThisTypeQuals = Method->getTypeQualifiers();
    }

    CXXThisScopeRAII ThisScope(*this, ThisContext, ThisTypeQuals,
                               getLangOpts().CPlusPlus11);

    ResultType =
        SubstType(Proto->getReturnType(),
                  MultiLevelTemplateArgumentList(*ExplicitArgumentList),
                  Function->getTypeSpecStartLoc(), Function->getDeclName());
    if (ResultType.isNull() || Trap.hasErrorOccurred())
      return TDK_SubstitutionFailure;
  }

  // Instantiate the types of each of the function parameters given the
  // explicitly-specified template arguments if we didn't do so earlier.
  if (!Proto->hasTrailingReturn() &&
      SubstParmTypes(Function->getLocation(),
                     Function->param_begin(), Function->getNumParams(),
                     MultiLevelTemplateArgumentList(*ExplicitArgumentList),
                     ParamTypes))
    return TDK_SubstitutionFailure;

  if (FunctionType) {
    // HLSL Change - FIX - We should move param mods to parameter QualTypes
    *FunctionType = BuildFunctionType(
        ResultType, ParamTypes, Function->getLocation(),
        Function->getDeclName(), Proto->getExtProtoInfo(),
        cast<FunctionProtoType>(Function->getType())->getParamMods());
    // HLSL Change - End
    if (FunctionType->isNull() || Trap.hasErrorOccurred())
      return TDK_SubstitutionFailure;
  }

  // C++ [temp.arg.explicit]p2:
  //   Trailing template arguments that can be deduced (14.8.2) may be
  //   omitted from the list of explicit template-arguments. If all of the
  //   template arguments can be deduced, they may all be omitted; in this
  //   case, the empty template argument list <> itself may also be omitted.
  //
  // Take all of the explicitly-specified arguments and put them into
  // the set of deduced template arguments. Explicitly-specified
  // parameter packs, however, will be set to NULL since the deduction
  // mechanisms handle explicitly-specified argument packs directly.
  Deduced.reserve(TemplateParams->size());
  for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I) {
    const TemplateArgument &Arg = ExplicitArgumentList->get(I);
    if (Arg.getKind() == TemplateArgument::Pack)
      Deduced.push_back(DeducedTemplateArgument());
    else
      Deduced.push_back(Arg);
  }

  return TDK_Success;
}

/// \brief Check whether the deduced argument type for a call to a function
/// template matches the actual argument type per C++ [temp.deduct.call]p4.
static bool
CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
                              QualType DeducedA) {
  ASTContext &Context = S.Context;

  QualType A = OriginalArg.OriginalArgType;
  QualType OriginalParamType = OriginalArg.OriginalParamType;

  // Check for type equality (top-level cv-qualifiers are ignored).
  if (Context.hasSameUnqualifiedType(A, DeducedA))
    return false;

  // Strip off references on the argument types; they aren't needed for
  // the following checks.
  if (const ReferenceType *DeducedARef = DeducedA->getAs<ReferenceType>())
    DeducedA = DeducedARef->getPointeeType();
  if (const ReferenceType *ARef = A->getAs<ReferenceType>())
    A = ARef->getPointeeType();

  // C++ [temp.deduct.call]p4:
  //   [...] However, there are three cases that allow a difference:
  //     - If the original P is a reference type, the deduced A (i.e., the
  //       type referred to by the reference) can be more cv-qualified than
  //       the transformed A.
  if (const ReferenceType *OriginalParamRef
      = OriginalParamType->getAs<ReferenceType>()) {
    // We don't want to keep the reference around any more.
    OriginalParamType = OriginalParamRef->getPointeeType();

    Qualifiers AQuals = A.getQualifiers();
    Qualifiers DeducedAQuals = DeducedA.getQualifiers();

    // Under Objective-C++ ARC, the deduced type may have implicitly
    // been given strong or (when dealing with a const reference)
    // unsafe_unretained lifetime. If so, update the original
    // qualifiers to include this lifetime.
    if (S.getLangOpts().ObjCAutoRefCount &&
        ((DeducedAQuals.getObjCLifetime() == Qualifiers::OCL_Strong &&
          AQuals.getObjCLifetime() == Qualifiers::OCL_None) ||
         (DeducedAQuals.hasConst() &&
          DeducedAQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone))) {
      AQuals.setObjCLifetime(DeducedAQuals.getObjCLifetime());
    }

    if (AQuals == DeducedAQuals) {
      // Qualifiers match; there's nothing to do.
    } else if (!DeducedAQuals.compatiblyIncludes(AQuals)) {
      return true;
    } else {
      // Qualifiers are compatible, so have the argument type adopt the
      // deduced argument type's qualifiers as if we had performed the
      // qualification conversion.
      A = Context.getQualifiedType(A.getUnqualifiedType(), DeducedAQuals);
    }
  }

  //     - The transformed A can be another pointer or pointer to member
  //       type that can be converted to the deduced A via a qualification
  //       conversion.
  //
  // Also allow conversions which merely strip [[noreturn]] from function
  // types (recursively) as an extension.
  // FIXME: Currently, this doesn't play nicely with qualification
  // conversions.
  bool ObjCLifetimeConversion = false;
  QualType ResultTy;
  if ((A->isAnyPointerType() || A->isMemberPointerType()) &&
      (S.IsQualificationConversion(A, DeducedA, false,
                                   ObjCLifetimeConversion) ||
       S.IsNoReturnConversion(A, DeducedA, ResultTy)))
    return false;

  //     - If P is a class and P has the form simple-template-id, then the
  //       transformed A can be a derived class of the deduced A. [...]
  //       [...] Likewise, if P is a pointer to a class of the form
  //       simple-template-id, the transformed A can be a pointer to a
  //       derived class pointed to by the deduced A.
  if (const PointerType *OriginalParamPtr
      = OriginalParamType->getAs<PointerType>()) {
    if (const PointerType *DeducedAPtr = DeducedA->getAs<PointerType>()) {
      if (const PointerType *APtr = A->getAs<PointerType>()) {
        if (A->getPointeeType()->isRecordType()) {
          OriginalParamType = OriginalParamPtr->getPointeeType();
          DeducedA = DeducedAPtr->getPointeeType();
          A = APtr->getPointeeType();
        }
      }
    }
  }

  /// HLSL Change Begin
  // Resolve literal types to 32-bit types for template argument types.
  if (LangOptions().HLSL) {
    if (auto BT = dyn_cast<BuiltinType>(A)) {
      if (BT->getKind() == BuiltinType::LitFloat)
        A = Context.FloatTy;
      else if (BT->getKind() == BuiltinType::LitInt)
        A = Context.IntTy;
    }
  }
  /// HLSL Change End

  if (Context.hasSameUnqualifiedType(A, DeducedA))
    return false;

  if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
      S.IsDerivedFrom(A, DeducedA))
    return false;

  return true;
}

/// \brief Finish template argument deduction for a function template,
/// checking the deduced template arguments for completeness and forming
/// the function template specialization.
///
/// \param OriginalCallArgs If non-NULL, the original call arguments against
/// which the deduced argument types should be compared.
Sema::TemplateDeductionResult Sema::FinishTemplateArgumentDeduction(
    FunctionTemplateDecl *FunctionTemplate,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
    TemplateDeductionInfo &Info,
    SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs,
    bool PartialOverloading) {
  TemplateParameterList *TemplateParams
    = FunctionTemplate->getTemplateParameters();

  // Unevaluated SFINAE context.
  EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
  SFINAETrap Trap(*this);

  // Enter a new template instantiation context while we instantiate the
  // actual function declaration.
  SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end());
  InstantiatingTemplate Inst(*this, Info.getLocation(), FunctionTemplate,
                             DeducedArgs,
             ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
                             Info);
  if (Inst.isInvalid())
    return TDK_InstantiationDepth;

  ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());

  // C++ [temp.deduct.type]p2:
  //   [...] or if any template argument remains neither deduced nor
  //   explicitly specified, template argument deduction fails.
  SmallVector<TemplateArgument, 4> Builder;
  for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
    NamedDecl *Param = TemplateParams->getParam(I);

    if (!Deduced[I].isNull()) {
      if (I < NumExplicitlySpecified) {
        // We have already fully type-checked and converted this
        // argument, because it was explicitly-specified. Just record the
        // presence of this argument.
        Builder.push_back(Deduced[I]);

        // We may have had explicitly-specified template arguments for a
        // template parameter pack (that may or may not have been extended
        // via additional deduced arguments).
        if (Param->isParameterPack() && CurrentInstantiationScope) {
          if (CurrentInstantiationScope->getPartiallySubstitutedPack() ==
              Param) {
            // Forget the partially-substituted pack; its substitution is now
            // complete.
            CurrentInstantiationScope->ResetPartiallySubstitutedPack();
          }
        }

        continue;
      }

      // We have deduced this argument, so it still needs to be
      // checked and converted.

      // First, for a non-type template parameter type that is
      // initialized by a declaration, we need the type of the
      // corresponding non-type template parameter.
      QualType NTTPType;
      if (NonTypeTemplateParmDecl *NTTP
            = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
        NTTPType = NTTP->getType();
        if (NTTPType->isDependentType()) {
          TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
                                            Builder.data(), Builder.size());
          NTTPType = SubstType(NTTPType,
                               MultiLevelTemplateArgumentList(TemplateArgs),
                               NTTP->getLocation(),
                               NTTP->getDeclName());
          if (NTTPType.isNull()) {
            Info.Param = makeTemplateParameter(Param);
            // FIXME: These template arguments are temporary. Free them!
            Info.reset(TemplateArgumentList::CreateCopy(Context,
                                                        Builder.data(),
                                                        Builder.size()));
            return TDK_SubstitutionFailure;
          }
        }
      }

      if (ConvertDeducedTemplateArgument(*this, Param, Deduced[I],
                                         FunctionTemplate, NTTPType, 0, Info,
                                         true, Builder)) {
        Info.Param = makeTemplateParameter(Param);
        // FIXME: These template arguments are temporary. Free them!
        Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
                                                    Builder.size()));
        return TDK_SubstitutionFailure;
      }

      continue;
    }

    // C++0x [temp.arg.explicit]p3:
    //   A trailing template parameter pack (14.5.3) not otherwise deduced
    //   will be deduced to an empty sequence of template arguments.
    // FIXME: Where did the word "trailing" come from?
    if (Param->isTemplateParameterPack()) {
      // We may have had explicitly-specified template arguments for this
      // template parameter pack. If so, our empty deduction extends the
      // explicitly-specified set (C++0x [temp.arg.explicit]p9).
      const TemplateArgument *ExplicitArgs;
      unsigned NumExplicitArgs;
      if (CurrentInstantiationScope &&
          CurrentInstantiationScope->getPartiallySubstitutedPack(
              &ExplicitArgs, &NumExplicitArgs) == Param) {
        Builder.push_back(TemplateArgument(ExplicitArgs, NumExplicitArgs));

        // Forget the partially-substituted pack; its substitution is now
        // complete.
        CurrentInstantiationScope->ResetPartiallySubstitutedPack();
      } else {
        Builder.push_back(TemplateArgument::getEmptyPack());
      }
      continue;
    }

    // Substitute into the default template argument, if available.
    bool HasDefaultArg = false;
    TemplateArgumentLoc DefArg
      = SubstDefaultTemplateArgumentIfAvailable(FunctionTemplate,
                                              FunctionTemplate->getLocation(),
                                  FunctionTemplate->getSourceRange().getEnd(),
                                                Param,
                                                Builder, HasDefaultArg);

    // If there was no default argument, deduction is incomplete.
    if (DefArg.getArgument().isNull()) {
      Info.Param = makeTemplateParameter(
                         const_cast<NamedDecl *>(TemplateParams->getParam(I)));
      Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
                                                  Builder.size()));
      if (PartialOverloading) break;

      return HasDefaultArg ? TDK_SubstitutionFailure : TDK_Incomplete;
    }

    // Check whether we can actually use the default argument.
    if (CheckTemplateArgument(Param, DefArg,
                              FunctionTemplate,
                              FunctionTemplate->getLocation(),
                              FunctionTemplate->getSourceRange().getEnd(),
                              0, Builder,
                              CTAK_Specified)) {
      Info.Param = makeTemplateParameter(
                         const_cast<NamedDecl *>(TemplateParams->getParam(I)));
      // FIXME: These template arguments are temporary. Free them!
      Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
                                                  Builder.size()));
      return TDK_SubstitutionFailure;
    }

    // If we get here, we successfully used the default template argument.
  }

  /// HLSL Change Begin
  // In HLSL for user-supplied templates we translate any `literal <type>`
  // arguments into the appropriate 32-bit type. For builtins we preserve the
  // `literal` types as long as possible so that constant folding can operate
  // on the highest precision type.
  if (LangOptions().HLSL) {
    for (auto &Arg : Builder) {
      if (Arg.getKind() != TemplateArgument::Type)
        continue;
      QualType QT = Arg.getAsType();
      if (auto BT = dyn_cast<BuiltinType>(QT)) {
        if (BT->getKind() == BuiltinType::LitFloat) {
          Arg = TemplateArgument(Context.FloatTy);
        } else if (BT->getKind() == BuiltinType::LitInt) {
          Arg = TemplateArgument(Context.IntTy);
        }
      }
    }
  }
  /// HLSL Change End

  // Form the template argument list from the deduced template arguments.
  TemplateArgumentList *DeducedArgumentList
    = TemplateArgumentList::CreateCopy(Context, Builder.data(),
                                       Builder.size());
  Info.reset(DeducedArgumentList);

  // Substitute the deduced template arguments into the function template
  // declaration to produce the function template specialization.
  DeclContext *Owner = FunctionTemplate->getDeclContext();
  if (FunctionTemplate->getFriendObjectKind())
    Owner = FunctionTemplate->getLexicalDeclContext();
  Specialization = cast_or_null<FunctionDecl>(
      SubstDecl(FunctionTemplate->getTemplatedDecl(), Owner,
                MultiLevelTemplateArgumentList(*DeducedArgumentList)));
  if (!Specialization || Specialization->isInvalidDecl())
    return TDK_SubstitutionFailure;

  assert(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
         FunctionTemplate->getCanonicalDecl());

  // If the template argument list is owned by the function template
  // specialization, release it.
  if (Specialization->getTemplateSpecializationArgs() == DeducedArgumentList &&
      !Trap.hasErrorOccurred())
    Info.take();

  // There may have been an error that did not prevent us from constructing a
  // declaration. Mark the declaration invalid and return with a substitution
  // failure.
  if (Trap.hasErrorOccurred()) {
    Specialization->setInvalidDecl(true);
    return TDK_SubstitutionFailure;
  }

  if (OriginalCallArgs) {
    // C++ [temp.deduct.call]p4:
    //   In general, the deduction process attempts to find template argument
    //   values that will make the deduced A identical to A (after the type A
    //   is transformed as described above). [...]
    for (unsigned I = 0, N = OriginalCallArgs->size(); I != N; ++I) {
      OriginalCallArg OriginalArg = (*OriginalCallArgs)[I];
      unsigned ParamIdx = OriginalArg.ArgIdx;

      if (ParamIdx >= Specialization->getNumParams())
        continue;

      QualType DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
      if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA))
        return Sema::TDK_SubstitutionFailure;
    }
  }

  // If we suppressed any diagnostics while performing template argument
  // deduction, and if we haven't already instantiated this declaration,
  // keep track of these diagnostics. They'll be emitted if this
  // specialization is actually used.
  if (Info.diag_begin() != Info.diag_end()) {
    SuppressedDiagnosticsMap::iterator
      Pos = SuppressedDiagnostics.find(Specialization->getCanonicalDecl());
    if (Pos == SuppressedDiagnostics.end())
      SuppressedDiagnostics[Specialization->getCanonicalDecl()]
        .append(Info.diag_begin(), Info.diag_end());
  }

  return TDK_Success;
}

/// Gets the type of a function for template-argument-deduction
/// purposes when it's considered as part of an overload set.
static QualType GetTypeOfFunction(Sema &S, const OverloadExpr::FindResult &R,
                                  FunctionDecl *Fn) {
  // We may need to deduce the return type of the function now.
  if (S.getLangOpts().CPlusPlus14 && Fn->getReturnType()->isUndeducedType() &&
      S.DeduceReturnType(Fn, R.Expression->getExprLoc(), /*Diagnose*/ false))
    return QualType();

  if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
    if (Method->isInstance()) {
      // An instance method that's referenced in a form that doesn't
      // look like a member pointer is just invalid.
      if (!R.HasFormOfMemberPointer)
        return QualType();

      return S.Context.getMemberPointerType(Fn->getType(),
               S.Context.getTypeDeclType(Method->getParent()).getTypePtr());
    }

  if (!R.IsAddressOfOperand) return Fn->getType();
  return S.Context.getPointerType(Fn->getType());
}

/// Apply the deduction rules for overload sets.
///
/// \return the null type if this argument should be treated as an
/// undeduced context
static QualType
ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
                            Expr *Arg, QualType ParamType,
                            bool ParamWasReference) {
  OverloadExpr::FindResult R = OverloadExpr::find(Arg);

  OverloadExpr *Ovl = R.Expression;

  // C++0x [temp.deduct.call]p4
  unsigned TDF = 0;
  if (ParamWasReference)
    TDF |= TDF_ParamWithReferenceType;
  if (R.IsAddressOfOperand)
    TDF |= TDF_IgnoreQualifiers;

  // C++0x [temp.deduct.call]p6:
  //   When P is a function type, pointer to function type, or pointer
  //   to member function type:
  if (!ParamType->isFunctionType() &&
      !ParamType->isFunctionPointerType() &&
      !ParamType->isMemberFunctionPointerType()) {
    if (Ovl->hasExplicitTemplateArgs()) {
      // But we can still look for an explicit specialization.
      if (FunctionDecl *ExplicitSpec
            = S.ResolveSingleFunctionTemplateSpecialization(Ovl))
        return GetTypeOfFunction(S, R, ExplicitSpec);
    }

    return QualType();
  }

  // Gather the explicit template arguments, if any.
  TemplateArgumentListInfo ExplicitTemplateArgs;
  if (Ovl->hasExplicitTemplateArgs())
    Ovl->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs);
  QualType Match;
  for (UnresolvedSetIterator I = Ovl->decls_begin(),
         E = Ovl->decls_end(); I != E; ++I) {
    NamedDecl *D = (*I)->getUnderlyingDecl();

    if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D)) {
      //   - If the argument is an overload set containing one or more
      //     function templates, the parameter is treated as a
      //     non-deduced context.
      if (!Ovl->hasExplicitTemplateArgs())
        return QualType();

      // Otherwise, see if we can resolve a function type
      FunctionDecl *Specialization = nullptr;
      TemplateDeductionInfo Info(Ovl->getNameLoc());
      if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs,
                                    Specialization, Info))
        continue;

      D = Specialization;
    }

    FunctionDecl *Fn = cast<FunctionDecl>(D);
    QualType ArgType = GetTypeOfFunction(S, R, Fn);
    if (ArgType.isNull()) continue;

    // Function-to-pointer conversion.
    if (!ParamWasReference && ParamType->isPointerType() &&
        ArgType->isFunctionType())
      ArgType = S.Context.getPointerType(ArgType);

    //   - If the argument is an overload set (not containing function
    //     templates), trial argument deduction is attempted using each
    //     of the members of the set. If deduction succeeds for only one
    //     of the overload set members, that member is used as the
    //     argument value for the deduction. If deduction succeeds for
    //     more than one member of the overload set the parameter is
    //     treated as a non-deduced context.

    // We do all of this in a fresh context per C++0x [temp.deduct.type]p2:
    //   Type deduction is done independently for each P/A pair, and
    //   the deduced template argument values are then combined.
    // So we do not reject deductions which were made elsewhere.
    SmallVector<DeducedTemplateArgument, 8>
      Deduced(TemplateParams->size());
    TemplateDeductionInfo Info(Ovl->getNameLoc());
    Sema::TemplateDeductionResult Result
      = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
                                           ArgType, Info, Deduced, TDF);
    if (Result) continue;
    if (!Match.isNull()) return QualType();
    Match = ArgType;
  }

  return Match;
}

/// \brief Perform the adjustments to the parameter and argument types
/// described in C++ [temp.deduct.call].
///
/// \returns true if the caller should not attempt to perform any template
/// argument deduction based on this P/A pair because the argument is an
/// overloaded function set that could not be resolved.
static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
                                        TemplateParameterList *TemplateParams,
                                                      QualType &ParamType,
                                                      QualType &ArgType,
                                                      Expr *Arg,
                                                      unsigned &TDF) {
  // C++0x [temp.deduct.call]p3:
  //   If P is a cv-qualified type, the top level cv-qualifiers of P's type
  //   are ignored for type deduction.
  if (ParamType.hasQualifiers())
    ParamType = ParamType.getUnqualifiedType();

  //   [...] If P is a reference type, the type referred to by P is
  //   used for type deduction.
  const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>();
  if (ParamRefType)
    ParamType = ParamRefType->getPointeeType();

  // Overload sets usually make this parameter an undeduced context,
  // but there are sometimes special circumstances. Typically
  // involving a template-id-expr.
  if (ArgType == S.Context.OverloadTy) {
    ArgType = ResolveOverloadForDeduction(S, TemplateParams,
                                          Arg, ParamType,
                                          ParamRefType != nullptr);
    if (ArgType.isNull())
      return true;
  }

  if (ParamRefType) {
    // If the argument has incomplete array type, try to complete its type.
    if (ArgType->isIncompleteArrayType() && !S.RequireCompleteExprType(Arg, 0))
      ArgType = Arg->getType();

    // C++0x [temp.deduct.call]p3:
    //   If P is an rvalue reference to a cv-unqualified template
    //   parameter and the argument is an lvalue, the type "lvalue
    //   reference to A" is used in place of A for type deduction.
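    // (This is the forwarding-reference case: e.g. for
    //    template<class T> void f(T&&);
    //  called with an lvalue of type int, T is deduced as int&.)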
    if (ParamRefType->isRValueReferenceType() &&
        !ParamType.getQualifiers() &&
        isa<TemplateTypeParmType>(ParamType) &&
        Arg->isLValue())
      ArgType = S.Context.getLValueReferenceType(ArgType);
  } else {
    // C++ [temp.deduct.call]p2:
    //   If P is not a reference type:
    //   - If A is an array type, the pointer type produced by the
    //     array-to-pointer standard conversion (4.2) is used in place of
    //     A for type deduction; otherwise,
    //if (ArgType->isArrayType()) // HLSL Change
    //  ArgType = S.Context.getArrayDecayedType(ArgType);
    //   - If A is a function type, the pointer type produced by the
    //     function-to-pointer standard conversion (4.3) is used in place
    //     of A for type deduction; otherwise,
    if (ArgType->isFunctionType())
      ArgType = S.Context.getPointerType(ArgType);
    else {
      //   - If A is a cv-qualified type, the top level cv-qualifiers of A's
      //     type are ignored for type deduction.
      ArgType = ArgType.getUnqualifiedType();
    }
  }

  // C++0x [temp.deduct.call]p4:
  //   In general, the deduction process attempts to find template argument
  //   values that will make the deduced A identical to A (after the type A
  //   is transformed as described above). [...]
  TDF = TDF_SkipNonDependent;

  //     - If the original P is a reference type, the deduced A (i.e., the
  //       type referred to by the reference) can be more cv-qualified than
  //       the transformed A.
  if (ParamRefType)
    TDF |= TDF_ParamWithReferenceType;
  //     - The transformed A can be another pointer or pointer to member
  //       type that can be converted to the deduced A via a qualification
  //       conversion (4.4).
  if (ArgType->isPointerType() || ArgType->isMemberPointerType() ||
      ArgType->isObjCObjectPointerType())
    TDF |= TDF_IgnoreQualifiers;
  //     - If P is a class and P has the form simple-template-id, then the
  //       transformed A can be a derived class of the deduced A. Likewise,
  //       if P is a pointer to a class of the form simple-template-id, the
  //       transformed A can be a pointer to a derived class pointed to by
  //       the deduced A.
  if (isSimpleTemplateIdType(ParamType) ||
      (isa<PointerType>(ParamType) &&
       isSimpleTemplateIdType(
           ParamType->getAs<PointerType>()->getPointeeType())))
    TDF |= TDF_DerivedClass;

  return false;
}

static bool
hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate,
                               QualType T);

static Sema::TemplateDeductionResult DeduceTemplateArgumentByListElement(
    Sema &S, TemplateParameterList *TemplateParams, QualType ParamType,
    Expr *Arg, TemplateDeductionInfo &Info,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned TDF);

/// \brief Attempt template argument deduction from an initializer list
///        deemed to be an argument in a function call.
static bool
DeduceFromInitializerList(Sema &S, TemplateParameterList *TemplateParams,
                          QualType AdjustedParamType, InitListExpr *ILE,
                          TemplateDeductionInfo &Info,
                          SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                          unsigned TDF, Sema::TemplateDeductionResult &Result) {
  // If the argument is an initializer list then the parameter is an undeduced
  // context, unless the parameter type is (reference to cv)
  // std::initializer_list<P'>, in which case deduction is done for each
  // element of the initializer list as-if it were an argument in a function
  // call, and the result is the deduced type if it's the same for all
  // elements.
  QualType X;
  if (!S.isStdInitializerList(AdjustedParamType, &X))
    return false;

  Result = Sema::TDK_Success;

  // Recurse down into the init list.
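  // (An empty initializer list deduces nothing but still leaves Result as
  // TDK_Success.)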
  for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
    if ((Result = DeduceTemplateArgumentByListElement(S, TemplateParams,
                                                      X, ILE->getInit(i),
                                                      Info, Deduced, TDF)))
      return true;
  }
  return true;
}

/// \brief Perform template argument deduction by matching a parameter type
///        against a single expression, where the expression is an element of
///        an initializer list that was originally matched against a parameter
///        of type \c initializer_list\<ParamType\>.
static Sema::TemplateDeductionResult
DeduceTemplateArgumentByListElement(Sema &S,
                                    TemplateParameterList *TemplateParams,
                                    QualType ParamType, Expr *Arg,
                                    TemplateDeductionInfo &Info,
                             SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                                    unsigned TDF) {
  // Handle the case where an init list contains another init list as the
  // element.
  if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
    Sema::TemplateDeductionResult Result;
    if (!DeduceFromInitializerList(S, TemplateParams,
                                   ParamType.getNonReferenceType(), ILE, Info,
                                   Deduced, TDF, Result))
      return Sema::TDK_Success; // Just ignore this expression.

    return Result;
  }

  // For all other cases, just match by type.
  QualType ArgType = Arg->getType();
  if (AdjustFunctionParmAndArgTypesForDeduction(S, TemplateParams, ParamType,
                                                ArgType, Arg, TDF)) {
    Info.Expression = Arg;
    return Sema::TDK_FailedOverloadResolution;
  }
  return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
                                            ArgType, Info, Deduced, TDF);
}

/// \brief Perform template argument deduction from a function call
/// (C++ [temp.deduct.call]).
///
/// \param FunctionTemplate the function template for which we are performing
/// template argument deduction.
///
/// \param ExplicitTemplateArgs the explicit template arguments provided
/// for this call.
///
/// \param Args the function call arguments
///
/// \param Specialization if template argument deduction was successful,
/// this will be set to the function template specialization produced by
/// template argument deduction.
///
/// \param Info the argument will be updated to provide additional information
/// about template argument deduction.
///
/// \returns the result of template argument deduction.
Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
    bool PartialOverloading) {
  if (FunctionTemplate->isInvalidDecl())
    return TDK_Invalid;

  FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
  unsigned NumParams = Function->getNumParams();

  // C++ [temp.deduct.call]p1:
  //   Template argument deduction is done by comparing each function template
  //   parameter type (call it P) with the type of the corresponding argument
  //   of the call (call it A) as described below.
  unsigned CheckArgs = Args.size();
  if (Args.size() < Function->getMinRequiredArguments() && !PartialOverloading)
    return TDK_TooFewArguments;
  else if (TooManyArguments(NumParams, Args.size(), PartialOverloading)) {
    const FunctionProtoType *Proto
      = Function->getType()->getAs<FunctionProtoType>();
    if (Proto->isTemplateVariadic())
      /* Do nothing */;
    else if (Proto->isVariadic())
      CheckArgs = NumParams;
    else
      return TDK_TooManyArguments;
  }

  // HLSL Change Starts
  if (getLangOpts().HLSL) {
    Sema::TemplateDeductionResult hlslResult =
        hlsl::DeduceTemplateArgumentsForHLSL(this, FunctionTemplate,
                                             ExplicitTemplateArgs, Args,
                                             Specialization, Info);
    if (hlslResult != TDK_Invalid) {
      return hlslResult;
    }
  }
  // HLSL Change Ends

  // The types of the parameters from which we will perform template argument
  // deduction.
  LocalInstantiationScope InstScope(*this);
  TemplateParameterList *TemplateParams
    = FunctionTemplate->getTemplateParameters();
  SmallVector<DeducedTemplateArgument, 4> Deduced;
  SmallVector<QualType, 4> ParamTypes;
  unsigned NumExplicitlySpecified = 0;
  if (ExplicitTemplateArgs) {
    TemplateDeductionResult Result =
      SubstituteExplicitTemplateArguments(FunctionTemplate,
                                          *ExplicitTemplateArgs,
                                          Deduced,
                                          ParamTypes,
                                          nullptr,
                                          Info);
    if (Result)
      return Result;

    NumExplicitlySpecified = Deduced.size();
  } else {
    // Just fill in the parameter types from the function declaration.
    for (unsigned I = 0; I != NumParams; ++I)
      ParamTypes.push_back(Function->getParamDecl(I)->getType());
  }

  // Deduce template arguments from the function parameters.
  Deduced.resize(TemplateParams->size());
  unsigned ArgIdx = 0;
  SmallVector<OriginalCallArg, 4> OriginalCallArgs;
  for (unsigned ParamIdx = 0, NumParamTypes = ParamTypes.size();
       ParamIdx != NumParamTypes; ++ParamIdx) {
    QualType OrigParamType = ParamTypes[ParamIdx];
    QualType ParamType = OrigParamType;

    const PackExpansionType *ParamExpansion
      = dyn_cast<PackExpansionType>(ParamType);
    if (!ParamExpansion) {
      // Simple case: matching a function parameter to a function argument.
      if (ArgIdx >= CheckArgs)
        break;

      Expr *Arg = Args[ArgIdx++];
      QualType ArgType = Arg->getType();

      unsigned TDF = 0;
      if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
                                                    ParamType, ArgType, Arg,
                                                    TDF))
        continue;

      // If we have nothing to deduce, we're done.
      if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
        continue;

      // If the argument is an initializer list ...
      if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
        TemplateDeductionResult Result;
        // Removing references was already done.
        if (!DeduceFromInitializerList(*this, TemplateParams, ParamType, ILE,
                                       Info, Deduced, TDF, Result))
          continue;

        if (Result)
          return Result;
        // Don't track the argument type, since an initializer list has none.
        continue;
      }

      // Keep track of the argument type and corresponding parameter index,
      // so we can check for compatibility between the deduced A and A.
      OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
                                                 ArgType));

      if (TemplateDeductionResult Result
            = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
                                                 ParamType, ArgType,
                                                 Info, Deduced, TDF))
        return Result;

      continue;
    }

    // C++0x [temp.deduct.call]p1:
    //   For a function parameter pack that occurs at the end of the
    //   parameter-declaration-list, the type A of each remaining argument of
    //   the call is compared with the type P of the declarator-id of the
    //   function parameter pack. Each comparison deduces template arguments
    //   for subsequent positions in the template parameter packs expanded by
    //   the function parameter pack.
    //   For a function parameter pack that does not occur at the end of the
    //   parameter-declaration-list, the type of the parameter pack is a
    //   non-deduced context.
    if (ParamIdx + 1 < NumParamTypes)
      break;

    QualType ParamPattern = ParamExpansion->getPattern();
    PackDeductionScope PackScope(*this, TemplateParams, Deduced, Info,
                                 ParamPattern);

    bool HasAnyArguments = false;
    for (; ArgIdx < Args.size(); ++ArgIdx) {
      HasAnyArguments = true;

      QualType OrigParamType = ParamPattern;
      ParamType = OrigParamType;
      Expr *Arg = Args[ArgIdx];
      QualType ArgType = Arg->getType();

      unsigned TDF = 0;
      if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
                                                    ParamType, ArgType, Arg,
                                                    TDF)) {
        // We can't actually perform any deduction for this argument, so stop
        // deduction at this point.
        ++ArgIdx;
        break;
      }

      // As above, initializer lists need special handling.
      if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
        TemplateDeductionResult Result;
        if (!DeduceFromInitializerList(*this, TemplateParams, ParamType, ILE,
                                       Info, Deduced, TDF, Result)) {
          ++ArgIdx;
          break;
        }

        if (Result)
          return Result;
      } else {
        // Keep track of the argument type and corresponding argument index,
        // so we can check for compatibility between the deduced A and A.
        if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
          OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
                                                     ArgType));

        if (TemplateDeductionResult Result
              = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
                                                   ParamType, ArgType, Info,
                                                   Deduced, TDF))
          return Result;
      }

      PackScope.nextPackElement();
    }

    // Build argument packs for each of the parameter packs expanded by this
    // pack expansion.
    if (auto Result = PackScope.finish(HasAnyArguments))
      return Result;

    // After we've matched against a parameter pack, we're done.
    break;
  }

  return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
                                         NumExplicitlySpecified,
                                         Specialization, Info,
                                         &OriginalCallArgs, PartialOverloading);
}

QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
                                   QualType FunctionType) {
  if (ArgFunctionType.isNull())
    return ArgFunctionType;

  const FunctionProtoType *FunctionTypeP =
      FunctionType->castAs<FunctionProtoType>();
  CallingConv CC = FunctionTypeP->getCallConv();
  bool NoReturn = FunctionTypeP->getNoReturnAttr();
  const FunctionProtoType *ArgFunctionTypeP =
      ArgFunctionType->getAs<FunctionProtoType>();
  if (ArgFunctionTypeP->getCallConv() == CC &&
      ArgFunctionTypeP->getNoReturnAttr() == NoReturn)
    return ArgFunctionType;

  FunctionType::ExtInfo EI =
      ArgFunctionTypeP->getExtInfo().withCallingConv(CC);
  EI = EI.withNoReturn(NoReturn);
  ArgFunctionTypeP =
      cast<FunctionProtoType>(Context.adjustFunctionType(ArgFunctionTypeP, EI));
  return QualType(ArgFunctionTypeP, 0);
}

/// \brief Deduce template arguments when taking the address of a function
/// template (C++ [temp.deduct.funcaddr]) or matching a specialization to
/// a template.
///
/// \param FunctionTemplate the function template for which we are performing
/// template argument deduction.
///
/// \param ExplicitTemplateArgs the explicitly-specified template
/// arguments.
///
/// \param ArgFunctionType the function type that will be used as the
/// "argument" type (A) when performing template argument deduction from the
/// function template's function type. This type may be NULL, if there is no
/// argument type to compare against, in C++0x [temp.arg.explicit]p3.
/// /// \param Specialization if template argument deduction was successful, /// this will be set to the function template specialization produced by /// template argument deduction. /// /// \param Info the argument will be updated to provide additional information /// about template argument deduction. /// /// \returns the result of template argument deduction. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, TemplateDeductionInfo &Info, bool InOverloadResolution) { if (FunctionTemplate->isInvalidDecl()) return TDK_Invalid; FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); QualType FunctionType = Function->getType(); if (!InOverloadResolution) ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, FunctionType); // Substitute any explicit template arguments. LocalInstantiationScope InstScope(*this); SmallVector<DeducedTemplateArgument, 4> Deduced; unsigned NumExplicitlySpecified = 0; SmallVector<QualType, 4> ParamTypes; if (ExplicitTemplateArgs) { if (TemplateDeductionResult Result = SubstituteExplicitTemplateArguments(FunctionTemplate, *ExplicitTemplateArgs, Deduced, ParamTypes, &FunctionType, Info)) return Result; NumExplicitlySpecified = Deduced.size(); } // Unevaluated SFINAE context. EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated); SFINAETrap Trap(*this); Deduced.resize(TemplateParams->size()); // If the function has a deduced return type, substitute it for a dependent // type so that we treat it as a non-deduced context in what follows. bool HasDeducedReturnType = false; if (getLangOpts().CPlusPlus14 && InOverloadResolution && Function->getReturnType()->getContainedAutoType()) { FunctionType = SubstAutoType(FunctionType, Context.DependentTy); HasDeducedReturnType = true; } if (!ArgFunctionType.isNull()) { unsigned TDF = TDF_TopLevelParameterTypeList; if (InOverloadResolution) TDF |= TDF_InOverloadResolution; // Deduce template arguments from the function type. if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams, FunctionType, ArgFunctionType, Info, Deduced, TDF)) return Result; } if (TemplateDeductionResult Result = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced, NumExplicitlySpecified, Specialization, Info)) return Result; // If the function has a deduced return type, deduce it now, so we can check // that the deduced function type matches the requested type. if (HasDeducedReturnType && Specialization->getReturnType()->isUndeducedType() && DeduceReturnType(Specialization, Info.getLocation(), false)) return TDK_MiscellaneousDeductionFailure; // If the requested function type does not match the actual type of the // specialization with respect to arguments of compatible pointer to function // types, template argument deduction fails. if (!ArgFunctionType.isNull()) { if (InOverloadResolution && !isSameOrCompatibleFunctionType( Context.getCanonicalType(Specialization->getType()), Context.getCanonicalType(ArgFunctionType))) return TDK_MiscellaneousDeductionFailure; else if(!InOverloadResolution && !Context.hasSameType(Specialization->getType(), ArgFunctionType)) return TDK_MiscellaneousDeductionFailure; } return TDK_Success; } #if 0 // HLSL Change Starts /// \brief Given a function declaration (e.g. 
a generic lambda conversion /// function) that contains an 'auto' in its result type, substitute it /// with TypeToReplaceAutoWith. Be careful to pass in the type you want /// to replace 'auto' with and not the actual result type you want /// to set the function to. static inline void SubstAutoWithinFunctionReturnType(FunctionDecl *F, QualType TypeToReplaceAutoWith, Sema &S) { assert(!TypeToReplaceAutoWith->getContainedAutoType()); QualType AutoResultType = F->getReturnType(); assert(AutoResultType->getContainedAutoType()); QualType DeducedResultType = S.SubstAutoType(AutoResultType, TypeToReplaceAutoWith); S.Context.adjustDeducedFunctionResultType(F, DeducedResultType); } #endif // HLSL Change Ends /// \brief Given a specialized conversion operator of a generic lambda /// create the corresponding specializations of the call operator and /// the static-invoker. If the return type of the call operator is auto, /// deduce its return type and check if that matches the /// return type of the destination function ptr. static inline Sema::TemplateDeductionResult SpecializeCorrespondingLambdaCallOperatorAndInvoker( CXXConversionDecl *ConversionSpecialized, SmallVectorImpl<DeducedTemplateArgument> &DeducedArguments, QualType ReturnTypeOfDestFunctionPtr, TemplateDeductionInfo &TDInfo, Sema &S) { // HLSL Change Start // No support for lambdas. return Sema::TDK_Invalid; #if 0 // HLSL Change Ends CXXRecordDecl *LambdaClass = ConversionSpecialized->getParent(); assert(LambdaClass && LambdaClass->isGenericLambda()); CXXMethodDecl *CallOpGeneric = LambdaClass->getLambdaCallOperator(); QualType CallOpResultType = CallOpGeneric->getReturnType(); const bool GenericLambdaCallOperatorHasDeducedReturnType = CallOpResultType->getContainedAutoType(); FunctionTemplateDecl *CallOpTemplate = CallOpGeneric->getDescribedFunctionTemplate(); FunctionDecl *CallOpSpecialized = nullptr; // Use the deduced arguments of the conversion function, to specialize our // generic lambda's call operator. if (Sema::TemplateDeductionResult Result = S.FinishTemplateArgumentDeduction(CallOpTemplate, DeducedArguments, 0, CallOpSpecialized, TDInfo)) return Result; // If we need to deduce the return type, do so (instantiates the callop). if (GenericLambdaCallOperatorHasDeducedReturnType && CallOpSpecialized->getReturnType()->isUndeducedType()) S.DeduceReturnType(CallOpSpecialized, CallOpSpecialized->getPointOfInstantiation(), /*Diagnose*/ true); // Check to see if the return type of the destination ptr-to-function // matches the return type of the call operator. if (!S.Context.hasSameType(CallOpSpecialized->getReturnType(), ReturnTypeOfDestFunctionPtr)) return Sema::TDK_NonDeducedMismatch; // Since we have succeeded in matching the source and destination // ptr-to-functions (now including return type), and have successfully // specialized our corresponding call operator, we are ready to // specialize the static invoker with the deduced arguments of our // ptr-to-function. FunctionDecl *InvokerSpecialized = nullptr; FunctionTemplateDecl *InvokerTemplate = LambdaClass-> getLambdaStaticInvoker()->getDescribedFunctionTemplate(); #ifndef NDEBUG Sema::TemplateDeductionResult LLVM_ATTRIBUTE_UNUSED Result = #endif S.FinishTemplateArgumentDeduction(InvokerTemplate, DeducedArguments, 0, InvokerSpecialized, TDInfo); assert(Result == Sema::TDK_Success && "If the call operator succeeded so should the invoker!"); // Set the result type to match the corresponding call operator // specialization's result type. 
if (GenericLambdaCallOperatorHasDeducedReturnType && InvokerSpecialized->getReturnType()->isUndeducedType()) { // Be sure to get the type to replace 'auto' with and not // the full result type of the call op specialization // to substitute into the 'auto' of the invoker and conversion // function. // For e.g. // int* (*fp)(int*) = [](auto* a) -> auto* { return a; }; // We don't want to subst 'int*' into 'auto' to get int**. QualType TypeToReplaceAutoWith = CallOpSpecialized->getReturnType() ->getContainedAutoType() ->getDeducedType(); SubstAutoWithinFunctionReturnType(InvokerSpecialized, TypeToReplaceAutoWith, S); SubstAutoWithinFunctionReturnType(ConversionSpecialized, TypeToReplaceAutoWith, S); } // Ensure that static invoker doesn't have a const qualifier. // FIXME: When creating the InvokerTemplate in SemaLambda.cpp // do not use the CallOperator's TypeSourceInfo which allows // the const qualifier to leak through. const FunctionProtoType *InvokerFPT = InvokerSpecialized-> getType().getTypePtr()->castAs<FunctionProtoType>(); FunctionProtoType::ExtProtoInfo EPI = InvokerFPT->getExtProtoInfo(); EPI.TypeQuals = 0; InvokerSpecialized->setType(S.Context.getFunctionType( InvokerFPT->getReturnType(), InvokerFPT->getParamTypes(), EPI)); return Sema::TDK_Success; #endif // HLSL Change } /// \brief Deduce template arguments for a templated conversion /// function (C++ [temp.deduct.conv]) and, if successful, produce a /// conversion function template specialization. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate, QualType ToType, CXXConversionDecl *&Specialization, TemplateDeductionInfo &Info) { if (ConversionTemplate->isInvalidDecl()) return TDK_Invalid; CXXConversionDecl *ConversionGeneric = cast<CXXConversionDecl>(ConversionTemplate->getTemplatedDecl()); QualType FromType = ConversionGeneric->getConversionType(); // Canonicalize the types for deduction. QualType P = Context.getCanonicalType(FromType); QualType A = Context.getCanonicalType(ToType); // C++0x [temp.deduct.conv]p2: // If P is a reference type, the type referred to by P is used for // type deduction. if (const ReferenceType *PRef = P->getAs<ReferenceType>()) P = PRef->getPointeeType(); // C++0x [temp.deduct.conv]p4: // [...] If A is a reference type, the type referred to by A is used // for type deduction. if (const ReferenceType *ARef = A->getAs<ReferenceType>()) A = ARef->getPointeeType().getUnqualifiedType(); // C++ [temp.deduct.conv]p3: // // If A is not a reference type: else { assert(!A->isReferenceType() && "Reference types were handled above"); // - If P is an array type, the pointer type produced by the // array-to-pointer standard conversion (4.2) is used in place // of P for type deduction; otherwise, if (P->isArrayType()) P = Context.getArrayDecayedType(P); // - If P is a function type, the pointer type produced by the // function-to-pointer standard conversion (4.3) is used in // place of P for type deduction; otherwise, else if (P->isFunctionType()) P = Context.getPointerType(P); // - If P is a cv-qualified type, the top level cv-qualifiers of // P's type are ignored for type deduction. else P = P.getUnqualifiedType(); // C++0x [temp.deduct.conv]p4: // If A is a cv-qualified type, the top level cv-qualifiers of A's // type are ignored for type deduction. If A is a reference type, the type // referred to by A is used for type deduction. A = A.getUnqualifiedType(); } // Unevaluated SFINAE context. 
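// For example, given
//   struct S { template <typename T> operator T*(); };   // P = T*
//   int *p = S();                                         // A = int*
// deduction compares P (T*) against A (int*) and deduces T = int. Any
// substitution failure while matching P against A must surface as a
// deduction failure (SFINAE) rather than a hard error, which is why the
// comparison below runs in an unevaluated context guarded by a SFINAETrap.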
EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated); SFINAETrap Trap(*this); // C++ [temp.deduct.conv]p1: // Template argument deduction is done by comparing the return // type of the template conversion function (call it P) with the // type that is required as the result of the conversion (call it // A) as described in 14.8.2.4. TemplateParameterList *TemplateParams = ConversionTemplate->getTemplateParameters(); SmallVector<DeducedTemplateArgument, 4> Deduced; Deduced.resize(TemplateParams->size()); // C++0x [temp.deduct.conv]p4: // In general, the deduction process attempts to find template // argument values that will make the deduced A identical to // A. However, there are two cases that allow a difference: unsigned TDF = 0; // - If the original A is a reference type, A can be more // cv-qualified than the deduced A (i.e., the type referred to // by the reference) if (ToType->isReferenceType()) TDF |= TDF_ParamWithReferenceType; // - The deduced A can be another pointer or pointer to member // type that can be converted to A via a qualification // conversion. // // (C++0x [temp.deduct.conv]p6 clarifies that this only happens when // both P and A are pointers or member pointers. In this case, we // just ignore cv-qualifiers completely). if ((P->isPointerType() && A->isPointerType()) || (P->isMemberPointerType() && A->isMemberPointerType())) TDF |= TDF_IgnoreQualifiers; if (TemplateDeductionResult Result = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams, P, A, Info, Deduced, TDF)) return Result; // Create an Instantiation Scope for finalizing the operator. LocalInstantiationScope InstScope(*this); // Finish template argument deduction. FunctionDecl *ConversionSpecialized = nullptr; TemplateDeductionResult Result = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0, ConversionSpecialized, Info); Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized); // If the conversion operator is being invoked on a lambda closure to convert // to a ptr-to-function, use the deduced arguments from the conversion // function to specialize the corresponding call operator. // e.g., int (*fp)(int) = [](auto a) { return a; }; if (Result == TDK_Success && isLambdaConversionOperator(ConversionGeneric)) { // Get the return type of the destination ptr-to-function we are converting // to. This is necessary for matching the lambda call operator's return // type to that of the destination ptr-to-function's return type. assert(A->isPointerType() && "Can only convert from lambda to ptr-to-function"); const FunctionType *ToFunType = A->getPointeeType().getTypePtr()->getAs<FunctionType>(); const QualType DestFunctionPtrReturnType = ToFunType->getReturnType(); // Create the corresponding specializations of the call operator and // the static-invoker; and if the return type is auto, // deduce the return type and check if it matches the // DestFunctionPtrReturnType. // For instance: // auto L = [](auto a) { return f(a); }; // int (*fp)(int) = L; // char (*fp2)(int) = L; <-- Not OK. Result = SpecializeCorrespondingLambdaCallOperatorAndInvoker( Specialization, Deduced, DestFunctionPtrReturnType, Info, *this); } return Result; } /// \brief Deduce template arguments for a function template when there is /// nothing to deduce against (C++0x [temp.arg.explicit]p3). /// /// \param FunctionTemplate the function template for which we are performing /// template argument deduction. /// /// \param ExplicitTemplateArgs the explicitly-specified template /// arguments. 
/// /// \param Specialization if template argument deduction was successful, /// this will be set to the function template specialization produced by /// template argument deduction. /// /// \param Info the argument will be updated to provide additional information /// about template argument deduction. /// /// \returns the result of template argument deduction. Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, TemplateDeductionInfo &Info, bool InOverloadResolution) { return DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, QualType(), Specialization, Info, InOverloadResolution); } namespace { /// Substitute the 'auto' type specifier within a type for a given replacement /// type. class SubstituteAutoTransform : public TreeTransform<SubstituteAutoTransform> { QualType Replacement; public: SubstituteAutoTransform(Sema &SemaRef, QualType Replacement) : TreeTransform<SubstituteAutoTransform>(SemaRef), Replacement(Replacement) {} QualType TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) { // If we're building the type pattern to deduce against, don't wrap the // substituted type in an AutoType. Certain template deduction rules // apply only when a template type parameter appears directly (and not if // the parameter is found through desugaring). For instance: // auto &&lref = lvalue; // must transform into "rvalue reference to T" not "rvalue reference to // auto type deduced as T" in order for [temp.deduct.call]p3 to apply. if (!Replacement.isNull() && isa<TemplateTypeParmType>(Replacement)) { QualType Result = Replacement; TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } else { bool Dependent = !Replacement.isNull() && Replacement->isDependentType(); QualType Result = SemaRef.Context.getAutoType(Dependent ? QualType() : Replacement, TL.getTypePtr()->isDecltypeAuto(), Dependent); AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result); NewTL.setNameLoc(TL.getNameLoc()); return Result; } } ExprResult TransformLambdaExpr(LambdaExpr *E) { // Lambdas never need to be transformed. return E; } QualType Apply(TypeLoc TL) { // Create some scratch storage for the transformed type locations. // FIXME: We're just going to throw this information away. Don't build it. TypeLocBuilder TLB; TLB.reserve(TL.getFullDataSize()); return TransformType(TLB, TL); } }; } Sema::DeduceAutoResult Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result) { return DeduceAutoType(Type->getTypeLoc(), Init, Result); } /// \brief Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6) /// /// \param Type the type pattern using the auto type-specifier. /// \param Init the initializer for the variable whose type is to be deduced. /// \param Result if type deduction was successful, this will be set to the /// deduced type. 
Sema::DeduceAutoResult Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) { if (Init->getType()->isNonOverloadPlaceholderType()) { ExprResult NonPlaceholder = CheckPlaceholderExpr(Init); if (NonPlaceholder.isInvalid()) return DAR_FailedAlreadyDiagnosed; Init = NonPlaceholder.get(); } if (Init->isTypeDependent() || Type.getType()->isDependentType()) { Result = SubstituteAutoTransform(*this, Context.DependentTy).Apply(Type); assert(!Result.isNull() && "substituting DependentTy can't fail"); return DAR_Succeeded; } // If this is a 'decltype(auto)' specifier, do the decltype dance. // Since 'decltype(auto)' can only occur at the top of the type, we // don't need to go digging for it. if (const AutoType *AT = Type.getType()->getAs<AutoType>()) { if (AT->isDecltypeAuto()) { if (isa<InitListExpr>(Init)) { Diag(Init->getLocStart(), diag::err_decltype_auto_initializer_list); return DAR_FailedAlreadyDiagnosed; } QualType Deduced = BuildDecltypeType(Init, Init->getLocStart(), false); if (Deduced.isNull()) return DAR_FailedAlreadyDiagnosed; // FIXME: Support a non-canonical deduced type for 'auto'. Deduced = Context.getCanonicalType(Deduced); Result = SubstituteAutoTransform(*this, Deduced).Apply(Type); if (Result.isNull()) return DAR_FailedAlreadyDiagnosed; return DAR_Succeeded; } } SourceLocation Loc = Init->getExprLoc(); LocalInstantiationScope InstScope(*this); // Build template<class TemplParam> void Func(FuncParam); TemplateTypeParmDecl *TemplParam = TemplateTypeParmDecl::Create(Context, nullptr, SourceLocation(), Loc, 0, 0, nullptr, false, false); QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0); NamedDecl *TemplParamPtr = TemplParam; FixedSizeTemplateParameterList<1> TemplateParams(Loc, Loc, &TemplParamPtr, Loc); QualType FuncParam = SubstituteAutoTransform(*this, TemplArg).Apply(Type); assert(!FuncParam.isNull() && "substituting template parameter for 'auto' failed"); // Deduce type of TemplParam in Func(Init) SmallVector<DeducedTemplateArgument, 1> Deduced; Deduced.resize(1); QualType InitType = Init->getType(); unsigned TDF = 0; TemplateDeductionInfo Info(Loc); InitListExpr *InitList = dyn_cast<InitListExpr>(Init); if (InitList) { for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) { if (DeduceTemplateArgumentByListElement(*this, &TemplateParams, TemplArg, InitList->getInit(i), Info, Deduced, TDF)) return DAR_Failed; } } else { if (AdjustFunctionParmAndArgTypesForDeduction(*this, &TemplateParams, FuncParam, InitType, Init, TDF)) return DAR_Failed; if (DeduceTemplateArgumentsByTypeMatch(*this, &TemplateParams, FuncParam, InitType, Info, Deduced, TDF)) return DAR_Failed; } if (Deduced[0].getKind() != TemplateArgument::Type) return DAR_Failed; QualType DeducedType = Deduced[0].getAsType(); if (InitList) { DeducedType = BuildStdInitializerList(DeducedType, Loc); if (DeducedType.isNull()) return DAR_FailedAlreadyDiagnosed; } Result = SubstituteAutoTransform(*this, DeducedType).Apply(Type); if (Result.isNull()) return DAR_FailedAlreadyDiagnosed; // Check that the deduced argument type is compatible with the original // argument type per C++ [temp.deduct.call]p4. if (!InitList && !Result.isNull() && CheckOriginalCallArgDeduction(*this, Sema::OriginalCallArg(FuncParam,0,InitType), Result)) { Result = QualType(); return DAR_Failed; } return DAR_Succeeded; } QualType Sema::SubstAutoType(QualType TypeWithAuto, QualType TypeToReplaceAuto) { return SubstituteAutoTransform(*this, TypeToReplaceAuto). 
TransformType(TypeWithAuto); } TypeSourceInfo* Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType TypeToReplaceAuto) { return SubstituteAutoTransform(*this, TypeToReplaceAuto). TransformType(TypeWithAuto); } void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) { if (isa<InitListExpr>(Init)) Diag(VDecl->getLocation(), VDecl->isInitCapture() ? diag::err_init_capture_deduction_failure_from_init_list : diag::err_auto_var_deduction_failure_from_init_list) << VDecl->getDeclName() << VDecl->getType() << Init->getSourceRange(); else Diag(VDecl->getLocation(), VDecl->isInitCapture() ? diag::err_init_capture_deduction_failure : diag::err_auto_var_deduction_failure) << VDecl->getDeclName() << VDecl->getType() << Init->getType() << Init->getSourceRange(); } bool Sema::DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose) { assert(FD->getReturnType()->isUndeducedType()); if (FD->getTemplateInstantiationPattern()) InstantiateFunctionDefinition(Loc, FD); bool StillUndeduced = FD->getReturnType()->isUndeducedType(); if (StillUndeduced && Diagnose && !FD->isInvalidDecl()) { Diag(Loc, diag::err_auto_fn_used_before_defined) << FD; Diag(FD->getLocation(), diag::note_callee_decl) << FD; } return StillUndeduced; } static void MarkUsedTemplateParameters(ASTContext &Ctx, QualType T, bool OnlyDeduced, unsigned Level, llvm::SmallBitVector &Deduced); /// \brief If this is a non-static member function, static void AddImplicitObjectParameterType(ASTContext &Context, CXXMethodDecl *Method, SmallVectorImpl<QualType> &ArgTypes) { // C++11 [temp.func.order]p3: // [...] The new parameter is of type "reference to cv A," where cv are // the cv-qualifiers of the function template (if any) and A is // the class of which the function template is a member. // // The standard doesn't say explicitly, but we pick the appropriate kind of // reference type based on [over.match.funcs]p4. QualType ArgTy = Context.getTypeDeclType(Method->getParent()); ArgTy = Context.getQualifiedType(ArgTy, Qualifiers::fromCVRMask(Method->getTypeQualifiers())); if (Method->getRefQualifier() == RQ_RValue) ArgTy = Context.getRValueReferenceType(ArgTy); else ArgTy = Context.getLValueReferenceType(ArgTy); ArgTypes.push_back(ArgTy); } /// \brief Determine whether the function template \p FT1 is at least as /// specialized as \p FT2. static bool isAtLeastAsSpecializedAs(Sema &S, SourceLocation Loc, FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1) { FunctionDecl *FD1 = FT1->getTemplatedDecl(); FunctionDecl *FD2 = FT2->getTemplatedDecl(); const FunctionProtoType *Proto1 = FD1->getType()->getAs<FunctionProtoType>(); const FunctionProtoType *Proto2 = FD2->getType()->getAs<FunctionProtoType>(); assert(Proto1 && Proto2 && "Function templates must have prototypes"); TemplateParameterList *TemplateParams = FT2->getTemplateParameters(); SmallVector<DeducedTemplateArgument, 4> Deduced; Deduced.resize(TemplateParams->size()); // C++0x [temp.deduct.partial]p3: // The types used to determine the ordering depend on the context in which // the partial ordering is done: TemplateDeductionInfo Info(Loc); SmallVector<QualType, 4> Args2; switch (TPOC) { case TPOC_Call: { // - In the context of a function call, the function parameter types are // used. CXXMethodDecl *Method1 = dyn_cast<CXXMethodDecl>(FD1); CXXMethodDecl *Method2 = dyn_cast<CXXMethodDecl>(FD2); // C++11 [temp.func.order]p3: // [...] 
If only one of the function templates is a non-static // member, that function template is considered to have a new // first parameter inserted in its function parameter list. The // new parameter is of type "reference to cv A," where cv are // the cv-qualifiers of the function template (if any) and A is // the class of which the function template is a member. // // Note that we interpret this to mean "if one of the function // templates is a non-static member and the other is a non-member"; // otherwise, the ordering rules for static functions against non-static // functions don't make any sense. // // C++98/03 doesn't have this provision but we've extended DR532 to cover // it as wording was broken prior to it. SmallVector<QualType, 4> Args1; unsigned NumComparedArguments = NumCallArguments1; if (!Method2 && Method1 && !Method1->isStatic()) { // Compare 'this' from Method1 against first parameter from Method2. AddImplicitObjectParameterType(S.Context, Method1, Args1); ++NumComparedArguments; } else if (!Method1 && Method2 && !Method2->isStatic()) { // Compare 'this' from Method2 against first parameter from Method1. AddImplicitObjectParameterType(S.Context, Method2, Args2); } Args1.insert(Args1.end(), Proto1->param_type_begin(), Proto1->param_type_end()); Args2.insert(Args2.end(), Proto2->param_type_begin(), Proto2->param_type_end()); // C++ [temp.func.order]p5: // The presence of unused ellipsis and default arguments has no effect on // the partial ordering of function templates. if (Args1.size() > NumComparedArguments) Args1.resize(NumComparedArguments); if (Args2.size() > NumComparedArguments) Args2.resize(NumComparedArguments); if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(), Args1.data(), Args1.size(), Info, Deduced, TDF_None, /*PartialOrdering=*/true)) return false; break; } case TPOC_Conversion: // - In the context of a call to a conversion operator, the return types // of the conversion function templates are used. if (DeduceTemplateArgumentsByTypeMatch( S, TemplateParams, Proto2->getReturnType(), Proto1->getReturnType(), Info, Deduced, TDF_None, /*PartialOrdering=*/true)) return false; break; case TPOC_Other: // - In other contexts (14.6.6.2) the function template's function type // is used. if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, FD2->getType(), FD1->getType(), Info, Deduced, TDF_None, /*PartialOrdering=*/true)) return false; break; } // C++0x [temp.deduct.partial]p11: // In most cases, all template parameters must have values in order for // deduction to succeed, but for partial ordering purposes a template // parameter may remain without a value provided it is not used in the // types being used for partial ordering. [ Note: a template parameter used // in a non-deduced context is considered used. -end note] unsigned ArgIdx = 0, NumArgs = Deduced.size(); for (; ArgIdx != NumArgs; ++ArgIdx) if (Deduced[ArgIdx].isNull()) break; if (ArgIdx == NumArgs) { // All template arguments were deduced. FT1 is at least as specialized // as FT2. return true; } // Figure out which template parameters were used. 
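// For example, when checking whether
//   template <typename T> void g(T*);                     // FT1
// is at least as specialized as
//   template <typename T, typename U> void f(T*);         // FT2
// U never receives a value, but U also does not appear in the parameter
// types being compared for the ordering (only T* does), so the missing
// value is harmless. A parameter that is both undeduced and used in those
// types causes the check below to fail.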
llvm::SmallBitVector UsedParameters(TemplateParams->size()); switch (TPOC) { case TPOC_Call: for (unsigned I = 0, N = Args2.size(); I != N; ++I) ::MarkUsedTemplateParameters(S.Context, Args2[I], false, TemplateParams->getDepth(), UsedParameters); break; case TPOC_Conversion: ::MarkUsedTemplateParameters(S.Context, Proto2->getReturnType(), false, TemplateParams->getDepth(), UsedParameters); break; case TPOC_Other: ::MarkUsedTemplateParameters(S.Context, FD2->getType(), false, TemplateParams->getDepth(), UsedParameters); break; } for (; ArgIdx != NumArgs; ++ArgIdx) // If this argument had no value deduced but was used in one of the types // used for partial ordering, then deduction fails. if (Deduced[ArgIdx].isNull() && UsedParameters[ArgIdx]) return false; return true; } /// \brief Determine whether this a function template whose parameter-type-list /// ends with a function parameter pack. static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) { FunctionDecl *Function = FunTmpl->getTemplatedDecl(); unsigned NumParams = Function->getNumParams(); if (NumParams == 0) return false; ParmVarDecl *Last = Function->getParamDecl(NumParams - 1); if (!Last->isParameterPack()) return false; // Make sure that no previous parameter is a parameter pack. while (--NumParams > 0) { if (Function->getParamDecl(NumParams - 1)->isParameterPack()) return false; } return true; } /// \brief Returns the more specialized function template according /// to the rules of function template partial ordering (C++ [temp.func.order]). /// /// \param FT1 the first function template /// /// \param FT2 the second function template /// /// \param TPOC the context in which we are performing partial ordering of /// function templates. /// /// \param NumCallArguments1 The number of arguments in the call to FT1, used /// only when \c TPOC is \c TPOC_Call. /// /// \param NumCallArguments2 The number of arguments in the call to FT2, used /// only when \c TPOC is \c TPOC_Call. /// /// \returns the more specialized function template. If neither /// template is more specialized, returns NULL. FunctionTemplateDecl * Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2) { bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC, NumCallArguments1); bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC, NumCallArguments2); if (Better1 != Better2) // We have a clear winner return Better1 ? FT1 : FT2; if (!Better1 && !Better2) // Neither is better than the other return nullptr; // FIXME: This mimics what GCC implements, but doesn't match up with the // proposed resolution for core issue 692. This area needs to be sorted out, // but for now we attempt to maintain compatibility. bool Variadic1 = isVariadicFunctionTemplate(FT1); bool Variadic2 = isVariadicFunctionTemplate(FT2); if (Variadic1 != Variadic2) return Variadic1? FT2 : FT1; return nullptr; } /// \brief Determine if the two templates are equivalent. static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) { if (T1 == T2) return true; if (!T1 || !T2) return false; return T1->getCanonicalDecl() == T2->getCanonicalDecl(); } /// \brief Retrieve the most specialized of the given function template /// specializations. /// /// \param SpecBegin the start iterator of the function template /// specializations that we will be comparing. 
/// /// \param SpecEnd the end iterator of the function template /// specializations, paired with \p SpecBegin. /// /// \param Loc the location where the ambiguity or no-specializations /// diagnostic should occur. /// /// \param NoneDiag partial diagnostic used to diagnose cases where there are /// no matching candidates. /// /// \param AmbigDiag partial diagnostic used to diagnose an ambiguity, if one /// occurs. /// /// \param CandidateDiag partial diagnostic used for each function template /// specialization that is a candidate in the ambiguous ordering. One parameter /// in this diagnostic should be unbound, which will correspond to the string /// describing the template arguments for the function template specialization. /// /// \returns the most specialized function template specialization, if /// found. Otherwise, returns SpecEnd. UnresolvedSetIterator Sema::getMostSpecialized( UnresolvedSetIterator SpecBegin, UnresolvedSetIterator SpecEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain, QualType TargetType) { if (SpecBegin == SpecEnd) { if (Complain) { Diag(Loc, NoneDiag); FailedCandidates.NoteCandidates(*this, Loc); } return SpecEnd; } if (SpecBegin + 1 == SpecEnd) return SpecBegin; // Find the function template that is better than all of the templates it // has been compared to. UnresolvedSetIterator Best = SpecBegin; FunctionTemplateDecl *BestTemplate = cast<FunctionDecl>(*Best)->getPrimaryTemplate(); assert(BestTemplate && "Not a function template specialization?"); for (UnresolvedSetIterator I = SpecBegin + 1; I != SpecEnd; ++I) { FunctionTemplateDecl *Challenger = cast<FunctionDecl>(*I)->getPrimaryTemplate(); assert(Challenger && "Not a function template specialization?"); if (isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger, Loc, TPOC_Other, 0, 0), Challenger)) { Best = I; BestTemplate = Challenger; } } // Make sure that the "best" function template is more specialized than all // of the others. bool Ambiguous = false; for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) { FunctionTemplateDecl *Challenger = cast<FunctionDecl>(*I)->getPrimaryTemplate(); if (I != Best && !isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger, Loc, TPOC_Other, 0, 0), BestTemplate)) { Ambiguous = true; break; } } if (!Ambiguous) { // We found an answer. Return it. return Best; } // Diagnose the ambiguity. if (Complain) { Diag(Loc, AmbigDiag); // FIXME: Can we order the candidates in some sane way? for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) { PartialDiagnostic PD = CandidateDiag; PD << getTemplateArgumentBindingsText( cast<FunctionDecl>(*I)->getPrimaryTemplate()->getTemplateParameters(), *cast<FunctionDecl>(*I)->getTemplateSpecializationArgs()); if (!TargetType.isNull()) HandleFunctionTypeMismatch(PD, cast<FunctionDecl>(*I)->getType(), TargetType); Diag((*I)->getLocation(), PD); } } return SpecEnd; } /// \brief Returns the more specialized class template partial specialization /// according to the rules of partial ordering of class template partial /// specializations (C++ [temp.class.order]). /// /// \param PS1 the first class template partial specialization /// /// \param PS2 the second class template partial specialization /// /// \returns the more specialized class template partial specialization. If /// neither partial specialization is more specialized, returns NULL. 
ClassTemplatePartialSpecializationDecl * Sema::getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc) { // C++ [temp.class.order]p1: // For two class template partial specializations, the first is at least as // specialized as the second if, given the following rewrite to two // function templates, the first function template is at least as // specialized as the second according to the ordering rules for function // templates (14.6.6.2): // - the first function template has the same template parameters as the // first partial specialization and has a single function parameter // whose type is a class template specialization with the template // arguments of the first partial specialization, and // - the second function template has the same template parameters as the // second partial specialization and has a single function parameter // whose type is a class template specialization with the template // arguments of the second partial specialization. // // Rather than synthesize function templates, we merely perform the // equivalent partial ordering by performing deduction directly on // the template arguments of the class template partial // specializations. This computation is slightly simpler than the // general problem of function template partial ordering, because // class template partial specializations are more constrained. We // know that every template parameter is deducible from the class // template partial specialization's template arguments, for // example. SmallVector<DeducedTemplateArgument, 4> Deduced; TemplateDeductionInfo Info(Loc); QualType PT1 = PS1->getInjectedSpecializationType(); QualType PT2 = PS2->getInjectedSpecializationType(); // Determine whether PS1 is at least as specialized as PS2 Deduced.resize(PS2->getTemplateParameters()->size()); bool Better1 = !DeduceTemplateArgumentsByTypeMatch(*this, PS2->getTemplateParameters(), PT2, PT1, Info, Deduced, TDF_None, /*PartialOrdering=*/true); if (Better1) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end()); InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info); Better1 = !::FinishTemplateArgumentDeduction( *this, PS2, PS1->getTemplateArgs(), Deduced, Info); } // Determine whether PS2 is at least as specialized as PS1 Deduced.clear(); Deduced.resize(PS1->getTemplateParameters()->size()); bool Better2 = !DeduceTemplateArgumentsByTypeMatch( *this, PS1->getTemplateParameters(), PT1, PT2, Info, Deduced, TDF_None, /*PartialOrdering=*/true); if (Better2) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end()); InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info); Better2 = !::FinishTemplateArgumentDeduction( *this, PS1, PS2->getTemplateArgs(), Deduced, Info); } if (Better1 == Better2) return nullptr; return Better1 ? PS1 : PS2; } /// TODO: Unify with ClassTemplatePartialSpecializationDecl version? /// May require unifying ClassTemplate(Partial)SpecializationDecl and /// VarTemplate(Partial)SpecializationDecl with a new data /// structure Template(Partial)SpecializationDecl, and /// using Template(Partial)SpecializationDecl as input type. 
VarTemplatePartialSpecializationDecl * Sema::getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc) { SmallVector<DeducedTemplateArgument, 4> Deduced; TemplateDeductionInfo Info(Loc); assert(PS1->getSpecializedTemplate() == PS2->getSpecializedTemplate() && "the partial specializations being compared should specialize" " the same template."); TemplateName Name(PS1->getSpecializedTemplate()); TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name); QualType PT1 = Context.getTemplateSpecializationType( CanonTemplate, PS1->getTemplateArgs().data(), PS1->getTemplateArgs().size()); QualType PT2 = Context.getTemplateSpecializationType( CanonTemplate, PS2->getTemplateArgs().data(), PS2->getTemplateArgs().size()); // Determine whether PS1 is at least as specialized as PS2 Deduced.resize(PS2->getTemplateParameters()->size()); bool Better1 = !DeduceTemplateArgumentsByTypeMatch( *this, PS2->getTemplateParameters(), PT2, PT1, Info, Deduced, TDF_None, /*PartialOrdering=*/true); if (Better1) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(), Deduced.end()); InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info); Better1 = !::FinishTemplateArgumentDeduction(*this, PS2, PS1->getTemplateArgs(), Deduced, Info); } // Determine whether PS2 is at least as specialized as PS1 Deduced.clear(); Deduced.resize(PS1->getTemplateParameters()->size()); bool Better2 = !DeduceTemplateArgumentsByTypeMatch(*this, PS1->getTemplateParameters(), PT1, PT2, Info, Deduced, TDF_None, /*PartialOrdering=*/true); if (Better2) { SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end()); InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info); Better2 = !::FinishTemplateArgumentDeduction(*this, PS1, PS2->getTemplateArgs(), Deduced, Info); } if (Better1 == Better2) return nullptr; return Better1? PS1 : PS2; } static void MarkUsedTemplateParameters(ASTContext &Ctx, const TemplateArgument &TemplateArg, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); /// \brief Mark the template parameters that are used by the given /// expression. static void MarkUsedTemplateParameters(ASTContext &Ctx, const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { // We can deduce from a pack expansion. if (const PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(E)) E = Expansion->getPattern(); // Skip through any implicit casts we added while type-checking, and any // substitutions performed by template alias expansion. while (1) { if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) E = ICE->getSubExpr(); else if (const SubstNonTypeTemplateParmExpr *Subst = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) E = Subst->getReplacement(); else break; } // FIXME: if !OnlyDeduced, we have to walk the whole subexpression to // find other occurrences of template parameters. const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E); if (!DRE) return; const NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl()); if (!NTTP) return; if (NTTP->getDepth() == Depth) Used[NTTP->getIndex()] = true; } /// \brief Mark the template parameters that are used by the given /// nested name specifier. 
static void MarkUsedTemplateParameters(ASTContext &Ctx, NestedNameSpecifier *NNS, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { if (!NNS) return; MarkUsedTemplateParameters(Ctx, NNS->getPrefix(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, QualType(NNS->getAsType(), 0), OnlyDeduced, Depth, Used); } /// \brief Mark the template parameters that are used by the given /// template name. static void MarkUsedTemplateParameters(ASTContext &Ctx, TemplateName Name, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { if (TemplateDecl *Template = Name.getAsTemplateDecl()) { if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(Template)) { if (TTP->getDepth() == Depth) Used[TTP->getIndex()] = true; } return; } if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) MarkUsedTemplateParameters(Ctx, QTN->getQualifier(), OnlyDeduced, Depth, Used); if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) MarkUsedTemplateParameters(Ctx, DTN->getQualifier(), OnlyDeduced, Depth, Used); } /// \brief Mark the template parameters that are used by the given /// type. static void MarkUsedTemplateParameters(ASTContext &Ctx, QualType T, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { if (T.isNull()) return; // Non-dependent types have nothing deducible if (!T->isDependentType()) return; T = Ctx.getCanonicalType(T); switch (T->getTypeClass()) { case Type::Pointer: MarkUsedTemplateParameters(Ctx, cast<PointerType>(T)->getPointeeType(), OnlyDeduced, Depth, Used); break; case Type::BlockPointer: MarkUsedTemplateParameters(Ctx, cast<BlockPointerType>(T)->getPointeeType(), OnlyDeduced, Depth, Used); break; case Type::LValueReference: case Type::RValueReference: MarkUsedTemplateParameters(Ctx, cast<ReferenceType>(T)->getPointeeType(), OnlyDeduced, Depth, Used); break; case Type::MemberPointer: { const MemberPointerType *MemPtr = cast<MemberPointerType>(T.getTypePtr()); MarkUsedTemplateParameters(Ctx, MemPtr->getPointeeType(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, QualType(MemPtr->getClass(), 0), OnlyDeduced, Depth, Used); break; } case Type::DependentSizedArray: MarkUsedTemplateParameters(Ctx, cast<DependentSizedArrayType>(T)->getSizeExpr(), OnlyDeduced, Depth, Used); // Fall through to check the element type LLVM_FALLTHROUGH; // HLSL Change case Type::ConstantArray: case Type::IncompleteArray: MarkUsedTemplateParameters(Ctx, cast<ArrayType>(T)->getElementType(), OnlyDeduced, Depth, Used); break; case Type::Vector: case Type::ExtVector: MarkUsedTemplateParameters(Ctx, cast<VectorType>(T)->getElementType(), OnlyDeduced, Depth, Used); break; case Type::DependentSizedExtVector: { const DependentSizedExtVectorType *VecType = cast<DependentSizedExtVectorType>(T); MarkUsedTemplateParameters(Ctx, VecType->getElementType(), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, VecType->getSizeExpr(), OnlyDeduced, Depth, Used); break; } case Type::FunctionProto: { const FunctionProtoType *Proto = cast<FunctionProtoType>(T); MarkUsedTemplateParameters(Ctx, Proto->getReturnType(), OnlyDeduced, Depth, Used); for (unsigned I = 0, N = Proto->getNumParams(); I != N; ++I) MarkUsedTemplateParameters(Ctx, Proto->getParamType(I), OnlyDeduced, Depth, Used); break; } case Type::TemplateTypeParm: { const TemplateTypeParmType *TTP = cast<TemplateTypeParmType>(T); if (TTP->getDepth() == Depth) Used[TTP->getIndex()] = true; break; } case Type::SubstTemplateTypeParmPack: { const SubstTemplateTypeParmPackType *Subst = 
cast<SubstTemplateTypeParmPackType>(T); MarkUsedTemplateParameters(Ctx, QualType(Subst->getReplacedParameter(), 0), OnlyDeduced, Depth, Used); MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(), OnlyDeduced, Depth, Used); break; } case Type::InjectedClassName: T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType(); LLVM_FALLTHROUGH; // HLSL Change case Type::TemplateSpecialization: { const TemplateSpecializationType *Spec = cast<TemplateSpecializationType>(T); MarkUsedTemplateParameters(Ctx, Spec->getTemplateName(), OnlyDeduced, Depth, Used); // C++0x [temp.deduct.type]p9: // If the template argument list of P contains a pack expansion that is // not the last template argument, the entire template argument list is a // non-deduced context. if (OnlyDeduced && hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs())) break; for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I) MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth, Used); break; } case Type::Complex: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast<ComplexType>(T)->getElementType(), OnlyDeduced, Depth, Used); break; case Type::Atomic: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast<AtomicType>(T)->getValueType(), OnlyDeduced, Depth, Used); break; case Type::DependentName: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast<DependentNameType>(T)->getQualifier(), OnlyDeduced, Depth, Used); break; case Type::DependentTemplateSpecialization: { const DependentTemplateSpecializationType *Spec = cast<DependentTemplateSpecializationType>(T); if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, Spec->getQualifier(), OnlyDeduced, Depth, Used); // C++0x [temp.deduct.type]p9: // If the template argument list of P contains a pack expansion that is not // the last template argument, the entire template argument list is a // non-deduced context. if (OnlyDeduced && hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs())) break; for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I) MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth, Used); break; } case Type::TypeOf: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast<TypeOfType>(T)->getUnderlyingType(), OnlyDeduced, Depth, Used); break; case Type::TypeOfExpr: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast<TypeOfExprType>(T)->getUnderlyingExpr(), OnlyDeduced, Depth, Used); break; case Type::Decltype: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast<DecltypeType>(T)->getUnderlyingExpr(), OnlyDeduced, Depth, Used); break; case Type::UnaryTransform: if (!OnlyDeduced) MarkUsedTemplateParameters(Ctx, cast<UnaryTransformType>(T)->getUnderlyingType(), OnlyDeduced, Depth, Used); break; case Type::PackExpansion: MarkUsedTemplateParameters(Ctx, cast<PackExpansionType>(T)->getPattern(), OnlyDeduced, Depth, Used); break; case Type::Auto: MarkUsedTemplateParameters(Ctx, cast<AutoType>(T)->getDeducedType(), OnlyDeduced, Depth, Used); break; // None of these types have any template parameters in them. case Type::Builtin: case Type::VariableArray: case Type::FunctionNoProto: case Type::Record: case Type::Enum: case Type::ObjCInterface: case Type::ObjCObject: case Type::ObjCObjectPointer: case Type::UnresolvedUsing: #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.def" break; } } /// \brief Mark the template parameters that are used by this /// template argument. 
static void MarkUsedTemplateParameters(ASTContext &Ctx, const TemplateArgument &TemplateArg, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { switch (TemplateArg.getKind()) { case TemplateArgument::Null: case TemplateArgument::Integral: case TemplateArgument::Declaration: break; case TemplateArgument::NullPtr: MarkUsedTemplateParameters(Ctx, TemplateArg.getNullPtrType(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Type: MarkUsedTemplateParameters(Ctx, TemplateArg.getAsType(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: MarkUsedTemplateParameters(Ctx, TemplateArg.getAsTemplateOrTemplatePattern(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Expression: MarkUsedTemplateParameters(Ctx, TemplateArg.getAsExpr(), OnlyDeduced, Depth, Used); break; case TemplateArgument::Pack: for (const auto &P : TemplateArg.pack_elements()) MarkUsedTemplateParameters(Ctx, P, OnlyDeduced, Depth, Used); break; } } /// \brief Mark which template parameters can be deduced from a given /// template argument list. /// /// \param TemplateArgs the template argument list from which template /// parameters will be deduced. /// /// \param Used a bit vector whose elements will be set to \c true /// to indicate when the corresponding template parameter will be /// deduced. void Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used) { // C++0x [temp.deduct.type]p9: // If the template argument list of P contains a pack expansion that is not // the last template argument, the entire template argument list is a // non-deduced context. if (OnlyDeduced && hasPackExpansionBeforeEnd(TemplateArgs.data(), TemplateArgs.size())) return; for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) ::MarkUsedTemplateParameters(Context, TemplateArgs[I], OnlyDeduced, Depth, Used); } /// \brief Marks all of the template parameters that will be deduced by a /// call to the given function template. void Sema::MarkDeducedTemplateParameters( ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); Deduced.clear(); Deduced.resize(TemplateParams->size()); FunctionDecl *Function = FunctionTemplate->getTemplatedDecl(); for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I) ::MarkUsedTemplateParameters(Ctx, Function->getParamDecl(I)->getType(), true, TemplateParams->getDepth(), Deduced); } bool hasDeducibleTemplateParameters(Sema &S, FunctionTemplateDecl *FunctionTemplate, QualType T) { if (!T->isDependentType()) return false; TemplateParameterList *TemplateParams = FunctionTemplate->getTemplateParameters(); llvm::SmallBitVector Deduced(TemplateParams->size()); ::MarkUsedTemplateParameters(S.Context, T, true, TemplateParams->getDepth(), Deduced); return Deduced.any(); }
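// ---------------------------------------------------------------------------
// Illustrative sketch: how Sema::DeduceAutoType above maps 'auto' deduction
// onto function template argument deduction, per C++11 [dcl.spec.auto]p6.
// For a declaration
//
//   auto x = init;
//
// it synthesizes the equivalent of
//
//   template <class T> void f(T);   // 'auto' in the pattern replaced by T
//
// and deduces T from the hypothetical call f(init). Consequently these
// deduce identically:
//
//   const int ci = 0;
//   auto a = ci;                    // a is int (top-level const dropped),
//   f(ci);                          // exactly as T is deduced as int here.
//
// Initializer lists are the deliberate divergence: 'auto a = {1, 2};'
// deduces std::initializer_list<int> via BuildStdInitializerList, whereas
// the template call f({1, 2}) would fail to deduce T.
// ---------------------------------------------------------------------------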
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/DeclSpec.cpp
//===--- SemaDeclSpec.cpp - Declaration Specifier Semantic Analysis -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for declaration specifiers. // //===----------------------------------------------------------------------===// #include "clang/Sema/DeclSpec.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Parse/ParseDiagnostic.h" // FIXME: remove this back-dependency! #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Sema.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallString.h" #include "llvm/Support/ErrorHandling.h" #include <cstring> using namespace clang; static DiagnosticBuilder Diag(DiagnosticsEngine &D, SourceLocation Loc, unsigned DiagID) { return D.Report(Loc, DiagID); } void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) { assert(TemplateId && "NULL template-id annotation?"); Kind = IK_TemplateId; this->TemplateId = TemplateId; StartLocation = TemplateId->TemplateNameLoc; EndLocation = TemplateId->RAngleLoc; } void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) { assert(TemplateId && "NULL template-id annotation?"); Kind = IK_ConstructorTemplateId; this->TemplateId = TemplateId; StartLocation = TemplateId->TemplateNameLoc; EndLocation = TemplateId->RAngleLoc; } void CXXScopeSpec::Extend(ASTContext &Context, SourceLocation TemplateKWLoc, TypeLoc TL, SourceLocation ColonColonLoc) { Builder.Extend(Context, TemplateKWLoc, TL, ColonColonLoc); if (Range.getBegin().isInvalid()) Range.setBegin(TL.getBeginLoc()); Range.setEnd(ColonColonLoc); assert(Range == Builder.getSourceRange() && "NestedNameSpecifierLoc range computation incorrect"); } void CXXScopeSpec::Extend(ASTContext &Context, IdentifierInfo *Identifier, SourceLocation IdentifierLoc, SourceLocation ColonColonLoc) { Builder.Extend(Context, Identifier, IdentifierLoc, ColonColonLoc); if (Range.getBegin().isInvalid()) Range.setBegin(IdentifierLoc); Range.setEnd(ColonColonLoc); assert(Range == Builder.getSourceRange() && "NestedNameSpecifierLoc range computation incorrect"); } void CXXScopeSpec::Extend(ASTContext &Context, NamespaceDecl *Namespace, SourceLocation NamespaceLoc, SourceLocation ColonColonLoc) { Builder.Extend(Context, Namespace, NamespaceLoc, ColonColonLoc); if (Range.getBegin().isInvalid()) Range.setBegin(NamespaceLoc); Range.setEnd(ColonColonLoc); assert(Range == Builder.getSourceRange() && "NestedNameSpecifierLoc range computation incorrect"); } void CXXScopeSpec::Extend(ASTContext &Context, NamespaceAliasDecl *Alias, SourceLocation AliasLoc, SourceLocation ColonColonLoc) { Builder.Extend(Context, Alias, AliasLoc, ColonColonLoc); if (Range.getBegin().isInvalid()) Range.setBegin(AliasLoc); Range.setEnd(ColonColonLoc); assert(Range == Builder.getSourceRange() && "NestedNameSpecifierLoc range computation incorrect"); } void CXXScopeSpec::MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc) { Builder.MakeGlobal(Context, ColonColonLoc); Range = SourceRange(ColonColonLoc); assert(Range == 
Builder.getSourceRange() && "NestedNameSpecifierLoc range computation incorrect"); } void CXXScopeSpec::MakeSuper(ASTContext &Context, CXXRecordDecl *RD, SourceLocation SuperLoc, SourceLocation ColonColonLoc) { Builder.MakeSuper(Context, RD, SuperLoc, ColonColonLoc); Range.setBegin(SuperLoc); Range.setEnd(ColonColonLoc); assert(Range == Builder.getSourceRange() && "NestedNameSpecifierLoc range computation incorrect"); } void CXXScopeSpec::MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier, SourceRange R) { Builder.MakeTrivial(Context, Qualifier, R); Range = R; } void CXXScopeSpec::Adopt(NestedNameSpecifierLoc Other) { if (!Other) { Range = SourceRange(); Builder.Clear(); return; } Range = Other.getSourceRange(); Builder.Adopt(Other); } SourceLocation CXXScopeSpec::getLastQualifierNameLoc() const { if (!Builder.getRepresentation()) return SourceLocation(); return Builder.getTemporary().getLocalBeginLoc(); } NestedNameSpecifierLoc CXXScopeSpec::getWithLocInContext(ASTContext &Context) const { if (!Builder.getRepresentation()) return NestedNameSpecifierLoc(); return Builder.getWithLocInContext(Context); } /// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function. /// "TheDeclarator" is the declarator that this will be added to. DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isAmbiguous, SourceLocation LParenLoc, ParamInfo *Params, unsigned NumParams, SourceLocation EllipsisLoc, SourceLocation RParenLoc, unsigned TypeQuals, bool RefQualifierIsLvalueRef, SourceLocation RefQualifierLoc, SourceLocation ConstQualifierLoc, SourceLocation VolatileQualifierLoc, SourceLocation RestrictQualifierLoc, SourceLocation MutableLoc, ExceptionSpecificationType ESpecType, SourceLocation ESpecLoc, ParsedType *Exceptions, SourceRange *ExceptionRanges, unsigned NumExceptions, Expr *NoexceptExpr, CachedTokens *ExceptionSpecTokens, SourceLocation LocalRangeBegin, SourceLocation LocalRangeEnd, Declarator &TheDeclarator, TypeResult TrailingReturnType) { assert(!(TypeQuals & DeclSpec::TQ_atomic) && "function cannot have _Atomic qualifier"); DeclaratorChunk I; I.Kind = Function; I.Loc = LocalRangeBegin; I.EndLoc = LocalRangeEnd; I.Fun.AttrList = nullptr; I.Fun.hasPrototype = hasProto; I.Fun.isVariadic = EllipsisLoc.isValid(); I.Fun.isAmbiguous = isAmbiguous; I.Fun.LParenLoc = LParenLoc.getRawEncoding(); I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding(); I.Fun.RParenLoc = RParenLoc.getRawEncoding(); I.Fun.DeleteParams = false; I.Fun.TypeQuals = TypeQuals; I.Fun.NumParams = NumParams; I.Fun.Params = nullptr; I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef; I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding(); I.Fun.ConstQualifierLoc = ConstQualifierLoc.getRawEncoding(); I.Fun.VolatileQualifierLoc = VolatileQualifierLoc.getRawEncoding(); I.Fun.RestrictQualifierLoc = RestrictQualifierLoc.getRawEncoding(); I.Fun.MutableLoc = MutableLoc.getRawEncoding(); I.Fun.ExceptionSpecType = ESpecType; I.Fun.ExceptionSpecLoc = ESpecLoc.getRawEncoding(); I.Fun.NumExceptions = 0; I.Fun.Exceptions = nullptr; I.Fun.NoexceptExpr = nullptr; I.Fun.HasTrailingReturnType = TrailingReturnType.isUsable() || TrailingReturnType.isInvalid(); I.Fun.TrailingReturnType = TrailingReturnType.get(); assert(I.Fun.TypeQuals == TypeQuals && "bitfield overflow"); assert(I.Fun.ExceptionSpecType == ESpecType && "bitfield overflow"); // new[] a parameter array if needed. 
if (NumParams) { // If the 'InlineParams' in Declarator is unused and big enough, put our // parameter list there (in an effort to avoid new/delete traffic). If it // is already used (consider a function returning a function pointer) or too // small (function with too many parameters), go to the heap. if (!TheDeclarator.InlineParamsUsed && NumParams <= llvm::array_lengthof(TheDeclarator.InlineParams)) { I.Fun.Params = TheDeclarator.InlineParams; I.Fun.DeleteParams = false; TheDeclarator.InlineParamsUsed = true; } else { I.Fun.Params = new DeclaratorChunk::ParamInfo[NumParams]; I.Fun.DeleteParams = true; } memcpy(I.Fun.Params, Params, sizeof(Params[0]) * NumParams); } // Check what exception specification information we should actually store. switch (ESpecType) { default: break; // By default, save nothing. case EST_Dynamic: // new[] an exception array if needed if (NumExceptions) { I.Fun.NumExceptions = NumExceptions; I.Fun.Exceptions = new DeclaratorChunk::TypeAndRange[NumExceptions]; for (unsigned i = 0; i != NumExceptions; ++i) { I.Fun.Exceptions[i].Ty = Exceptions[i]; I.Fun.Exceptions[i].Range = ExceptionRanges[i]; } } break; case EST_ComputedNoexcept: I.Fun.NoexceptExpr = NoexceptExpr; break; case EST_Unparsed: I.Fun.ExceptionSpecTokens = ExceptionSpecTokens; break; } return I; } bool Declarator::isDeclarationOfFunction() const { for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) { switch (DeclTypeInfo[i].Kind) { case DeclaratorChunk::Function: return true; case DeclaratorChunk::Paren: continue; case DeclaratorChunk::Pointer: case DeclaratorChunk::Reference: case DeclaratorChunk::Array: case DeclaratorChunk::BlockPointer: case DeclaratorChunk::MemberPointer: return false; } llvm_unreachable("Invalid type chunk"); } switch (DS.getTypeSpecType()) { case TST_atomic: case TST_auto: case TST_bool: case TST_char: case TST_char16: case TST_char32: case TST_class: case TST_decimal128: case TST_decimal32: case TST_decimal64: case TST_double: case TST_enum: case TST_error: case TST_float: case TST_half: case TST_int: case TST_int128: case TST_struct: case TST_interface: case TST_union: case TST_unknown_anytype: case TST_unspecified: case TST_void: case TST_wchar: // HLSL Change Starts case TST_halffloat: case TST_min16float: case TST_min16int: case TST_min16uint: case TST_min10float: case TST_min12int: case TST_int8_4packed: case TST_uint8_4packed: // HLSL Change Ends return false; case TST_decltype_auto: // This must have an initializer, so can't be a function declaration, // even if the initializer has function type. 
return false; case TST_decltype: case TST_typeofExpr: if (Expr *E = DS.getRepAsExpr()) return E->getType()->isFunctionType(); return false; case TST_underlyingType: case TST_typename: case TST_typeofType: { QualType QT = DS.getRepAsType().get(); if (QT.isNull()) return false; if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) QT = LIT->getType(); if (QT.isNull()) return false; return QT->isFunctionType(); } } llvm_unreachable("Invalid TypeSpecType!"); } bool Declarator::isStaticMember() { assert(getContext() == MemberContext); return getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static || (getName().Kind == UnqualifiedId::IK_OperatorFunctionId && CXXMethodDecl::isStaticOverloadedOperator( getName().OperatorFunctionId.Operator)); } bool DeclSpec::hasTagDefinition() const { if (!TypeSpecOwned) return false; return cast<TagDecl>(getRepAsDecl())->isCompleteDefinition(); } /// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this /// declaration specifier includes. /// unsigned DeclSpec::getParsedSpecifiers() const { unsigned Res = 0; if (StorageClassSpec != SCS_unspecified || ThreadStorageClassSpec != TSCS_unspecified) Res |= PQ_StorageClassSpecifier; if (TypeQualifiers != TQ_unspecified) Res |= PQ_TypeQualifier; if (hasTypeSpecifier()) Res |= PQ_TypeSpecifier; if (FS_inline_specified || FS_virtual_specified || FS_explicit_specified || FS_noreturn_specified || FS_forceinline_specified) Res |= PQ_FunctionSpecifier; return Res; } template <class T> static bool BadSpecifier(T TNew, T TPrev, const char *&PrevSpec, unsigned &DiagID, bool IsExtension = true) { PrevSpec = DeclSpec::getSpecifierName(TPrev); if (TNew != TPrev) DiagID = diag::err_invalid_decl_spec_combination; else DiagID = IsExtension ? diag::ext_duplicate_declspec : diag::warn_duplicate_declspec; return true; } const char *DeclSpec::getSpecifierName(DeclSpec::SCS S) { switch (S) { case DeclSpec::SCS_unspecified: return "unspecified"; case DeclSpec::SCS_typedef: return "typedef"; case DeclSpec::SCS_extern: return "extern"; case DeclSpec::SCS_static: return "static"; case DeclSpec::SCS_auto: return "auto"; case DeclSpec::SCS_register: return "register"; case DeclSpec::SCS_private_extern: return "__private_extern__"; case DeclSpec::SCS_mutable: return "mutable"; } llvm_unreachable("Unknown typespec!"); } const char *DeclSpec::getSpecifierName(DeclSpec::TSCS S) { switch (S) { case DeclSpec::TSCS_unspecified: return "unspecified"; case DeclSpec::TSCS___thread: return "__thread"; case DeclSpec::TSCS_thread_local: return "thread_local"; case DeclSpec::TSCS__Thread_local: return "_Thread_local"; } llvm_unreachable("Unknown typespec!"); } const char *DeclSpec::getSpecifierName(TSW W) { switch (W) { case TSW_unspecified: return "unspecified"; case TSW_short: return "short"; case TSW_long: return "long"; case TSW_longlong: return "long long"; } llvm_unreachable("Unknown typespec!"); } const char *DeclSpec::getSpecifierName(TSC C) { switch (C) { case TSC_unspecified: return "unspecified"; case TSC_imaginary: return "imaginary"; case TSC_complex: return "complex"; } llvm_unreachable("Unknown typespec!"); } const char *DeclSpec::getSpecifierName(TSS S) { switch (S) { case TSS_unspecified: return "unspecified"; case TSS_signed: return "signed"; case TSS_unsigned: return "unsigned"; } llvm_unreachable("Unknown typespec!"); } const char *DeclSpec::getSpecifierName(DeclSpec::TST T, const PrintingPolicy &Policy) { switch (T) { case DeclSpec::TST_unspecified: return "unspecified"; case DeclSpec::TST_void: return "void"; 
case DeclSpec::TST_char: return "char"; case DeclSpec::TST_wchar: return Policy.MSWChar ? "__wchar_t" : "wchar_t"; case DeclSpec::TST_char16: return "char16_t"; case DeclSpec::TST_char32: return "char32_t"; case DeclSpec::TST_int: return "int"; case DeclSpec::TST_int128: return "__int128"; // HLSL Change Starts case DeclSpec::TST_min16float: return "min16float"; case DeclSpec::TST_min16int: return "min16int"; case DeclSpec::TST_min16uint: return "min16uint"; case DeclSpec::TST_min10float: return "min10float"; case DeclSpec::TST_min12int: return "min12int"; case DeclSpec::TST_halffloat: case DeclSpec::TST_int8_4packed: return "int8_t4_packed"; case DeclSpec::TST_uint8_4packed: return "uint8_t4_packed"; // HLSL Change Ends case DeclSpec::TST_half: return "half"; case DeclSpec::TST_float: return "float"; case DeclSpec::TST_double: return "double"; case DeclSpec::TST_bool: return Policy.Bool ? "bool" : "_Bool"; case DeclSpec::TST_decimal32: return "_Decimal32"; case DeclSpec::TST_decimal64: return "_Decimal64"; case DeclSpec::TST_decimal128: return "_Decimal128"; case DeclSpec::TST_enum: return "enum"; case DeclSpec::TST_class: return "class"; case DeclSpec::TST_union: return "union"; case DeclSpec::TST_struct: return "struct"; case DeclSpec::TST_interface: return "__interface"; case DeclSpec::TST_typename: return "type-name"; case DeclSpec::TST_typeofType: case DeclSpec::TST_typeofExpr: return "typeof"; case DeclSpec::TST_auto: return "auto"; case DeclSpec::TST_decltype: return "(decltype)"; case DeclSpec::TST_decltype_auto: return "decltype(auto)"; case DeclSpec::TST_underlyingType: return "__underlying_type"; case DeclSpec::TST_unknown_anytype: return "__unknown_anytype"; case DeclSpec::TST_atomic: return "_Atomic"; case DeclSpec::TST_error: return "(error)"; } llvm_unreachable("Unknown typespec!"); } const char *DeclSpec::getSpecifierName(TQ T) { switch (T) { case DeclSpec::TQ_unspecified: return "unspecified"; case DeclSpec::TQ_const: return "const"; case DeclSpec::TQ_restrict: return "restrict"; case DeclSpec::TQ_volatile: return "volatile"; case DeclSpec::TQ_atomic: return "_Atomic"; } llvm_unreachable("Unknown typespec!"); } bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy) { // OpenCL v1.1 s6.8g: "The extern, static, auto and register storage-class // specifiers are not supported. // It seems sensible to prohibit private_extern too // The cl_clang_storage_class_specifiers extension enables support for // these storage-class specifiers. // OpenCL v1.2 s6.8 changes this to "The auto and register storage-class // specifiers are not supported." if (S.getLangOpts().OpenCL && !S.getOpenCLOptions().cl_clang_storage_class_specifiers) { switch (SC) { case SCS_extern: case SCS_private_extern: case SCS_static: if (S.getLangOpts().OpenCLVersion < 120) { DiagID = diag::err_opencl_unknown_type_specifier; PrevSpec = getSpecifierName(SC); return true; } break; case SCS_auto: case SCS_register: DiagID = diag::err_opencl_unknown_type_specifier; PrevSpec = getSpecifierName(SC); return true; default: break; } } if (StorageClassSpec != SCS_unspecified) { // Maybe this is an attempt to use C++11 'auto' outside of C++11 mode. 
bool isInvalid = true; if (TypeSpecType == TST_unspecified && S.getLangOpts().CPlusPlus) { if (SC == SCS_auto) return SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID, Policy); if (StorageClassSpec == SCS_auto) { isInvalid = SetTypeSpecType(TST_auto, StorageClassSpecLoc, PrevSpec, DiagID, Policy); assert(!isInvalid && "auto SCS -> TST recovery failed"); } } // Changing storage class is allowed only if the previous one // was the 'extern' that is part of a linkage specification and // the new storage class is 'typedef'. if (isInvalid && !(SCS_extern_in_linkage_spec && StorageClassSpec == SCS_extern && SC == SCS_typedef)) return BadSpecifier(SC, (SCS)StorageClassSpec, PrevSpec, DiagID); } StorageClassSpec = SC; StorageClassSpecLoc = Loc; assert((unsigned)SC == StorageClassSpec && "SCS constants overflow bitfield"); return false; } bool DeclSpec::SetStorageClassSpecThread(TSCS TSC, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { if (ThreadStorageClassSpec != TSCS_unspecified) return BadSpecifier(TSC, (TSCS)ThreadStorageClassSpec, PrevSpec, DiagID); ThreadStorageClassSpec = TSC; ThreadStorageClassSpecLoc = Loc; return false; } /// These methods set the specified attribute of the DeclSpec, but return true /// and ignore the request if invalid (e.g. "extern" then "auto" is /// specified). bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy) { // Overwrite TSWLoc only if TypeSpecWidth was unspecified, so that // for 'long long' we will keep the source location of the first 'long'. if (TypeSpecWidth == TSW_unspecified) TSWLoc = Loc; // Allow turning long -> long long. else if (W != TSW_longlong || TypeSpecWidth != TSW_long) return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID); TypeSpecWidth = W; return false; } bool DeclSpec::SetTypeSpecComplex(TSC C, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { if (TypeSpecComplex != TSC_unspecified) return BadSpecifier(C, (TSC)TypeSpecComplex, PrevSpec, DiagID); TypeSpecComplex = C; TSCLoc = Loc; return false; } bool DeclSpec::SetTypeSpecSign(TSS S, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { if (TypeSpecSign != TSS_unspecified) return BadSpecifier(S, (TSS)TypeSpecSign, PrevSpec, DiagID); TypeSpecSign = S; TSSLoc = Loc; return false; } bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, ParsedType Rep, const PrintingPolicy &Policy) { return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Policy); } bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc, SourceLocation TagNameLoc, const char *&PrevSpec, unsigned &DiagID, ParsedType Rep, const PrintingPolicy &Policy) { assert(isTypeRep(T) && "T does not store a type"); assert(Rep && "no type provided!"); if (TypeSpecType != TST_unspecified) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy); DiagID = diag::err_invalid_decl_spec_combination; return true; } TypeSpecType = T; TypeRep = Rep; TSTLoc = TagKwLoc; TSTNameLoc = TagNameLoc; TypeSpecOwned = false; return false; } bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, Expr *Rep, const PrintingPolicy &Policy) { assert(isExprRep(T) && "T does not store an expr"); assert(Rep && "no expression provided!"); if (TypeSpecType != TST_unspecified) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy); DiagID = diag::err_invalid_decl_spec_combination; return true; } TypeSpecType = T; ExprRep = Rep; TSTLoc 
= Loc; TSTNameLoc = Loc; TypeSpecOwned = false; return false; } bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, Decl *Rep, bool Owned, const PrintingPolicy &Policy) { return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Owned, Policy); } bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc, SourceLocation TagNameLoc, const char *&PrevSpec, unsigned &DiagID, Decl *Rep, bool Owned, const PrintingPolicy &Policy) { assert(isDeclRep(T) && "T does not store a decl"); // Unlike the other cases, we don't assert that we actually get a decl. if (TypeSpecType != TST_unspecified) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy); DiagID = diag::err_invalid_decl_spec_combination; return true; } TypeSpecType = T; DeclRep = Rep; TSTLoc = TagKwLoc; TSTNameLoc = TagNameLoc; TypeSpecOwned = Owned && Rep != nullptr; return false; } bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy) { assert(!isDeclRep(T) && !isTypeRep(T) && !isExprRep(T) && "rep required for these type-spec kinds!"); if (TypeSpecType != TST_unspecified) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy); DiagID = diag::err_invalid_decl_spec_combination; return true; } TSTLoc = Loc; TSTNameLoc = Loc; if (TypeAltiVecVector && (T == TST_bool) && !TypeAltiVecBool) { TypeAltiVecBool = true; return false; } TypeSpecType = T; TypeSpecOwned = false; return false; } bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy) { if (TypeSpecType != TST_unspecified) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy); DiagID = diag::err_invalid_vector_decl_spec_combination; return true; } TypeAltiVecVector = isAltiVecVector; AltiVecLoc = Loc; return false; } bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy) { if (!TypeAltiVecVector || TypeAltiVecPixel || (TypeSpecType != TST_unspecified)) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy); DiagID = diag::err_invalid_pixel_decl_spec_combination; return true; } TypeAltiVecPixel = isAltiVecPixel; TSTLoc = Loc; TSTNameLoc = Loc; return false; } bool DeclSpec::SetTypeAltiVecBool(bool isAltiVecBool, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const PrintingPolicy &Policy) { if (!TypeAltiVecVector || TypeAltiVecBool || (TypeSpecType != TST_unspecified)) { PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType, Policy); DiagID = diag::err_invalid_vector_bool_decl_spec; return true; } TypeAltiVecBool = isAltiVecBool; TSTLoc = Loc; TSTNameLoc = Loc; return false; } bool DeclSpec::SetTypeSpecError() { TypeSpecType = TST_error; TypeSpecOwned = false; TSTLoc = SourceLocation(); TSTNameLoc = SourceLocation(); return false; } bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, const LangOptions &Lang) { // Duplicates are permitted in C99 onwards, but are not permitted in C89 or // C++. However, since this is likely not what the user intended, we will // always warn. We do not need to set the qualifier's location since we // already have it. 
if (TypeQualifiers & T) { bool IsExtension = true; if (Lang.C99) IsExtension = false; return BadSpecifier(T, T, PrevSpec, DiagID, IsExtension); } TypeQualifiers |= T; switch (T) { case TQ_unspecified: break; case TQ_const: TQ_constLoc = Loc; return false; case TQ_restrict: TQ_restrictLoc = Loc; return false; case TQ_volatile: TQ_volatileLoc = Loc; return false; case TQ_atomic: TQ_atomicLoc = Loc; return false; } llvm_unreachable("Unknown type qualifier!"); } bool DeclSpec::setFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { // 'inline inline' is ok. However, since this is likely not what the user // intended, we will always warn, similar to duplicates of type qualifiers. if (FS_inline_specified) { DiagID = diag::warn_duplicate_declspec; PrevSpec = "inline"; return true; } FS_inline_specified = true; FS_inlineLoc = Loc; return false; } bool DeclSpec::setFunctionSpecForceInline(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { if (FS_forceinline_specified) { DiagID = diag::warn_duplicate_declspec; PrevSpec = "__forceinline"; return true; } FS_forceinline_specified = true; FS_forceinlineLoc = Loc; return false; } bool DeclSpec::setFunctionSpecVirtual(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { // 'virtual virtual' is ok, but warn as this is likely not what the user // intended. if (FS_virtual_specified) { DiagID = diag::warn_duplicate_declspec; PrevSpec = "virtual"; return true; } FS_virtual_specified = true; FS_virtualLoc = Loc; return false; } bool DeclSpec::setFunctionSpecExplicit(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { // 'explicit explicit' is ok, but warn as this is likely not what the user // intended. if (FS_explicit_specified) { DiagID = diag::warn_duplicate_declspec; PrevSpec = "explicit"; return true; } FS_explicit_specified = true; FS_explicitLoc = Loc; return false; } bool DeclSpec::setFunctionSpecNoreturn(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { // '_Noreturn _Noreturn' is ok, but warn as this is likely not what the user // intended. if (FS_noreturn_specified) { DiagID = diag::warn_duplicate_declspec; PrevSpec = "_Noreturn"; return true; } FS_noreturn_specified = true; FS_noreturnLoc = Loc; return false; } bool DeclSpec::SetFriendSpec(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { if (Friend_specified) { PrevSpec = "friend"; // Keep the later location, so that we can later diagnose ill-formed // declarations like 'friend class X friend;'. Per [class.friend]p3, // 'friend' must be the first token in a friend declaration that is // not a function declaration. FriendLoc = Loc; DiagID = diag::warn_duplicate_declspec; return true; } Friend_specified = true; FriendLoc = Loc; return false; } bool DeclSpec::setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { if (isModulePrivateSpecified()) { PrevSpec = "__module_private__"; DiagID = diag::ext_duplicate_declspec; return true; } ModulePrivateLoc = Loc; return false; } bool DeclSpec::SetConstexprSpec(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { // 'constexpr constexpr' is ok, but warn as this is likely not what the user // intended. 
if (Constexpr_specified) { DiagID = diag::warn_duplicate_declspec; PrevSpec = "constexpr"; return true; } Constexpr_specified = true; ConstexprLoc = Loc; return false; } bool DeclSpec::SetConceptSpec(SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID) { if (Concept_specified) { DiagID = diag::ext_duplicate_declspec; PrevSpec = "concept"; return true; } Concept_specified = true; ConceptLoc = Loc; return false; } void DeclSpec::SaveWrittenBuiltinSpecs() { writtenBS.Sign = getTypeSpecSign(); writtenBS.Width = getTypeSpecWidth(); writtenBS.Type = getTypeSpecType(); // Search the list of attributes for the presence of a mode attribute. writtenBS.ModeAttr = false; AttributeList* attrs = getAttributes().getList(); while (attrs) { if (attrs->getKind() == AttributeList::AT_Mode) { writtenBS.ModeAttr = true; break; } attrs = attrs->getNext(); } } /// Finish - This does final analysis of the declspec, rejecting things like /// "_Imaginary" (lacking an FP type). This returns a diagnostic to issue or /// diag::NUM_DIAGNOSTICS if there is no error. After calling this method, /// DeclSpec is guaranteed self-consistent, even if an error occurred. void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP, const PrintingPolicy &Policy) { // Before possibly changing their values, save specs as written. SaveWrittenBuiltinSpecs(); // Check the type specifier components first. // If decltype(auto) is used, no other type specifiers are permitted. if (TypeSpecType == TST_decltype_auto && (TypeSpecWidth != TSW_unspecified || TypeSpecComplex != TSC_unspecified || TypeSpecSign != TSS_unspecified || TypeAltiVecVector || TypeAltiVecPixel || TypeAltiVecBool || TypeQualifiers)) { const unsigned NumLocs = 8; SourceLocation ExtraLocs[NumLocs] = { TSWLoc, TSCLoc, TSSLoc, AltiVecLoc, TQ_constLoc, TQ_restrictLoc, TQ_volatileLoc, TQ_atomicLoc }; FixItHint Hints[NumLocs]; SourceLocation FirstLoc; for (unsigned I = 0; I != NumLocs; ++I) { if (!ExtraLocs[I].isInvalid()) { if (FirstLoc.isInvalid() || PP.getSourceManager().isBeforeInTranslationUnit(ExtraLocs[I], FirstLoc)) FirstLoc = ExtraLocs[I]; Hints[I] = FixItHint::CreateRemoval(ExtraLocs[I]); } } TypeSpecWidth = TSW_unspecified; TypeSpecComplex = TSC_unspecified; TypeSpecSign = TSS_unspecified; TypeAltiVecVector = TypeAltiVecPixel = TypeAltiVecBool = false; TypeQualifiers = 0; Diag(D, TSTLoc, diag::err_decltype_auto_cannot_be_combined) << Hints[0] << Hints[1] << Hints[2] << Hints[3] << Hints[4] << Hints[5] << Hints[6] << Hints[7]; } // Validate and finalize AltiVec vector declspec. if (TypeAltiVecVector) { if (TypeAltiVecBool) { // Sign specifiers are not allowed with vector bool. (PIM 2.1) if (TypeSpecSign != TSS_unspecified) { Diag(D, TSSLoc, diag::err_invalid_vector_bool_decl_spec) << getSpecifierName((TSS)TypeSpecSign); } // Only char/int are valid with vector bool. (PIM 2.1) if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) && (TypeSpecType != TST_int)) || TypeAltiVecPixel) { Diag(D, TSTLoc, diag::err_invalid_vector_bool_decl_spec) << (TypeAltiVecPixel ? "__pixel" : getSpecifierName((TST)TypeSpecType, Policy)); } // Only 'short' and 'long long' are valid with vector bool. (PIM 2.1) if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) && (TypeSpecWidth != TSW_longlong)) Diag(D, TSWLoc, diag::err_invalid_vector_bool_decl_spec) << getSpecifierName((TSW)TypeSpecWidth); // vector bool long long requires VSX support or ZVector. 
if ((TypeSpecWidth == TSW_longlong) && (!PP.getTargetInfo().hasFeature("vsx")) && (!PP.getTargetInfo().hasFeature("power8-vector")) && !PP.getLangOpts().ZVector) Diag(D, TSTLoc, diag::err_invalid_vector_long_long_decl_spec); // Elements of vector bool are interpreted as unsigned. (PIM 2.1) if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) || (TypeSpecWidth != TSW_unspecified)) TypeSpecSign = TSS_unsigned; } else if (TypeSpecType == TST_double) { // vector long double and vector long long double are never allowed. // vector double is OK for Power7 and later, and ZVector. if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong) Diag(D, TSWLoc, diag::err_invalid_vector_long_double_decl_spec); else if (!PP.getTargetInfo().hasFeature("vsx") && !PP.getLangOpts().ZVector) Diag(D, TSTLoc, diag::err_invalid_vector_double_decl_spec); } else if (TypeSpecType == TST_float) { // vector float is unsupported for ZVector. if (PP.getLangOpts().ZVector) Diag(D, TSTLoc, diag::err_invalid_vector_float_decl_spec); } else if (TypeSpecWidth == TSW_long) { // vector long is unsupported for ZVector and deprecated for AltiVec. if (PP.getLangOpts().ZVector) Diag(D, TSWLoc, diag::err_invalid_vector_long_decl_spec); else Diag(D, TSWLoc, diag::warn_vector_long_decl_spec_combination) << getSpecifierName((TST)TypeSpecType, Policy); } if (TypeAltiVecPixel) { //TODO: perform validation TypeSpecType = TST_int; TypeSpecSign = TSS_unsigned; TypeSpecWidth = TSW_short; TypeSpecOwned = false; } } // signed/unsigned are only valid with int/char/wchar_t. if (TypeSpecSign != TSS_unspecified) { // HLSL Change starts - signed/unsigned are not complete type specifiers. #if 0 if (TypeSpecType == TST_unspecified) TypeSpecType = TST_int; #endif // shorthand vectors and matrices can have signed/unsigned specifiers. // If other typenames are used with signed/unsigned, it is already diagnosed by hlsl external source if (TypeSpecType != TST_int && TypeSpecType != TST_int128 && TypeSpecType != TST_char && TypeSpecType != TST_wchar && TypeSpecType != TST_typename) { Diag(D, TSSLoc, diag::err_invalid_sign_spec) << getSpecifierName((TST)TypeSpecType, Policy); // signed double -> double. TypeSpecSign = TSS_unspecified; } // HLSL Change end } // Validate the width of the type. switch (TypeSpecWidth) { case TSW_unspecified: break; case TSW_short: // short int case TSW_longlong: // long long int if (TypeSpecType == TST_unspecified) TypeSpecType = TST_int; // short -> short int, long long -> long long int. else if (TypeSpecType != TST_int) { Diag(D, TSWLoc, TypeSpecWidth == TSW_short ? diag::err_invalid_short_spec : diag::err_invalid_longlong_spec) << getSpecifierName((TST)TypeSpecType, Policy); TypeSpecType = TST_int; TypeSpecOwned = false; } break; case TSW_long: // long double, long int if (TypeSpecType == TST_unspecified) TypeSpecType = TST_int; // long -> long int. else if (TypeSpecType != TST_int && TypeSpecType != TST_double) { Diag(D, TSWLoc, diag::err_invalid_long_spec) << getSpecifierName((TST)TypeSpecType, Policy); TypeSpecType = TST_int; TypeSpecOwned = false; } break; } // TODO: if the implementation does not implement _Complex or _Imaginary, // disallow their use. Need information about the backend. if (TypeSpecComplex != TSC_unspecified) { if (TypeSpecType == TST_unspecified) { Diag(D, TSCLoc, diag::ext_plain_complex) << FixItHint::CreateInsertion( PP.getLocForEndOfToken(getTypeSpecComplexLoc()), " double"); TypeSpecType = TST_double; // _Complex -> _Complex double. 
} else if (TypeSpecType == TST_int || TypeSpecType == TST_char) { // Note that this intentionally doesn't include _Complex _Bool. if (!PP.getLangOpts().CPlusPlus) Diag(D, TSTLoc, diag::ext_integer_complex); } else if (TypeSpecType != TST_float && TypeSpecType != TST_double) { Diag(D, TSCLoc, diag::err_invalid_complex_spec) << getSpecifierName((TST)TypeSpecType, Policy); TypeSpecComplex = TSC_unspecified; } } // C11 6.7.1/3, C++11 [dcl.stc]p1, GNU TLS: __thread, thread_local and // _Thread_local can only appear with the 'static' and 'extern' storage class // specifiers. We also allow __private_extern__ as an extension. if (ThreadStorageClassSpec != TSCS_unspecified) { switch (StorageClassSpec) { case SCS_unspecified: case SCS_extern: case SCS_private_extern: case SCS_static: break; default: if (PP.getSourceManager().isBeforeInTranslationUnit( getThreadStorageClassSpecLoc(), getStorageClassSpecLoc())) Diag(D, getStorageClassSpecLoc(), diag::err_invalid_decl_spec_combination) << DeclSpec::getSpecifierName(getThreadStorageClassSpec()) << SourceRange(getThreadStorageClassSpecLoc()); else Diag(D, getThreadStorageClassSpecLoc(), diag::err_invalid_decl_spec_combination) << DeclSpec::getSpecifierName(getStorageClassSpec()) << SourceRange(getStorageClassSpecLoc()); // Discard the thread storage class specifier to recover. ThreadStorageClassSpec = TSCS_unspecified; ThreadStorageClassSpecLoc = SourceLocation(); } } // If no type specifier was provided and we're parsing a language where // the type specifier is not optional, but we got 'auto' as a storage // class specifier, then assume this is an attempt to use C++0x's 'auto' // type specifier. if (PP.getLangOpts().CPlusPlus && TypeSpecType == TST_unspecified && StorageClassSpec == SCS_auto) { TypeSpecType = TST_auto; StorageClassSpec = SCS_unspecified; TSTLoc = TSTNameLoc = StorageClassSpecLoc; StorageClassSpecLoc = SourceLocation(); } // Diagnose if we've recovered from an ill-formed 'auto' storage class // specifier in a pre-C++11 dialect of C++. if (!PP.getLangOpts().CPlusPlus11 && TypeSpecType == TST_auto) Diag(D, TSTLoc, diag::ext_auto_type_specifier); if (PP.getLangOpts().CPlusPlus && !PP.getLangOpts().CPlusPlus11 && StorageClassSpec == SCS_auto) Diag(D, StorageClassSpecLoc, diag::warn_auto_storage_class) << FixItHint::CreateRemoval(StorageClassSpecLoc); if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32) Diag(D, TSTLoc, diag::warn_cxx98_compat_unicode_type) << (TypeSpecType == TST_char16 ? "char16_t" : "char32_t"); if (Constexpr_specified) Diag(D, ConstexprLoc, diag::warn_cxx98_compat_constexpr); // C++ [class.friend]p6: // No storage-class-specifier shall appear in the decl-specifier-seq // of a friend declaration. 
  if (isFriendSpecified() &&
      (getStorageClassSpec() || getThreadStorageClassSpec())) {
    SmallString<32> SpecName;
    SourceLocation SCLoc;
    FixItHint StorageHint, ThreadHint;

    if (DeclSpec::SCS SC = getStorageClassSpec()) {
      SpecName = getSpecifierName(SC);
      SCLoc = getStorageClassSpecLoc();
      StorageHint = FixItHint::CreateRemoval(SCLoc);
    }

    if (DeclSpec::TSCS TSC = getThreadStorageClassSpec()) {
      if (!SpecName.empty()) SpecName += " ";
      SpecName += getSpecifierName(TSC);
      SCLoc = getThreadStorageClassSpecLoc();
      ThreadHint = FixItHint::CreateRemoval(SCLoc);
    }

    Diag(D, SCLoc, diag::err_friend_decl_spec)
      << SpecName << StorageHint << ThreadHint;

    ClearStorageClassSpecs();
  }

  // C++11 [dcl.fct.spec]p5:
  //   The virtual specifier shall be used only in the initial
  //   declaration of a non-static class member function;
  // C++11 [dcl.fct.spec]p6:
  //   The explicit specifier shall be used only in the declaration of
  //   a constructor or conversion function within its class
  //   definition;
  if (isFriendSpecified() && (isVirtualSpecified() || isExplicitSpecified())) {
    StringRef Keyword;
    SourceLocation SCLoc;

    if (isVirtualSpecified()) {
      Keyword = "virtual";
      SCLoc = getVirtualSpecLoc();
    } else {
      Keyword = "explicit";
      SCLoc = getExplicitSpecLoc();
    }

    FixItHint Hint = FixItHint::CreateRemoval(SCLoc);
    Diag(D, SCLoc, diag::err_friend_decl_spec)
      << Keyword << Hint;

    FS_virtual_specified = FS_explicit_specified = false;
    FS_virtualLoc = FS_explicitLoc = SourceLocation();
  }

  assert(!TypeSpecOwned || isDeclRep((TST) TypeSpecType));

  // Okay, now we can infer the real type.

  // TODO: return "auto function" and other bad things based on the real type.

  // 'data definition has no type or storage class'?
}

bool DeclSpec::isMissingDeclaratorOk() {
  TST tst = getTypeSpecType();
  return isDeclRep(tst) && getRepAsDecl() != nullptr &&
    StorageClassSpec != DeclSpec::SCS_typedef;
}

void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc,
                                          OverloadedOperatorKind Op,
                                          SourceLocation SymbolLocations[3]) {
  Kind = IK_OperatorFunctionId;
  StartLocation = OperatorLoc;
  EndLocation = OperatorLoc;
  OperatorFunctionId.Operator = Op;
  for (unsigned I = 0; I != 3; ++I) {
    OperatorFunctionId.SymbolLocations[I] = SymbolLocations[I].getRawEncoding();

    if (SymbolLocations[I].isValid())
      EndLocation = SymbolLocations[I];
  }
}

bool VirtSpecifiers::SetSpecifier(Specifier VS, SourceLocation Loc,
                                  const char *&PrevSpec) {
  if (!FirstLocation.isValid())
    FirstLocation = Loc;
  LastLocation = Loc;
  LastSpecifier = VS;

  if (Specifiers & VS) {
    PrevSpec = getSpecifierName(VS);
    return true;
  }

  Specifiers |= VS;

  switch (VS) {
  default: llvm_unreachable("Unknown specifier!");
  case VS_Override: VS_overrideLoc = Loc; break;
  case VS_Sealed:
  case VS_Final:    VS_finalLoc = Loc;    break;
  }

  return false;
}

const char *VirtSpecifiers::getSpecifierName(Specifier VS) {
  switch (VS) {
  default: llvm_unreachable("Unknown specifier");
  case VS_Override: return "override";
  case VS_Final: return "final";
  case VS_Sealed: return "sealed";
  }
}
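The DeclSpec setters above all follow one pattern: each specifier records its first occurrence (value plus SourceLocation), and a repeated or conflicting specifier is routed through BadSpecifier, which selects an error for a conflicting combination and a duplicate-declspec warning otherwise. The following stand-alone C++ sketch (not part of the repository; MiniDeclSpec, Qual, and setQual are hypothetical names chosen for illustration) shows that bitmask-based duplicate detection in isolation:

#include <cstdio>

// One bit per qualifier, mirroring the DeclSpec::TQ bitmask.
enum Qual : unsigned {
  Q_const    = 1 << 0,
  Q_restrict = 1 << 1,
  Q_volatile = 1 << 2,
};

struct MiniDeclSpec {
  unsigned Quals = 0; // accumulated qualifier bits

  // Returns true (and names the offender via PrevSpec) when the qualifier
  // was already present, as DeclSpec::SetTypeQual does before delegating
  // to BadSpecifier.
  bool setQual(Qual Q, const char *&PrevSpec) {
    if (Quals & Q) {
      PrevSpec = name(Q);
      return true; // duplicate: the caller picks ext_/warn_duplicate_declspec
    }
    Quals |= Q;
    return false;
  }

  static const char *name(Qual Q) {
    switch (Q) {
    case Q_const:    return "const";
    case Q_restrict: return "restrict";
    case Q_volatile: return "volatile";
    }
    return "unspecified";
  }
};

int main() {
  MiniDeclSpec DS;
  const char *Prev = nullptr;
  DS.setQual(Q_const, Prev);     // first 'const' is accepted
  if (DS.setQual(Q_const, Prev)) // second one is flagged
    std::printf("warning: duplicate '%s' declaration specifier\n", Prev);
  return 0;
}

The real DeclSpec additionally chooses between ext_duplicate_declspec and warn_duplicate_declspec based on the language mode, and keeps one SourceLocation per qualifier so that Finish() can attach removal fix-its to the exact token.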
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines analysis_warnings::[Policy,Executor]. // Together they are used by Sema to issue warnings based on inexpensive // static analysis algorithms in libAnalysis. // //===----------------------------------------------------------------------===// #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ParentMap.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtVisitor.h" #include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h" #include "clang/Analysis/Analyses/Consumed.h" #include "clang/Analysis/Analyses/ReachableCode.h" #include "clang/Analysis/Analyses/ThreadSafety.h" #include "clang/Analysis/Analyses/UninitializedValues.h" #include "clang/Analysis/AnalysisContext.h" #include "clang/Analysis/CFG.h" #include "clang/Analysis/CFGStmtMap.h" #include "clang/Basic/SourceLocation.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Lexer.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaInternal.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/ADT/ImmutableMap.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PostOrderIterator.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include <algorithm> #include <deque> #include <iterator> #include <vector> using namespace clang; //===----------------------------------------------------------------------===// // Unreachable code analysis. //===----------------------------------------------------------------------===// namespace { class UnreachableCodeHandler : public reachable_code::Callback { Sema &S; public: UnreachableCodeHandler(Sema &s) : S(s) {} void HandleUnreachable(reachable_code::UnreachableKind UK, SourceLocation L, SourceRange SilenceableCondVal, SourceRange R1, SourceRange R2) override { unsigned diag = diag::warn_unreachable; switch (UK) { case reachable_code::UK_Break: diag = diag::warn_unreachable_break; break; case reachable_code::UK_Return: diag = diag::warn_unreachable_return; break; case reachable_code::UK_Loop_Increment: diag = diag::warn_unreachable_loop_increment; break; case reachable_code::UK_Other: break; } S.Diag(L, diag) << R1 << R2; SourceLocation Open = SilenceableCondVal.getBegin(); if (Open.isValid()) { SourceLocation Close = SilenceableCondVal.getEnd(); Close = S.getLocForEndOfToken(Close); if (Close.isValid()) { S.Diag(Open, diag::note_unreachable_silence) << FixItHint::CreateInsertion(Open, "/* DISABLES CODE */ (") << FixItHint::CreateInsertion(Close, ")"); } } } }; } /// CheckUnreachable - Check for unreachable code. static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) { // As a heuristic prune all diagnostics not in the main file. Currently // the majority of warnings in headers are false positives. These // are largely caused by configuration state, e.g. preprocessor // defined code, etc. 
// // Note that this is also a performance optimization. Analyzing // headers many times can be expensive. if (!S.getSourceManager().isInMainFile(AC.getDecl()->getLocStart())) return; UnreachableCodeHandler UC(S); reachable_code::FindUnreachableCode(AC, S.getPreprocessor(), UC); } namespace { /// \brief Warn on logical operator errors in CFGBuilder class LogicalErrorHandler : public CFGCallback { Sema &S; public: LogicalErrorHandler(Sema &S) : CFGCallback(), S(S) {} static bool HasMacroID(const Expr *E) { if (E->getExprLoc().isMacroID()) return true; // Recurse to children. for (const Stmt *SubStmt : E->children()) if (const Expr *SubExpr = dyn_cast_or_null<Expr>(SubStmt)) if (HasMacroID(SubExpr)) return true; return false; } void compareAlwaysTrue(const BinaryOperator *B, bool isAlwaysTrue) override { if (HasMacroID(B)) return; SourceRange DiagRange = B->getSourceRange(); S.Diag(B->getExprLoc(), diag::warn_tautological_overlap_comparison) << DiagRange << isAlwaysTrue; } void compareBitwiseEquality(const BinaryOperator *B, bool isAlwaysTrue) override { if (HasMacroID(B)) return; SourceRange DiagRange = B->getSourceRange(); S.Diag(B->getExprLoc(), diag::warn_comparison_bitwise_always) << DiagRange << isAlwaysTrue; } }; } // namespace //===----------------------------------------------------------------------===// // Check for infinite self-recursion in functions //===----------------------------------------------------------------------===// // All blocks are in one of three states. States are ordered so that blocks // can only move to higher states. enum RecursiveState { FoundNoPath, FoundPath, FoundPathWithNoRecursiveCall }; static void checkForFunctionCall(Sema &S, const FunctionDecl *FD, CFGBlock &Block, unsigned ExitID, llvm::SmallVectorImpl<RecursiveState> &States, RecursiveState State) { unsigned ID = Block.getBlockID(); // A block's state can only move to a higher state. if (States[ID] >= State) return; States[ID] = State; // Found a path to the exit node without a recursive call. if (ID == ExitID && State == FoundPathWithNoRecursiveCall) return; if (State == FoundPathWithNoRecursiveCall) { // If the current state is FoundPathWithNoRecursiveCall, the successors // will be either FoundPathWithNoRecursiveCall or FoundPath. To determine // which, process all the Stmt's in this block to find any recursive calls. for (const auto &B : Block) { if (B.getKind() != CFGElement::Statement) continue; const CallExpr *CE = dyn_cast<CallExpr>(B.getAs<CFGStmt>()->getStmt()); if (CE && CE->getCalleeDecl() && CE->getCalleeDecl()->getCanonicalDecl() == FD) { // Skip function calls which are qualified with a templated class. 
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>( CE->getCallee()->IgnoreParenImpCasts())) { if (NestedNameSpecifier *NNS = DRE->getQualifier()) { if (NNS->getKind() == NestedNameSpecifier::TypeSpec && isa<TemplateSpecializationType>(NNS->getAsType())) { continue; } } } if (const CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(CE)) { if (isa<CXXThisExpr>(MCE->getImplicitObjectArgument()) || !MCE->getMethodDecl()->isVirtual()) { State = FoundPath; break; } } else { State = FoundPath; break; } } } } for (CFGBlock::succ_iterator I = Block.succ_begin(), E = Block.succ_end(); I != E; ++I) if (*I) checkForFunctionCall(S, FD, **I, ExitID, States, State); } static void checkRecursiveFunction(Sema &S, const FunctionDecl *FD, const Stmt *Body, AnalysisDeclContext &AC) { FD = FD->getCanonicalDecl(); // Only run on non-templated functions and non-templated members of // templated classes. if (FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate && FD->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization) return; CFG *cfg = AC.getCFG(); if (!cfg) return; // If the exit block is unreachable, skip processing the function. if (cfg->getExit().pred_empty()) return; // Mark all nodes as FoundNoPath, then begin processing the entry block. llvm::SmallVector<RecursiveState, 16> states(cfg->getNumBlockIDs(), FoundNoPath); checkForFunctionCall(S, FD, cfg->getEntry(), cfg->getExit().getBlockID(), states, FoundPathWithNoRecursiveCall); // Check that the exit block is reachable. This prevents triggering the // warning on functions that do not terminate. if (states[cfg->getExit().getBlockID()] == FoundPath) S.Diag(Body->getLocStart(), diag::warn_infinite_recursive_function); } //===----------------------------------------------------------------------===// // Check for missing return value. //===----------------------------------------------------------------------===// enum ControlFlowKind { UnknownFallThrough, NeverFallThrough, MaybeFallThrough, AlwaysFallThrough, NeverFallThroughOrReturn }; /// CheckFallThrough - Check that we don't fall off the end of a /// Statement that should return a value. /// /// \returns AlwaysFallThrough iff we always fall off the end of the statement, /// MaybeFallThrough iff we might or might not fall off the end, /// NeverFallThroughOrReturn iff we never fall off the end of the statement or /// return. We assume NeverFallThrough iff we never fall off the end of the /// statement but we may return. We assume that functions not marked noreturn /// will return. static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) { CFG *cfg = AC.getCFG(); if (!cfg) return UnknownFallThrough; // The CFG leaves in dead things, and we don't want the dead code paths to // confuse us, so we mark all live things first. llvm::BitVector live(cfg->getNumBlockIDs()); unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(), live); bool AddEHEdges = AC.getAddEHEdges(); if (!AddEHEdges && count != cfg->getNumBlockIDs()) // When there are things remaining dead, and we didn't add EH edges // from CallExprs to the catch clauses, we have to go back and // mark them as live. for (const auto *B : *cfg) { if (!live[B->getBlockID()]) { if (B->pred_begin() == B->pred_end()) { if (B->getTerminator() && isa<CXXTryStmt>(B->getTerminator())) // When not adding EH edges from calls, catch clauses // can otherwise seem dead. Avoid noting them as dead. 
          count += reachable_code::ScanReachableFromBlock(B, live);
        continue;
      }
    }
  }

  // Now we know what is live, we check the live predecessors of the exit
  // block and look for fall through paths, being careful to ignore normal
  // returns, and exceptional paths.
  bool HasLiveReturn = false;
  bool HasFakeEdge = false;
  bool HasPlainEdge = false;
  bool HasAbnormalEdge = false;

  // Ignore default cases that aren't likely to be reachable because all
  // enums in a switch(X) have explicit case statements.
  CFGBlock::FilterOptions FO;
  FO.IgnoreDefaultsWithCoveredEnums = 1;

  for (CFGBlock::filtered_pred_iterator
         I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
    const CFGBlock& B = **I;
    if (!live[B.getBlockID()])
      continue;

    // Skip blocks which contain an element marked as no-return. They don't
    // represent actually viable edges into the exit block, so mark them as
    // abnormal.
    if (B.hasNoReturnElement()) {
      HasAbnormalEdge = true;
      continue;
    }

    // Destructors can appear after the 'return' in the CFG. This is
    // normal. We need to look past the destructors for the return
    // statement (if it exists).
    CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();

    for ( ; ri != re ; ++ri)
      if (ri->getAs<CFGStmt>())
        break;

    // No more CFGElements in the block?
    if (ri == re) {
      if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
        HasAbnormalEdge = true;
        continue;
      }
      // A labeled empty statement, or the entry block...
      HasPlainEdge = true;
      continue;
    }

    CFGStmt CS = ri->castAs<CFGStmt>();
    const Stmt *S = CS.getStmt();
    if (isa<ReturnStmt>(S)) {
      HasLiveReturn = true;
      continue;
    }
    if (isa<ObjCAtThrowStmt>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<CXXThrowExpr>(S)) {
      HasFakeEdge = true;
      continue;
    }
    if (isa<MSAsmStmt>(S)) {
      // TODO: Verify this is correct.
      HasFakeEdge = true;
      HasLiveReturn = true;
      continue;
    }
    if (isa<CXXTryStmt>(S)) {
      HasAbnormalEdge = true;
      continue;
    }
    if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
        == B.succ_end()) {
      HasAbnormalEdge = true;
      continue;
    }

    HasPlainEdge = true;
  }

  if (!HasPlainEdge) {
    if (HasLiveReturn)
      return NeverFallThrough;
    return NeverFallThroughOrReturn;
  }
  if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
    return MaybeFallThrough;
  // This says AlwaysFallThrough for calls to functions that are not marked
  // noreturn, that don't return. If people would like this warning to be
  // more accurate, such functions should be marked as noreturn.
  return AlwaysFallThrough;
}

namespace {

struct CheckFallThroughDiagnostics {
  unsigned diag_MaybeFallThrough_HasNoReturn;
  unsigned diag_MaybeFallThrough_ReturnsNonVoid;
  unsigned diag_AlwaysFallThrough_HasNoReturn;
  unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
  unsigned diag_NeverFallThroughOrReturn;
  enum { Function, Block, Lambda } funMode;
  SourceLocation FuncLoc;

  static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
    CheckFallThroughDiagnostics D;
    D.FuncLoc = Func->getLocation();
    D.diag_MaybeFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_MaybeFallThrough_ReturnsNonVoid =
      diag::warn_maybe_falloff_nonvoid_function;
    D.diag_AlwaysFallThrough_HasNoReturn =
      diag::warn_falloff_noreturn_function;
    D.diag_AlwaysFallThrough_ReturnsNonVoid =
      diag::warn_falloff_nonvoid_function;

    // Don't suggest that virtual functions be marked "noreturn", since they
    // might be overridden by non-noreturn functions.
bool isVirtualMethod = false; if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func)) isVirtualMethod = Method->isVirtual(); // Don't suggest that template instantiations be marked "noreturn" bool isTemplateInstantiation = false; if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func)) isTemplateInstantiation = Function->isTemplateInstantiation(); if (!isVirtualMethod && !isTemplateInstantiation) D.diag_NeverFallThroughOrReturn = diag::warn_suggest_noreturn_function; else D.diag_NeverFallThroughOrReturn = 0; D.funMode = Function; return D; } static CheckFallThroughDiagnostics MakeForBlock() { CheckFallThroughDiagnostics D; D.diag_MaybeFallThrough_HasNoReturn = diag::err_noreturn_block_has_return_expr; D.diag_MaybeFallThrough_ReturnsNonVoid = diag::err_maybe_falloff_nonvoid_block; D.diag_AlwaysFallThrough_HasNoReturn = diag::err_noreturn_block_has_return_expr; D.diag_AlwaysFallThrough_ReturnsNonVoid = diag::err_falloff_nonvoid_block; D.diag_NeverFallThroughOrReturn = 0; D.funMode = Block; return D; } static CheckFallThroughDiagnostics MakeForLambda() { CheckFallThroughDiagnostics D; D.diag_MaybeFallThrough_HasNoReturn = diag::err_noreturn_lambda_has_return_expr; D.diag_MaybeFallThrough_ReturnsNonVoid = diag::warn_maybe_falloff_nonvoid_lambda; D.diag_AlwaysFallThrough_HasNoReturn = diag::err_noreturn_lambda_has_return_expr; D.diag_AlwaysFallThrough_ReturnsNonVoid = diag::warn_falloff_nonvoid_lambda; D.diag_NeverFallThroughOrReturn = 0; D.funMode = Lambda; return D; } bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid, bool HasNoReturn) const { if (funMode == Function) { return (ReturnsVoid || D.isIgnored(diag::warn_maybe_falloff_nonvoid_function, FuncLoc)) && (!HasNoReturn || D.isIgnored(diag::warn_noreturn_function_has_return_expr, FuncLoc)) && (!ReturnsVoid || D.isIgnored(diag::warn_suggest_noreturn_block, FuncLoc)); } // For blocks / lambdas. return ReturnsVoid && !HasNoReturn; } }; } /// CheckFallThroughForFunctionDef - Check that we don't fall off the end of a /// function that should return a value. Check that we don't fall off the end /// of a noreturn function. We assume that functions and blocks not marked /// noreturn will return. static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body, const BlockExpr *blkExpr, const CheckFallThroughDiagnostics& CD, AnalysisDeclContext &AC) { bool ReturnsVoid = false; bool HasNoReturn = false; if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { ReturnsVoid = FD->getReturnType()->isVoidType(); HasNoReturn = FD->isNoReturn(); } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { ReturnsVoid = MD->getReturnType()->isVoidType(); HasNoReturn = MD->hasAttr<NoReturnAttr>(); } else if (isa<BlockDecl>(D)) { QualType BlockTy = blkExpr->getType(); if (const FunctionType *FT = BlockTy->getPointeeType()->getAs<FunctionType>()) { if (FT->getReturnType()->isVoidType()) ReturnsVoid = true; if (FT->getNoReturnAttr()) HasNoReturn = true; } } DiagnosticsEngine &Diags = S.getDiagnostics(); // Short circuit for compilation speed. if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn)) return; SourceLocation LBrace = Body->getLocStart(), RBrace = Body->getLocEnd(); // Either in a function body compound statement, or a function-try-block. 
switch (CheckFallThrough(AC)) { case UnknownFallThrough: break; case MaybeFallThrough: if (HasNoReturn) S.Diag(RBrace, CD.diag_MaybeFallThrough_HasNoReturn); else if (!ReturnsVoid) S.Diag(RBrace, CD.diag_MaybeFallThrough_ReturnsNonVoid); break; case AlwaysFallThrough: if (HasNoReturn) S.Diag(RBrace, CD.diag_AlwaysFallThrough_HasNoReturn); else if (!ReturnsVoid) S.Diag(RBrace, CD.diag_AlwaysFallThrough_ReturnsNonVoid); break; case NeverFallThroughOrReturn: if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) { if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 0 << FD; } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) { S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn) << 1 << MD; } else { S.Diag(LBrace, CD.diag_NeverFallThroughOrReturn); } } break; case NeverFallThrough: break; } } //===----------------------------------------------------------------------===// // -Wuninitialized //===----------------------------------------------------------------------===// namespace { /// ContainsReference - A visitor class to search for references to /// a particular declaration (the needle) within any evaluated component of an /// expression (recursively). class ContainsReference : public ConstEvaluatedExprVisitor<ContainsReference> { bool FoundReference; const DeclRefExpr *Needle; public: typedef ConstEvaluatedExprVisitor<ContainsReference> Inherited; ContainsReference(ASTContext &Context, const DeclRefExpr *Needle) : Inherited(Context), FoundReference(false), Needle(Needle) {} void VisitExpr(const Expr *E) { // Stop evaluating if we already have a reference. if (FoundReference) return; Inherited::VisitExpr(E); } void VisitDeclRefExpr(const DeclRefExpr *E) { if (E == Needle) FoundReference = true; else Inherited::VisitDeclRefExpr(E); } bool doesContainReference() const { return FoundReference; } }; } static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) { QualType VariableTy = VD->getType().getCanonicalType(); if (VariableTy->isBlockPointerType() && !VD->hasAttr<BlocksAttr>()) { S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization) << VD->getDeclName() << FixItHint::CreateInsertion(VD->getLocation(), "__block "); return true; } // HLSL Change Start - Don't issue fixit for out parameters if (VD->hasAttr<HLSLOutAttr>()) return false; // HLSL Change End // Don't issue a fixit if there is already an initializer. if (VD->getInit()) return false; // Don't suggest a fixit inside macros. if (VD->getLocEnd().isMacroID()) return false; SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd()); // Suggest possible initialization (if any). std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc); if (Init.empty()) return false; S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName() << FixItHint::CreateInsertion(Loc, Init); return true; } /// Create a fixit to remove an if-like statement, on the assumption that its /// condition is CondVal. static void CreateIfFixit(Sema &S, const Stmt *If, const Stmt *Then, const Stmt *Else, bool CondVal, FixItHint &Fixit1, FixItHint &Fixit2) { if (CondVal) { // If condition is always true, remove all but the 'then'. 
Fixit1 = FixItHint::CreateRemoval( CharSourceRange::getCharRange(If->getLocStart(), Then->getLocStart())); if (Else) { SourceLocation ElseKwLoc = Lexer::getLocForEndOfToken( Then->getLocEnd(), 0, S.getSourceManager(), S.getLangOpts()); Fixit2 = FixItHint::CreateRemoval( SourceRange(ElseKwLoc, Else->getLocEnd())); } } else { // If condition is always false, remove all but the 'else'. if (Else) Fixit1 = FixItHint::CreateRemoval( CharSourceRange::getCharRange(If->getLocStart(), Else->getLocStart())); else Fixit1 = FixItHint::CreateRemoval(If->getSourceRange()); } } /// DiagUninitUse -- Helper function to produce a diagnostic for an /// uninitialized use of a variable. static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use, bool IsCapturedByBlock) { bool Diagnosed = false; // HLSL Change Start - Generate warnings for uninitialized out params. bool HLSLOutParam = VD->hasAttr<HLSLOutAttr>(); switch (Use.getKind()) { case UninitUse::Always: S.Diag(Use.getUser()->getLocStart(), HLSLOutParam ? diag::warn_hlsl_uninit_out_param : diag::warn_uninit_var) << VD->getDeclName() << IsCapturedByBlock << Use.getUser()->getSourceRange(); return; case UninitUse::AfterDecl: case UninitUse::AfterCall: S.Diag(VD->getLocation(), HLSLOutParam ? diag::warn_hlsl_sometimes_uninit_out_param : diag::warn_sometimes_uninit_var) << VD->getDeclName() << IsCapturedByBlock << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5) << const_cast<DeclContext *>(VD->getLexicalDeclContext()) << VD->getSourceRange(); // HLSL Change End - Generate warnings for uninitialized out params. S.Diag(Use.getUser()->getLocStart(), diag::note_uninit_var_use) << IsCapturedByBlock << Use.getUser()->getSourceRange(); return; case UninitUse::Maybe: case UninitUse::Sometimes: // Carry on to report sometimes-uninitialized branches, if possible, // or a 'may be used uninitialized' diagnostic otherwise. break; } // Diagnose each branch which leads to a sometimes-uninitialized use. for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end(); I != E; ++I) { assert(Use.getKind() == UninitUse::Sometimes); const Expr *User = Use.getUser(); const Stmt *Term = I->Terminator; // Information used when building the diagnostic. unsigned DiagKind; StringRef Str; SourceRange Range; // FixIts to suppress the diagnostic by removing the dead condition. // For all binary terminators, branch 0 is taken if the condition is true, // and branch 1 is taken if the condition is false. int RemoveDiagKind = -1; const char *FixitStr = S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false") : (I->Output ? "1" : "0"); FixItHint Fixit1, Fixit2; switch (Term ? Term->getStmtClass() : Stmt::DeclStmtClass) { default: // Don't know how to report this. Just fall back to 'may be used // uninitialized'. FIXME: Can this happen? continue; // "condition is true / condition is false". 
case Stmt::IfStmtClass: { const IfStmt *IS = cast<IfStmt>(Term); DiagKind = 0; Str = "if"; Range = IS->getCond()->getSourceRange(); RemoveDiagKind = 0; CreateIfFixit(S, IS, IS->getThen(), IS->getElse(), I->Output, Fixit1, Fixit2); break; } case Stmt::ConditionalOperatorClass: { const ConditionalOperator *CO = cast<ConditionalOperator>(Term); DiagKind = 0; Str = "?:"; Range = CO->getCond()->getSourceRange(); RemoveDiagKind = 0; CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(), I->Output, Fixit1, Fixit2); break; } case Stmt::BinaryOperatorClass: { const BinaryOperator *BO = cast<BinaryOperator>(Term); if (!BO->isLogicalOp()) continue; DiagKind = 0; Str = BO->getOpcodeStr(); Range = BO->getLHS()->getSourceRange(); RemoveDiagKind = 0; if ((BO->getOpcode() == BO_LAnd && I->Output) || (BO->getOpcode() == BO_LOr && !I->Output)) // true && y -> y, false || y -> y. Fixit1 = FixItHint::CreateRemoval(SourceRange(BO->getLocStart(), BO->getOperatorLoc())); else // false && y -> false, true || y -> true. Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr); break; } // "loop is entered / loop is exited". case Stmt::WhileStmtClass: DiagKind = 1; Str = "while"; Range = cast<WhileStmt>(Term)->getCond()->getSourceRange(); RemoveDiagKind = 1; Fixit1 = FixItHint::CreateReplacement(Range, FixitStr); break; case Stmt::ForStmtClass: DiagKind = 1; Str = "for"; Range = cast<ForStmt>(Term)->getCond()->getSourceRange(); RemoveDiagKind = 1; if (I->Output) Fixit1 = FixItHint::CreateRemoval(Range); else Fixit1 = FixItHint::CreateReplacement(Range, FixitStr); break; case Stmt::CXXForRangeStmtClass: if (I->Output == 1) { // The use occurs if a range-based for loop's body never executes. // That may be impossible, and there's no syntactic fix for this, // so treat it as a 'may be uninitialized' case. continue; } DiagKind = 1; Str = "for"; Range = cast<CXXForRangeStmt>(Term)->getRangeInit()->getSourceRange(); break; // "condition is true / loop is exited". case Stmt::DoStmtClass: DiagKind = 2; Str = "do"; Range = cast<DoStmt>(Term)->getCond()->getSourceRange(); RemoveDiagKind = 1; Fixit1 = FixItHint::CreateReplacement(Range, FixitStr); break; // "switch case is taken". case Stmt::CaseStmtClass: DiagKind = 3; Str = "case"; Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange(); break; case Stmt::DefaultStmtClass: DiagKind = 3; Str = "default"; Range = cast<DefaultStmt>(Term)->getDefaultLoc(); break; } // HLSL Change Start - Generate warnings for uninitialized out params. S.Diag(Range.getBegin(), HLSLOutParam ? diag::warn_hlsl_sometimes_uninit_out_param : diag::warn_sometimes_uninit_var) << VD->getDeclName() << IsCapturedByBlock << DiagKind << Str << I->Output << Range; // HLSL Change End - Generate warnings for uninitialized out params. S.Diag(User->getLocStart(), diag::note_uninit_var_use) << IsCapturedByBlock << User->getSourceRange(); if (RemoveDiagKind != -1) S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond) << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2; Diagnosed = true; } if (!Diagnosed) S.Diag(Use.getUser()->getLocStart(), diag::warn_maybe_uninit_var) << VD->getDeclName() << IsCapturedByBlock << Use.getUser()->getSourceRange(); } /// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an /// uninitialized variable. This manages the different forms of diagnostic /// emitted for particular types of uses. Returns true if the use was diagnosed /// as a warning. If a particular use is one we omit warnings for, returns /// false. 
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD, const UninitUse &Use, bool alwaysReportSelfInit = false) { if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) { // Inspect the initializer of the variable declaration which is // being referenced prior to its initialization. We emit // specialized diagnostics for self-initialization, and we // specifically avoid warning about self references which take the // form of: // // int x = x; // // This is used to indicate to GCC that 'x' is intentionally left // uninitialized. Proven code paths which access 'x' in // an uninitialized state after this will still warn. if (const Expr *Initializer = VD->getInit()) { if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts()) return false; ContainsReference CR(S.Context, DRE); CR.Visit(Initializer); if (CR.doesContainReference()) { S.Diag(DRE->getLocStart(), diag::warn_uninit_self_reference_in_init) << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange(); return true; } } DiagUninitUse(S, VD, Use, false); } else { const BlockExpr *BE = cast<BlockExpr>(Use.getUser()); if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>()) S.Diag(BE->getLocStart(), diag::warn_uninit_byref_blockvar_captured_by_block) << VD->getDeclName(); else DiagUninitUse(S, VD, Use, true); } // Report where the variable was declared when the use wasn't within // the initializer of that declaration & we didn't already suggest // an initialization fixit. if (!SuggestInitializationFixit(S, VD)) S.Diag(VD->getLocStart(), diag::note_uninit_var_def) << VD->getDeclName(); return true; } namespace { class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> { public: FallthroughMapper(Sema &S) : FoundSwitchStatements(false), S(S) { } bool foundSwitchStatements() const { return FoundSwitchStatements; } void markFallthroughVisited(const AttributedStmt *Stmt) { bool Found = FallthroughStmts.erase(Stmt); assert(Found); (void)Found; } typedef llvm::SmallPtrSet<const AttributedStmt*, 8> AttrStmts; const AttrStmts &getFallthroughStmts() const { return FallthroughStmts; } void fillReachableBlocks(CFG *Cfg) { assert(ReachableBlocks.empty() && "ReachableBlocks already filled"); std::deque<const CFGBlock *> BlockQueue; ReachableBlocks.insert(&Cfg->getEntry()); BlockQueue.push_back(&Cfg->getEntry()); // Mark all case blocks reachable to avoid problems with switching on // constants, covered enums, etc. // These blocks can contain fall-through annotations, and we don't want to // issue a warn_fallthrough_attr_unreachable for them. for (const auto *B : *Cfg) { const Stmt *L = B->getLabel(); if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second) BlockQueue.push_back(B); } while (!BlockQueue.empty()) { const CFGBlock *P = BlockQueue.front(); BlockQueue.pop_front(); for (CFGBlock::const_succ_iterator I = P->succ_begin(), E = P->succ_end(); I != E; ++I) { if (*I && ReachableBlocks.insert(*I).second) BlockQueue.push_back(*I); } } } bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt) { assert(!ReachableBlocks.empty() && "ReachableBlocks empty"); int UnannotatedCnt = 0; AnnotatedCnt = 0; std::deque<const CFGBlock*> BlockQueue(B.pred_begin(), B.pred_end()); while (!BlockQueue.empty()) { const CFGBlock *P = BlockQueue.front(); BlockQueue.pop_front(); if (!P) continue; const Stmt *Term = P->getTerminator(); if (Term && isa<SwitchStmt>(Term)) continue; // Switch statement, good. 
const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel()); if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end()) continue; // Previous case label has no statements, good. const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel()); if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end()) continue; // Case label is preceded by a normal label, good. if (!ReachableBlocks.count(P)) { for (CFGBlock::const_reverse_iterator ElemIt = P->rbegin(), ElemEnd = P->rend(); ElemIt != ElemEnd; ++ElemIt) { if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>()) { if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) { S.Diag(AS->getLocStart(), diag::warn_fallthrough_attr_unreachable); markFallthroughVisited(AS); ++AnnotatedCnt; break; } // Don't care about other unreachable statements. } } // If there are no unreachable statements, this may be a special // case in CFG: // case X: { // A a; // A has a destructor. // break; // } // // <<<< This place is represented by a 'hanging' CFG block. // case Y: continue; } const Stmt *LastStmt = getLastStmt(*P); if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) { markFallthroughVisited(AS); ++AnnotatedCnt; continue; // Fallthrough annotation, good. } if (!LastStmt) { // This block contains no executable statements. // Traverse its predecessors. std::copy(P->pred_begin(), P->pred_end(), std::back_inserter(BlockQueue)); continue; } ++UnannotatedCnt; } return !!UnannotatedCnt; } // RecursiveASTVisitor setup. bool shouldWalkTypesOfTypeLocs() const { return false; } bool VisitAttributedStmt(AttributedStmt *S) { if (asFallThroughAttr(S)) FallthroughStmts.insert(S); return true; } bool VisitSwitchStmt(SwitchStmt *S) { FoundSwitchStatements = true; return true; } // We don't want to traverse local type declarations. We analyze their // methods separately. bool TraverseDecl(Decl *D) { return true; } // We analyze lambda bodies separately. Skip them here. bool TraverseLambdaBody(LambdaExpr *LE) { return true; } private: static const AttributedStmt *asFallThroughAttr(const Stmt *S) { if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) { if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs())) return AS; } return nullptr; } static const Stmt *getLastStmt(const CFGBlock &B) { if (const Stmt *Term = B.getTerminator()) return Term; for (CFGBlock::const_reverse_iterator ElemIt = B.rbegin(), ElemEnd = B.rend(); ElemIt != ElemEnd; ++ElemIt) { if (Optional<CFGStmt> CS = ElemIt->getAs<CFGStmt>()) return CS->getStmt(); } // Workaround to detect a statement thrown out by CFGBuilder: // case X: {} case Y: // case X: ; case Y: if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel())) if (!isa<SwitchCase>(SW->getSubStmt())) return SW->getSubStmt(); return nullptr; } bool FoundSwitchStatements; AttrStmts FallthroughStmts; Sema &S; llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks; }; } static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC, bool PerFunction) { // Only perform this analysis when using C++11. There is no good workflow // for this warning when not using C++11. There is no good way to silence // the warning (no attribute is available) unless we are using C++11's support // for generalized attributes. One could use pragmas to silence the warning, // but as a general solution that is gross and not in the spirit of this // warning. // // NOTE: This is an intermediate solution.
There are on-going discussions on // how to properly support this warning outside of C++11 with an annotation. if (!AC.getASTContext().getLangOpts().CPlusPlus11) return; FallthroughMapper FM(S); FM.TraverseStmt(AC.getBody()); if (!FM.foundSwitchStatements()) return; if (PerFunction && FM.getFallthroughStmts().empty()) return; CFG *Cfg = AC.getCFG(); if (!Cfg) return; FM.fillReachableBlocks(Cfg); for (CFG::reverse_iterator I = Cfg->rbegin(), E = Cfg->rend(); I != E; ++I) { const CFGBlock *B = *I; const Stmt *Label = B->getLabel(); if (!Label || !isa<SwitchCase>(Label)) continue; int AnnotatedCnt; if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt)) continue; S.Diag(Label->getLocStart(), PerFunction ? diag::warn_unannotated_fallthrough_per_function : diag::warn_unannotated_fallthrough); if (!AnnotatedCnt) { SourceLocation L = Label->getLocStart(); if (L.isMacroID()) continue; if (S.getLangOpts().CPlusPlus11) { const Stmt *Term = B->getTerminator(); // Skip empty cases. while (B->empty() && !Term && B->succ_size() == 1) { B = *B->succ_begin(); Term = B->getTerminator(); } if (!(B->empty() && Term && isa<BreakStmt>(Term))) { Preprocessor &PP = S.getPreprocessor(); TokenValue Tokens[] = { tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"), tok::coloncolon, PP.getIdentifierInfo("fallthrough"), tok::r_square, tok::r_square }; StringRef AnnotationSpelling = "[[clang::fallthrough]]"; StringRef MacroName = PP.getLastMacroWithSpelling(L, Tokens); if (!MacroName.empty()) AnnotationSpelling = MacroName; SmallString<64> TextToInsert(AnnotationSpelling); TextToInsert += "; "; S.Diag(L, diag::note_insert_fallthrough_fixit) << AnnotationSpelling << FixItHint::CreateInsertion(L, TextToInsert); } } S.Diag(L, diag::note_insert_break_fixit) << FixItHint::CreateInsertion(L, "break; "); } } for (const auto *F : FM.getFallthroughStmts()) S.Diag(F->getLocStart(), diag::warn_fallthrough_attr_invalid_placement); } static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM, const Stmt *S) { assert(S); do { switch (S->getStmtClass()) { case Stmt::ForStmtClass: case Stmt::WhileStmtClass: case Stmt::CXXForRangeStmtClass: case Stmt::ObjCForCollectionStmtClass: return true; case Stmt::DoStmtClass: { const Expr *Cond = cast<DoStmt>(S)->getCond(); llvm::APSInt Val; if (!Cond->EvaluateAsInt(Val, Ctx)) return true; return Val.getBoolValue(); } default: break; } } while ((S = PM.getParent(S))); return false; } static void diagnoseRepeatedUseOfWeak(Sema &S, const sema::FunctionScopeInfo *CurFn, const Decl *D, const ParentMap &PM) { typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy; typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap; typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector; typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator> StmtUsesPair; ASTContext &Ctx = S.getASTContext(); const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses(); // Extract all weak objects that are referenced more than once. SmallVector<StmtUsesPair, 8> UsesByStmt; for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end(); I != E; ++I) { const WeakUseVector &Uses = I->second; // Find the first read of the weak object. WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end(); for ( ; UI != UE; ++UI) { if (UI->isUnsafe()) break; } // If there were only writes to this object, don't warn. if (UI == UE) continue; // If there was only one read, followed by any number of writes, and the // read is not within a loop, don't warn. 
Additionally, don't warn in a // loop if the base object is a local variable -- local variables are often // changed in loops. if (UI == Uses.begin()) { WeakUseVector::const_iterator UI2 = UI; for (++UI2; UI2 != UE; ++UI2) if (UI2->isUnsafe()) break; if (UI2 == UE) { if (!isInLoop(Ctx, PM, UI->getUseExpr())) continue; const WeakObjectProfileTy &Profile = I->first; if (!Profile.isExactProfile()) continue; const NamedDecl *Base = Profile.getBase(); if (!Base) Base = Profile.getProperty(); assert(Base && "A profile always has a base or property."); if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base)) if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base)) continue; } } UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I)); } if (UsesByStmt.empty()) return; // Sort by first use so that we emit the warnings in a deterministic order. SourceManager &SM = S.getSourceManager(); std::sort(UsesByStmt.begin(), UsesByStmt.end(), [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) { return SM.isBeforeInTranslationUnit(LHS.first->getLocStart(), RHS.first->getLocStart()); }); // Classify the current code body for better warning text. // This enum should stay in sync with the cases in // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak. // FIXME: Should we use a common classification enum and the same set of // possibilities all throughout Sema? enum { Function, Method, Block, Lambda } FunctionKind; if (isa<sema::BlockScopeInfo>(CurFn)) FunctionKind = Block; else if (isa<sema::LambdaScopeInfo>(CurFn)) FunctionKind = Lambda; else if (isa<ObjCMethodDecl>(D)) FunctionKind = Method; else FunctionKind = Function; // Iterate through the sorted problems and emit warnings for each. for (const auto &P : UsesByStmt) { const Stmt *FirstRead = P.first; const WeakObjectProfileTy &Key = P.second->first; const WeakUseVector &Uses = P.second->second; // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy // may not contain enough information to determine that these are different // properties. We can only be 100% sure of a repeated use in certain cases, // and we adjust the diagnostic kind accordingly so that the less certain // case can be turned off if it is too noisy. unsigned DiagKind; if (Key.isExactProfile()) DiagKind = diag::warn_arc_repeated_use_of_weak; else DiagKind = diag::warn_arc_possible_repeated_use_of_weak; // Classify the weak object being accessed for better warning text. // This enum should stay in sync with the cases in // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak. enum { Variable, Property, ImplicitProperty, Ivar } ObjectKind; const NamedDecl *D = Key.getProperty(); if (isa<VarDecl>(D)) ObjectKind = Variable; else if (isa<ObjCPropertyDecl>(D)) ObjectKind = Property; else if (isa<ObjCMethodDecl>(D)) ObjectKind = ImplicitProperty; else if (isa<ObjCIvarDecl>(D)) ObjectKind = Ivar; else llvm_unreachable("Unexpected weak object kind!"); // Show the first time the object was read. S.Diag(FirstRead->getLocStart(), DiagKind) << int(ObjectKind) << D << int(FunctionKind) << FirstRead->getSourceRange(); // Print all the other accesses as notes. 
for (const auto &Use : Uses) { if (Use.getUseExpr() == FirstRead) continue; S.Diag(Use.getUseExpr()->getLocStart(), diag::note_arc_weak_also_accessed_here) << Use.getUseExpr()->getSourceRange(); } } } namespace { class UninitValsDiagReporter : public UninitVariablesHandler { Sema &S; typedef SmallVector<UninitUse, 2> UsesVec; typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType; // Prefer using MapVector to DenseMap, so that iteration order will be // the same as insertion order. This is needed to obtain a deterministic // order of diagnostics when calling flushDiagnostics(). typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap; UsesMap *uses; public: UninitValsDiagReporter(Sema &S) : S(S), uses(nullptr) {} ~UninitValsDiagReporter() override { flushDiagnostics(); } MappedType &getUses(const VarDecl *vd) { if (!uses) uses = new UsesMap(); MappedType &V = (*uses)[vd]; if (!V.getPointer()) V.setPointer(new UsesVec()); return V; } void handleUseOfUninitVariable(const VarDecl *vd, const UninitUse &use) override { getUses(vd).getPointer()->push_back(use); } void handleSelfInit(const VarDecl *vd) override { getUses(vd).setInt(true); } void flushDiagnostics() { if (!uses) return; for (const auto &P : *uses) { const VarDecl *vd = P.first; const MappedType &V = P.second; UsesVec *vec = V.getPointer(); bool hasSelfInit = V.getInt(); // Specially handle the case where we have uses of an uninitialized // variable, but the root cause is an idiomatic self-init. We want // to report the diagnostic at the self-init since that is the root cause. if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec)) DiagnoseUninitializedUse(S, vd, UninitUse(vd->getInit()->IgnoreParenCasts(), /* isAlwaysUninit */ true), /* alwaysReportSelfInit */ true); else { // Sort the uses by their SourceLocations. While not strictly // guaranteed to produce them in line/column order, this will provide // a stable ordering. std::sort(vec->begin(), vec->end(), [](const UninitUse &a, const UninitUse &b) { // Prefer a more confident report over a less confident one. if (a.getKind() != b.getKind()) return a.getKind() > b.getKind(); return a.getUser()->getLocStart() < b.getUser()->getLocStart(); }); for (const auto &U : *vec) { // If we have self-init, downgrade all uses to 'may be uninitialized'. UninitUse Use = hasSelfInit ? UninitUse(U.getUser(), false) : U; if (DiagnoseUninitializedUse(S, vd, Use)) // Skip further diagnostics for this variable. We try to warn only // on the first point at which a variable is used uninitialized. break; } } // Release the uses vector. delete vec; } delete uses; } private: static bool hasAlwaysUninitializedUse(const UsesVec* vec) { return std::any_of(vec->begin(), vec->end(), [](const UninitUse &U) { return U.getKind() == UninitUse::Always || U.getKind() == UninitUse::AfterCall || U.getKind() == UninitUse::AfterDecl; }); } }; } namespace clang { namespace { typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes; typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag; typedef std::list<DelayedDiag> DiagList; struct SortDiagBySourceLocation { SourceManager &SM; SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {} bool operator()(const DelayedDiag &left, const DelayedDiag &right) { // Although this call will be slow, this is only called when outputting // multiple warnings. 
return SM.isBeforeInTranslationUnit(left.first.first, right.first.first); } }; }} //===----------------------------------------------------------------------===// // -Wthread-safety //===----------------------------------------------------------------------===// namespace clang { namespace threadSafety { namespace { class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler { Sema &S; DiagList Warnings; SourceLocation FunLocation, FunEndLocation; const FunctionDecl *CurrentFunction; bool Verbose; OptionalNotes getNotes() const { if (Verbose && CurrentFunction) { PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(), S.PDiag(diag::note_thread_warning_in_fun) << CurrentFunction->getNameAsString()); return OptionalNotes(1, FNote); } return OptionalNotes(); } OptionalNotes getNotes(const PartialDiagnosticAt &Note) const { OptionalNotes ONS(1, Note); if (Verbose && CurrentFunction) { PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(), S.PDiag(diag::note_thread_warning_in_fun) << CurrentFunction->getNameAsString()); ONS.push_back(std::move(FNote)); } return ONS; } OptionalNotes getNotes(const PartialDiagnosticAt &Note1, const PartialDiagnosticAt &Note2) const { OptionalNotes ONS; ONS.push_back(Note1); ONS.push_back(Note2); if (Verbose && CurrentFunction) { PartialDiagnosticAt FNote(CurrentFunction->getBody()->getLocStart(), S.PDiag(diag::note_thread_warning_in_fun) << CurrentFunction->getNameAsString()); ONS.push_back(std::move(FNote)); } return ONS; } // Helper functions void warnLockMismatch(unsigned DiagID, StringRef Kind, Name LockName, SourceLocation Loc) { // Gracefully handle rare cases when the analysis can't get a more // precise source location. if (!Loc.isValid()) Loc = FunLocation; PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << LockName); Warnings.emplace_back(std::move(Warning), getNotes()); } public: ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL) : S(S), FunLocation(FL), FunEndLocation(FEL), CurrentFunction(nullptr), Verbose(false) {} void setVerbose(bool b) { Verbose = b; } /// \brief Emit all buffered diagnostics in order of source location. /// We need to output diagnostics produced while iterating through /// the lockset in deterministic order, so this function orders diagnostics /// and outputs them.
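/// A minimal sketch of the buffer-then-sort pattern this reporter relies on
/// (illustrative only; 'Warning' stands for any PartialDiagnosticAt built by
/// the handlers below):
///   Warnings.emplace_back(std::move(Warning), getNotes()); // buffer
///   Warnings.sort(SortDiagBySourceLocation(SM));           // order
///   S.Diag(...);                                           // replay in order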
void emitDiagnostics() { Warnings.sort(SortDiagBySourceLocation(S.getSourceManager())); for (const auto &Diag : Warnings) { S.Diag(Diag.first.first, Diag.first.second); for (const auto &Note : Diag.second) S.Diag(Note.first, Note.second); } } void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock) << Loc); Warnings.emplace_back(std::move(Warning), getNotes()); } void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc) override { warnLockMismatch(diag::warn_unlock_but_no_lock, Kind, LockName, Loc); } void handleIncorrectUnlockKind(StringRef Kind, Name LockName, LockKind Expected, LockKind Received, SourceLocation Loc) override { if (Loc.isInvalid()) Loc = FunLocation; PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_kind_mismatch) << Kind << LockName << Received << Expected); Warnings.emplace_back(std::move(Warning), getNotes()); } void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation Loc) override { warnLockMismatch(diag::warn_double_lock, Kind, LockName, Loc); } void handleMutexHeldEndOfScope(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocEndOfScope, LockErrorKind LEK) override { unsigned DiagID = 0; switch (LEK) { case LEK_LockedSomePredecessors: DiagID = diag::warn_lock_some_predecessors; break; case LEK_LockedSomeLoopIterations: DiagID = diag::warn_expecting_lock_held_on_loop; break; case LEK_LockedAtEndOfFunction: DiagID = diag::warn_no_unlock; break; case LEK_NotLockedAtEndOfFunction: DiagID = diag::warn_expecting_locked; break; } if (LocEndOfScope.isInvalid()) LocEndOfScope = FunEndLocation; PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind << LockName); if (LocLocked.isValid()) { PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here) << Kind); Warnings.emplace_back(std::move(Warning), getNotes(Note)); return; } Warnings.emplace_back(std::move(Warning), getNotes()); } void handleExclusiveAndShared(StringRef Kind, Name LockName, SourceLocation Loc1, SourceLocation Loc2) override { PartialDiagnosticAt Warning(Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << Kind << LockName); PartialDiagnosticAt Note(Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << Kind << LockName); Warnings.emplace_back(std::move(Warning), getNotes(Note)); } void handleNoMutexHeld(StringRef Kind, const NamedDecl *D, ProtectedOperationKind POK, AccessKind AK, SourceLocation Loc) override { assert((POK == POK_VarAccess || POK == POK_VarDereference) && "Only works for variables"); unsigned DiagID = POK == POK_VarAccess? 
diag::warn_variable_requires_any_lock: diag::warn_var_deref_requires_any_lock; PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << D->getNameAsString() << getLockKindFromAccessKind(AK)); Warnings.emplace_back(std::move(Warning), getNotes()); } void handleMutexNotHeld(StringRef Kind, const NamedDecl *D, ProtectedOperationKind POK, Name LockName, LockKind LK, SourceLocation Loc, Name *PossibleMatch) override { unsigned DiagID = 0; if (PossibleMatch) { switch (POK) { case POK_VarAccess: DiagID = diag::warn_variable_requires_lock_precise; break; case POK_VarDereference: DiagID = diag::warn_var_deref_requires_lock_precise; break; case POK_FunctionCall: DiagID = diag::warn_fun_requires_lock_precise; break; case POK_PassByRef: DiagID = diag::warn_guarded_pass_by_reference; break; case POK_PtPassByRef: DiagID = diag::warn_pt_guarded_pass_by_reference; break; } PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << D->getNameAsString() << LockName << LK); PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match) << *PossibleMatch); if (Verbose && POK == POK_VarAccess) { PartialDiagnosticAt VNote(D->getLocation(), S.PDiag(diag::note_guarded_by_declared_here) << D->getNameAsString()); Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote)); } else Warnings.emplace_back(std::move(Warning), getNotes(Note)); } else { switch (POK) { case POK_VarAccess: DiagID = diag::warn_variable_requires_lock; break; case POK_VarDereference: DiagID = diag::warn_var_deref_requires_lock; break; case POK_FunctionCall: DiagID = diag::warn_fun_requires_lock; break; case POK_PassByRef: DiagID = diag::warn_guarded_pass_by_reference; break; case POK_PtPassByRef: DiagID = diag::warn_pt_guarded_pass_by_reference; break; } PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind << D->getNameAsString() << LockName << LK); if (Verbose && POK == POK_VarAccess) { PartialDiagnosticAt Note(D->getLocation(), S.PDiag(diag::note_guarded_by_declared_here) << D->getNameAsString()); Warnings.emplace_back(std::move(Warning), getNotes(Note)); } else Warnings.emplace_back(std::move(Warning), getNotes()); } } void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_acquire_requires_negative_cap) << Kind << LockName << Neg); Warnings.emplace_back(std::move(Warning), getNotes()); } void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName, SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex) << Kind << FunName << LockName); Warnings.emplace_back(std::move(Warning), getNotes()); } void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name, SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_acquired_before) << Kind << L1Name << L2Name); Warnings.emplace_back(std::move(Warning), getNotes()); } void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name); Warnings.emplace_back(std::move(Warning), getNotes()); } void enterFunction(const FunctionDecl* FD) override { CurrentFunction = FD; } void leaveFunction(const FunctionDecl* FD) override { CurrentFunction = 0; } }; } // namespace } // namespace threadSafety } // namespace clang //===----------------------------------------------------------------------===// // -Wconsumed //===----------------------------------------------------------------------===// namespace clang { 
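// For illustration of the warnings handled below (hypothetical user code;
// the attribute usage is an assumption about typical -Wconsumed annotations):
//   class [[clang::consumable(unconsumed)]] Handle {
//   public:
//     [[clang::callable_when("unconsumed")]] void use();
//     [[clang::set_typestate(consumed)]] void release();
//   };
//   void f(Handle h) {
//     h.release();   // typestate becomes 'consumed'
//     h.use();       // diagnosed via warnUseInInvalidState
//   }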
namespace consumed { namespace { class ConsumedWarningsHandler : public ConsumedWarningsHandlerBase { Sema &S; DiagList Warnings; public: ConsumedWarningsHandler(Sema &S) : S(S) {} void emitDiagnostics() override { Warnings.sort(SortDiagBySourceLocation(S.getSourceManager())); for (const auto &Diag : Warnings) { S.Diag(Diag.first.first, Diag.first.second); for (const auto &Note : Diag.second) S.Diag(Note.first, Note.second); } } void warnLoopStateMismatch(SourceLocation Loc, StringRef VariableName) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_loop_state_mismatch) << VariableName); Warnings.emplace_back(std::move(Warning), OptionalNotes()); } void warnParamReturnTypestateMismatch(SourceLocation Loc, StringRef VariableName, StringRef ExpectedState, StringRef ObservedState) override { PartialDiagnosticAt Warning(Loc, S.PDiag( diag::warn_param_return_typestate_mismatch) << VariableName << ExpectedState << ObservedState); Warnings.emplace_back(std::move(Warning), OptionalNotes()); } void warnParamTypestateMismatch(SourceLocation Loc, StringRef ExpectedState, StringRef ObservedState) override { PartialDiagnosticAt Warning(Loc, S.PDiag( diag::warn_param_typestate_mismatch) << ExpectedState << ObservedState); Warnings.emplace_back(std::move(Warning), OptionalNotes()); } void warnReturnTypestateForUnconsumableType(SourceLocation Loc, StringRef TypeName) override { PartialDiagnosticAt Warning(Loc, S.PDiag( diag::warn_return_typestate_for_unconsumable_type) << TypeName); Warnings.emplace_back(std::move(Warning), OptionalNotes()); } void warnReturnTypestateMismatch(SourceLocation Loc, StringRef ExpectedState, StringRef ObservedState) override { PartialDiagnosticAt Warning(Loc, S.PDiag( diag::warn_return_typestate_mismatch) << ExpectedState << ObservedState); Warnings.emplace_back(std::move(Warning), OptionalNotes()); } void warnUseOfTempInInvalidState(StringRef MethodName, StringRef State, SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag( diag::warn_use_of_temp_in_invalid_state) << MethodName << State); Warnings.emplace_back(std::move(Warning), OptionalNotes()); } void warnUseInInvalidState(StringRef MethodName, StringRef VariableName, StringRef State, SourceLocation Loc) override { PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_use_in_invalid_state) << MethodName << VariableName << State); Warnings.emplace_back(std::move(Warning), OptionalNotes()); } }; }}} //===----------------------------------------------------------------------===// // AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based // warnings on a function, method, or block. 
//===----------------------------------------------------------------------===// clang::sema::AnalysisBasedWarnings::Policy::Policy() { enableCheckFallThrough = 1; enableCheckUnreachable = 0; enableThreadSafetyAnalysis = 0; enableConsumedAnalysis = 0; } static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) { return (unsigned)!D.isIgnored(diag, SourceLocation()); } clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s) : S(s), NumFunctionsAnalyzed(0), NumFunctionsWithBadCFGs(0), NumCFGBlocks(0), MaxCFGBlocksPerFunction(0), NumUninitAnalysisFunctions(0), NumUninitAnalysisVariables(0), MaxUninitAnalysisVariablesPerFunction(0), NumUninitAnalysisBlockVisits(0), MaxUninitAnalysisBlockVisitsPerFunction(0) { using namespace diag; DiagnosticsEngine &D = S.getDiagnostics(); DefaultPolicy.enableCheckUnreachable = isEnabled(D, warn_unreachable) || isEnabled(D, warn_unreachable_break) || isEnabled(D, warn_unreachable_return) || isEnabled(D, warn_unreachable_loop_increment); DefaultPolicy.enableThreadSafetyAnalysis = isEnabled(D, warn_double_lock); DefaultPolicy.enableConsumedAnalysis = isEnabled(D, warn_use_in_invalid_state); } static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) { for (const auto &D : fscope->PossiblyUnreachableDiags) S.Diag(D.Loc, D.PD); } void clang::sema:: AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope, const Decl *D, const BlockExpr *blkExpr) { // We avoid doing analysis-based warnings when there are errors for // two reasons: // (1) The CFGs often can't be constructed (if the body is invalid), so // don't bother trying. // (2) The code already has problems; running the analysis just takes more // time. DiagnosticsEngine &Diags = S.getDiagnostics(); // Do not do any analysis for declarations in system headers if we are // going to just ignore them. if (Diags.getSuppressSystemWarnings() && S.SourceMgr.isInSystemHeader(D->getLocation())) return; // For code in dependent contexts, we'll do this at instantiation time. if (cast<DeclContext>(D)->isDependentContext()) return; if (Diags.hasUncompilableErrorOccurred() || Diags.hasFatalErrorOccurred()) { // Flush out any possibly unreachable diagnostics. flushDiagnostics(S, fscope); return; } const Stmt *Body = D->getBody(); assert(Body); // Construct the analysis context with the specified CFG build options. AnalysisDeclContext AC(/* AnalysisDeclContextManager */ nullptr, D); // Don't generate EH edges for CallExprs as we'd like to avoid the n^2 // explosion for destructors that can result and the compile time hit. AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true; AC.getCFGBuildOptions().AddEHEdges = false; AC.getCFGBuildOptions().AddInitializers = true; AC.getCFGBuildOptions().AddImplicitDtors = true; AC.getCFGBuildOptions().AddTemporaryDtors = true; AC.getCFGBuildOptions().AddCXXNewAllocator = false; AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true; // Force that certain expressions appear as CFGElements in the CFG. This // is used to speed up various analyses. // FIXME: This isn't the right factoring. This is here for initial // prototyping, but we need a way for analyses to say what expressions they // expect to always be CFGElements and then fill in the BuildOptions // appropriately. This is essentially a layering violation. if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis || P.enableConsumedAnalysis) { // Unreachable code analysis and thread safety require a linearized CFG. 
AC.getCFGBuildOptions().setAllAlwaysAdd(); } else { AC.getCFGBuildOptions() .setAlwaysAdd(Stmt::BinaryOperatorClass) .setAlwaysAdd(Stmt::CompoundAssignOperatorClass) .setAlwaysAdd(Stmt::BlockExprClass) .setAlwaysAdd(Stmt::CStyleCastExprClass) .setAlwaysAdd(Stmt::DeclRefExprClass) .setAlwaysAdd(Stmt::ImplicitCastExprClass) .setAlwaysAdd(Stmt::UnaryOperatorClass) .setAlwaysAdd(Stmt::AttributedStmtClass); } // Install the logical handler for -Wtautological-overlap-compare std::unique_ptr<LogicalErrorHandler> LEH; if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison, D->getLocStart())) { LEH.reset(new LogicalErrorHandler(S)); AC.getCFGBuildOptions().Observer = LEH.get(); } // Emit delayed diagnostics. if (!fscope->PossiblyUnreachableDiags.empty()) { bool analyzed = false; // Register the expressions with the CFGBuilder. for (const auto &D : fscope->PossiblyUnreachableDiags) { if (D.stmt) AC.registerForcedBlockExpression(D.stmt); } if (AC.getCFG()) { analyzed = true; for (const auto &D : fscope->PossiblyUnreachableDiags) { bool processed = false; if (D.stmt) { const CFGBlock *block = AC.getBlockForRegisteredExpression(D.stmt); CFGReverseBlockReachabilityAnalysis *cra = AC.getCFGReachablityAnalysis(); // FIXME: We should be able to assert that block is non-null, but // the CFG analysis can skip potentially-evaluated expressions in // edge cases; see test/Sema/vla-2.c. if (block && cra) { // Can this block be reached from the entrance? if (cra->isReachable(&AC.getCFG()->getEntry(), block)) S.Diag(D.Loc, D.PD); processed = true; } } if (!processed) { // Emit the warning anyway if we cannot map to a basic block. S.Diag(D.Loc, D.PD); } } } if (!analyzed) flushDiagnostics(S, fscope); } // Warning: check missing 'return' if (P.enableCheckFallThrough) { const CheckFallThroughDiagnostics &CD = (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock() : (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call && cast<CXXMethodDecl>(D)->getParent()->isLambda()) ? CheckFallThroughDiagnostics::MakeForLambda() : CheckFallThroughDiagnostics::MakeForFunction(D)); CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC); } // Warning: check for unreachable code if (P.enableCheckUnreachable) { // Only check for unreachable code on non-template instantiations. // Different template instantiations can effectively change the control-flow // and it is very difficult to prove that a snippet of code in a template // is unreachable for all instantiations. bool isTemplateInstantiation = false; if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) isTemplateInstantiation = Function->isTemplateInstantiation(); if (!isTemplateInstantiation) CheckUnreachable(S, AC); } // Check for thread safety violations if (P.enableThreadSafetyAnalysis) { SourceLocation FL = AC.getDecl()->getLocation(); SourceLocation FEL = AC.getDecl()->getLocEnd(); threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL); if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getLocStart())) Reporter.setIssueBetaWarnings(true); if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getLocStart())) Reporter.setVerbose(true); threadSafety::runThreadSafetyAnalysis(AC, Reporter, &S.ThreadSafetyDeclCache); Reporter.emitDiagnostics(); } // Check for violations of consumed properties. if (P.enableConsumedAnalysis) { consumed::ConsumedWarningsHandler WarningHandler(S); consumed::ConsumedAnalyzer Analyzer(WarningHandler); Analyzer.run(AC); } // HLSL Change Start - Generate warnings for uninitialized out params. 
if (!Diags.isIgnored(diag::warn_uninit_var, D->getLocStart()) || !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getLocStart()) || !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getLocStart()) || !Diags.isIgnored(diag::warn_hlsl_uninit_out_param, D->getLocStart()) || !Diags.isIgnored(diag::warn_hlsl_sometimes_uninit_out_param, D->getLocStart())) { // HLSL Change End - Generate warnings for uninitialized out params. if (CFG *cfg = AC.getCFG()) { UninitValsDiagReporter reporter(S); UninitVariablesAnalysisStats stats; std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats)); runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC, reporter, stats); if (S.CollectStats && stats.NumVariablesAnalyzed > 0) { ++NumUninitAnalysisFunctions; NumUninitAnalysisVariables += stats.NumVariablesAnalyzed; NumUninitAnalysisBlockVisits += stats.NumBlockVisits; MaxUninitAnalysisVariablesPerFunction = std::max(MaxUninitAnalysisVariablesPerFunction, stats.NumVariablesAnalyzed); MaxUninitAnalysisBlockVisitsPerFunction = std::max(MaxUninitAnalysisBlockVisitsPerFunction, stats.NumBlockVisits); } } } bool FallThroughDiagFull = !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getLocStart()); bool FallThroughDiagPerFunction = !Diags.isIgnored( diag::warn_unannotated_fallthrough_per_function, D->getLocStart()); if (FallThroughDiagFull || FallThroughDiagPerFunction) { DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull); } if (S.getLangOpts().ObjCARCWeak && !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getLocStart())) diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap()); // Check for infinite self-recursion in functions if (!Diags.isIgnored(diag::warn_infinite_recursive_function, D->getLocStart())) { if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { checkRecursiveFunction(S, FD, Body, AC); } } // If none of the previous checks caused a CFG build, trigger one here // for -Wtautological-overlap-compare if (!Diags.isIgnored(diag::warn_tautological_overlap_comparison, D->getLocStart())) { AC.getCFG(); } // Collect statistics about the CFG if it was built. if (S.CollectStats && AC.isCFGBuilt()) { ++NumFunctionsAnalyzed; if (CFG *cfg = AC.getCFG()) { // If we successfully built a CFG for this context, record some more // detail information about it. NumCFGBlocks += cfg->getNumBlockIDs(); MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction, cfg->getNumBlockIDs()); } else { ++NumFunctionsWithBadCFGs; } } } void clang::sema::AnalysisBasedWarnings::PrintStats() const { llvm::errs() << "\n*** Analysis Based Warnings Stats:\n"; unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs; unsigned AvgCFGBlocksPerFunction = !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt; llvm::errs() << NumFunctionsAnalyzed << " functions analyzed (" << NumFunctionsWithBadCFGs << " w/o CFGs).\n" << " " << NumCFGBlocks << " CFG blocks built.\n" << " " << AvgCFGBlocksPerFunction << " average CFG blocks per function.\n" << " " << MaxCFGBlocksPerFunction << " max CFG blocks per function.\n"; unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0 : NumUninitAnalysisVariables/NumUninitAnalysisFunctions; unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 
0 : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions; llvm::errs() << NumUninitAnalysisFunctions << " functions analyzed for uninitialized variables\n" << "  " << NumUninitAnalysisVariables << " variables analyzed.\n" << "  " << AvgUninitVariablesPerFunction << " average variables per function.\n" << "  " << MaxUninitAnalysisVariablesPerFunction << " max variables per function.\n" << "  " << NumUninitAnalysisBlockVisits << " block visits.\n" << "  " << AvgUninitBlockVisitsPerFunction << " average block visits per function.\n" << "  " << MaxUninitAnalysisBlockVisitsPerFunction << " max block visits per function.\n"; }
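// For illustration of the HLSL out-parameter checks wired into IssueWarnings
// above (hypothetical HLSL source, not part of this file):
//   void getColor(out float4 c) { }    // never written on any path:
//                                      // warn_hlsl_uninit_out_param
//   void maybeColor(bool b, out float4 c) {
//     if (b) c = 0;                    // written on only one branch:
//   }                                  // warn_hlsl_sometimes_uninit_out_param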
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaInit.cpp
//===--- SemaInit.cpp - Semantic Analysis for Initializers ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for initializers. // //===----------------------------------------------------------------------===// #include "clang/Sema/Initialization.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/TargetInfo.h" #include "clang/Sema/Designator.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change #include "llvm/ADT/APInt.h" #include "llvm/ADT/SmallString.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include <map> using namespace clang; //===----------------------------------------------------------------------===// // Sema Initialization Checking //===----------------------------------------------------------------------===// /// \brief Check whether T is compatible with a wide character type (wchar_t, /// char16_t or char32_t). static bool IsWideCharCompatible(QualType T, ASTContext &Context) { if (Context.typesAreCompatible(Context.getWideCharType(), T)) return true; if (Context.getLangOpts().CPlusPlus || Context.getLangOpts().C11) { return Context.typesAreCompatible(Context.Char16Ty, T) || Context.typesAreCompatible(Context.Char32Ty, T); } return false; } enum StringInitFailureKind { SIF_None, SIF_NarrowStringIntoWideChar, SIF_WideStringIntoChar, SIF_IncompatWideStringIntoWideChar, SIF_Other }; /// \brief Check whether the array of type AT can be initialized by the Init /// expression by means of string initialization. Returns SIF_None if so, /// otherwise returns a StringInitFailureKind that describes why the /// initialization would not work. static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT, ASTContext &Context) { if (!isa<ConstantArrayType>(AT) && !isa<IncompleteArrayType>(AT)) return SIF_Other; // See if this is a string literal or @encode. Init = Init->IgnoreParens(); // Handle @encode, which is a narrow string. if (isa<ObjCEncodeExpr>(Init) && AT->getElementType()->isCharType()) return SIF_None; // Otherwise we can only handle string literals. StringLiteral *SL = dyn_cast<StringLiteral>(Init); if (!SL) return SIF_Other; const QualType ElemTy = Context.getCanonicalType(AT->getElementType()).getUnqualifiedType(); switch (SL->getKind()) { case StringLiteral::Ascii: case StringLiteral::UTF8: // char array can be initialized with a narrow string. // Only allow char x[] = "foo"; not char x[] = L"foo"; if (ElemTy->isCharType()) return SIF_None; if (IsWideCharCompatible(ElemTy, Context)) return SIF_NarrowStringIntoWideChar; return SIF_Other; // C99 6.7.8p15 (with correction from DR343), or C11 6.7.9p15: // "An array with element type compatible with a qualified or unqualified // version of wchar_t, char16_t, or char32_t may be initialized by a wide // string literal with the corresponding encoding prefix (L, u, or U, // respectively), optionally enclosed in braces. 
case StringLiteral::UTF16: if (Context.typesAreCompatible(Context.Char16Ty, ElemTy)) return SIF_None; if (ElemTy->isCharType()) return SIF_WideStringIntoChar; if (IsWideCharCompatible(ElemTy, Context)) return SIF_IncompatWideStringIntoWideChar; return SIF_Other; case StringLiteral::UTF32: if (Context.typesAreCompatible(Context.Char32Ty, ElemTy)) return SIF_None; if (ElemTy->isCharType()) return SIF_WideStringIntoChar; if (IsWideCharCompatible(ElemTy, Context)) return SIF_IncompatWideStringIntoWideChar; return SIF_Other; case StringLiteral::Wide: if (Context.typesAreCompatible(Context.getWideCharType(), ElemTy)) return SIF_None; if (ElemTy->isCharType()) return SIF_WideStringIntoChar; if (IsWideCharCompatible(ElemTy, Context)) return SIF_IncompatWideStringIntoWideChar; return SIF_Other; } llvm_unreachable("missed a StringLiteral kind?"); } static StringInitFailureKind IsStringInit(Expr *init, QualType declType, ASTContext &Context) { const ArrayType *arrayType = Context.getAsArrayType(declType); if (!arrayType) return SIF_Other; return IsStringInit(init, arrayType, Context); } /// Update the type of a string literal, including any surrounding parentheses, /// to match the type of the object which it is initializing. static void updateStringLiteralType(Expr *E, QualType Ty) { while (true) { E->setType(Ty); if (isa<StringLiteral>(E) || isa<ObjCEncodeExpr>(E)) break; else if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) E = PE->getSubExpr(); else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) E = UO->getSubExpr(); else if (GenericSelectionExpr *GSE = dyn_cast<GenericSelectionExpr>(E)) E = GSE->getResultExpr(); else llvm_unreachable("unexpected expr in string literal init"); } } static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT, Sema &S) { // Get the length of the string as parsed. auto *ConstantArrayTy = cast<ConstantArrayType>(Str->getType()->getAsArrayTypeUnsafe()); uint64_t StrLength = ConstantArrayTy->getSize().getZExtValue(); if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) { // C99 6.7.8p14. We have an array of character type with unknown size // being initialized to a string literal. llvm::APInt ConstVal(32, StrLength); // Return a new array type (C99 6.7.8p22). DeclT = S.Context.getConstantArrayType(IAT->getElementType(), ConstVal, ArrayType::Normal, 0); updateStringLiteralType(Str, DeclT); return; } const ConstantArrayType *CAT = cast<ConstantArrayType>(AT); // We have an array of character type with known size. However, // the size may be smaller or larger than the string we are initializing. // FIXME: Avoid truncation for 64-bit length strings. if (S.getLangOpts().CPlusPlus) { if (StringLiteral *SL = dyn_cast<StringLiteral>(Str->IgnoreParens())) { // For Pascal strings it's OK to strip off the terminating null character, // so the example below is valid: // // unsigned char a[2] = "\pa"; if (SL->isPascal()) StrLength--; } // [dcl.init.string]p2 if (StrLength > CAT->getSize().getZExtValue()) S.Diag(Str->getLocStart(), diag::err_initializer_string_for_char_array_too_long) << Str->getSourceRange(); } else { // C99 6.7.8p14. if (StrLength-1 > CAT->getSize().getZExtValue()) S.Diag(Str->getLocStart(), diag::ext_initializer_string_for_char_array_too_long) << Str->getSourceRange(); } // Set the type to the actual size that we are initializing. If we have // something like: // char x[1] = "foo"; // then this will set the string literal's type to char[1]. 
updateStringLiteralType(Str, DeclT); } //===----------------------------------------------------------------------===// // Semantic checking for initializer lists. //===----------------------------------------------------------------------===// /// @brief Semantic checking for initializer lists. /// /// The InitListChecker class contains a set of routines that each /// handle the initialization of a certain kind of entity, e.g., /// arrays, vectors, struct/union types, scalars, etc. The /// InitListChecker itself performs a recursive walk of the subobject /// structure of the type to be initialized, while stepping through /// the initializer list one element at a time. The IList and Index /// parameters to each of the Check* routines contain the active /// (syntactic) initializer list and the index into that initializer /// list that represents the current initializer. Each routine is /// responsible for moving that Index forward as it consumes elements. /// /// Each Check* routine also takes StructuredList/StructuredIndex /// arguments, which contain the current "structured" (semantic) /// initializer list and the index into that initializer list where we /// are copying initializers as we map them over to the semantic /// list. Once we have completed our recursive walk of the subobject /// structure, we will have constructed a full semantic initializer /// list. /// /// C99 designators cause changes in the initializer list traversal, /// because they make the initialization "jump" into a specific /// subobject and then continue the initialization from that /// point. CheckDesignatedInitializer() recursively steps into the /// designated subobject and manages backing out the recursion to /// initialize the subobjects after the one designated. namespace { class InitListChecker { Sema &SemaRef; const InitializationKind &Kind; // HLSL Change: provide access to the initialization kind bool hadError; bool VerifyOnly; // no diagnostics, no structure building llvm::DenseMap<InitListExpr *, InitListExpr *> SyntacticToSemantic; InitListExpr *FullyStructuredList; void CheckImplicitInitList(const InitializedEntity &Entity, InitListExpr *ParentIList, QualType T, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex); void CheckExplicitInitList(const InitializedEntity &Entity, InitListExpr *IList, QualType &T, InitListExpr *StructuredList, bool TopLevelObject = false); void CheckListElementTypes(const InitializedEntity &Entity, InitListExpr *IList, QualType &DeclType, bool SubobjectIsDesignatorContext, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex, bool TopLevelObject = false); void CheckSubElementType(const InitializedEntity &Entity, InitListExpr *IList, QualType ElemType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex); void CheckComplexType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex); void CheckScalarType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex); void CheckReferenceType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex); void CheckVectorType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex); void CheckStructUnionTypes(const
InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, RecordDecl::field_iterator Field, bool SubobjectIsDesignatorContext, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex, bool TopLevelObject = false); void CheckArrayType(const InitializedEntity &Entity, InitListExpr *IList, QualType &DeclType, llvm::APSInt elementIndex, bool SubobjectIsDesignatorContext, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex); bool CheckDesignatedInitializer(const InitializedEntity &Entity, InitListExpr *IList, DesignatedInitExpr *DIE, unsigned DesigIdx, QualType &CurrentObjectType, RecordDecl::field_iterator *NextField, llvm::APSInt *NextElementIndex, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex, bool FinishSubobjectInit, bool TopLevelObject); InitListExpr *getStructuredSubobjectInit(InitListExpr *IList, unsigned Index, QualType CurrentObjectType, InitListExpr *StructuredList, unsigned StructuredIndex, SourceRange InitRange, bool IsFullyOverwritten = false); void UpdateStructuredListElement(InitListExpr *StructuredList, unsigned &StructuredIndex, Expr *expr); int numArrayElements(QualType DeclType); int numStructUnionElements(QualType DeclType); static ExprResult PerformEmptyInit(Sema &SemaRef, SourceLocation Loc, const InitializedEntity &Entity, bool VerifyOnly); // Explanation on the "FillWithNoInit" mode: // // Assume we have the following definitions (Case#1): // struct P { char x[6][6]; } xp = { .x[1] = "bar" }; // struct PP { struct P lp; } l = { .lp = xp, .lp.x[1][2] = 'f' }; // // l.lp.x[1][0..1] should not be filled with implicit initializers because the // "base" initializer "xp" will provide values for them; l.lp.x[1] will be "baf". // // But if we have (Case#2): // struct PP l = { .lp = xp, .lp.x[1] = { [2] = 'f' } }; // // l.lp.x[1][0..1] are implicitly initialized and do not use values from the // "base" initializer; l.lp.x[1] will be "\0\0f\0\0\0". // // To distinguish Case#1 from Case#2, and also to avoid leaving many "holes" // in the InitListExpr, the "holes" in Case#1 are filled not with empty // initializers but with special "NoInitExpr" place holders, which tells the // CodeGen not to generate any initializers for these parts. void FillInEmptyInitForField(unsigned Init, FieldDecl *Field, const InitializedEntity &ParentEntity, InitListExpr *ILE, bool &RequiresSecondPass, bool FillWithNoInit = false); void FillInEmptyInitializations(const InitializedEntity &Entity, InitListExpr *ILE, bool &RequiresSecondPass, bool FillWithNoInit = false); bool CheckFlexibleArrayInit(const InitializedEntity &Entity, Expr *InitExpr, FieldDecl *Field, bool TopLevelObject); void CheckEmptyInitializable(const InitializedEntity &Entity, SourceLocation Loc); public: InitListChecker(Sema &S, const InitializedEntity &Entity, const InitializationKind &K, // HLSL Change - add Kind InitListExpr *IL, QualType &T, bool VerifyOnly); bool HadError() { return hadError; } // @brief Retrieves the fully-structured initializer list used for // semantic analysis and code generation. 
InitListExpr *getFullyStructuredList() const { return FullyStructuredList; } }; } // end anonymous namespace ExprResult InitListChecker::PerformEmptyInit(Sema &SemaRef, SourceLocation Loc, const InitializedEntity &Entity, bool VerifyOnly) { InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc, true); MultiExprArg SubInit; Expr *InitExpr; InitListExpr DummyInitList(SemaRef.Context, Loc, None, Loc); // C++ [dcl.init.aggr]p7: // If there are fewer initializer-clauses in the list than there are // members in the aggregate, then each member not explicitly initialized // ... bool EmptyInitList = SemaRef.getLangOpts().CPlusPlus11 && Entity.getType()->getBaseElementTypeUnsafe()->isRecordType(); if (EmptyInitList) { // C++1y / DR1070: // shall be initialized [...] from an empty initializer list. // // We apply the resolution of this DR to C++11 but not C++98, since C++98 // does not have useful semantics for initialization from an init list. // We treat this as copy-initialization, because aggregate initialization // always performs copy-initialization on its elements. // // Only do this if we're initializing a class type, to avoid filling in // the initializer list where possible. InitExpr = VerifyOnly ? &DummyInitList : new (SemaRef.Context) InitListExpr(SemaRef.Context, Loc, None, Loc); InitExpr->setType(SemaRef.Context.VoidTy); SubInit = InitExpr; Kind = InitializationKind::CreateCopy(Loc, Loc); } else { // C++03: // shall be value-initialized. } InitializationSequence InitSeq(SemaRef, Entity, Kind, SubInit); // libstdc++4.6 marks the vector default constructor as explicit in // _GLIBCXX_DEBUG mode, so recover using the C++03 logic in that case. // stlport does so too. Look for std::__debug for libstdc++, and for // std:: for stlport. This is effectively a compiler-side implementation of // LWG2193. if (!InitSeq && EmptyInitList && InitSeq.getFailureKind() == InitializationSequence::FK_ExplicitConstructor) { OverloadCandidateSet::iterator Best; OverloadingResult O = InitSeq.getFailedCandidateSet() .BestViableFunction(SemaRef, Kind.getLocation(), Best); (void)O; assert(O == OR_Success && "Inconsistent overload resolution"); CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function); CXXRecordDecl *R = CtorDecl->getParent(); if (CtorDecl->getMinRequiredArguments() == 0 && CtorDecl->isExplicit() && R->getDeclName() && SemaRef.SourceMgr.isInSystemHeader(CtorDecl->getLocation())) { bool IsInStd = false; for (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(R->getDeclContext()); ND && !IsInStd; ND = dyn_cast<NamespaceDecl>(ND->getParent())) { if (SemaRef.getStdNamespace()->InEnclosingNamespaceSetOf(ND)) IsInStd = true; } if (IsInStd && llvm::StringSwitch<bool>(R->getName()) .Cases("basic_string", "deque", "forward_list", true) .Cases("list", "map", "multimap", "multiset", true) .Cases("priority_queue", "queue", "set", "stack", true) .Cases("unordered_map", "unordered_set", "vector", true) .Default(false)) { InitSeq.InitializeFrom( SemaRef, Entity, InitializationKind::CreateValue(Loc, Loc, Loc, true), MultiExprArg(), /*TopLevelOfInitList=*/false); // Emit a warning for this. System header warnings aren't shown // by default, but people working on system headers should see it. 
if (!VerifyOnly) { SemaRef.Diag(CtorDecl->getLocation(), diag::warn_invalid_initializer_from_system_header); SemaRef.Diag(Entity.getDecl()->getLocation(), diag::note_used_in_initialization_here); } } } } if (!InitSeq) { if (!VerifyOnly) { InitSeq.Diagnose(SemaRef, Entity, Kind, SubInit); if (Entity.getKind() == InitializedEntity::EK_Member) SemaRef.Diag(Entity.getDecl()->getLocation(), diag::note_in_omitted_aggregate_initializer) << /*field*/1 << Entity.getDecl(); else if (Entity.getKind() == InitializedEntity::EK_ArrayElement) SemaRef.Diag(Loc, diag::note_in_omitted_aggregate_initializer) << /*array element*/0 << Entity.getElementIndex(); } return ExprError(); } return VerifyOnly ? ExprResult(static_cast<Expr *>(nullptr)) : InitSeq.Perform(SemaRef, Entity, Kind, SubInit); } void InitListChecker::CheckEmptyInitializable(const InitializedEntity &Entity, SourceLocation Loc) { assert(VerifyOnly && "CheckEmptyInitializable is only intended for verification mode."); if (PerformEmptyInit(SemaRef, Loc, Entity, /*VerifyOnly*/true).isInvalid()) hadError = true; } void InitListChecker::FillInEmptyInitForField(unsigned Init, FieldDecl *Field, const InitializedEntity &ParentEntity, InitListExpr *ILE, bool &RequiresSecondPass, bool FillWithNoInit) { SourceLocation Loc = ILE->getLocEnd(); unsigned NumInits = ILE->getNumInits(); InitializedEntity MemberEntity = InitializedEntity::InitializeMember(Field, &ParentEntity); if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) if (!RType->getDecl()->isUnion()) assert(Init < NumInits && "This ILE should have been expanded"); if (Init >= NumInits || !ILE->getInit(Init)) { if (FillWithNoInit) { Expr *Filler = new (SemaRef.Context) NoInitExpr(Field->getType()); if (Init < NumInits) ILE->setInit(Init, Filler); else ILE->updateInit(SemaRef.Context, Init, Filler); return; } // C++1y [dcl.init.aggr]p7: // If there are fewer initializer-clauses in the list than there are // members in the aggregate, then each member not explicitly initialized // shall be initialized from its brace-or-equal-initializer [...] if (Field->hasInClassInitializer()) { ExprResult DIE = SemaRef.BuildCXXDefaultInitExpr(Loc, Field); if (DIE.isInvalid()) { hadError = true; return; } if (Init < NumInits) ILE->setInit(Init, DIE.get()); else { ILE->updateInit(SemaRef.Context, Init, DIE.get()); RequiresSecondPass = true; } return; } if (Field->getType()->isReferenceType()) { // C++ [dcl.init.aggr]p9: // If an incomplete or empty initializer-list leaves a // member of reference type uninitialized, the program is // ill-formed. SemaRef.Diag(Loc, diag::err_init_reference_member_uninitialized) << Field->getType() << ILE->getSyntacticForm()->getSourceRange(); SemaRef.Diag(Field->getLocation(), diag::note_uninit_reference_member); hadError = true; return; } ExprResult MemberInit = PerformEmptyInit(SemaRef, Loc, MemberEntity, /*VerifyOnly*/false); if (MemberInit.isInvalid()) { hadError = true; return; } if (hadError) { // Do nothing } else if (Init < NumInits) { ILE->setInit(Init, MemberInit.getAs<Expr>()); } else if (!isa<ImplicitValueInitExpr>(MemberInit.get())) { // Empty initialization requires a constructor call, so // extend the initializer list to include the constructor // call and make a note that we'll need to take another pass // through the initializer list.
ILE->updateInit(SemaRef.Context, Init, MemberInit.getAs<Expr>()); RequiresSecondPass = true; } } else if (InitListExpr *InnerILE = dyn_cast<InitListExpr>(ILE->getInit(Init))) FillInEmptyInitializations(MemberEntity, InnerILE, RequiresSecondPass, FillWithNoInit); else if (DesignatedInitUpdateExpr *InnerDIUE = dyn_cast<DesignatedInitUpdateExpr>(ILE->getInit(Init))) FillInEmptyInitializations(MemberEntity, InnerDIUE->getUpdater(), RequiresSecondPass, /*FillWithNoInit =*/ true); } /// Recursively replaces NULL values within the given initializer list /// with expressions that perform value-initialization of the /// appropriate type. void InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity, InitListExpr *ILE, bool &RequiresSecondPass, bool FillWithNoInit) { assert((ILE->getType() != SemaRef.Context.VoidTy) && "Should not have void type"); if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) { const RecordDecl *RDecl = RType->getDecl(); if (RDecl->isUnion() && ILE->getInitializedFieldInUnion()) FillInEmptyInitForField(0, ILE->getInitializedFieldInUnion(), Entity, ILE, RequiresSecondPass, FillWithNoInit); else if (RDecl->isUnion() && isa<CXXRecordDecl>(RDecl) && cast<CXXRecordDecl>(RDecl)->hasInClassInitializer()) { for (auto *Field : RDecl->fields()) { if (Field->hasInClassInitializer()) { FillInEmptyInitForField(0, Field, Entity, ILE, RequiresSecondPass, FillWithNoInit); break; } } } else { // The fields beyond ILE->getNumInits() are default initialized, so in // order to leave them uninitialized, the ILE is expanded and the extra // fields are then filled with NoInitExpr. unsigned NumFields = 0; for (auto *Field : RDecl->fields()) if (!Field->isUnnamedBitfield()) ++NumFields; if (ILE->getNumInits() < NumFields) ILE->resizeInits(SemaRef.Context, NumFields); unsigned Init = 0; for (auto *Field : RDecl->fields()) { if (Field->isUnnamedBitfield()) continue; if (hadError) return; FillInEmptyInitForField(Init, Field, Entity, ILE, RequiresSecondPass, FillWithNoInit); if (hadError) return; ++Init; // Only look at the first initialization of a union. if (RDecl->isUnion()) break; } } return; } QualType ElementType; InitializedEntity ElementEntity = Entity; unsigned NumInits = ILE->getNumInits(); unsigned NumElements = NumInits; if (const ArrayType *AType = SemaRef.Context.getAsArrayType(ILE->getType())) { ElementType = AType->getElementType(); if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType)) NumElements = CAType->getSize().getZExtValue(); ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity); } else if (const VectorType *VType = ILE->getType()->getAs<VectorType>()) { ElementType = VType->getElementType(); NumElements = VType->getNumElements(); ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity); } else ElementType = ILE->getType(); for (unsigned Init = 0; Init != NumElements; ++Init) { if (hadError) return; if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement || ElementEntity.getKind() == InitializedEntity::EK_VectorElement) ElementEntity.setElementIndex(Init); Expr *InitExpr = (Init < NumInits ? 
ILE->getInit(Init) : nullptr); if (!InitExpr && Init < NumInits && ILE->hasArrayFiller()) ILE->setInit(Init, ILE->getArrayFiller()); else if (!InitExpr && !ILE->hasArrayFiller()) { Expr *Filler = nullptr; if (FillWithNoInit) Filler = new (SemaRef.Context) NoInitExpr(ElementType); else { ExprResult ElementInit = PerformEmptyInit(SemaRef, ILE->getLocEnd(), ElementEntity, /*VerifyOnly*/false); if (ElementInit.isInvalid()) { hadError = true; return; } Filler = ElementInit.getAs<Expr>(); } if (hadError) { // Do nothing } else if (Init < NumInits) { // For arrays, just set the expression used for value-initialization // of the "holes" in the array. if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement) ILE->setArrayFiller(Filler); else ILE->setInit(Init, Filler); } else { // For arrays, just set the expression used for value-initialization // of the rest of elements and exit. if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement) { ILE->setArrayFiller(Filler); return; } if (!isa<ImplicitValueInitExpr>(Filler) && !isa<NoInitExpr>(Filler)) { // Empty initialization requires a constructor call, so // extend the initializer list to include the constructor // call and make a note that we'll need to take another pass // through the initializer list. ILE->updateInit(SemaRef.Context, Init, Filler); RequiresSecondPass = true; } } } else if (InitListExpr *InnerILE = dyn_cast_or_null<InitListExpr>(InitExpr)) FillInEmptyInitializations(ElementEntity, InnerILE, RequiresSecondPass, FillWithNoInit); else if (DesignatedInitUpdateExpr *InnerDIUE = dyn_cast_or_null<DesignatedInitUpdateExpr>(InitExpr)) FillInEmptyInitializations(ElementEntity, InnerDIUE->getUpdater(), RequiresSecondPass, /*FillWithNoInit =*/ true); } } InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity, const InitializationKind &K, // HLSL Change - add K InitListExpr *IL, QualType &T, bool VerifyOnly) : SemaRef(S), Kind(K), VerifyOnly(VerifyOnly) { // HLSL Change - add Kind // FIXME: Check that IL isn't already the semantic form of some other // InitListExpr. If it is, we'd create a broken AST. hadError = false; FullyStructuredList = getStructuredSubobjectInit(IL, 0, T, nullptr, 0, IL->getSourceRange()); CheckExplicitInitList(Entity, IL, T, FullyStructuredList, /*TopLevelObject=*/true); if (!hadError && !VerifyOnly) { bool RequiresSecondPass = false; FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass); if (RequiresSecondPass && !hadError) FillInEmptyInitializations(Entity, FullyStructuredList, RequiresSecondPass); } } int InitListChecker::numArrayElements(QualType DeclType) { // FIXME: use a proper constant int maxElements = 0x7FFFFFFF; if (const ConstantArrayType *CAT = SemaRef.Context.getAsConstantArrayType(DeclType)) { maxElements = static_cast<int>(CAT->getSize().getZExtValue()); } return maxElements; } int InitListChecker::numStructUnionElements(QualType DeclType) { RecordDecl *structDecl = DeclType->getAs<RecordType>()->getDecl(); int InitializableMembers = 0; for (const auto *Field : structDecl->fields()) if (!Field->isUnnamedBitfield()) ++InitializableMembers; if (structDecl->isUnion()) return std::min(InitializableMembers, 1); return InitializableMembers - structDecl->hasFlexibleArrayMember(); } /// Check whether the range of the initializer \p ParentIList from element /// \p Index onwards can be used to initialize an object of type \p T. Update /// \p Index to indicate how many elements of the list were consumed. 
/// /// This also fills in \p StructuredList, from element \p StructuredIndex /// onwards, with the fully-braced, desugared form of the initialization. void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity, InitListExpr *ParentIList, QualType T, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex) { int maxElements = 0; if (T->isArrayType()) maxElements = numArrayElements(T); else if (T->isRecordType()) maxElements = numStructUnionElements(T); else if (T->isVectorType()) maxElements = T->getAs<VectorType>()->getNumElements(); else llvm_unreachable("CheckImplicitInitList(): Illegal type"); if (maxElements == 0) { if (!VerifyOnly) SemaRef.Diag(ParentIList->getInit(Index)->getLocStart(), diag::err_implicit_empty_initializer); ++Index; hadError = true; return; } // Build a structured initializer list corresponding to this subobject. InitListExpr *StructuredSubobjectInitList = getStructuredSubobjectInit(ParentIList, Index, T, StructuredList, StructuredIndex, SourceRange(ParentIList->getInit(Index)->getLocStart(), ParentIList->getSourceRange().getEnd())); unsigned StructuredSubobjectInitIndex = 0; // Check the element types and build the structural subobject. unsigned StartIndex = Index; CheckListElementTypes(Entity, ParentIList, T, /*SubobjectIsDesignatorContext=*/false, Index, StructuredSubobjectInitList, StructuredSubobjectInitIndex); if (!VerifyOnly) { StructuredSubobjectInitList->setType(T); unsigned EndIndex = (Index == StartIndex? StartIndex : Index - 1); // Update the structured sub-object initializer so that its ending // range corresponds with the end of the last initializer it used. if (EndIndex < ParentIList->getNumInits()) { SourceLocation EndLoc = ParentIList->getInit(EndIndex)->getSourceRange().getEnd(); StructuredSubobjectInitList->setRBraceLoc(EndLoc); } // Complain about missing braces. if (T->isArrayType() || T->isRecordType()) { SemaRef.Diag(StructuredSubobjectInitList->getLocStart(), diag::warn_missing_braces) << StructuredSubobjectInitList->getSourceRange() << FixItHint::CreateInsertion( StructuredSubobjectInitList->getLocStart(), "{") << FixItHint::CreateInsertion( SemaRef.getLocForEndOfToken( StructuredSubobjectInitList->getLocEnd()), "}"); } } } /// Warn that \p Entity was of scalar type and was initialized by a /// single-element braced initializer list. static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity, SourceRange Braces) { // Don't warn during template instantiation. If the initialization was // non-dependent, we warned during the initial parse; otherwise, the // type might not be scalar in some uses of the template. if (!S.ActiveTemplateInstantiations.empty()) return; unsigned DiagID = 0; switch (Entity.getKind()) { case InitializedEntity::EK_VectorElement: case InitializedEntity::EK_ComplexElement: case InitializedEntity::EK_ArrayElement: case InitializedEntity::EK_Parameter: case InitializedEntity::EK_Parameter_CF_Audited: case InitializedEntity::EK_Result: // Extra braces here are suspicious. DiagID = diag::warn_braces_around_scalar_init; break; case InitializedEntity::EK_Member: // Warn on aggregate initialization but not on ctor init list or // default member initializer. if (Entity.getParent()) DiagID = diag::warn_braces_around_scalar_init; break; case InitializedEntity::EK_Variable: case InitializedEntity::EK_LambdaCapture: // No warning, might be direct-list-initialization. // FIXME: Should we warn for copy-list-initialization in these cases?
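// For example, 'int n{0};' is direct-list-initialization and idiomatic,
// so no warning is emitted for these entity kinds.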
break; case InitializedEntity::EK_New: case InitializedEntity::EK_Temporary: case InitializedEntity::EK_CompoundLiteralInit: // No warning, braces are part of the syntax of the underlying construct. break; case InitializedEntity::EK_RelatedResult: // No warning, we already warned when initializing the result. break; case InitializedEntity::EK_Exception: case InitializedEntity::EK_Base: case InitializedEntity::EK_Delegating: case InitializedEntity::EK_BlockElement: llvm_unreachable("unexpected braced scalar init"); } if (DiagID) { S.Diag(Braces.getBegin(), DiagID) << Braces << FixItHint::CreateRemoval(Braces.getBegin()) << FixItHint::CreateRemoval(Braces.getEnd()); } } /// Check whether the initializer \p IList (that was written with explicit /// braces) can be used to initialize an object of type \p T. /// /// This also fills in \p StructuredList with the fully-braced, desugared /// form of the initialization. void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity, InitListExpr *IList, QualType &T, InitListExpr *StructuredList, bool TopLevelObject) { assert((IList->isExplicit() || SemaRef.getLangOpts().HLSL) && "Illegal Implicit InitListExpr"); // HLSL Change: reuse in context of constructors for vectors if (!VerifyOnly) { SyntacticToSemantic[IList] = StructuredList; StructuredList->setSyntacticForm(IList); } unsigned Index = 0, StructuredIndex = 0; CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true, Index, StructuredList, StructuredIndex, TopLevelObject); if (!VerifyOnly) { QualType ExprTy = T; if (!ExprTy->isArrayType()) ExprTy = ExprTy.getNonLValueExprType(SemaRef.Context); IList->setType(ExprTy); StructuredList->setType(ExprTy); } if (hadError) return; if (Index < IList->getNumInits()) { // We have leftover initializers if (VerifyOnly) { if (SemaRef.getLangOpts().CPlusPlus || (SemaRef.getLangOpts().OpenCL && IList->getType()->isVectorType())) { hadError = true; } return; } if (StructuredIndex == 1 && IsStringInit(StructuredList->getInit(0), T, SemaRef.Context) == SIF_None) { unsigned DK = diag::ext_excess_initializers_in_char_array_initializer; if (SemaRef.getLangOpts().CPlusPlus) { DK = diag::err_excess_initializers_in_char_array_initializer; hadError = true; } // Special-case SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK) << IList->getInit(Index)->getSourceRange(); } else if (!T->isIncompleteType()) { // Don't complain for incomplete types, since we'll get an error // elsewhere QualType CurrentObjectType = StructuredList->getType(); int initKind = CurrentObjectType->isArrayType()? 0 : CurrentObjectType->isVectorType()? 1 : CurrentObjectType->isScalarType()? 2 : CurrentObjectType->isUnionType()? 
3 : 4; unsigned DK = diag::ext_excess_initializers; if (SemaRef.getLangOpts().CPlusPlus) { DK = diag::err_excess_initializers; hadError = true; } if (SemaRef.getLangOpts().OpenCL && initKind == 1) { DK = diag::err_excess_initializers; hadError = true; } SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK) << initKind << IList->getInit(Index)->getSourceRange(); } } if (!VerifyOnly && T->isScalarType() && IList->getNumInits() == 1 && !isa<InitListExpr>(IList->getInit(0))) warnBracedScalarInit(SemaRef, Entity, IList->getSourceRange()); } void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity, InitListExpr *IList, QualType &DeclType, bool SubobjectIsDesignatorContext, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex, bool TopLevelObject) { if (DeclType->isAnyComplexType() && SubobjectIsDesignatorContext) { // Explicitly braced initializer for complex type can be real+imaginary // parts. CheckComplexType(Entity, IList, DeclType, Index, StructuredList, StructuredIndex); } else if (DeclType->isScalarType()) { CheckScalarType(Entity, IList, DeclType, Index, StructuredList, StructuredIndex); } else if (DeclType->isVectorType()) { CheckVectorType(Entity, IList, DeclType, Index, StructuredList, StructuredIndex); } else if (DeclType->isRecordType()) { assert(DeclType->isAggregateType() && "non-aggregate records should be handled in CheckSubElementType"); RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl(); CheckStructUnionTypes(Entity, IList, DeclType, RD->field_begin(), SubobjectIsDesignatorContext, Index, StructuredList, StructuredIndex, TopLevelObject); } else if (DeclType->isArrayType()) { llvm::APSInt Zero( SemaRef.Context.getTypeSize(SemaRef.Context.getSizeType()), false); CheckArrayType(Entity, IList, DeclType, Zero, SubobjectIsDesignatorContext, Index, StructuredList, StructuredIndex); } else if (DeclType->isVoidType() || DeclType->isFunctionType()) { // This type is invalid, issue a diagnostic.
++Index; if (!VerifyOnly) SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type) << DeclType; hadError = true; } else if (DeclType->isReferenceType()) { CheckReferenceType(Entity, IList, DeclType, Index, StructuredList, StructuredIndex); } else if (DeclType->isObjCObjectType()) { if (!VerifyOnly) SemaRef.Diag(IList->getLocStart(), diag::err_init_objc_class) << DeclType; hadError = true; } else { if (!VerifyOnly) SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type) << DeclType; hadError = true; } } void InitListChecker::CheckSubElementType(const InitializedEntity &Entity, InitListExpr *IList, QualType ElemType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex) { Expr *expr = IList->getInit(Index); if (ElemType->isReferenceType()) return CheckReferenceType(Entity, IList, ElemType, Index, StructuredList, StructuredIndex); if (InitListExpr *SubInitList = dyn_cast<InitListExpr>(expr)) { if (SubInitList->getNumInits() == 1 && IsStringInit(SubInitList->getInit(0), ElemType, SemaRef.Context) == SIF_None) { expr = SubInitList->getInit(0); } else if (!SemaRef.getLangOpts().CPlusPlus) { InitListExpr *InnerStructuredList = getStructuredSubobjectInit(IList, Index, ElemType, StructuredList, StructuredIndex, SubInitList->getSourceRange(), true); CheckExplicitInitList(Entity, SubInitList, ElemType, InnerStructuredList); if (!hadError && !VerifyOnly) { bool RequiresSecondPass = false; FillInEmptyInitializations(Entity, InnerStructuredList, RequiresSecondPass); if (RequiresSecondPass && !hadError) FillInEmptyInitializations(Entity, InnerStructuredList, RequiresSecondPass); } ++StructuredIndex; ++Index; return; } // C++ initialization is handled later. } else if (isa<ImplicitValueInitExpr>(expr)) { // This happens during template instantiation when we see an InitListExpr // that we've already checked once. assert(SemaRef.Context.hasSameType(expr->getType(), ElemType) && "found implicit initialization for the wrong type"); if (!VerifyOnly) UpdateStructuredListElement(StructuredList, StructuredIndex, expr); ++Index; return; } if (SemaRef.getLangOpts().CPlusPlus && !SemaRef.getLangOpts().HLSL) { // HLSL Change: use OpenCL-style rules // C++ [dcl.init.aggr]p2: // Each member is copy-initialized from the corresponding // initializer-clause. // FIXME: Better EqualLoc? InitializationKind Kind = InitializationKind::CreateCopy(expr->getLocStart(), SourceLocation()); InitializationSequence Seq(SemaRef, Entity, Kind, expr, /*TopLevelOfInitList*/ true); // C++14 [dcl.init.aggr]p13: // If the assignment-expression can initialize a member, the member is // initialized. Otherwise [...] brace elision is assumed // // Brace elision is never performed if the element is not an // assignment-expression. if (Seq || isa<InitListExpr>(expr)) { if (!VerifyOnly) { ExprResult Result = Seq.Perform(SemaRef, Entity, Kind, expr); if (Result.isInvalid()) hadError = true; UpdateStructuredListElement(StructuredList, StructuredIndex, Result.getAs<Expr>()); } else if (!Seq) hadError = true; ++Index; return; } // Fall through for subaggregate initialization } else if (ElemType->isScalarType() || ElemType->isAtomicType()) { // FIXME: Need to handle atomic aggregate types with implicit init lists. return CheckScalarType(Entity, IList, ElemType, Index, StructuredList, StructuredIndex); } else if (const ArrayType *arrayType = SemaRef.Context.getAsArrayType(ElemType)) { // arrayType can be incomplete if we're initializing a flexible // array member. 
There's nothing we can do with the completed // type here, though. if (IsStringInit(expr, arrayType, SemaRef.Context) == SIF_None) { if (!VerifyOnly) { CheckStringInit(expr, ElemType, arrayType, SemaRef); UpdateStructuredListElement(StructuredList, StructuredIndex, expr); } ++Index; return; } // Fall through for subaggregate initialization. } else { assert((ElemType->isRecordType() || ElemType->isVectorType()) && "Unexpected type"); // C99 6.7.8p13: // // The initializer for a structure or union object that has // automatic storage duration shall be either an initializer // list as described below, or a single expression that has // compatible structure or union type. In the latter case, the // initial value of the object, including unnamed members, is // that of the expression. ExprResult ExprRes = expr; if (SemaRef.CheckSingleAssignmentConstraints( ElemType, ExprRes, !VerifyOnly) != Sema::Incompatible) { if (ExprRes.isInvalid()) hadError = true; else { ExprRes = SemaRef.DefaultFunctionArrayLvalueConversion(ExprRes.get()); if (ExprRes.isInvalid()) hadError = true; } UpdateStructuredListElement(StructuredList, StructuredIndex, ExprRes.getAs<Expr>()); ++Index; return; } ExprRes.get(); // Fall through for subaggregate initialization } // C++ [dcl.init.aggr]p12: // // [...] Otherwise, if the member is itself a non-empty // subaggregate, brace elision is assumed and the initializer is // considered for the initialization of the first member of // the subaggregate. if (!SemaRef.getLangOpts().OpenCL && (ElemType->isAggregateType() || ElemType->isVectorType())) { CheckImplicitInitList(Entity, IList, ElemType, Index, StructuredList, StructuredIndex); ++StructuredIndex; } else { if (!VerifyOnly) { // We cannot initialize this element, so let // PerformCopyInitialization produce the appropriate diagnostic. SemaRef.PerformCopyInitialization(Entity, SourceLocation(), expr, /*TopLevelOfInitList=*/true); } hadError = true; ++Index; ++StructuredIndex; } } void InitListChecker::CheckComplexType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex) { assert(Index == 0 && "Index in explicit init list must be zero"); // As an extension, clang supports complex initializers, which initialize // a complex number component-wise. When an explicit initializer list for // a complex number contains exactly two initializers, this extension kicks // in: it expects the initializer list to contain two elements convertible // to the element type of the complex type. The first element initializes // the real part, and the second element initializes the imaginary part. if (IList->getNumInits() != 2) return CheckScalarType(Entity, IList, DeclType, Index, StructuredList, StructuredIndex); // This is an extension in C. (The builtin _Complex type does not exist // in the C++ standard.) if (!SemaRef.getLangOpts().CPlusPlus && !VerifyOnly) SemaRef.Diag(IList->getLocStart(), diag::ext_complex_component_init) << IList->getSourceRange(); // Initialize the complex number.
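// For example (C extension): '_Complex float z = { 1.0f, 2.0f };' sets the
// real part to 1.0f and the imaginary part to 2.0f.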
QualType elementType = DeclType->getAs<ComplexType>()->getElementType(); InitializedEntity ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity); for (unsigned i = 0; i < 2; ++i) { ElementEntity.setElementIndex(Index); CheckSubElementType(ElementEntity, IList, elementType, Index, StructuredList, StructuredIndex); } } void InitListChecker::CheckScalarType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex) { if (Index >= IList->getNumInits()) { if (!VerifyOnly) SemaRef.Diag(IList->getLocStart(), SemaRef.getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_empty_scalar_initializer : diag::err_empty_scalar_initializer) << IList->getSourceRange(); hadError = !SemaRef.getLangOpts().CPlusPlus11; ++Index; ++StructuredIndex; return; } Expr *expr = IList->getInit(Index); if (InitListExpr *SubIList = dyn_cast<InitListExpr>(expr)) { // FIXME: This is invalid, and accepting it causes overload resolution // to pick the wrong overload in some corner cases. if (!VerifyOnly) SemaRef.Diag(SubIList->getLocStart(), diag::ext_many_braces_around_scalar_init) << SubIList->getSourceRange(); CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList, StructuredIndex); return; } else if (isa<DesignatedInitExpr>(expr)) { if (!VerifyOnly) SemaRef.Diag(expr->getLocStart(), diag::err_designator_for_scalar_init) << DeclType << expr->getSourceRange(); hadError = true; ++Index; ++StructuredIndex; return; } if (VerifyOnly) { if (!SemaRef.CanPerformCopyInitialization(Entity,expr)) hadError = true; ++Index; return; } ExprResult Result = SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(), expr, /*TopLevelOfInitList=*/true); Expr *ResultExpr = nullptr; if (Result.isInvalid()) hadError = true; // types weren't compatible. else { ResultExpr = Result.getAs<Expr>(); if (ResultExpr != expr) { // The type was promoted, update initializer list. IList->setInit(Index, ResultExpr); } } if (hadError) ++StructuredIndex; else UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr); ++Index; } void InitListChecker::CheckReferenceType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex) { if (Index >= IList->getNumInits()) { // FIXME: It would be wonderful if we could point at the actual member. In // general, it would be useful to pass location information down the stack, // so that we know the location (or decl) of the "current object" being // initialized. 
if (!VerifyOnly) SemaRef.Diag(IList->getLocStart(), diag::err_init_reference_member_uninitialized) << DeclType << IList->getSourceRange(); hadError = true; ++Index; ++StructuredIndex; return; } Expr *expr = IList->getInit(Index); if (isa<InitListExpr>(expr) && !SemaRef.getLangOpts().CPlusPlus11) { if (!VerifyOnly) SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list) << DeclType << IList->getSourceRange(); hadError = true; ++Index; ++StructuredIndex; return; } if (VerifyOnly) { if (!SemaRef.CanPerformCopyInitialization(Entity,expr)) hadError = true; ++Index; return; } ExprResult Result = SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(), expr, /*TopLevelOfInitList=*/true); if (Result.isInvalid()) hadError = true; expr = Result.getAs<Expr>(); IList->setInit(Index, expr); if (hadError) ++StructuredIndex; else UpdateStructuredListElement(StructuredList, StructuredIndex, expr); ++Index; } void InitListChecker::CheckVectorType(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex) { const VectorType *VT = DeclType->getAs<VectorType>(); unsigned maxElements = VT->getNumElements(); unsigned numEltsInit = 0; QualType elementType = VT->getElementType(); if (Index >= IList->getNumInits()) { // Make sure the element type can be value-initialized. if (VerifyOnly) CheckEmptyInitializable( InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity), IList->getLocEnd()); return; } // HLSL Change - HLSL is more similar to OpenCL than C/C++ if (!SemaRef.getLangOpts().OpenCL && !SemaRef.getLangOpts().HLSL) { // If the initializing element is a vector, try to copy-initialize // instead of breaking it apart (which is doomed to failure anyway). Expr *Init = IList->getInit(Index); if (!isa<InitListExpr>(Init) && Init->getType()->isVectorType()) { if (VerifyOnly) { if (!SemaRef.CanPerformCopyInitialization(Entity, Init)) hadError = true; ++Index; return; } ExprResult Result = SemaRef.PerformCopyInitialization(Entity, Init->getLocStart(), Init, /*TopLevelOfInitList=*/true); Expr *ResultExpr = nullptr; if (Result.isInvalid()) hadError = true; // types weren't compatible. else { ResultExpr = Result.getAs<Expr>(); if (ResultExpr != Init) { // The type was promoted, update initializer list. IList->setInit(Index, ResultExpr); } } if (hadError) ++StructuredIndex; else UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr); ++Index; return; } InitializedEntity ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity); for (unsigned i = 0; i < maxElements; ++i, ++numEltsInit) { // Don't attempt to go past the end of the init list if (Index >= IList->getNumInits()) { if (VerifyOnly) CheckEmptyInitializable(ElementEntity, IList->getLocEnd()); break; } ElementEntity.setElementIndex(Index); CheckSubElementType(ElementEntity, IList, elementType, Index, StructuredList, StructuredIndex); } if (VerifyOnly) return; bool isBigEndian = SemaRef.Context.getTargetInfo().isBigEndian(); const VectorType *T = Entity.getType()->getAs<VectorType>(); if (isBigEndian && (T->getVectorKind() == VectorType::NeonVector || T->getVectorKind() == VectorType::NeonPolyVector)) { // The ability to use vector initializer lists is a GNU vector extension // and is unrelated to the NEON intrinsics in arm_neon.h. 
On little // endian machines it works fine; on big endian machines, however, it // exhibits surprising behaviour: // // uint32x2_t x = {42, 64}; // return vget_lane_u32(x, 0); // Will return 64. // // Because of this, explicitly call out that it is non-portable. // SemaRef.Diag(IList->getLocStart(), diag::warn_neon_vector_initializer_non_portable); const char *typeCode; unsigned typeSize = SemaRef.Context.getTypeSize(elementType); if (elementType->isFloatingType()) typeCode = "f"; else if (elementType->isSignedIntegerType()) typeCode = "s"; else if (elementType->isUnsignedIntegerType()) typeCode = "u"; else llvm_unreachable("Invalid element type!"); SemaRef.Diag(IList->getLocStart(), SemaRef.Context.getTypeSize(VT) > 64 ? diag::note_neon_vector_initializer_non_portable_q : diag::note_neon_vector_initializer_non_portable) << typeCode << typeSize; } return; } InitializedEntity ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity); // OpenCL initializers allow vectors to be constructed from vectors. for (unsigned i = 0; i < maxElements; ++i) { // Don't attempt to go past the end of the init list if (Index >= IList->getNumInits()) break; ElementEntity.setElementIndex(Index); QualType IType = IList->getInit(Index)->getType(); if (!IType->isVectorType()) { CheckSubElementType(ElementEntity, IList, elementType, Index, StructuredList, StructuredIndex); ++numEltsInit; } else { QualType VecType; const VectorType *IVT = IType->getAs<VectorType>(); unsigned numIElts = IVT->getNumElements(); if (IType->isExtVectorType()) VecType = SemaRef.Context.getExtVectorType(elementType, numIElts); else VecType = SemaRef.Context.getVectorType(elementType, numIElts, IVT->getVectorKind()); CheckSubElementType(ElementEntity, IList, VecType, Index, StructuredList, StructuredIndex); numEltsInit += numIElts; } } // HLSL Change Starts // For copy assignments that aren't explicit initialization lists, allow extra elements (emit a warning though). bool extraElementsAllowed = false; if (SemaRef.getLangOpts().HLSL && IList->getLBraceLoc().isInvalid()) { extraElementsAllowed = numEltsInit > maxElements; if (extraElementsAllowed && !VerifyOnly) { SemaRef.Diag(Kind.getLocation(), diag::warn_hlsl_implicit_vector_truncation); } } // HLSL Change Ends // OpenCL requires all elements to be initialized. if (numEltsInit != maxElements && !extraElementsAllowed) { // HLSL Change if (!VerifyOnly) { static const unsigned selectVectorIdx = 0; SemaRef.Diag(IList->getLocStart(), diag::err_incorrect_num_initializers) << (numEltsInit < maxElements) << selectVectorIdx << maxElements << numEltsInit; } hadError = true; } } void InitListChecker::CheckArrayType(const InitializedEntity &Entity, InitListExpr *IList, QualType &DeclType, llvm::APSInt elementIndex, bool SubobjectIsDesignatorContext, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex) { const ArrayType *arrayType = SemaRef.Context.getAsArrayType(DeclType); // Check for the special-case of initializing an array with a string. if (Index < IList->getNumInits()) { if (IsStringInit(IList->getInit(Index), arrayType, SemaRef.Context) == SIF_None) { // We place the string literal directly into the resulting // initializer list. This is the only place where the structure // of the structured initializer list doesn't match exactly, // because doing so would involve allocating one character // constant for each string.
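// For example, 'char buf[4] = { "abc" };' keeps the single StringLiteral in
// the structured list rather than expanding it into the four character
// constants 'a', 'b', 'c', and '\0'.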
if (!VerifyOnly) { CheckStringInit(IList->getInit(Index), DeclType, arrayType, SemaRef); UpdateStructuredListElement(StructuredList, StructuredIndex, IList->getInit(Index)); StructuredList->resizeInits(SemaRef.Context, StructuredIndex); } ++Index; return; } } if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(arrayType)) { // Check for VLAs; in standard C it would be possible to check this // earlier, but I don't know where clang accepts VLAs (gcc accepts // them in all sorts of strange places). if (!VerifyOnly) SemaRef.Diag(VAT->getSizeExpr()->getLocStart(), diag::err_variable_object_no_init) << VAT->getSizeExpr()->getSourceRange(); hadError = true; ++Index; ++StructuredIndex; return; } // We might know the maximum number of elements in advance. llvm::APSInt maxElements(elementIndex.getBitWidth(), elementIndex.isUnsigned()); bool maxElementsKnown = false; if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(arrayType)) { maxElements = CAT->getSize(); elementIndex = elementIndex.extOrTrunc(maxElements.getBitWidth()); elementIndex.setIsUnsigned(maxElements.isUnsigned()); maxElementsKnown = true; } QualType elementType = arrayType->getElementType(); while (Index < IList->getNumInits()) { Expr *Init = IList->getInit(Index); if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) { // If we're not the subobject that matches up with the '{' for // the designator, we shouldn't be handling the // designator. Return immediately. if (!SubobjectIsDesignatorContext) return; // Handle this designated initializer. elementIndex will be // updated to be the next array element we'll initialize. if (CheckDesignatedInitializer(Entity, IList, DIE, 0, DeclType, nullptr, &elementIndex, Index, StructuredList, StructuredIndex, true, false)) { hadError = true; continue; } if (elementIndex.getBitWidth() > maxElements.getBitWidth()) maxElements = maxElements.extend(elementIndex.getBitWidth()); else if (elementIndex.getBitWidth() < maxElements.getBitWidth()) elementIndex = elementIndex.extend(maxElements.getBitWidth()); elementIndex.setIsUnsigned(maxElements.isUnsigned()); // If the array is of incomplete type, keep track of the number of // elements in the initializer. if (!maxElementsKnown && elementIndex > maxElements) maxElements = elementIndex; continue; } // If we know the maximum number of elements, and we've already // hit it, stop consuming elements in the initializer list. if (maxElementsKnown && elementIndex == maxElements) break; InitializedEntity ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context, StructuredIndex, Entity); // Check this element. CheckSubElementType(ElementEntity, IList, elementType, Index, StructuredList, StructuredIndex); ++elementIndex; // If the array is of incomplete type, keep track of the number of // elements in the initializer. if (!maxElementsKnown && elementIndex > maxElements) maxElements = elementIndex; } if (!hadError && DeclType->isIncompleteArrayType() && !VerifyOnly) { // If this is an incomplete array type, the actual type needs to // be calculated here. llvm::APSInt Zero(maxElements.getBitWidth(), maxElements.isUnsigned()); if (maxElements == Zero) { // Sizing an array implicitly to zero is not allowed by ISO C, // but is supported by GNU. SemaRef.Diag(IList->getLocStart(), diag::ext_typecheck_zero_array_size); } DeclType = SemaRef.Context.getConstantArrayType(elementType, maxElements, ArrayType::Normal, 0); } if (!hadError && VerifyOnly) { // Check if there are any members of the array that get value-initialized. 
// If so, check if doing that is possible. // FIXME: This needs to detect holes left by designated initializers too. if (maxElementsKnown && elementIndex < maxElements) CheckEmptyInitializable(InitializedEntity::InitializeElement( SemaRef.Context, 0, Entity), IList->getLocEnd()); } } bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity, Expr *InitExpr, FieldDecl *Field, bool TopLevelObject) { // Handle GNU flexible array initializers. unsigned FlexArrayDiag; if (isa<InitListExpr>(InitExpr) && cast<InitListExpr>(InitExpr)->getNumInits() == 0) { // Empty flexible array init always allowed as an extension FlexArrayDiag = diag::ext_flexible_array_init; } else if (SemaRef.getLangOpts().CPlusPlus) { // Disallow flexible array init in C++; it is not required for gcc // compatibility, and it needs work to IRGen correctly in general. FlexArrayDiag = diag::err_flexible_array_init; } else if (!TopLevelObject) { // Disallow flexible array init on non-top-level object FlexArrayDiag = diag::err_flexible_array_init; } else if (Entity.getKind() != InitializedEntity::EK_Variable) { // Disallow flexible array init on anything which is not a variable. FlexArrayDiag = diag::err_flexible_array_init; } else if (cast<VarDecl>(Entity.getDecl())->hasLocalStorage()) { // Disallow flexible array init on local variables. FlexArrayDiag = diag::err_flexible_array_init; } else { // Allow other cases. FlexArrayDiag = diag::ext_flexible_array_init; } if (!VerifyOnly) { SemaRef.Diag(InitExpr->getLocStart(), FlexArrayDiag) << InitExpr->getLocStart(); SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member) << Field; } return FlexArrayDiag != diag::ext_flexible_array_init; } void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity, InitListExpr *IList, QualType DeclType, RecordDecl::field_iterator Field, bool SubobjectIsDesignatorContext, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex, bool TopLevelObject) { RecordDecl* structDecl = DeclType->getAs<RecordType>()->getDecl(); // If the record is invalid, some of its members are invalid. To avoid // confusion, we forgo checking the initializer for the entire record. if (structDecl->isInvalidDecl()) { // Assume it was supposed to consume a single initializer. ++Index; hadError = true; return; } if (DeclType->isUnionType() && IList->getNumInits() == 0) { RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl(); // If there's a default initializer, use it. if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->hasInClassInitializer()) { if (VerifyOnly) return; for (RecordDecl::field_iterator FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) { if (Field->hasInClassInitializer()) { StructuredList->setInitializedFieldInUnion(*Field); // FIXME: Actually build a CXXDefaultInitExpr? return; } } } // Value-initialize the first member of the union that isn't an unnamed // bitfield. for (RecordDecl::field_iterator FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) { if (!Field->isUnnamedBitfield()) { if (VerifyOnly) CheckEmptyInitializable( InitializedEntity::InitializeMember(*Field, &Entity), IList->getLocEnd()); else StructuredList->setInitializedFieldInUnion(*Field); break; } } return; } // If structDecl is a forward declaration, this loop won't do // anything except look at designated initializers; that's okay, // because an error should get printed out elsewhere. It might be // worthwhile to skip over the rest of the initializer, though.
RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl(); RecordDecl::field_iterator FieldEnd = RD->field_end(); bool InitializedSomething = false; bool CheckForMissingFields = true; while (Index < IList->getNumInits()) { Expr *Init = IList->getInit(Index); if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) { // If we're not the subobject that matches up with the '{' for // the designator, we shouldn't be handling the // designator. Return immediately. if (!SubobjectIsDesignatorContext) return; // Handle this designated initializer. Field will be updated to // the next field that we'll be initializing. if (CheckDesignatedInitializer(Entity, IList, DIE, 0, DeclType, &Field, nullptr, Index, StructuredList, StructuredIndex, true, TopLevelObject)) hadError = true; InitializedSomething = true; // Disable check for missing fields when designators are used. // This matches gcc behaviour. CheckForMissingFields = false; continue; } if (Field == FieldEnd) { // We've run out of fields. We're done. break; } // We've already initialized a member of a union. We're done. if (InitializedSomething && DeclType->isUnionType()) break; // If we've hit the flexible array member at the end, we're done. if (Field->getType()->isIncompleteArrayType()) break; if (Field->isUnnamedBitfield()) { // Don't initialize unnamed bitfields, e.g. "int : 20;" ++Field; continue; } // Make sure we can use this declaration. bool InvalidUse; if (VerifyOnly) InvalidUse = !SemaRef.CanUseDecl(*Field); else InvalidUse = SemaRef.DiagnoseUseOfDecl(*Field, IList->getInit(Index)->getLocStart()); if (InvalidUse) { ++Index; ++Field; hadError = true; continue; } InitializedEntity MemberEntity = InitializedEntity::InitializeMember(*Field, &Entity); CheckSubElementType(MemberEntity, IList, Field->getType(), Index, StructuredList, StructuredIndex); InitializedSomething = true; if (DeclType->isUnionType() && !VerifyOnly) { // Initialize the first field within the union. StructuredList->setInitializedFieldInUnion(*Field); } ++Field; } // Emit warnings for missing struct field initializers. if (!VerifyOnly && InitializedSomething && CheckForMissingFields && Field != FieldEnd && !Field->getType()->isIncompleteArrayType() && !DeclType->isUnionType()) { // It is possible we have one or more unnamed bitfields remaining. // Find first (if any) named field and emit warning. for (RecordDecl::field_iterator it = Field, end = RD->field_end(); it != end; ++it) { if (!it->isUnnamedBitfield() && !it->hasInClassInitializer()) { SemaRef.Diag(IList->getSourceRange().getEnd(), diag::warn_missing_field_initializers) << *it; break; } } } // Check that any remaining fields can be value-initialized. if (VerifyOnly && Field != FieldEnd && !DeclType->isUnionType() && !Field->getType()->isIncompleteArrayType()) { // FIXME: Should check for holes left by designated initializers too. 
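// For example (illustrative): in 'struct S { int a, b; } s = { .b = 1 };'
// the designator jumps past 'a', leaving a hole that is later filled by
// empty initialization but is not verified here.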
for (; Field != FieldEnd && !hadError; ++Field) { if (!Field->isUnnamedBitfield() && !Field->hasInClassInitializer()) CheckEmptyInitializable( InitializedEntity::InitializeMember(*Field, &Entity), IList->getLocEnd()); } } if (Field == FieldEnd || !Field->getType()->isIncompleteArrayType() || Index >= IList->getNumInits()) return; if (CheckFlexibleArrayInit(Entity, IList->getInit(Index), *Field, TopLevelObject)) { hadError = true; ++Index; return; } InitializedEntity MemberEntity = InitializedEntity::InitializeMember(*Field, &Entity); if (isa<InitListExpr>(IList->getInit(Index))) CheckSubElementType(MemberEntity, IList, Field->getType(), Index, StructuredList, StructuredIndex); else CheckImplicitInitList(MemberEntity, IList, Field->getType(), Index, StructuredList, StructuredIndex); } /// \brief Expand a field designator that refers to a member of an /// anonymous struct or union into a series of field designators that /// refers to the field within the appropriate subobject. /// static void ExpandAnonymousFieldDesignator(Sema &SemaRef, DesignatedInitExpr *DIE, unsigned DesigIdx, IndirectFieldDecl *IndirectField) { typedef DesignatedInitExpr::Designator Designator; // Build the replacement designators. SmallVector<Designator, 4> Replacements; for (IndirectFieldDecl::chain_iterator PI = IndirectField->chain_begin(), PE = IndirectField->chain_end(); PI != PE; ++PI) { if (PI + 1 == PE) Replacements.push_back(Designator((IdentifierInfo *)nullptr, DIE->getDesignator(DesigIdx)->getDotLoc(), DIE->getDesignator(DesigIdx)->getFieldLoc())); else Replacements.push_back(Designator((IdentifierInfo *)nullptr, SourceLocation(), SourceLocation())); assert(isa<FieldDecl>(*PI)); Replacements.back().setField(cast<FieldDecl>(*PI)); } // Expand the current designator into the set of replacement // designators, so we have a full subobject path down to where the // member of the anonymous struct/union is actually stored. DIE->ExpandDesignator(SemaRef.Context, DesigIdx, &Replacements[0], &Replacements[0] + Replacements.size()); } static DesignatedInitExpr *CloneDesignatedInitExpr(Sema &SemaRef, DesignatedInitExpr *DIE) { unsigned NumIndexExprs = DIE->getNumSubExprs() - 1; SmallVector<Expr*, 4> IndexExprs(NumIndexExprs); for (unsigned I = 0; I < NumIndexExprs; ++I) IndexExprs[I] = DIE->getSubExpr(I + 1); return DesignatedInitExpr::Create(SemaRef.Context, DIE->designators_begin(), DIE->size(), IndexExprs, DIE->getEqualOrColonLoc(), DIE->usesGNUSyntax(), DIE->getInit()); } namespace { // Callback to only accept typo corrections that are for field members of // the given struct or union. class FieldInitializerValidatorCCC : public CorrectionCandidateCallback { public: explicit FieldInitializerValidatorCCC(RecordDecl *RD) : Record(RD) {} bool ValidateCandidate(const TypoCorrection &candidate) override { FieldDecl *FD = candidate.getCorrectionDeclAs<FieldDecl>(); return FD && FD->getDeclContext()->getRedeclContext()->Equals(Record); } private: RecordDecl *Record; }; } /// @brief Check the well-formedness of a C99 designated initializer. /// /// Determines whether the designated initializer @p DIE, which /// resides at the given @p Index within the initializer list @p /// IList, is well-formed for a current object of type @p DeclType /// (C99 6.7.8). The actual subobject that this designator refers to /// within the current subobject is returned in either /// @p NextField or @p NextElementIndex (whichever is appropriate). /// /// @param IList The initializer list in which this designated /// initializer occurs. 
/// /// @param DIE The designated initializer expression. /// /// @param DesigIdx The index of the current designator. /// /// @param CurrentObjectType The type of the "current object" (C99 6.7.8p17), /// into which the designation in @p DIE should refer. /// /// @param NextField If non-NULL and the first designator in @p DIE is /// a field, this will be set to the field declaration corresponding /// to the field named by the designator. /// /// @param NextElementIndex If non-NULL and the first designator in @p /// DIE is an array designator or GNU array-range designator, this /// will be set to the last index initialized by this designator. /// /// @param Index Index into @p IList where the designated initializer /// @p DIE occurs. /// /// @param StructuredList The initializer list expression that /// describes all of the subobject initializers in the order they'll /// actually be initialized. /// /// @returns true if there was an error, false otherwise. bool InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity, InitListExpr *IList, DesignatedInitExpr *DIE, unsigned DesigIdx, QualType &CurrentObjectType, RecordDecl::field_iterator *NextField, llvm::APSInt *NextElementIndex, unsigned &Index, InitListExpr *StructuredList, unsigned &StructuredIndex, bool FinishSubobjectInit, bool TopLevelObject) { if (DesigIdx == DIE->size()) { // Check the actual initialization for the designated object type. bool prevHadError = hadError; // Temporarily remove the designator expression from the // initializer list that the child calls see, so that we don't try // to re-process the designator. unsigned OldIndex = Index; IList->setInit(OldIndex, DIE->getInit()); CheckSubElementType(Entity, IList, CurrentObjectType, Index, StructuredList, StructuredIndex); // Restore the designated initializer expression in the syntactic // form of the initializer list. if (IList->getInit(OldIndex) != DIE->getInit()) DIE->setInit(IList->getInit(OldIndex)); IList->setInit(OldIndex, DIE); return hadError && !prevHadError; } DesignatedInitExpr::Designator *D = DIE->getDesignator(DesigIdx); bool IsFirstDesignator = (DesigIdx == 0); if (!VerifyOnly) { assert((IsFirstDesignator || StructuredList) && "Need a non-designated initializer list to start from"); // Determine the structural initializer list that corresponds to the // current subobject. if (IsFirstDesignator) StructuredList = SyntacticToSemantic.lookup(IList); else { Expr *ExistingInit = StructuredIndex < StructuredList->getNumInits() ? StructuredList->getInit(StructuredIndex) : nullptr; if (!ExistingInit && StructuredList->hasArrayFiller()) ExistingInit = StructuredList->getArrayFiller(); if (!ExistingInit) StructuredList = getStructuredSubobjectInit(IList, Index, CurrentObjectType, StructuredList, StructuredIndex, SourceRange(D->getLocStart(), DIE->getLocEnd())); else if (InitListExpr *Result = dyn_cast<InitListExpr>(ExistingInit)) StructuredList = Result; else { if (DesignatedInitUpdateExpr *E = dyn_cast<DesignatedInitUpdateExpr>(ExistingInit)) StructuredList = E->getUpdater(); else { DesignatedInitUpdateExpr *DIUE = new (SemaRef.Context) DesignatedInitUpdateExpr(SemaRef.Context, D->getLocStart(), ExistingInit, DIE->getLocEnd()); StructuredList->updateInit(SemaRef.Context, StructuredIndex, DIUE); StructuredList = DIUE->getUpdater(); } // We need to check on source range validity because the previous // initializer does not have to be an explicit initializer. 
e.g., // // struct P { int a, b; }; // struct PP { struct P p } l = { { .a = 2 }, .p.b = 3 }; // // There is an overwrite taking place because the first braced initializer // list "{ .a = 2 }" already provides value for .p.b (which is zero). if (ExistingInit->getSourceRange().isValid()) { // We are creating an initializer list that initializes the // subobjects of the current object, but there was already an // initialization that completely initialized the current // subobject, e.g., by a compound literal: // // struct X { int a, b; }; // struct X xs[] = { [0] = (struct X) { 1, 2 }, [0].b = 3 }; // // Here, xs[0].a == 0 and xs[0].b == 3, since the second, // designated initializer re-initializes the whole // subobject [0], overwriting previous initializers. SemaRef.Diag(D->getLocStart(), diag::warn_subobject_initializer_overrides) << SourceRange(D->getLocStart(), DIE->getLocEnd()); SemaRef.Diag(ExistingInit->getLocStart(), diag::note_previous_initializer) << /*FIXME:has side effects=*/0 << ExistingInit->getSourceRange(); } } } assert(StructuredList && "Expected a structured initializer list"); } if (D->isFieldDesignator()) { // C99 6.7.8p7: // // If a designator has the form // // . identifier // // then the current object (defined below) shall have // structure or union type and the identifier shall be the // name of a member of that type. const RecordType *RT = CurrentObjectType->getAs<RecordType>(); if (!RT) { SourceLocation Loc = D->getDotLoc(); if (Loc.isInvalid()) Loc = D->getFieldLoc(); if (!VerifyOnly) SemaRef.Diag(Loc, diag::err_field_designator_non_aggr) << SemaRef.getLangOpts().CPlusPlus << CurrentObjectType; ++Index; return true; } FieldDecl *KnownField = D->getField(); if (!KnownField) { IdentifierInfo *FieldName = D->getFieldName(); DeclContext::lookup_result Lookup = RT->getDecl()->lookup(FieldName); for (NamedDecl *ND : Lookup) { if (auto *FD = dyn_cast<FieldDecl>(ND)) { KnownField = FD; break; } if (auto *IFD = dyn_cast<IndirectFieldDecl>(ND)) { // In verify mode, don't modify the original. if (VerifyOnly) DIE = CloneDesignatedInitExpr(SemaRef, DIE); ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IFD); D = DIE->getDesignator(DesigIdx); KnownField = cast<FieldDecl>(*IFD->chain_begin()); break; } } if (!KnownField) { if (VerifyOnly) { ++Index; return true; // No typo correction when just trying this out. } // Name lookup found something, but it wasn't a field. if (!Lookup.empty()) { SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_nonfield) << FieldName; SemaRef.Diag(Lookup.front()->getLocation(), diag::note_field_designator_found); ++Index; return true; } // Name lookup didn't find anything. // Determine whether this was a typo for another field name. if (TypoCorrection Corrected = SemaRef.CorrectTypo( DeclarationNameInfo(FieldName, D->getFieldLoc()), Sema::LookupMemberName, /*Scope=*/nullptr, /*SS=*/nullptr, llvm::make_unique<FieldInitializerValidatorCCC>(RT->getDecl()), Sema::CTK_ErrorRecovery, RT->getDecl())) { SemaRef.diagnoseTypo( Corrected, SemaRef.PDiag(diag::err_field_designator_unknown_suggest) << FieldName << CurrentObjectType); KnownField = Corrected.getCorrectionDeclAs<FieldDecl>(); hadError = true; } else { // Typo correction didn't find anything. 
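// For example (illustrative): 'struct Point { int x, y; } p = { .depth = 1 };'
// names no field of Point and nothing close enough to suggest, so emit
// the unknown-field-designator error below.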
SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown) << FieldName << CurrentObjectType; ++Index; return true; } } } unsigned FieldIndex = 0; for (auto *FI : RT->getDecl()->fields()) { if (FI->isUnnamedBitfield()) continue; if (KnownField == FI) break; ++FieldIndex; } RecordDecl::field_iterator Field = RecordDecl::field_iterator(DeclContext::decl_iterator(KnownField)); // All of the fields of a union are located at the same place in // the initializer list. if (RT->getDecl()->isUnion()) { FieldIndex = 0; if (!VerifyOnly) { FieldDecl *CurrentField = StructuredList->getInitializedFieldInUnion(); if (CurrentField && CurrentField != *Field) { assert(StructuredList->getNumInits() == 1 && "A union should never have more than one initializer!"); // we're about to throw away an initializer, emit warning SemaRef.Diag(D->getFieldLoc(), diag::warn_initializer_overrides) << D->getSourceRange(); Expr *ExistingInit = StructuredList->getInit(0); SemaRef.Diag(ExistingInit->getLocStart(), diag::note_previous_initializer) << /*FIXME:has side effects=*/0 << ExistingInit->getSourceRange(); // remove existing initializer StructuredList->resizeInits(SemaRef.Context, 0); StructuredList->setInitializedFieldInUnion(nullptr); } StructuredList->setInitializedFieldInUnion(*Field); } } // Make sure we can use this declaration. bool InvalidUse; if (VerifyOnly) InvalidUse = !SemaRef.CanUseDecl(*Field); else InvalidUse = SemaRef.DiagnoseUseOfDecl(*Field, D->getFieldLoc()); if (InvalidUse) { ++Index; return true; } if (!VerifyOnly) { // Update the designator with the field declaration. D->setField(*Field); // Make sure that our non-designated initializer list has space // for a subobject corresponding to this field. if (FieldIndex >= StructuredList->getNumInits()) StructuredList->resizeInits(SemaRef.Context, FieldIndex + 1); } // This designator names a flexible array member. if (Field->getType()->isIncompleteArrayType()) { bool Invalid = false; if ((DesigIdx + 1) != DIE->size()) { // We can't designate an object within the flexible array // member (because GCC doesn't allow it). if (!VerifyOnly) { DesignatedInitExpr::Designator *NextD = DIE->getDesignator(DesigIdx + 1); SemaRef.Diag(NextD->getLocStart(), diag::err_designator_into_flexible_array_member) << SourceRange(NextD->getLocStart(), DIE->getLocEnd()); SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member) << *Field; } Invalid = true; } if (!hadError && !isa<InitListExpr>(DIE->getInit()) && !isa<StringLiteral>(DIE->getInit())) { // The initializer is not an initializer list. if (!VerifyOnly) { SemaRef.Diag(DIE->getInit()->getLocStart(), diag::err_flexible_array_init_needs_braces) << DIE->getInit()->getSourceRange(); SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member) << *Field; } Invalid = true; } // Check GNU flexible array initializer. if (!Invalid && CheckFlexibleArrayInit(Entity, DIE->getInit(), *Field, TopLevelObject)) Invalid = true; if (Invalid) { ++Index; return true; } // Initialize the array. 
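// For example (GNU extension, C only; illustrative): given
// 'struct S { int n; int tail[]; };', a file-scope
// 'struct S s = { .tail = { 1, 2, 3 } };' initializes the flexible array
// member through this path.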
bool prevHadError = hadError; unsigned newStructuredIndex = FieldIndex; unsigned OldIndex = Index; IList->setInit(Index, DIE->getInit()); InitializedEntity MemberEntity = InitializedEntity::InitializeMember(*Field, &Entity); CheckSubElementType(MemberEntity, IList, Field->getType(), Index, StructuredList, newStructuredIndex); IList->setInit(OldIndex, DIE); if (hadError && !prevHadError) { ++Field; ++FieldIndex; if (NextField) *NextField = Field; StructuredIndex = FieldIndex; return true; } } else { // Recurse to check later designated subobjects. QualType FieldType = Field->getType(); unsigned newStructuredIndex = FieldIndex; InitializedEntity MemberEntity = InitializedEntity::InitializeMember(*Field, &Entity); if (CheckDesignatedInitializer(MemberEntity, IList, DIE, DesigIdx + 1, FieldType, nullptr, nullptr, Index, StructuredList, newStructuredIndex, true, false)) return true; } // Find the position of the next field to be initialized in this // subobject. ++Field; ++FieldIndex; // If this is the first designator, our caller will continue checking // the rest of this struct/class/union subobject. if (IsFirstDesignator) { if (NextField) *NextField = Field; StructuredIndex = FieldIndex; return false; } if (!FinishSubobjectInit) return false; // We've already initialized something in the union; we're done. if (RT->getDecl()->isUnion()) return hadError; // Check the remaining fields within this class/struct/union subobject. bool prevHadError = hadError; CheckStructUnionTypes(Entity, IList, CurrentObjectType, Field, false, Index, StructuredList, FieldIndex); return hadError && !prevHadError; } // C99 6.7.8p6: // // If a designator has the form // // [ constant-expression ] // // then the current object (defined below) shall have array // type and the expression shall be an integer constant // expression. If the array is of unknown size, any // nonnegative value is valid. // // Additionally, cope with the GNU extension that permits // designators of the form // // [ constant-expression ... constant-expression ] const ArrayType *AT = SemaRef.Context.getAsArrayType(CurrentObjectType); if (!AT) { if (!VerifyOnly) SemaRef.Diag(D->getLBracketLoc(), diag::err_array_designator_non_array) << CurrentObjectType; ++Index; return true; } Expr *IndexExpr = nullptr; llvm::APSInt DesignatedStartIndex, DesignatedEndIndex; if (D->isArrayDesignator()) { IndexExpr = DIE->getArrayIndex(*D); DesignatedStartIndex = IndexExpr->EvaluateKnownConstInt(SemaRef.Context); DesignatedEndIndex = DesignatedStartIndex; } else { assert(D->isArrayRangeDesignator() && "Need array-range designator"); DesignatedStartIndex = DIE->getArrayRangeStart(*D)->EvaluateKnownConstInt(SemaRef.Context); DesignatedEndIndex = DIE->getArrayRangeEnd(*D)->EvaluateKnownConstInt(SemaRef.Context); IndexExpr = DIE->getArrayRangeEnd(*D); // Codegen can't handle evaluating array range designators that have side // effects, because we replicate the AST value for each initialized element. // As such, set the sawArrayRangeDesignator() bit if we initialize multiple // elements with something that has a side effect, so codegen can emit an // "error unsupported" error instead of miscompiling the app.
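// For example (illustrative; 'next_id' is a hypothetical function with side
// effects):
//
//   int ids[8] = { [2 ... 5] = next_id() };
//
// The call would have to be replicated for elements 2 through 5.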
    // Codegen can't handle evaluating array range designators that have side
    // effects, because we replicate the AST value for each initialized element.
    // As such, set the sawArrayRangeDesignator() bit if we initialize multiple
    // elements with something that has a side effect, so codegen can emit an
    // "error unsupported" error instead of miscompiling the app.
    if (DesignatedStartIndex.getZExtValue() !=
            DesignatedEndIndex.getZExtValue() &&
        DIE->getInit()->HasSideEffects(SemaRef.Context) && !VerifyOnly)
      FullyStructuredList->sawArrayRangeDesignator();
  }

  if (isa<ConstantArrayType>(AT)) {
    llvm::APSInt MaxElements(cast<ConstantArrayType>(AT)->getSize(), false);
    DesignatedStartIndex
      = DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
    DesignatedStartIndex.setIsUnsigned(MaxElements.isUnsigned());
    DesignatedEndIndex
      = DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
    DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
    if (DesignatedEndIndex >= MaxElements) {
      if (!VerifyOnly)
        SemaRef.Diag(IndexExpr->getLocStart(),
                     diag::err_array_designator_too_large)
          << DesignatedEndIndex.toString(10) << MaxElements.toString(10)
          << IndexExpr->getSourceRange();
      ++Index;
      return true;
    }
  } else {
    unsigned DesignatedIndexBitWidth =
      ConstantArrayType::getMaxSizeBits(SemaRef.Context);
    DesignatedStartIndex =
      DesignatedStartIndex.extOrTrunc(DesignatedIndexBitWidth);
    DesignatedEndIndex =
      DesignatedEndIndex.extOrTrunc(DesignatedIndexBitWidth);
    DesignatedStartIndex.setIsUnsigned(true);
    DesignatedEndIndex.setIsUnsigned(true);
  }

  if (!VerifyOnly && StructuredList->isStringLiteralInit()) {
    // We're modifying a string literal init; we have to decompose the string
    // so we can modify the individual characters.
    ASTContext &Context = SemaRef.Context;
    Expr *SubExpr = StructuredList->getInit(0)->IgnoreParens();

    // Compute the character type
    QualType CharTy = AT->getElementType();

    // Compute the type of the integer literals.
    QualType PromotedCharTy = CharTy;
    if (CharTy->isPromotableIntegerType())
      PromotedCharTy = Context.getPromotedIntegerType(CharTy);
    unsigned PromotedCharTyWidth = Context.getTypeSize(PromotedCharTy);

    if (StringLiteral *SL = dyn_cast<StringLiteral>(SubExpr)) {
      // Get the length of the string.
      uint64_t StrLen = SL->getLength();
      if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
        StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
      StructuredList->resizeInits(Context, StrLen);

      // Build a literal for each character in the string, and put them into
      // the init list.
      for (unsigned i = 0, e = StrLen; i != e; ++i) {
        llvm::APInt CodeUnit(PromotedCharTyWidth, SL->getCodeUnit(i));
        Expr *Init = new (Context) IntegerLiteral(
            Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
        if (CharTy != PromotedCharTy)
          Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
                                          Init, nullptr, VK_RValue);
        StructuredList->updateInit(Context, i, Init);
      }
    } else {
      ObjCEncodeExpr *E = cast<ObjCEncodeExpr>(SubExpr);
      std::string Str;
      Context.getObjCEncodingForType(E->getEncodedType(), Str);

      // Get the length of the string.
      uint64_t StrLen = Str.size();
      if (cast<ConstantArrayType>(AT)->getSize().ult(StrLen))
        StrLen = cast<ConstantArrayType>(AT)->getSize().getZExtValue();
      StructuredList->resizeInits(Context, StrLen);

      // Build a literal for each character in the string, and put them into
      // the init list.
      for (unsigned i = 0, e = StrLen; i != e; ++i) {
        llvm::APInt CodeUnit(PromotedCharTyWidth, Str[i]);
        Expr *Init = new (Context) IntegerLiteral(
            Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
        if (CharTy != PromotedCharTy)
          Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
                                          Init, nullptr, VK_RValue);
        StructuredList->updateInit(Context, i, Init);
      }
    }
  }
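  // Illustrative sketch (added commentary, not from the original source): the
  // decomposition above fires when a designator overwrites part of an array
  // that was initialized from a string literal, e.g.:
  //
  //   struct S { char name[4]; };
  //   struct S s = { .name = "abc", .name[0] = 'A' };
  //
  // The string is broken into per-character initializers so that element 0
  // can be replaced individually, yielding "Abc".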
  // Make sure that our non-designated initializer list has space
  // for a subobject corresponding to this array element.
  if (!VerifyOnly &&
      DesignatedEndIndex.getZExtValue() >= StructuredList->getNumInits())
    StructuredList->resizeInits(SemaRef.Context,
                                DesignatedEndIndex.getZExtValue() + 1);

  // Repeatedly perform subobject initializations in the range
  // [DesignatedStartIndex, DesignatedEndIndex].

  // Move to the next designator
  unsigned ElementIndex = DesignatedStartIndex.getZExtValue();
  unsigned OldIndex = Index;

  InitializedEntity ElementEntity =
    InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);

  while (DesignatedStartIndex <= DesignatedEndIndex) {
    // Recurse to check later designated subobjects.
    QualType ElementType = AT->getElementType();
    Index = OldIndex;

    ElementEntity.setElementIndex(ElementIndex);
    if (CheckDesignatedInitializer(ElementEntity, IList, DIE, DesigIdx + 1,
                                   ElementType, nullptr, nullptr, Index,
                                   StructuredList, ElementIndex,
                                   (DesignatedStartIndex == DesignatedEndIndex),
                                   false))
      return true;

    // Move to the next index in the array that we'll be initializing.
    ++DesignatedStartIndex;
    ElementIndex = DesignatedStartIndex.getZExtValue();
  }

  // If this the first designator, our caller will continue checking
  // the rest of this array subobject.
  if (IsFirstDesignator) {
    if (NextElementIndex)
      *NextElementIndex = DesignatedStartIndex;
    StructuredIndex = ElementIndex;
    return false;
  }

  if (!FinishSubobjectInit)
    return false;

  // Check the remaining elements within this array subobject.
  bool prevHadError = hadError;
  CheckArrayType(Entity, IList, CurrentObjectType, DesignatedStartIndex,
                 /*SubobjectIsDesignatorContext=*/false, Index,
                 StructuredList, ElementIndex);
  return hadError && !prevHadError;
}

// Get the structured initializer list for a subobject of type
// @p CurrentObjectType.
InitListExpr *
InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
                                            QualType CurrentObjectType,
                                            InitListExpr *StructuredList,
                                            unsigned StructuredIndex,
                                            SourceRange InitRange,
                                            bool IsFullyOverwritten) {
  if (VerifyOnly)
    return nullptr; // No structured list in verification-only mode.
  Expr *ExistingInit = nullptr;
  if (!StructuredList)
    ExistingInit = SyntacticToSemantic.lookup(IList);
  else if (StructuredIndex < StructuredList->getNumInits())
    ExistingInit = StructuredList->getInit(StructuredIndex);

  if (InitListExpr *Result = dyn_cast_or_null<InitListExpr>(ExistingInit))
    // There might have already been initializers for subobjects of the current
    // object, but a subsequent initializer list will overwrite the entirety
    // of the current object. (See DR 253 and C99 6.7.8p21). e.g.,
    //
    // struct P { char x[6]; };
    // struct P l = { .x[2] = 'x', .x = { [0] = 'f' } };
    //
    // The first designated initializer is ignored, and l.x is just "f".
    if (!IsFullyOverwritten)
      return Result;

  if (ExistingInit) {
    // We are creating an initializer list that initializes the
    // subobjects of the current object, but there was already an
    // initialization that completely initialized the current
    // subobject, e.g., by a compound literal:
    //
    // struct X { int a, b; };
    // struct X xs[] = { [0] = (struct X) { 1, 2 }, [0].b = 3 };
    //
    // Here, xs[0].a == 0 and xs[0].b == 3, since the second,
    // designated initializer re-initializes the whole
    // subobject [0], overwriting previous initializers.
    SemaRef.Diag(InitRange.getBegin(),
                 diag::warn_subobject_initializer_overrides)
      << InitRange;
    SemaRef.Diag(ExistingInit->getLocStart(),
                 diag::note_previous_initializer)
      << /*FIXME:has side effects=*/0
      << ExistingInit->getSourceRange();
  }

  InitListExpr *Result
    = new (SemaRef.Context) InitListExpr(SemaRef.Context,
                                         InitRange.getBegin(), None,
                                         InitRange.getEnd());

  QualType ResultType = CurrentObjectType;
  if (!ResultType->isArrayType())
    ResultType = ResultType.getNonLValueExprType(SemaRef.Context);
  Result->setType(ResultType);

  // Pre-allocate storage for the structured initializer list.
  unsigned NumElements = 0;
  unsigned NumInits = 0;
  bool GotNumInits = false;
  if (!StructuredList) {
    NumInits = IList->getNumInits();
    GotNumInits = true;
  } else if (Index < IList->getNumInits()) {
    if (InitListExpr *SubList = dyn_cast<InitListExpr>(IList->getInit(Index))) {
      NumInits = SubList->getNumInits();
      GotNumInits = true;
    }
  }

  if (const ArrayType *AType
      = SemaRef.Context.getAsArrayType(CurrentObjectType)) {
    if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType)) {
      NumElements = CAType->getSize().getZExtValue();
      // Simple heuristic so that we don't allocate a very large
      // initializer with many empty entries at the end.
      if (GotNumInits && NumElements > NumInits)
        NumElements = 0;
    }
  } else if (const VectorType *VType = CurrentObjectType->getAs<VectorType>())
    NumElements = VType->getNumElements();
  else if (const RecordType *RType = CurrentObjectType->getAs<RecordType>()) {
    RecordDecl *RDecl = RType->getDecl();
    if (RDecl->isUnion())
      NumElements = 1;
    else
      NumElements = std::distance(RDecl->field_begin(), RDecl->field_end());
  }

  Result->reserveInits(SemaRef.Context, NumElements);

  // Link this new initializer list into the structured initializer
  // lists.
  if (StructuredList)
    StructuredList->updateInit(SemaRef.Context, StructuredIndex, Result);
  else {
    Result->setSyntacticForm(IList);
    SyntacticToSemantic[IList] = Result;
  }

  return Result;
}

/// Update the initializer at index @p StructuredIndex within the
/// structured initializer list to the value @p expr.
void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
                                                  unsigned &StructuredIndex,
                                                  Expr *expr) {
  // No structured initializer list to update
  if (!StructuredList)
    return;

  if (Expr *PrevInit = StructuredList->updateInit(SemaRef.Context,
                                                  StructuredIndex, expr)) {
    // This initializer overwrites a previous initializer. Warn.
    // We need to check on source range validity because the previous
    // initializer does not have to be an explicit initializer.
    //   struct P { int a, b; };
    //   struct PP { struct P p } l = { { .a = 2 }, .p.b = 3 };
    // There is an overwrite taking place because the first braced initializer
    // list "{ .a = 2 }" already provides value for .p.b (which is zero).
    if (PrevInit->getSourceRange().isValid()) {
      SemaRef.Diag(expr->getLocStart(),
                   diag::warn_initializer_overrides)
        << expr->getSourceRange();

      SemaRef.Diag(PrevInit->getLocStart(),
                   diag::note_previous_initializer)
        << /*FIXME:has side effects=*/0
        << PrevInit->getSourceRange();
    }
  }

  ++StructuredIndex;
}

/// Check that the given Index expression is a valid array designator
/// value. This is essentially just a wrapper around
/// VerifyIntegerConstantExpression that also checks for negative values
/// and produces a reasonable diagnostic if there is a
/// failure. Returns the index expression, possibly with an implicit cast
/// added, on success. If everything went okay, Value will receive the
/// value of the constant expression.
static ExprResult
CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
  SourceLocation Loc = Index->getLocStart();

  // Make sure this is an integer constant expression.
  ExprResult Result = S.VerifyIntegerConstantExpression(Index, &Value);
  if (Result.isInvalid())
    return Result;

  if (Value.isSigned() && Value.isNegative())
    return S.Diag(Loc, diag::err_array_designator_negative)
      << Value.toString(10) << Index->getSourceRange();

  Value.setIsUnsigned(true);
  return Result;
}

ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
                                            SourceLocation Loc,
                                            bool GNUSyntax,
                                            ExprResult Init) {
  typedef DesignatedInitExpr::Designator ASTDesignator;

  bool Invalid = false;
  SmallVector<ASTDesignator, 32> Designators;
  SmallVector<Expr *, 32> InitExpressions;

  // Build designators and check array designator expressions.
  for (unsigned Idx = 0; Idx < Desig.getNumDesignators(); ++Idx) {
    const Designator &D = Desig.getDesignator(Idx);
    switch (D.getKind()) {
    case Designator::FieldDesignator:
      Designators.push_back(ASTDesignator(D.getField(), D.getDotLoc(),
                                          D.getFieldLoc()));
      break;

    case Designator::ArrayDesignator: {
      Expr *Index = static_cast<Expr *>(D.getArrayIndex());
      llvm::APSInt IndexValue;
      if (!Index->isTypeDependent() && !Index->isValueDependent())
        Index = CheckArrayDesignatorExpr(*this, Index, IndexValue).get();
      if (!Index)
        Invalid = true;
      else {
        Designators.push_back(ASTDesignator(InitExpressions.size(),
                                            D.getLBracketLoc(),
                                            D.getRBracketLoc()));
        InitExpressions.push_back(Index);
      }
      break;
    }

    case Designator::ArrayRangeDesignator: {
      Expr *StartIndex = static_cast<Expr *>(D.getArrayRangeStart());
      Expr *EndIndex = static_cast<Expr *>(D.getArrayRangeEnd());
      llvm::APSInt StartValue;
      llvm::APSInt EndValue;
      bool StartDependent = StartIndex->isTypeDependent() ||
                            StartIndex->isValueDependent();
      bool EndDependent = EndIndex->isTypeDependent() ||
                          EndIndex->isValueDependent();
      if (!StartDependent)
        StartIndex =
            CheckArrayDesignatorExpr(*this, StartIndex, StartValue).get();
      if (!EndDependent)
        EndIndex = CheckArrayDesignatorExpr(*this, EndIndex, EndValue).get();

      if (!StartIndex || !EndIndex)
        Invalid = true;
      else {
        // Make sure we're comparing values with the same bit width.
        if (StartDependent || EndDependent) {
          // Nothing to compute.
        } else if (StartValue.getBitWidth() > EndValue.getBitWidth())
          EndValue = EndValue.extend(StartValue.getBitWidth());
        else if (StartValue.getBitWidth() < EndValue.getBitWidth())
          StartValue = StartValue.extend(EndValue.getBitWidth());

        if (!StartDependent && !EndDependent && EndValue < StartValue) {
          Diag(D.getEllipsisLoc(), diag::err_array_designator_empty_range)
            << StartValue.toString(10) << EndValue.toString(10)
            << StartIndex->getSourceRange() << EndIndex->getSourceRange();
          Invalid = true;
        } else {
          Designators.push_back(ASTDesignator(InitExpressions.size(),
                                              D.getLBracketLoc(),
                                              D.getEllipsisLoc(),
                                              D.getRBracketLoc()));
          InitExpressions.push_back(StartIndex);
          InitExpressions.push_back(EndIndex);
        }
      }
      break;
    }
    }
  }

  if (Invalid || Init.isInvalid())
    return ExprError();
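  // Illustrative sketch (added commentary, not from the original source): the
  // designator validation above rejects, e.g.:
  //
  //   int a[5] = { [-1] = 0 };       // error: array designator value '-1'
  //                                  // is negative
  //   int b[5] = { [4 ... 2] = 0 };  // error: array designator range
  //                                  // [4, 2] is empty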
  // Clear out the expressions within the designation.
  Desig.ClearExprs(*this);

  DesignatedInitExpr *DIE
    = DesignatedInitExpr::Create(Context,
                                 Designators.data(), Designators.size(),
                                 InitExpressions, Loc, GNUSyntax,
                                 Init.getAs<Expr>());

  if (!getLangOpts().C99)
    Diag(DIE->getLocStart(), diag::ext_designated_init)
      << DIE->getSourceRange();

  return DIE;
}

//===----------------------------------------------------------------------===//
// Initialization entity
//===----------------------------------------------------------------------===//

InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
                                     const InitializedEntity &Parent)
  : Parent(&Parent), Index(Index) {
  if (const ArrayType *AT = Context.getAsArrayType(Parent.getType())) {
    Kind = EK_ArrayElement;
    Type = AT->getElementType();
  } else if (const VectorType *VT = Parent.getType()->getAs<VectorType>()) {
    Kind = EK_VectorElement;
    Type = VT->getElementType();
  } else {
    const ComplexType *CT = Parent.getType()->getAs<ComplexType>();
    assert(CT && "Unexpected type");
    Kind = EK_ComplexElement;
    Type = CT->getElementType();
  }
}

InitializedEntity
InitializedEntity::InitializeBase(ASTContext &Context,
                                  const CXXBaseSpecifier *Base,
                                  bool IsInheritedVirtualBase) {
  InitializedEntity Result;
  Result.Kind = EK_Base;
  Result.Parent = nullptr;
  Result.Base = reinterpret_cast<uintptr_t>(Base);
  if (IsInheritedVirtualBase)
    Result.Base |= 0x01;

  Result.Type = Base->getType();
  return Result;
}

DeclarationName InitializedEntity::getName() const {
  switch (getKind()) {
  case EK_Parameter:
  case EK_Parameter_CF_Audited: {
    ParmVarDecl *D = reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
    return (D ? D->getDeclName() : DeclarationName());
  }

  case EK_Variable:
  case EK_Member:
    return VariableOrMember->getDeclName();

  case EK_LambdaCapture:
    return DeclarationName(Capture.VarID);

  case EK_Result:
  case EK_Exception:
  case EK_New:
  case EK_Temporary:
  case EK_Base:
  case EK_Delegating:
  case EK_ArrayElement:
  case EK_VectorElement:
  case EK_ComplexElement:
  case EK_BlockElement:
  case EK_CompoundLiteralInit:
  case EK_RelatedResult:
    return DeclarationName();
  }

  llvm_unreachable("Invalid EntityKind!");
}

DeclaratorDecl *InitializedEntity::getDecl() const {
  switch (getKind()) {
  case EK_Variable:
  case EK_Member:
    return VariableOrMember;

  case EK_Parameter:
  case EK_Parameter_CF_Audited:
    return reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);

  case EK_Result:
  case EK_Exception:
  case EK_New:
  case EK_Temporary:
  case EK_Base:
  case EK_Delegating:
  case EK_ArrayElement:
  case EK_VectorElement:
  case EK_ComplexElement:
  case EK_BlockElement:
  case EK_LambdaCapture:
  case EK_CompoundLiteralInit:
  case EK_RelatedResult:
    return nullptr;
  }

  llvm_unreachable("Invalid EntityKind!");
}

bool InitializedEntity::allowsNRVO() const {
  switch (getKind()) {
  case EK_Result:
  case EK_Exception:
    return LocAndNRVO.NRVO;

  case EK_Variable:
  case EK_Parameter:
  case EK_Parameter_CF_Audited:
  case EK_Member:
  case EK_New:
  case EK_Temporary:
  case EK_CompoundLiteralInit:
  case EK_Base:
  case EK_Delegating:
  case EK_ArrayElement:
  case EK_VectorElement:
  case EK_ComplexElement:
  case EK_BlockElement:
  case EK_LambdaCapture:
  case EK_RelatedResult:
    break;
  }

  return false;
}

unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
  assert(getParent() != this);
  unsigned Depth = getParent() ? getParent()->dumpImpl(OS) : 0;
  for (unsigned I = 0; I != Depth; ++I)
    OS << "`-";

  switch (getKind()) {
  case EK_Variable: OS << "Variable"; break;
  case EK_Parameter: OS << "Parameter"; break;
  case EK_Parameter_CF_Audited: OS << "CF audited function Parameter"; break;
  case EK_Result: OS << "Result"; break;
  case EK_Exception: OS << "Exception"; break;
  case EK_Member: OS << "Member"; break;
  case EK_New: OS << "New"; break;
  case EK_Temporary: OS << "Temporary"; break;
  case EK_CompoundLiteralInit: OS << "CompoundLiteral"; break;
  case EK_RelatedResult: OS << "RelatedResult"; break;
  case EK_Base: OS << "Base"; break;
  case EK_Delegating: OS << "Delegating"; break;
  case EK_ArrayElement: OS << "ArrayElement " << Index; break;
  case EK_VectorElement: OS << "VectorElement " << Index; break;
  case EK_ComplexElement: OS << "ComplexElement " << Index; break;
  case EK_BlockElement: OS << "Block"; break;
  case EK_LambdaCapture:
    OS << "LambdaCapture ";
    OS << DeclarationName(Capture.VarID);
    break;
  }

  if (Decl *D = getDecl()) {
    OS << " ";
    cast<NamedDecl>(D)->printQualifiedName(OS);
  }

  OS << " '" << getType().getAsString() << "'\n";

  return Depth + 1;
}

void InitializedEntity::dump() const {
  dumpImpl(llvm::errs());
}

//===----------------------------------------------------------------------===//
// Initialization sequence
//===----------------------------------------------------------------------===//

void InitializationSequence::Step::Destroy() {
  switch (Kind) {
  case SK_ResolveAddressOfOverloadedFunction:
  case SK_CastDerivedToBaseRValue:
  case SK_CastDerivedToBaseXValue:
  case SK_CastDerivedToBaseLValue:
  case SK_BindReference:
  case SK_BindReferenceToTemporary:
  case SK_ExtraneousCopyToTemporary:
  case SK_UserConversion:
  case SK_QualificationConversionRValue:
  case SK_QualificationConversionXValue:
  case SK_QualificationConversionLValue:
  case SK_AtomicConversion:
  case SK_LValueToRValue:
  case SK_ListInitialization:
  case SK_UnwrapInitList:
  case SK_RewrapInitList:
  case SK_ConstructorInitialization:
  case SK_ConstructorInitializationFromList:
  case SK_ZeroInitialization:
  case SK_CAssignment:
  case SK_StringInit:
  case SK_ObjCObjectConversion:
  case SK_ArrayInit:
  case SK_ParenthesizedArrayInit:
  case SK_PassByIndirectCopyRestore:
  case SK_PassByIndirectRestore:
  case SK_ProduceObjCObject:
  case SK_StdInitializerList:
  case SK_StdInitializerListConstructorCall:
  case SK_OCLSamplerInit:
  case SK_OCLZeroEvent:
    break;

  case SK_ConversionSequence:
  case SK_ConversionSequenceNoNarrowing:
    delete ICS;
  }
}

bool InitializationSequence::isDirectReferenceBinding() const {
  return !Steps.empty() && Steps.back().Kind == SK_BindReference;
}

bool InitializationSequence::isAmbiguous() const {
  if (!Failed())
    return false;

  switch (getFailureKind()) {
  case FK_TooManyInitsForReference:
  case FK_ArrayNeedsInitList:
  case FK_ArrayNeedsInitListOrStringLiteral:
  case FK_ArrayNeedsInitListOrWideStringLiteral:
  case FK_NarrowStringIntoWideCharArray:
  case FK_WideStringIntoCharArray:
  case FK_IncompatWideStringIntoWideChar:
  case FK_AddressOfOverloadFailed: // FIXME: Could do better
  case FK_NonConstLValueReferenceBindingToTemporary:
  case FK_NonConstLValueReferenceBindingToUnrelated:
  case FK_RValueReferenceBindingToLValue:
  case FK_ReferenceInitDropsQualifiers:
  case FK_ReferenceInitFailed:
  case FK_ConversionFailed:
  case FK_ConversionFromPropertyFailed:
  case FK_TooManyInitsForScalar:
  case FK_ReferenceBindingToInitList:
  case FK_InitListBadDestinationType:
  case FK_DefaultInitOfConst:
  case FK_Incomplete:
  case FK_ArrayTypeMismatch:
  case FK_NonConstantArrayInit:
  case FK_ListInitializationFailed:
  case FK_VariableLengthArrayHasInitializer:
  case FK_PlaceholderType:
  case FK_ExplicitConstructor:
    return false;

  case FK_ReferenceInitOverloadFailed:
  case FK_UserConversionOverloadFailed:
  case FK_ConstructorOverloadFailed:
  case FK_ListConstructorOverloadFailed:
    return FailedOverloadResult == OR_Ambiguous;
  }

  llvm_unreachable("Invalid EntityKind!");
}

bool InitializationSequence::isConstructorInitialization() const {
  return !Steps.empty() && Steps.back().Kind == SK_ConstructorInitialization;
}

void
InitializationSequence
::AddAddressOverloadResolutionStep(FunctionDecl *Function,
                                   DeclAccessPair Found,
                                   bool HadMultipleCandidates) {
  Step S;
  S.Kind = SK_ResolveAddressOfOverloadedFunction;
  S.Type = Function->getType();
  S.Function.HadMultipleCandidates = HadMultipleCandidates;
  S.Function.Function = Function;
  S.Function.FoundDecl = Found;
  Steps.push_back(S);
}

void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
                                                      ExprValueKind VK) {
  Step S;
  switch (VK) {
  case VK_RValue: S.Kind = SK_CastDerivedToBaseRValue; break;
  case VK_XValue: S.Kind = SK_CastDerivedToBaseXValue; break;
  case VK_LValue: S.Kind = SK_CastDerivedToBaseLValue; break;
  }
  S.Type = BaseType;
  Steps.push_back(S);
}

void InitializationSequence::AddReferenceBindingStep(QualType T,
                                                     bool BindingTemporary) {
  Step S;
  S.Kind = BindingTemporary? SK_BindReferenceToTemporary : SK_BindReference;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddExtraneousCopyToTemporary(QualType T) {
  Step S;
  S.Kind = SK_ExtraneousCopyToTemporary;
  S.Type = T;
  Steps.push_back(S);
}

void
InitializationSequence::AddUserConversionStep(FunctionDecl *Function,
                                              DeclAccessPair FoundDecl,
                                              QualType T,
                                              bool HadMultipleCandidates) {
  Step S;
  S.Kind = SK_UserConversion;
  S.Type = T;
  S.Function.HadMultipleCandidates = HadMultipleCandidates;
  S.Function.Function = Function;
  S.Function.FoundDecl = FoundDecl;
  Steps.push_back(S);
}

void InitializationSequence::AddQualificationConversionStep(QualType Ty,
                                                            ExprValueKind VK) {
  Step S;
  S.Kind = SK_QualificationConversionRValue; // work around a gcc warning
  switch (VK) {
  case VK_RValue: S.Kind = SK_QualificationConversionRValue; break;
  case VK_XValue: S.Kind = SK_QualificationConversionXValue; break;
  case VK_LValue: S.Kind = SK_QualificationConversionLValue; break;
  }
  S.Type = Ty;
  Steps.push_back(S);
}

void InitializationSequence::AddAtomicConversionStep(QualType Ty) {
  Step S;
  S.Kind = SK_AtomicConversion;
  S.Type = Ty;
  Steps.push_back(S);
}

void InitializationSequence::AddLValueToRValueStep(QualType Ty) {
  assert(!Ty.hasQualifiers() && "rvalues may not have qualifiers");

  Step S;
  S.Kind = SK_LValueToRValue;
  S.Type = Ty;
  Steps.push_back(S);
}

void InitializationSequence::AddConversionSequenceStep(
    const ImplicitConversionSequence &ICS, QualType T,
    bool TopLevelOfInitList) {
  Step S;
  S.Kind = TopLevelOfInitList ? SK_ConversionSequenceNoNarrowing
                              : SK_ConversionSequence;
  S.Type = T;
  S.ICS = new ImplicitConversionSequence(ICS);
  Steps.push_back(S);
}

void InitializationSequence::AddListInitializationStep(QualType T) {
  Step S;
  S.Kind = SK_ListInitialization;
  S.Type = T;
  Steps.push_back(S);
}

void
InitializationSequence
::AddConstructorInitializationStep(CXXConstructorDecl *Constructor,
                                   AccessSpecifier Access,
                                   QualType T,
                                   bool HadMultipleCandidates,
                                   bool FromInitList, bool AsInitList) {
  Step S;
  S.Kind = FromInitList ? AsInitList ? SK_StdInitializerListConstructorCall
                                     : SK_ConstructorInitializationFromList
                        : SK_ConstructorInitialization;
  S.Type = T;
  S.Function.HadMultipleCandidates = HadMultipleCandidates;
  S.Function.Function = Constructor;
  S.Function.FoundDecl = DeclAccessPair::make(Constructor, Access);
  Steps.push_back(S);
}

void InitializationSequence::AddZeroInitializationStep(QualType T) {
  Step S;
  S.Kind = SK_ZeroInitialization;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddCAssignmentStep(QualType T) {
  Step S;
  S.Kind = SK_CAssignment;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddStringInitStep(QualType T) {
  Step S;
  S.Kind = SK_StringInit;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddObjCObjectConversionStep(QualType T) {
  Step S;
  S.Kind = SK_ObjCObjectConversion;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddArrayInitStep(QualType T) {
  Step S;
  S.Kind = SK_ArrayInit;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddParenthesizedArrayInitStep(QualType T) {
  Step S;
  S.Kind = SK_ParenthesizedArrayInit;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddPassByIndirectCopyRestoreStep(QualType type,
                                                              bool shouldCopy) {
  Step s;
  s.Kind = (shouldCopy ? SK_PassByIndirectCopyRestore
                       : SK_PassByIndirectRestore);
  s.Type = type;
  Steps.push_back(s);
}

void InitializationSequence::AddProduceObjCObjectStep(QualType T) {
  Step S;
  S.Kind = SK_ProduceObjCObject;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddStdInitializerListConstructionStep(QualType T) {
  Step S;
  S.Kind = SK_StdInitializerList;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddOCLSamplerInitStep(QualType T) {
  Step S;
  S.Kind = SK_OCLSamplerInit;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::AddOCLZeroEventStep(QualType T) {
  Step S;
  S.Kind = SK_OCLZeroEvent;
  S.Type = T;
  Steps.push_back(S);
}

void InitializationSequence::RewrapReferenceInitList(QualType T,
                                                     InitListExpr *Syntactic) {
  assert(Syntactic->getNumInits() == 1 &&
         "Can only rewrap trivial init lists.");
  Step S;
  S.Kind = SK_UnwrapInitList;
  S.Type = Syntactic->getInit(0)->getType();
  Steps.insert(Steps.begin(), S);

  S.Kind = SK_RewrapInitList;
  S.Type = T;
  S.WrappingSyntacticList = Syntactic;
  Steps.push_back(S);
}

void InitializationSequence::SetOverloadFailure(FailureKind Failure,
                                                OverloadingResult Result) {
  setSequenceKind(FailedSequence);
  this->Failure = Failure;
  this->FailedOverloadResult = Result;
}
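// Illustrative sketch (added commentary, not from the original source): an
// InitializationSequence is an ordered list of the steps appended by the
// helpers above. For example, for
//
//   const int &r = 1.0;
//
// the computed sequence would contain, roughly, a conversion step
// (SK_ConversionSequence: double -> int) followed by a reference-binding
// step (SK_BindReferenceToTemporary), which are later materialized into
// AST nodes.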
//===----------------------------------------------------------------------===//
// Attempt initialization
//===----------------------------------------------------------------------===//

/// Tries to add a zero initializer. Returns true if that worked.
static bool
maybeRecoverWithZeroInitialization(Sema &S, InitializationSequence &Sequence,
                                   const InitializedEntity &Entity) {
  if (Entity.getKind() != InitializedEntity::EK_Variable)
    return false;

  VarDecl *VD = cast<VarDecl>(Entity.getDecl());
  if (VD->getInit() || VD->getLocEnd().isMacroID())
    return false;

  QualType VariableTy = VD->getType().getCanonicalType();
  SourceLocation Loc = S.getLocForEndOfToken(VD->getLocEnd());
  std::string Init = S.getFixItZeroInitializerForType(VariableTy, Loc);
  if (!Init.empty()) {
    Sequence.AddZeroInitializationStep(Entity.getType());
    Sequence.SetZeroInitializationFixit(Init, Loc);
    return true;
  }
  return false;
}

static void MaybeProduceObjCObject(Sema &S,
                                   InitializationSequence &Sequence,
                                   const InitializedEntity &Entity) {
  if (!S.getLangOpts().ObjCAutoRefCount) return;

  /// When initializing a parameter, produce the value if it's marked
  /// __attribute__((ns_consumed)).
  if (Entity.isParameterKind()) {
    if (!Entity.isParameterConsumed())
      return;

    assert(Entity.getType()->isObjCRetainableType() &&
           "consuming an object of unretainable type?");
    Sequence.AddProduceObjCObjectStep(Entity.getType());

  /// When initializing a return value, if the return type is a
  /// retainable type, then returns need to immediately retain the
  /// object. If an autorelease is required, it will be done at the
  /// last instant.
  } else if (Entity.getKind() == InitializedEntity::EK_Result) {
    if (!Entity.getType()->isObjCRetainableType())
      return;

    Sequence.AddProduceObjCObjectStep(Entity.getType());
  }
}

static void TryListInitialization(Sema &S,
                                  const InitializedEntity &Entity,
                                  const InitializationKind &Kind,
                                  InitListExpr *InitList,
                                  InitializationSequence &Sequence);

/// \brief When initializing from init list via constructor, handle
///        initialization of an object of type std::initializer_list<T>.
///
/// \return true if we have handled initialization of an object of type
///         std::initializer_list<T>, false otherwise.
static bool TryInitializerListConstruction(Sema &S,
                                           InitListExpr *List,
                                           QualType DestType,
                                           InitializationSequence &Sequence) {
  QualType E;
  if (!S.isStdInitializerList(DestType, &E))
    return false;

  if (S.RequireCompleteType(List->getExprLoc(), E, 0)) {
    Sequence.setIncompleteTypeFailure(E);
    return true;
  }

  // Try initializing a temporary array from the init list.
  QualType ArrayType = S.Context.getConstantArrayType(
      E.withConst(),
      llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
                  List->getNumInits()),
      clang::ArrayType::Normal, 0);
  InitializedEntity HiddenArray =
      InitializedEntity::InitializeTemporary(ArrayType);
  InitializationKind Kind =
      InitializationKind::CreateDirectList(List->getExprLoc());
  TryListInitialization(S, HiddenArray, Kind, List, Sequence);
  if (Sequence)
    Sequence.AddStdInitializerListConstructionStep(DestType);
  return true;
}
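// Illustrative sketch (added commentary, not from the original source):
// TryInitializerListConstruction models, e.g.:
//
//   std::initializer_list<int> il = { 1, 2, 3 };
//
// by list-initializing a hidden temporary 'const int [3]' array from the
// braced list and then constructing the initializer_list from that array.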
static OverloadingResult
ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
                           MultiExprArg Args,
                           OverloadCandidateSet &CandidateSet,
                           DeclContext::lookup_result Ctors,
                           OverloadCandidateSet::iterator &Best,
                           bool CopyInitializing, bool AllowExplicit,
                           bool OnlyListConstructors, bool IsListInit) {
  CandidateSet.clear();

  for (NamedDecl *D : Ctors) {
    DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
    bool SuppressUserConversions = false;

    // Find the constructor (which may be a template).
    CXXConstructorDecl *Constructor = nullptr;
    FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
    if (ConstructorTmpl)
      Constructor = cast<CXXConstructorDecl>(
                                          ConstructorTmpl->getTemplatedDecl());
    else {
      Constructor = cast<CXXConstructorDecl>(D);

      // C++11 [over.best.ics]p4:
      //   ... and the constructor or user-defined conversion function is a
      //   candidate by
      //   - 13.3.1.3, when the argument is the temporary in the second step
      //     of a class copy-initialization, or
      //   - 13.3.1.4, 13.3.1.5, or 13.3.1.6 (in all cases),
      //   user-defined conversion sequences are not considered.
      // FIXME: This breaks backward compatibility, e.g. PR12117. As a
      //        temporary fix, let's re-instate the third bullet above until
      //        there is a resolution in the standard, i.e.,
      //   - 13.3.1.7 when the initializer list has exactly one element that is
      //     itself an initializer list and a conversion to some class X or
      //     reference to (possibly cv-qualified) X is considered for the first
      //     parameter of a constructor of X.
      if ((CopyInitializing ||
           (IsListInit && Args.size() == 1 && isa<InitListExpr>(Args[0]))) &&
          Constructor->isCopyOrMoveConstructor())
        SuppressUserConversions = true;
    }

    if (!Constructor->isInvalidDecl() &&
        (AllowExplicit || !Constructor->isExplicit()) &&
        (!OnlyListConstructors || S.isInitListConstructor(Constructor))) {
      if (ConstructorTmpl)
        S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
                                       /*ExplicitArgs*/ nullptr, Args,
                                       CandidateSet, SuppressUserConversions);
      else {
        // C++ [over.match.copy]p1:
        //   - When initializing a temporary to be bound to the first parameter
        //     of a constructor that takes a reference to possibly cv-qualified
        //     T as its first argument, called with a single argument in the
        //     context of direct-initialization, explicit conversion functions
        //     are also considered.
        bool AllowExplicitConv = AllowExplicit && !CopyInitializing &&
                                 Args.size() == 1 &&
                                 Constructor->isCopyOrMoveConstructor();
        S.AddOverloadCandidate(Constructor, FoundDecl, Args, CandidateSet,
                               SuppressUserConversions,
                               /*PartialOverloading=*/false,
                               /*AllowExplicit=*/AllowExplicitConv);
      }
    }
  }

  // Perform overload resolution and return the result.
  return CandidateSet.BestViableFunction(S, DeclLoc, Best);
}
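// Illustrative sketch (added commentary, not from the original source): the
// SuppressUserConversions flag above prevents a second user-defined
// conversion when a copy or move constructor is considered for the temporary
// in copy-initialization, e.g.:
//
//   struct X { X(int); X(const X&); };
//   X x = 1;  // X(int) converts; when the copy constructor is then
//             // considered, no further user conversion is allowed on
//             // its argument.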
/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
/// enumerates the constructors of the initialized entity and performs overload
/// resolution to select the best.
/// \param IsListInit     Is this list-initialization?
/// \param IsInitListCopy Is this non-list-initialization resulting from a
///                       list-initialization from {x} where x is the same
///                       type as the entity?
static void TryConstructorInitialization(Sema &S,
                                         const InitializedEntity &Entity,
                                         const InitializationKind &Kind,
                                         MultiExprArg Args, QualType DestType,
                                         InitializationSequence &Sequence,
                                         bool IsListInit = false,
                                         bool IsInitListCopy = false) {
  assert((!IsListInit || (Args.size() == 1 && isa<InitListExpr>(Args[0]))) &&
         "IsListInit must come with a single initializer list argument.");

  // The type we're constructing needs to be complete.
  if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
    Sequence.setIncompleteTypeFailure(DestType);
    return;
  }

  const RecordType *DestRecordType = DestType->getAs<RecordType>();
  assert(DestRecordType && "Constructor initialization requires record type");
  CXXRecordDecl *DestRecordDecl
    = cast<CXXRecordDecl>(DestRecordType->getDecl());

  // Build the candidate set directly in the initialization sequence
  // structure, so that it will persist if we fail.
  OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();

  // Determine whether we are allowed to call explicit constructors or
  // explicit conversion operators.
  bool AllowExplicit = Kind.AllowExplicit() || IsListInit;
  bool CopyInitialization = Kind.getKind() == InitializationKind::IK_Copy;

  //   - Otherwise, if T is a class type, constructors are considered. The
  //     applicable constructors are enumerated, and the best one is chosen
  //     through overload resolution.
  DeclContext::lookup_result Ctors = S.LookupConstructors(DestRecordDecl);

  OverloadingResult Result = OR_No_Viable_Function;
  OverloadCandidateSet::iterator Best;
  bool AsInitializerList = false;

  // C++11 [over.match.list]p1, per DR1467:
  //   When objects of non-aggregate type T are list-initialized, such that
  //   8.5.4 [dcl.init.list] specifies that overload resolution is performed
  //   according to the rules in this section, overload resolution selects
  //   the constructor in two phases:
  //
  //   - Initially, the candidate functions are the initializer-list
  //     constructors of the class T and the argument list consists of the
  //     initializer list as a single argument.
  if (IsListInit) {
    InitListExpr *ILE = cast<InitListExpr>(Args[0]);
    AsInitializerList = true;

    // If the initializer list has no elements and T has a default constructor,
    // the first phase is omitted.
    if (ILE->getNumInits() != 0 || !DestRecordDecl->hasDefaultConstructor())
      Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
                                          CandidateSet, Ctors, Best,
                                          CopyInitialization, AllowExplicit,
                                          /*OnlyListConstructor=*/true,
                                          IsListInit);

    // Time to unwrap the init list.
    Args = MultiExprArg(ILE->getInits(), ILE->getNumInits());
  }

  // C++11 [over.match.list]p1:
  //   - If no viable initializer-list constructor is found, overload
  //     resolution is performed again, where the candidate functions are all
  //     the constructors of the class T and the argument list consists of the
  //     elements of the initializer list.
  if (Result == OR_No_Viable_Function) {
    AsInitializerList = false;
    Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
                                        CandidateSet, Ctors, Best,
                                        CopyInitialization, AllowExplicit,
                                        /*OnlyListConstructors=*/false,
                                        IsListInit);
  }
  if (Result) {
    Sequence.SetOverloadFailure(IsListInit ?
                      InitializationSequence::FK_ListConstructorOverloadFailed :
                      InitializationSequence::FK_ConstructorOverloadFailed,
                                Result);
    return;
  }

  // C++11 [dcl.init]p6:
  //   If a program calls for the default initialization of an object
  //   of a const-qualified type T, T shall be a class type with a
  //   user-provided default constructor.
  if (Kind.getKind() == InitializationKind::IK_Default &&
      Entity.getType().isConstQualified() &&
      !cast<CXXConstructorDecl>(Best->Function)->isUserProvided()) {
    if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity))
      Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
    return;
  }

  // C++11 [over.match.list]p1:
  //   In copy-list-initialization, if an explicit constructor is chosen, the
  //   initializer is ill-formed.
  CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
  if (IsListInit && !Kind.AllowExplicit() && CtorDecl->isExplicit()) {
    Sequence.SetFailed(InitializationSequence::FK_ExplicitConstructor);
    return;
  }
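  // Illustrative sketch (added commentary, not from the original source): the
  // two-phase resolution and the explicit-constructor rule above correspond
  // to, e.g.:
  //
  //   struct V { V(std::initializer_list<int>); V(int, int); };
  //   V a{1, 2};   // phase 1: the initializer-list constructor wins
  //
  //   struct E { explicit E(int); };
  //   E e = {1};   // error: chosen constructor is explicit in
  //                // copy-list-initialization
  //   E f{1};      // OK: direct-list-initialization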
  // Add the constructor initialization step. Any cv-qualification conversion
  // is subsumed by the initialization.
  bool HadMultipleCandidates = (CandidateSet.size() > 1);
  Sequence.AddConstructorInitializationStep(
      CtorDecl, Best->FoundDecl.getAccess(), DestType, HadMultipleCandidates,
      IsListInit | IsInitListCopy, AsInitializerList);
}

static bool
ResolveOverloadedFunctionForReferenceBinding(Sema &S,
                                             Expr *Initializer,
                                             QualType &SourceType,
                                             QualType &UnqualifiedSourceType,
                                             QualType UnqualifiedTargetType,
                                             InitializationSequence &Sequence) {
  if (S.Context.getCanonicalType(UnqualifiedSourceType) ==
        S.Context.OverloadTy) {
    DeclAccessPair Found;
    bool HadMultipleCandidates = false;
    if (FunctionDecl *Fn
          = S.ResolveAddressOfOverloadedFunction(Initializer,
                                                 UnqualifiedTargetType,
                                                 false, Found,
                                                 &HadMultipleCandidates)) {
      Sequence.AddAddressOverloadResolutionStep(Fn, Found,
                                                HadMultipleCandidates);
      SourceType = Fn->getType();
      UnqualifiedSourceType = SourceType.getUnqualifiedType();
    } else if (!UnqualifiedTargetType->isRecordType()) {
      Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
      return true;
    }
  }
  return false;
}

static void TryReferenceInitializationCore(Sema &S,
                                           const InitializedEntity &Entity,
                                           const InitializationKind &Kind,
                                           Expr *Initializer,
                                           QualType cv1T1, QualType T1,
                                           Qualifiers T1Quals,
                                           QualType cv2T2, QualType T2,
                                           Qualifiers T2Quals,
                                           InitializationSequence &Sequence);

static void TryValueInitialization(Sema &S,
                                   const InitializedEntity &Entity,
                                   const InitializationKind &Kind,
                                   InitializationSequence &Sequence,
                                   InitListExpr *InitList = nullptr);

/// \brief Attempt list initialization of a reference.
static void TryReferenceListInitialization(Sema &S,
                                           const InitializedEntity &Entity,
                                           const InitializationKind &Kind,
                                           InitListExpr *InitList,
                                           InitializationSequence &Sequence) {
  // First, catch C++03 where this isn't possible.
  if (!S.getLangOpts().CPlusPlus11) {
    Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList);
    return;
  }
  // Can't reference initialize a compound literal.
  if (Entity.getKind() == InitializedEntity::EK_CompoundLiteralInit) {
    Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList);
    return;
  }

  QualType DestType = Entity.getType();
  QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
  Qualifiers T1Quals;
  QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);

  // Reference initialization via an initializer list works thus:
  // If the initializer list consists of a single element that is
  // reference-related to the referenced type, bind directly to that element
  // (possibly creating temporaries).
  // Otherwise, initialize a temporary with the initializer list and
  // bind to that.
  if (InitList->getNumInits() == 1) {
    Expr *Initializer = InitList->getInit(0);
    QualType cv2T2 = Initializer->getType();
    Qualifiers T2Quals;
    QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);

    // If this fails, creating a temporary wouldn't work either.
    if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2,
                                                     T2, T1, Sequence))
      return;

    SourceLocation DeclLoc = Initializer->getLocStart();
    bool dummy1, dummy2, dummy3;
    Sema::ReferenceCompareResult RefRelationship
      = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, dummy1,
                                       dummy2, dummy3);
    if (RefRelationship >= Sema::Ref_Related) {
      // Try to bind the reference here.
      TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
                                     T1Quals, cv2T2, T2, T2Quals, Sequence);
      if (Sequence)
        Sequence.RewrapReferenceInitList(cv1T1, InitList);
      return;
    }
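    // Illustrative sketch (added commentary, not from the original source):
    // the single-element direct binding above covers, e.g.:
    //
    //   int i = 0;
    //   int &r = { i };   // one element, reference-related to 'int':
    //                     // bind directly to 'i'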
    // Update the initializer if we've resolved an overloaded function.
    if (Sequence.step_begin() != Sequence.step_end())
      Sequence.RewrapReferenceInitList(cv1T1, InitList);
  }

  // Not reference-related. Create a temporary and bind to that.
  InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);

  TryListInitialization(S, TempEntity, Kind, InitList, Sequence);
  if (Sequence) {
    if (DestType->isRValueReferenceType() ||
        (T1Quals.hasConst() && !T1Quals.hasVolatile()))
      Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
    else
      Sequence.SetFailed(
          InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
  }
}

/// \brief Attempt list initialization (C++0x [dcl.init.list])
static void TryListInitialization(Sema &S,
                                  const InitializedEntity &Entity,
                                  const InitializationKind &Kind,
                                  InitListExpr *InitList,
                                  InitializationSequence &Sequence) {
  QualType DestType = Entity.getType();

  // C++ doesn't allow scalar initialization with more than one argument.
  // But C99 complex numbers are scalars and it makes sense there.
  if (S.getLangOpts().CPlusPlus && DestType->isScalarType() &&
      !DestType->isAnyComplexType() && InitList->getNumInits() > 1) {
    Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForScalar);
    return;
  }
  if (DestType->isReferenceType()) {
    TryReferenceListInitialization(S, Entity, Kind, InitList, Sequence);
    return;
  }

  if (DestType->isRecordType() &&
      S.RequireCompleteType(InitList->getLocStart(), DestType, 0)) {
    Sequence.setIncompleteTypeFailure(DestType);
    return;
  }

  // C++11 [dcl.init.list]p3, per DR1467:
  // - If T is a class type and the initializer list has a single element of
  //   type cv U, where U is T or a class derived from T, the object is
  //   initialized from that element (by copy-initialization for
  //   copy-list-initialization, or by direct-initialization for
  //   direct-list-initialization).
  // - Otherwise, if T is a character array and the initializer list has a
  //   single element that is an appropriately-typed string literal
  //   (8.5.2 [dcl.init.string]), initialization is performed as described
  //   in that section.
  // - Otherwise, if T is an aggregate, [...] (continue below).
  if (S.getLangOpts().CPlusPlus11 && InitList->getNumInits() == 1) {
    if (DestType->isRecordType()) {
      QualType InitType = InitList->getInit(0)->getType();
      if (S.Context.hasSameUnqualifiedType(InitType, DestType) ||
          S.IsDerivedFrom(InitType, DestType)) {
        Expr *InitAsExpr = InitList->getInit(0);
        TryConstructorInitialization(S, Entity, Kind, InitAsExpr, DestType,
                                     Sequence, /*InitListSyntax*/ false,
                                     /*IsInitListCopy*/ true);
        return;
      }
    }
    if (const ArrayType *DestAT = S.Context.getAsArrayType(DestType)) {
      Expr *SubInit[1] = {InitList->getInit(0)};
      if (!isa<VariableArrayType>(DestAT) &&
          IsStringInit(SubInit[0], DestAT, S.Context) == SIF_None) {
        InitializationKind SubKind =
            Kind.getKind() == InitializationKind::IK_DirectList
                ? InitializationKind::CreateDirect(Kind.getLocation(),
                                                   InitList->getLBraceLoc(),
                                                   InitList->getRBraceLoc())
                : Kind;
        Sequence.InitializeFrom(S, Entity, SubKind, SubInit,
                                /*TopLevelOfInitList*/ true);

        // TryStringLiteralInitialization() (in InitializeFrom()) will fail if
        // the element is not an appropriately-typed string literal, in which
        // case we should proceed as in C++11 (below).
        if (Sequence) {
          Sequence.RewrapReferenceInitList(Entity.getType(), InitList);
          return;
        }
      }
    }
  }
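  // Illustrative sketch (added commentary, not from the original source): the
  // two DR1467 cases handled above are:
  //
  //   struct B {}; struct D : B {};
  //   D d;
  //   B b{d};             // single element of derived type: initialize from
  //                       // the element itself, not as an aggregate
  //   char s[4]{"abc"};   // single string-literal element: string init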
  // C++11 [dcl.init.list]p3:
  //   - If T is an aggregate, aggregate initialization is performed.
  if (DestType->isRecordType() && !DestType->isAggregateType()) {
    if (S.getLangOpts().CPlusPlus11) {
      //   - Otherwise, if the initializer list has no elements and T is a
      //     class type with a default constructor, the object is
      //     value-initialized.
      if (InitList->getNumInits() == 0) {
        CXXRecordDecl *RD = DestType->getAsCXXRecordDecl();
        if (RD->hasDefaultConstructor()) {
          TryValueInitialization(S, Entity, Kind, Sequence, InitList);
          return;
        }
      }

      //   - Otherwise, if T is a specialization of std::initializer_list<E>,
      //     an initializer_list object constructed [...]
      if (TryInitializerListConstruction(S, InitList, DestType, Sequence))
        return;

      //   - Otherwise, if T is a class type, constructors are considered.
      Expr *InitListAsExpr = InitList;
      TryConstructorInitialization(S, Entity, Kind, InitListAsExpr, DestType,
                                   Sequence, /*InitListSyntax*/ true);
    } else
      Sequence.SetFailed(
          InitializationSequence::FK_InitListBadDestinationType);
    return;
  }

  if (S.getLangOpts().CPlusPlus && !DestType->isAggregateType() &&
      InitList->getNumInits() == 1 &&
      InitList->getInit(0)->getType()->isRecordType()) {
    //   - Otherwise, if the initializer list has a single element of type E
    //     [...references are handled above...], the object or reference is
    //     initialized from that element (by copy-initialization for
    //     copy-list-initialization, or by direct-initialization for
    //     direct-list-initialization); if a narrowing conversion is required
    //     to convert the element to T, the program is ill-formed.
    //
    // Per core-24034, this is direct-initialization if we were performing
    // direct-list-initialization and copy-initialization otherwise.
    // We can't use InitListChecker for this, because it always performs
    // copy-initialization. This only matters if we might use an 'explicit'
    // conversion operator, so we only need to handle the cases where the
    // source is of record type.
    InitializationKind SubKind =
        Kind.getKind() == InitializationKind::IK_DirectList
            ? InitializationKind::CreateDirect(Kind.getLocation(),
                                               InitList->getLBraceLoc(),
                                               InitList->getRBraceLoc())
            : Kind;
    Expr *SubInit[1] = { InitList->getInit(0) };
    Sequence.InitializeFrom(S, Entity, SubKind, SubInit,
                            /*TopLevelOfInitList*/true);
    if (Sequence)
      Sequence.RewrapReferenceInitList(Entity.getType(), InitList);
    return;
  }

  InitListChecker CheckInitList(S, Entity, Kind, InitList, // HLSL Change - add Kind
                                DestType, /*VerifyOnly=*/true);
  if (CheckInitList.HadError()) {
    Sequence.SetFailed(InitializationSequence::FK_ListInitializationFailed);
    return;
  }

  // Add the list initialization step with the built init list.
  Sequence.AddListInitializationStep(DestType);
}
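// Illustrative sketch (added commentary, not from the original source): for
// non-aggregate classes the logic above behaves as in:
//
//   struct C { C() = default; C(int); };
//   C a{};    // empty list + default constructor: value-initialization
//   C b{1};   // constructors are considered: selects C(int)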
/// \brief Try a reference initialization that involves calling a conversion
/// function.
static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
                                             const InitializedEntity &Entity,
                                             const InitializationKind &Kind,
                                             Expr *Initializer,
                                             bool AllowRValues,
                                             InitializationSequence &Sequence) {
  QualType DestType = Entity.getType();
  QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
  QualType T1 = cv1T1.getUnqualifiedType();
  QualType cv2T2 = Initializer->getType();
  QualType T2 = cv2T2.getUnqualifiedType();

  bool DerivedToBase;
  bool ObjCConversion;
  bool ObjCLifetimeConversion;
  assert(!S.CompareReferenceRelationship(Initializer->getLocStart(),
                                         T1, T2, DerivedToBase,
                                         ObjCConversion,
                                         ObjCLifetimeConversion) &&
         "Must have incompatible references when binding via conversion");
  (void)DerivedToBase;
  (void)ObjCConversion;
  (void)ObjCLifetimeConversion;

  // Build the candidate set directly in the initialization sequence
  // structure, so that it will persist if we fail.
  OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
  CandidateSet.clear();

  // Determine whether we are allowed to call explicit constructors or
  // explicit conversion operators.
  bool AllowExplicit = Kind.AllowExplicit();
  bool AllowExplicitConvs = Kind.allowExplicitConversionFunctionsInRefBinding();

  const RecordType *T1RecordType = nullptr;
  if (AllowRValues && (T1RecordType = T1->getAs<RecordType>()) &&
      !S.RequireCompleteType(Kind.getLocation(), T1, 0)) {
    // The type we're converting to is a class type. Enumerate its constructors
    // to see if there is a suitable conversion.
    CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl());

    for (NamedDecl *D : S.LookupConstructors(T1RecordDecl)) {
      DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());

      // Find the constructor (which may be a template).
      CXXConstructorDecl *Constructor = nullptr;
      FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
      if (ConstructorTmpl)
        Constructor = cast<CXXConstructorDecl>(
                                          ConstructorTmpl->getTemplatedDecl());
      else
        Constructor = cast<CXXConstructorDecl>(D);

      if (!Constructor->isInvalidDecl() &&
          Constructor->isConvertingConstructor(AllowExplicit)) {
        if (ConstructorTmpl)
          S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
                                         /*ExplicitArgs*/ nullptr,
                                         Initializer, CandidateSet,
                                         /*SuppressUserConversions=*/true);
        else
          S.AddOverloadCandidate(Constructor, FoundDecl,
                                 Initializer, CandidateSet,
                                 /*SuppressUserConversions=*/true);
      }
    }
  }
  if (T1RecordType && T1RecordType->getDecl()->isInvalidDecl())
    return OR_No_Viable_Function;

  const RecordType *T2RecordType = nullptr;
  if ((T2RecordType = T2->getAs<RecordType>()) &&
      !S.RequireCompleteType(Kind.getLocation(), T2, 0)) {
    // The type we're converting from is a class type, enumerate its conversion
    // functions.
    CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl());

    const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
    for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
      NamedDecl *D = *I;
      CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
      if (isa<UsingShadowDecl>(D))
        D = cast<UsingShadowDecl>(D)->getTargetDecl();

      FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
      CXXConversionDecl *Conv;
      if (ConvTemplate)
        Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
      else
        Conv = cast<CXXConversionDecl>(D);
      // If the conversion function doesn't return a reference type,
      // it can't be considered for this conversion unless we're allowed to
      // consider rvalues.
      // FIXME: Do we need to make sure that we only consider conversion
      //        candidates with reference-compatible results? That might be
      //        needed to break recursion.
      if ((AllowExplicitConvs || !Conv->isExplicit()) &&
          (AllowRValues || Conv->getConversionType()->isLValueReferenceType())){
        if (ConvTemplate)
          S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
                                           ActingDC, Initializer,
                                           DestType, CandidateSet,
                                           /*AllowObjCConversionOnExplicit=*/
                                             false);
        else
          S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
                                   Initializer, DestType, CandidateSet,
                                   /*AllowObjCConversionOnExplicit=*/false);
      }
    }
  }
  if (T2RecordType && T2RecordType->getDecl()->isInvalidDecl())
    return OR_No_Viable_Function;

  SourceLocation DeclLoc = Initializer->getLocStart();

  // Perform overload resolution. If it fails, return the failed result.
  OverloadCandidateSet::iterator Best;
  if (OverloadingResult Result
        = CandidateSet.BestViableFunction(S, DeclLoc, Best, true))
    return Result;

  FunctionDecl *Function = Best->Function;
  // This is the overload that will be used for this initialization step if we
  // use this initialization. Mark it as referenced.
  Function->setReferenced();

  // Compute the returned type of the conversion.
  if (isa<CXXConversionDecl>(Function))
    T2 = Function->getReturnType();
  else
    T2 = cv1T1;

  // Add the user-defined conversion step.
  bool HadMultipleCandidates = (CandidateSet.size() > 1);
  Sequence.AddUserConversionStep(Function, Best->FoundDecl,
                                 T2.getNonLValueExprType(S.Context),
                                 HadMultipleCandidates);

  // Determine whether we need to perform derived-to-base or
  // cv-qualification adjustments.
  ExprValueKind VK = VK_RValue;
  if (T2->isLValueReferenceType())
    VK = VK_LValue;
  else if (const RValueReferenceType *RRef = T2->getAs<RValueReferenceType>())
    VK = RRef->getPointeeType()->isFunctionType() ? VK_LValue : VK_XValue;

  bool NewDerivedToBase = false;
  bool NewObjCConversion = false;
  bool NewObjCLifetimeConversion = false;
  Sema::ReferenceCompareResult NewRefRelationship
    = S.CompareReferenceRelationship(DeclLoc, T1,
                                     T2.getNonLValueExprType(S.Context),
                                     NewDerivedToBase, NewObjCConversion,
                                     NewObjCLifetimeConversion);
  if (NewRefRelationship == Sema::Ref_Incompatible) {
    // If the type we've converted to is not reference-related to the
    // type we're looking for, then there is another conversion step
    // we need to perform to produce a temporary of the right type
    // that we'll be binding to.
    ImplicitConversionSequence ICS;
    ICS.setStandard();
    ICS.Standard = Best->FinalConversion;
    T2 = ICS.Standard.getToType(2);
    Sequence.AddConversionSequenceStep(ICS, T2);
  } else if (NewDerivedToBase)
    Sequence.AddDerivedToBaseCastStep(
        S.Context.getQualifiedType(T1,
                                   T2.getNonReferenceType().getQualifiers()),
        VK);
  else if (NewObjCConversion)
    Sequence.AddObjCObjectConversionStep(
        S.Context.getQualifiedType(T1,
                                   T2.getNonReferenceType().getQualifiers()));

  if (cv1T1.getQualifiers() != T2.getNonReferenceType().getQualifiers())
    Sequence.AddQualificationConversionStep(cv1T1, VK);

  Sequence.AddReferenceBindingStep(cv1T1, !T2->isReferenceType());
  return OR_Success;
}

static void CheckCXX98CompatAccessibleCopy(Sema &S,
                                           const InitializedEntity &Entity,
                                           Expr *CurInitExpr);

/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
static void TryReferenceInitialization(Sema &S,
                                       const InitializedEntity &Entity,
                                       const InitializationKind &Kind,
                                       Expr *Initializer,
                                       InitializationSequence &Sequence) {
  QualType DestType = Entity.getType();
  QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
  Qualifiers T1Quals;
  QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);
  QualType cv2T2 = Initializer->getType();
  Qualifiers T2Quals;
  QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);

  // If the initializer is the address of an overloaded function, try
  // to resolve the overloaded function. If all goes well, T2 is the
  // type of the resulting function.
  if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2, T2,
                                                   T1, Sequence))
    return;

  // Delegate everything else to a subfunction.
  TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
                                 T1Quals, cv2T2, T2, T2Quals, Sequence);
}

/// Converts the target of reference initialization so that it has the
/// appropriate qualifiers and value kind.
///
/// In this case, 'x' is an 'int' lvalue, but it needs to be 'const int'.
/// \code
///   int x;
///   const int &r = x;
/// \endcode
///
/// In this case the reference is binding to a bitfield lvalue, which isn't
/// valid. Perform a load to create a lifetime-extended temporary instead.
/// \code
///   const int &r = someStruct.bitfield;
/// \endcode
static ExprValueKind
convertQualifiersAndValueKindIfNecessary(Sema &S,
                                         InitializationSequence &Sequence,
                                         Expr *Initializer,
                                         QualType cv1T1,
                                         Qualifiers T1Quals,
                                         Qualifiers T2Quals,
                                         bool IsLValueRef) {
  bool IsNonAddressableType = Initializer->refersToBitField() ||
                              Initializer->refersToVectorElement();

  if (IsNonAddressableType) {
    // C++11 [dcl.init.ref]p5: [...] Otherwise, the reference shall be an
    // lvalue reference to a non-volatile const type, or the reference shall be
    // an rvalue reference.
    //
    // If not, we can't make a temporary and bind to that. Give up and allow
    // the error to be diagnosed later.
    if (IsLValueRef && (!T1Quals.hasConst() || T1Quals.hasVolatile())) {
      assert(Initializer->isGLValue());
      return Initializer->getValueKind();
    }

    // Force a load so we can materialize a temporary.
    Sequence.AddLValueToRValueStep(cv1T1.getUnqualifiedType());
    return VK_RValue;
  }

  if (T1Quals != T2Quals) {
    Sequence.AddQualificationConversionStep(cv1T1,
                                            Initializer->getValueKind());
  }

  return Initializer->getValueKind();
}
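// Illustrative sketch (added commentary, not from the original source):
// reference binding through a conversion function, as implemented above,
// covers cases such as:
//
//   struct S { operator int&(); };
//   S s;
//   int &r = s;   // binds via S::operator int&()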
/// \brief Reference initialization without resolving overloaded functions.
static void TryReferenceInitializationCore(Sema &S,
                                           const InitializedEntity &Entity,
                                           const InitializationKind &Kind,
                                           Expr *Initializer,
                                           QualType cv1T1, QualType T1,
                                           Qualifiers T1Quals,
                                           QualType cv2T2, QualType T2,
                                           Qualifiers T2Quals,
                                           InitializationSequence &Sequence) {
  QualType DestType = Entity.getType();
  SourceLocation DeclLoc = Initializer->getLocStart();
  // Compute some basic properties of the types and the initializer.
  bool isLValueRef = DestType->isLValueReferenceType();
  bool isRValueRef = !isLValueRef;
  bool DerivedToBase = false;
  bool ObjCConversion = false;
  bool ObjCLifetimeConversion = false;
  Expr::Classification InitCategory = Initializer->Classify(S.Context);
  Sema::ReferenceCompareResult RefRelationship
    = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, DerivedToBase,
                                     ObjCConversion, ObjCLifetimeConversion);

  // C++0x [dcl.init.ref]p5:
  //   A reference to type "cv1 T1" is initialized by an expression of type
  //   "cv2 T2" as follows:
  //
  //     - If the reference is an lvalue reference and the initializer
  //       expression
  // Note the analogous bullet points for rvalue refs to functions. Because
  // there are no function rvalues in C++, rvalue refs to functions are treated
  // like lvalue refs.
  OverloadingResult ConvOvlResult = OR_Success;
  bool T1Function = T1->isFunctionType();
  if (isLValueRef || T1Function) {
    if (InitCategory.isLValue() &&
        (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
         (Kind.isCStyleOrFunctionalCast() &&
          RefRelationship == Sema::Ref_Related))) {
      //   - is an lvalue (but is not a bit-field), and "cv1 T1" is
      //     reference-compatible with "cv2 T2," or
      //
      // Per C++ [over.best.ics]p2, we don't diagnose whether the lvalue is a
      // bit-field when we're determining whether the reference initialization
      // can occur. However, we do pay attention to whether it is a bit-field
      // to decide whether we're actually binding to a temporary created from
      // the bit-field.
      if (DerivedToBase)
        Sequence.AddDerivedToBaseCastStep(
                         S.Context.getQualifiedType(T1, T2Quals),
                         VK_LValue);
      else if (ObjCConversion)
        Sequence.AddObjCObjectConversionStep(
                                     S.Context.getQualifiedType(T1, T2Quals));

      ExprValueKind ValueKind =
        convertQualifiersAndValueKindIfNecessary(S, Sequence, Initializer,
                                                 cv1T1, T1Quals, T2Quals,
                                                 isLValueRef);
      Sequence.AddReferenceBindingStep(cv1T1, ValueKind == VK_RValue);
      return;
    }

    //     - has a class type (i.e., T2 is a class type), where T1 is not
    //       reference-related to T2, and can be implicitly converted to an
    //       lvalue of type "cv3 T3," where "cv1 T1" is reference-compatible
    //       with "cv3 T3" (this conversion is selected by enumerating the
    //       applicable conversion functions (13.3.1.6) and choosing the best
    //       one through overload resolution (13.3)),
    // If we have an rvalue ref to function type here, the rhs must be
    // an rvalue. DR1287 removed the "implicitly" here.
    if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
        (isLValueRef || InitCategory.isRValue())) {
      ConvOvlResult = TryRefInitWithConversionFunction(
          S, Entity, Kind, Initializer, /*AllowRValues*/isRValueRef, Sequence);
      if (ConvOvlResult == OR_Success)
        return;
      if (ConvOvlResult != OR_No_Viable_Function)
        Sequence.SetOverloadFailure(
            InitializationSequence::FK_ReferenceInitOverloadFailed,
            ConvOvlResult);
    }
  }
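  // Illustrative sketch (added commentary, not from the original source): the
  // lvalue cases handled above include:
  //
  //   int i = 0;
  //   int &a = i;         // lvalue, reference-compatible: binds directly
  //   const int &b = i;   // adds qualification, still a direct binding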
  if (isLValueRef && !(T1Quals.hasConst() && !T1Quals.hasVolatile())) {
    if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
      Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
    else if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
      Sequence.SetOverloadFailure(
          InitializationSequence::FK_ReferenceInitOverloadFailed,
          ConvOvlResult);
    else
      Sequence.SetFailed(InitCategory.isLValue()
        ? (RefRelationship == Sema::Ref_Related
             ? InitializationSequence::FK_ReferenceInitDropsQualifiers
             : InitializationSequence::FK_NonConstLValueReferenceBindingToUnrelated)
        : InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
    return;
  }

  //    - If the initializer expression
  //      - is an xvalue, class prvalue, array prvalue, or function lvalue and
  //        "cv1 T1" is reference-compatible with "cv2 T2"
  // Note: functions are handled below.
  if (!T1Function &&
      (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
       (Kind.isCStyleOrFunctionalCast() &&
        RefRelationship == Sema::Ref_Related)) &&
      (InitCategory.isXValue() ||
       (InitCategory.isPRValue() && T2->isRecordType()) ||
       (InitCategory.isPRValue() && T2->isArrayType()))) {
    ExprValueKind ValueKind = InitCategory.isXValue()? VK_XValue : VK_RValue;
    if (InitCategory.isPRValue() && T2->isRecordType()) {
      // The corresponding bullet in C++03 [dcl.init.ref]p5 gives the
      // compiler the freedom to perform a copy here or bind to the
      // object, while C++0x requires that we bind directly to the
      // object. Hence, we always bind to the object without making an
      // extra copy. However, C++03 requires that we check for the
      // presence of a suitable copy constructor:
      //
      //   The constructor that would be used to make the copy shall
      //   be callable whether or not the copy is actually done.
      if (!S.getLangOpts().CPlusPlus11 && !S.getLangOpts().MicrosoftExt)
        Sequence.AddExtraneousCopyToTemporary(cv2T2);
      else if (S.getLangOpts().CPlusPlus11)
        CheckCXX98CompatAccessibleCopy(S, Entity, Initializer);
    }

    if (DerivedToBase)
      Sequence.AddDerivedToBaseCastStep(S.Context.getQualifiedType(T1, T2Quals),
                                        ValueKind);
    else if (ObjCConversion)
      Sequence.AddObjCObjectConversionStep(
          S.Context.getQualifiedType(T1, T2Quals));

    ValueKind = convertQualifiersAndValueKindIfNecessary(S, Sequence,
                                                         Initializer, cv1T1,
                                                         T1Quals, T2Quals,
                                                         isLValueRef);
    Sequence.AddReferenceBindingStep(cv1T1, ValueKind == VK_RValue);
    return;
  }

  //       - has a class type (i.e., T2 is a class type), where T1 is not
  //         reference-related to T2, and can be implicitly converted to an
  //         xvalue, class prvalue, or function lvalue of type "cv3 T3",
  //         where "cv1 T1" is reference-compatible with "cv3 T3",
  //
  // DR1287 removes the "implicitly" here.
  if (T2->isRecordType()) {
    if (RefRelationship == Sema::Ref_Incompatible) {
      ConvOvlResult = TryRefInitWithConversionFunction(
          S, Entity, Kind, Initializer, /*AllowRValues*/true, Sequence);
      if (ConvOvlResult)
        Sequence.SetOverloadFailure(
            InitializationSequence::FK_ReferenceInitOverloadFailed,
            ConvOvlResult);

      return;
    }

    if ((RefRelationship == Sema::Ref_Compatible ||
         RefRelationship == Sema::Ref_Compatible_With_Added_Qualification) &&
        isRValueRef && InitCategory.isLValue()) {
      Sequence.SetFailed(
          InitializationSequence::FK_RValueReferenceBindingToLValue);
      return;
    }

    Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
    return;
  }

  //      - Otherwise, a temporary of type "cv1 T1" is created and initialized
  //        from the initializer expression using the rules for a non-reference
  //        copy-initialization (8.5).
  //        The reference is then bound to the temporary. [...]

  InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);

  // FIXME: Why do we use an implicit conversion here rather than trying
  // copy-initialization?
  ImplicitConversionSequence ICS
    = S.TryImplicitConversion(Initializer, TempEntity.getType(),
                              /*SuppressUserConversions=*/false,
                              /*AllowExplicit=*/false,
                              /*FIXME:InOverloadResolution=*/false,
                              /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
                              /*AllowObjCWritebackConversion=*/false);

  if (ICS.isBad()) {
    // FIXME: Use the conversion function set stored in ICS to turn
    // this into an overloading ambiguity diagnostic. However, we need
    // to keep that set as an OverloadCandidateSet rather than as some
    // other kind of set.
    if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
      Sequence.SetOverloadFailure(
          InitializationSequence::FK_ReferenceInitOverloadFailed,
          ConvOvlResult);
    else if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
      Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
    else
      Sequence.SetFailed(InitializationSequence::FK_ReferenceInitFailed);
    return;
  } else {
    Sequence.AddConversionSequenceStep(ICS, TempEntity.getType());
  }

  //        [...] If T1 is reference-related to T2, cv1 must be the
  //        same cv-qualification as, or greater cv-qualification
  //        than, cv2; otherwise, the program is ill-formed.
  unsigned T1CVRQuals = T1Quals.getCVRQualifiers();
  unsigned T2CVRQuals = T2Quals.getCVRQualifiers();
  if (RefRelationship == Sema::Ref_Related &&
      (T1CVRQuals | T2CVRQuals) != T1CVRQuals) {
    Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
    return;
  }

  //   [...] If T1 is reference-related to T2 and the reference is an rvalue
  //   reference, the initializer expression shall not be an lvalue.
  if (RefRelationship >= Sema::Ref_Related && !isLValueRef &&
      InitCategory.isLValue()) {
    Sequence.SetFailed(
        InitializationSequence::FK_RValueReferenceBindingToLValue);
    return;
  }

  Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
  return;
}

/// \brief Attempt character array initialization from a string literal
/// (C++ [dcl.init.string], C99 6.7.8).
static void TryStringLiteralInitialization(Sema &S,
                                           const InitializedEntity &Entity,
                                           const InitializationKind &Kind,
                                           Expr *Initializer,
                                           InitializationSequence &Sequence) {
  Sequence.AddStringInitStep(Entity.getType());
}

/// \brief Attempt value initialization (C++ [dcl.init]p7).
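///
/// For illustration:
/// \code
/// int n = int();   // scalar: zero-initialized to 0
/// struct S { int x; };
/// S s = S();       // no user-provided constructor: zero-initialized
/// \endcode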
static void TryValueInitialization(Sema &S,
                                   const InitializedEntity &Entity,
                                   const InitializationKind &Kind,
                                   InitializationSequence &Sequence,
                                   InitListExpr *InitList) {
  assert((!InitList || InitList->getNumInits() == 0) &&
         "Shouldn't use value-init for non-empty init lists");

  // C++98 [dcl.init]p5, C++11 [dcl.init]p7:
  //
  //   To value-initialize an object of type T means:
  QualType T = Entity.getType();

  //     -- if T is an array type, then each element is value-initialized;
  T = S.Context.getBaseElementType(T);

  if (const RecordType *RT = T->getAs<RecordType>()) {
    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
      bool NeedZeroInitialization = true;
      if (!S.getLangOpts().CPlusPlus11) {
        // C++98:
        // -- if T is a class type (clause 9) with a user-declared constructor
        //    (12.1), then the default constructor for T is called (and the
        //    initialization is ill-formed if T has no accessible default
        //    constructor);
        if (ClassDecl->hasUserDeclaredConstructor())
          NeedZeroInitialization = false;
      } else {
        // C++11:
        // -- if T is a class type (clause 9) with either no default
        //    constructor (12.1 [class.ctor]) or a default constructor that is
        //    user-provided or deleted, then the object is default-initialized;
        CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
        if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
          NeedZeroInitialization = false;
      }

      // -- if T is a (possibly cv-qualified) non-union class type without a
      //    user-provided or deleted default constructor, then the object is
      //    zero-initialized and, if T has a non-trivial default constructor,
      //    default-initialized;
      // The 'non-union' here was removed by DR1502. The 'non-trivial default
      // constructor' part was removed by DR1507.
      if (NeedZeroInitialization)
        Sequence.AddZeroInitializationStep(Entity.getType());

      // C++03:
      // -- if T is a non-union class type without a user-declared constructor,
      //    then every non-static data member and base class component of T is
      //    value-initialized;
      // [...] A program that calls for [...] value-initialization of an
      // entity of reference type is ill-formed.
      //
      // C++11 doesn't need this handling, because value-initialization does
      // not occur recursively there, and the implicit default constructor is
      // defined as deleted in the problematic cases.
      if (!S.getLangOpts().CPlusPlus11 &&
          ClassDecl->hasUninitializedReferenceMember()) {
        Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForReference);
        return;
      }

      // If this is list-value-initialization, pass the empty init list on when
      // building the constructor call. This affects the semantics of a few
      // things (such as whether an explicit default constructor can be
      // called).
      Expr *InitListAsExpr = InitList;
      MultiExprArg Args(&InitListAsExpr, InitList ? 1 : 0);
      bool InitListSyntax = InitList;

      return TryConstructorInitialization(S, Entity, Kind, Args, T, Sequence,
                                          InitListSyntax);
    }
  }

  Sequence.AddZeroInitializationStep(Entity.getType());
}

/// \brief Attempt default initialization (C++ [dcl.init]p6).
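///
/// For illustration:
/// \code
/// struct T { T(); };
/// T t;      // the default constructor is called
/// int m;    // no initialization is performed
/// \endcode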
static void TryDefaultInitialization(Sema &S,
                                     const InitializedEntity &Entity,
                                     const InitializationKind &Kind,
                                     InitializationSequence &Sequence) {
  assert(Kind.getKind() == InitializationKind::IK_Default);

  // C++ [dcl.init]p6:
  //   To default-initialize an object of type T means:
  //     - if T is an array type, each element is default-initialized;
  QualType DestType = S.Context.getBaseElementType(Entity.getType());

  //     - if T is a (possibly cv-qualified) class type (Clause 9), the default
  //       constructor for T is called (and the initialization is ill-formed if
  //       T has no accessible default constructor);
  if (DestType->isRecordType() && S.getLangOpts().CPlusPlus) {
    TryConstructorInitialization(S, Entity, Kind, None, DestType, Sequence);
    return;
  }

  //     - otherwise, no initialization is performed.

  //   If a program calls for the default initialization of an object of
  //   a const-qualified type T, T shall be a class type with a user-provided
  //   default constructor.
  if (DestType.isConstQualified() && S.getLangOpts().CPlusPlus) {
    if (!maybeRecoverWithZeroInitialization(S, Sequence, Entity))
      Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
    return;
  }

  // If the destination type has a lifetime property, zero-initialize it.
  if (DestType.getQualifiers().hasObjCLifetime()) {
    Sequence.AddZeroInitializationStep(Entity.getType());
    return;
  }
}

/// \brief Attempt a user-defined conversion between two types (C++
/// [dcl.init]), which enumerates all conversion functions and performs
/// overload resolution to select the best.
static void TryUserDefinedConversion(Sema &S,
                                     QualType DestType,
                                     const InitializationKind &Kind,
                                     Expr *Initializer,
                                     InitializationSequence &Sequence,
                                     bool TopLevelOfInitList) {
  assert(!DestType->isReferenceType() && "References are handled elsewhere");
  QualType SourceType = Initializer->getType();
  assert((DestType->isRecordType() || SourceType->isRecordType()) &&
         "Must have a class type to perform a user-defined conversion");

  // Build the candidate set directly in the initialization sequence
  // structure, so that it will persist if we fail.
  OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
  CandidateSet.clear();

  // Determine whether we are allowed to call explicit constructors or
  // explicit conversion operators.
  bool AllowExplicit = Kind.AllowExplicit();

  if (const RecordType *DestRecordType = DestType->getAs<RecordType>()) {
    // The type we're converting to is a class type. Enumerate its constructors
    // to see if there is a suitable conversion.
    CXXRecordDecl *DestRecordDecl
      = cast<CXXRecordDecl>(DestRecordType->getDecl());

    // Try to complete the type we're converting to.
    if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
      DeclContext::lookup_result R = S.LookupConstructors(DestRecordDecl);
      // The container holding the constructors can under certain conditions
      // be changed while iterating. To be safe we copy the lookup results
      // to a new container.
      SmallVector<NamedDecl*, 8> CopyOfCon(R.begin(), R.end());
      for (SmallVectorImpl<NamedDecl *>::iterator
             Con = CopyOfCon.begin(), ConEnd = CopyOfCon.end();
           Con != ConEnd; ++Con) {
        NamedDecl *D = *Con;
        DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());

        // Find the constructor (which may be a template).
        CXXConstructorDecl *Constructor = nullptr;
        FunctionTemplateDecl *ConstructorTmpl
          = dyn_cast<FunctionTemplateDecl>(D);
        if (ConstructorTmpl)
          Constructor = cast<CXXConstructorDecl>(
                                           ConstructorTmpl->getTemplatedDecl());
        else
          Constructor = cast<CXXConstructorDecl>(D);

        if (!Constructor->isInvalidDecl() &&
            Constructor->isConvertingConstructor(AllowExplicit)) {
          if (ConstructorTmpl)
            S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
                                           /*ExplicitArgs*/ nullptr,
                                           Initializer, CandidateSet,
                                           /*SuppressUserConversions=*/true);
          else
            S.AddOverloadCandidate(Constructor, FoundDecl,
                                   Initializer, CandidateSet,
                                   /*SuppressUserConversions=*/true);
        }
      }
    }
  }

  SourceLocation DeclLoc = Initializer->getLocStart();

  if (const RecordType *SourceRecordType = SourceType->getAs<RecordType>()) {
    // The type we're converting from is a class type, enumerate its conversion
    // functions.

    // We can only enumerate the conversion functions for a complete type; if
    // the type isn't complete, simply skip this step.
    if (!S.RequireCompleteType(DeclLoc, SourceType, 0)) {
      CXXRecordDecl *SourceRecordDecl
        = cast<CXXRecordDecl>(SourceRecordType->getDecl());

      const auto &Conversions =
          SourceRecordDecl->getVisibleConversionFunctions();
      for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
        NamedDecl *D = *I;
        CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
        if (isa<UsingShadowDecl>(D))
          D = cast<UsingShadowDecl>(D)->getTargetDecl();

        FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
        CXXConversionDecl *Conv;
        if (ConvTemplate)
          Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
        else
          Conv = cast<CXXConversionDecl>(D);

        if (AllowExplicit || !Conv->isExplicit()) {
          if (ConvTemplate)
            S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
                                             ActingDC, Initializer, DestType,
                                             CandidateSet, AllowExplicit);
          else
            S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
                                     Initializer, DestType, CandidateSet,
                                     AllowExplicit);
        }
      }
    }
  }

  // Perform overload resolution. If it fails, return the failed result.
  OverloadCandidateSet::iterator Best;
  if (OverloadingResult Result
        = CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
    Sequence.SetOverloadFailure(
                        InitializationSequence::FK_UserConversionOverloadFailed,
                        Result);
    return;
  }

  FunctionDecl *Function = Best->Function;
  Function->setReferenced();
  bool HadMultipleCandidates = (CandidateSet.size() > 1);

  if (isa<CXXConstructorDecl>(Function)) {
    // Add the user-defined conversion step. Any cv-qualification conversion is
    // subsumed by the initialization. Per DR5, the created temporary is of the
    // cv-unqualified type of the destination.
    Sequence.AddUserConversionStep(Function, Best->FoundDecl,
                                   DestType.getUnqualifiedType(),
                                   HadMultipleCandidates);
    return;
  }

  // Add the user-defined conversion step that calls the conversion function.
  QualType ConvType = Function->getCallResultType();
  if (ConvType->getAs<RecordType>()) {
    // If we're converting to a class type, there may be a copy of
    // the resulting temporary object (possible to create an object of
    // a base class type). That copy is not a separate conversion, so
    // we just make a note of the actual destination type (possibly a
    // base class of the type returned by the conversion function) and
    // let the user-defined conversion step handle the conversion.
    Sequence.AddUserConversionStep(Function, Best->FoundDecl,
                                   DestType, HadMultipleCandidates);
    return;
  }

  Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType,
                                 HadMultipleCandidates);

  // If the conversion following the call to the conversion function
  // is interesting, add it as a separate step.
  if (Best->FinalConversion.First || Best->FinalConversion.Second ||
      Best->FinalConversion.Third) {
    ImplicitConversionSequence ICS;
    ICS.setStandard();
    ICS.Standard = Best->FinalConversion;
    Sequence.AddConversionSequenceStep(ICS, DestType, TopLevelOfInitList);
  }
}

/// An egregious hack for compatibility with libstdc++-4.2: in <tr1/hashtable>,
/// a function with a pointer return type contains a 'return false;' statement.
/// In C++11, 'false' is not a null pointer, so this breaks the build of any
/// code using that header.
///
/// Work around this by treating 'return false;' as zero-initializing the
/// result if it's used in a pointer-returning function in a system header.
static bool isLibstdcxxPointerReturnFalseHack(Sema &S,
                                              const InitializedEntity &Entity,
                                              const Expr *Init) {
  return S.getLangOpts().CPlusPlus11 &&
         Entity.getKind() == InitializedEntity::EK_Result &&
         Entity.getType()->isPointerType() &&
         isa<CXXBoolLiteralExpr>(Init) &&
         !cast<CXXBoolLiteralExpr>(Init)->getValue() &&
         S.getSourceManager().isInSystemHeader(Init->getExprLoc());
}

/// The non-zero enum values here are indexes into diagnostic alternatives.
enum InvalidICRKind { IIK_okay, IIK_nonlocal, IIK_nonscalar };

/// Determines whether this expression is an acceptable ICR source.
static InvalidICRKind isInvalidICRSource(ASTContext &C, Expr *e,
                                         bool isAddressOf,
                                         bool &isWeakAccess) {
  // Skip parens.
  e = e->IgnoreParens();

  // Skip address-of nodes.
  if (UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
    if (op->getOpcode() == UO_AddrOf)
      return isInvalidICRSource(C, op->getSubExpr(), /*addressof*/ true,
                                isWeakAccess);

  // Skip certain casts.
  } else if (CastExpr *ce = dyn_cast<CastExpr>(e)) {
    switch (ce->getCastKind()) {
    case CK_Dependent:
    case CK_BitCast:
    case CK_LValueBitCast:
    case CK_NoOp:
      return isInvalidICRSource(C, ce->getSubExpr(), isAddressOf, isWeakAccess);

    case CK_ArrayToPointerDecay:
      return IIK_nonscalar;

    case CK_NullToPointer:
      return IIK_okay;

    default:
      break;
    }

  // If we have a declaration reference, it had better be a local variable.
  } else if (isa<DeclRefExpr>(e)) {
    // set isWeakAccess to true, to mean that there will be an implicit
    // load which requires a cleanup.
    if (e->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
      isWeakAccess = true;

    if (!isAddressOf) return IIK_nonlocal;

    VarDecl *var = dyn_cast<VarDecl>(cast<DeclRefExpr>(e)->getDecl());
    if (!var) return IIK_nonlocal;

    return (var->hasLocalStorage() ? IIK_okay : IIK_nonlocal);

  // If we have a conditional operator, check both sides.
  } else if (ConditionalOperator *cond = dyn_cast<ConditionalOperator>(e)) {
    if (InvalidICRKind iik = isInvalidICRSource(C, cond->getLHS(), isAddressOf,
                                                isWeakAccess))
      return iik;

    return isInvalidICRSource(C, cond->getRHS(), isAddressOf, isWeakAccess);

  // These are never scalar.
  } else if (isa<ArraySubscriptExpr>(e)) {
    return IIK_nonscalar;

  // Otherwise, it needs to be a null pointer constant.
  } else {
    return (e->isNullPointerConstant(C, Expr::NPC_ValueDependentIsNull)
            ? IIK_okay : IIK_nonlocal);
  }

  return IIK_nonlocal;
}

/// Check whether the given expression is a valid operand for an
/// indirect copy/restore.
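///
/// For illustration (ARC writeback), the address of a local variable is an
/// acceptable source while anything non-local is not:
/// \code
/// void consumer(NSError **out);  // hypothetical callee
/// NSError *localErr;
/// consumer(&localErr);           // IIK_okay: address-of a local
/// \endcode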
static void checkIndirectCopyRestoreSource(Sema &S, Expr *src) {
  assert(src->isRValue());
  bool isWeakAccess = false;
  InvalidICRKind iik = isInvalidICRSource(S.Context, src, false, isWeakAccess);
  // If isWeakAccess is true, there will be an implicit
  // load which requires a cleanup.
  if (S.getLangOpts().ObjCAutoRefCount && isWeakAccess)
    S.ExprNeedsCleanups = true;

  if (iik == IIK_okay) return;

  S.Diag(src->getExprLoc(), diag::err_arc_nonlocal_writeback)
    << ((unsigned) iik - 1)  // shift index into diagnostic explanations
    << src->getSourceRange();
}

/// \brief Determine whether we have compatible array types for the
/// purposes of GNU by-copy array initialization.
static bool hasCompatibleArrayTypes(ASTContext &Context, const ArrayType *Dest,
                                    const ArrayType *Source) {
  // If the source and destination array types are equivalent, we're
  // done.
  if (Context.hasSameType(QualType(Dest, 0), QualType(Source, 0)))
    return true;

  // Make sure that the element types are the same.
  if (!Context.hasSameType(Dest->getElementType(), Source->getElementType()))
    return false;

  // The only mismatch we allow is when the destination is an
  // incomplete array type and the source is a constant array type.
  return Source->isConstantArrayType() && Dest->isIncompleteArrayType();
}

static bool tryObjCWritebackConversion(Sema &S,
                                       InitializationSequence &Sequence,
                                       const InitializedEntity &Entity,
                                       Expr *Initializer) {
  bool ArrayDecay = false;
  QualType ArgType = Initializer->getType();
  QualType ArgPointee;
  if (const ArrayType *ArgArrayType = S.Context.getAsArrayType(ArgType)) {
    ArrayDecay = true;
    ArgPointee = ArgArrayType->getElementType();
    ArgType = S.Context.getPointerType(ArgPointee);
  }

  // Handle write-back conversion.
  QualType ConvertedArgType;
  if (!S.isObjCWritebackConversion(ArgType, Entity.getType(),
                                   ConvertedArgType))
    return false;

  // We should copy unless we're passing to an argument explicitly
  // marked 'out'.
  bool ShouldCopy = true;
  if (ParmVarDecl *param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
    ShouldCopy = (param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);

  // Do we need an lvalue conversion?
  if (ArrayDecay || Initializer->isGLValue()) {
    ImplicitConversionSequence ICS;
    ICS.setStandard();
    ICS.Standard.setAsIdentityConversion();

    QualType ResultType;
    if (ArrayDecay) {
      ICS.Standard.First = ICK_Array_To_Pointer;
      ResultType = S.Context.getPointerType(ArgPointee);
    } else {
      ICS.Standard.First = ICK_Lvalue_To_Rvalue;
      ResultType = Initializer->getType().getNonLValueExprType(S.Context);
    }

    Sequence.AddConversionSequenceStep(ICS, ResultType);
  }

  Sequence.AddPassByIndirectCopyRestoreStep(Entity.getType(), ShouldCopy);
  return true;
}

static bool TryOCLSamplerInitialization(Sema &S,
                                        InitializationSequence &Sequence,
                                        QualType DestType,
                                        Expr *Initializer) {
  if (!S.getLangOpts().OpenCL || !DestType->isSamplerT() ||
      !Initializer->isIntegerConstantExpr(S.getASTContext()))
    return false;

  Sequence.AddOCLSamplerInitStep(DestType);
  return true;
}

//
// OpenCL 1.2 spec, s6.12.10
//
// The event argument can also be used to associate the
// async_work_group_copy with a previous async copy allowing
// an event to be shared by multiple async copies; otherwise
// event should be zero.
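//
// e.g., passing a literal zero as the event argument of
// async_work_group_copy(dst, src, num_elements, 0) reaches the zero-event
// initialization below.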
//
static bool TryOCLZeroEventInitialization(Sema &S,
                                          InitializationSequence &Sequence,
                                          QualType DestType,
                                          Expr *Initializer) {
  if (!S.getLangOpts().OpenCL || !DestType->isEventT() ||
      !Initializer->isIntegerConstantExpr(S.getASTContext()) ||
      (Initializer->EvaluateKnownConstInt(S.getASTContext()) != 0))
    return false;

  Sequence.AddOCLZeroEventStep(DestType);
  return true;
}

InitializationSequence::InitializationSequence(Sema &S,
                                               const InitializedEntity &Entity,
                                               const InitializationKind &Kind,
                                               MultiExprArg Args,
                                               bool TopLevelOfInitList)
    : FailedCandidateSet(Kind.getLocation(), OverloadCandidateSet::CSK_Normal) {
  InitializeFrom(S, Entity, Kind, Args, TopLevelOfInitList);
}

void InitializationSequence::InitializeFrom(Sema &S,
                                            const InitializedEntity &Entity,
                                            const InitializationKind &Kind,
                                            MultiExprArg Args,
                                            bool TopLevelOfInitList) {
  ASTContext &Context = S.Context;

  // Eliminate non-overload placeholder types in the arguments. We
  // need to do this before checking whether types are dependent
  // because lowering a pseudo-object expression might well give us
  // something of dependent type.
  for (unsigned I = 0, E = Args.size(); I != E; ++I)
    if (Args[I]->getType()->isNonOverloadPlaceholderType()) {
      // FIXME: should we be doing this here?
      ExprResult result = S.CheckPlaceholderExpr(Args[I]);
      if (result.isInvalid()) {
        SetFailed(FK_PlaceholderType);
        return;
      }
      Args[I] = result.get();
    }

  // C++0x [dcl.init]p16:
  //   The semantics of initializers are as follows. The destination type is
  //   the type of the object or reference being initialized and the source
  //   type is the type of the initializer expression. The source type is not
  //   defined when the initializer is a braced-init-list or when it is a
  //   parenthesized list of expressions.
  QualType DestType = Entity.getType();

  if (DestType->isDependentType() ||
      Expr::hasAnyTypeDependentArguments(Args)) {
    SequenceKind = DependentSequence;
    return;
  }

  // Almost everything is a normal sequence.
  setSequenceKind(NormalSequence);

  QualType SourceType;
  Expr *Initializer = nullptr;
  if (Args.size() == 1) {
    Initializer = Args[0];
    if (S.getLangOpts().ObjC1) {
      if (S.CheckObjCBridgeRelatedConversions(Initializer->getLocStart(),
                                              DestType, Initializer->getType(),
                                              Initializer) ||
          S.ConversionToObjCStringLiteralCheck(DestType, Initializer))
        Args[0] = Initializer;
    }
    if (!isa<InitListExpr>(Initializer))
      SourceType = Initializer->getType();
  }

  // HLSL Change Starts
  if (S.getLangOpts().HLSL) {
    hlsl::InitializeInitSequenceForHLSL(&S, Entity, Kind, Args,
                                        TopLevelOfInitList, this);
    return;
  }
  // HLSL Change Ends

  //     - If the initializer is a (non-parenthesized) braced-init-list, the
  //       object is list-initialized (8.5.4).
  if (Kind.getKind() != InitializationKind::IK_Direct) {
    if (InitListExpr *InitList = dyn_cast_or_null<InitListExpr>(Initializer)) {
      TryListInitialization(S, Entity, Kind, InitList, *this);
      return;
    }
  }

  //     - If the destination type is a reference type, see 8.5.3.
  if (DestType->isReferenceType()) {
    // C++0x [dcl.init.ref]p1:
    //   A variable declared to be a T& or T&&, that is, "reference to type T"
    //   (8.3.2), shall be initialized by an object, or function, of type T or
    //   by an object that can be converted into a T.
    // (Therefore, multiple arguments are not permitted.)
    if (Args.size() != 1)
      SetFailed(FK_TooManyInitsForReference);
    else
      TryReferenceInitialization(S, Entity, Kind, Args[0], *this);
    return;
  }

  //     - If the initializer is (), the object is value-initialized.
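  //       e.g., "int()" or "T()" in an expression, or "new int()".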
  if (Kind.getKind() == InitializationKind::IK_Value ||
      (Kind.getKind() == InitializationKind::IK_Direct && Args.empty())) {
    TryValueInitialization(S, Entity, Kind, *this);
    return;
  }

  // Handle default initialization.
  if (Kind.getKind() == InitializationKind::IK_Default) {
    TryDefaultInitialization(S, Entity, Kind, *this);
    return;
  }

  //     - If the destination type is an array of characters, an array of
  //       char16_t, an array of char32_t, or an array of wchar_t, and the
  //       initializer is a string literal, see 8.5.2.
  //     - Otherwise, if the destination type is an array, the program is
  //       ill-formed.
  if (const ArrayType *DestAT = Context.getAsArrayType(DestType)) {
    if (Initializer && isa<VariableArrayType>(DestAT)) {
      SetFailed(FK_VariableLengthArrayHasInitializer);
      return;
    }

    if (Initializer) {
      switch (IsStringInit(Initializer, DestAT, Context)) {
      case SIF_None:
        TryStringLiteralInitialization(S, Entity, Kind, Initializer, *this);
        return;
      case SIF_NarrowStringIntoWideChar:
        SetFailed(FK_NarrowStringIntoWideCharArray);
        return;
      case SIF_WideStringIntoChar:
        SetFailed(FK_WideStringIntoCharArray);
        return;
      case SIF_IncompatWideStringIntoWideChar:
        SetFailed(FK_IncompatWideStringIntoWideChar);
        return;
      case SIF_Other:
        break;
      }
    }

    // Note: as a GNU C extension, we allow initialization of an
    // array from a compound literal that creates an array of the same
    // type, so long as the initializer has no side effects.
    if (!S.getLangOpts().CPlusPlus && Initializer &&
        isa<CompoundLiteralExpr>(Initializer->IgnoreParens()) &&
        Initializer->getType()->isArrayType()) {
      const ArrayType *SourceAT
        = Context.getAsArrayType(Initializer->getType());
      if (!hasCompatibleArrayTypes(S.Context, DestAT, SourceAT))
        SetFailed(FK_ArrayTypeMismatch);
      else if (Initializer->HasSideEffects(S.Context))
        SetFailed(FK_NonConstantArrayInit);
      else {
        AddArrayInitStep(DestType);
      }
    }
    // Note: as a GNU C++ extension, we allow list-initialization of a
    // class member of array type from a parenthesized initializer list.
    else if (S.getLangOpts().CPlusPlus &&
             Entity.getKind() == InitializedEntity::EK_Member &&
             Initializer && isa<InitListExpr>(Initializer)) {
      TryListInitialization(S, Entity, Kind, cast<InitListExpr>(Initializer),
                            *this);
      AddParenthesizedArrayInitStep(DestType);
    } else if (DestAT->getElementType()->isCharType())
      SetFailed(FK_ArrayNeedsInitListOrStringLiteral);
    else if (IsWideCharCompatible(DestAT->getElementType(), Context))
      SetFailed(FK_ArrayNeedsInitListOrWideStringLiteral);
    else
      SetFailed(FK_ArrayNeedsInitList);

    return;
  }

  // Determine whether we should consider writeback conversions for
  // Objective-C ARC.
  bool allowObjCWritebackConversion = S.getLangOpts().ObjCAutoRefCount &&
         Entity.isParameterKind();

  // We're at the end of the line for C: it's either a write-back conversion
  // or it's a C assignment. There's no need to check anything else.
  if (!S.getLangOpts().CPlusPlus) {
    // If allowed, check whether this is an Objective-C writeback conversion.
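    // e.g., passing "&strongLocal" to a parameter declared as
    // "NSError *__autoreleasing *".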
    if (allowObjCWritebackConversion &&
        tryObjCWritebackConversion(S, *this, Entity, Initializer)) {
      return;
    }

    if (TryOCLSamplerInitialization(S, *this, DestType, Initializer))
      return;

    if (TryOCLZeroEventInitialization(S, *this, DestType, Initializer))
      return;

    // Handle initialization in C
    AddCAssignmentStep(DestType);
    MaybeProduceObjCObject(S, *this, Entity);
    return;
  }

  assert(S.getLangOpts().CPlusPlus);

  //     - If the destination type is a (possibly cv-qualified) class type:
  if (DestType->isRecordType()) {
    //     - If the initialization is direct-initialization, or if it is
    //       copy-initialization where the cv-unqualified version of the
    //       source type is the same class as, or a derived class of, the
    //       class of the destination, constructors are considered. [...]
    if (Kind.getKind() == InitializationKind::IK_Direct ||
        (Kind.getKind() == InitializationKind::IK_Copy &&
         (Context.hasSameUnqualifiedType(SourceType, DestType) ||
          S.IsDerivedFrom(SourceType, DestType))))
      TryConstructorInitialization(S, Entity, Kind, Args,
                                   DestType, *this);
    //     - Otherwise (i.e., for the remaining copy-initialization cases),
    //       user-defined conversion sequences that can convert from the source
    //       type to the destination type or (when a conversion function is
    //       used) to a derived class thereof are enumerated as described in
    //       13.3.1.4, and the best one is chosen through overload resolution
    //       (13.3).
    else
      TryUserDefinedConversion(S, DestType, Kind, Initializer, *this,
                               TopLevelOfInitList);
    return;
  }

  if (Args.size() > 1) {
    SetFailed(FK_TooManyInitsForScalar);
    return;
  }
  assert(Args.size() == 1 && "Zero-argument case handled above");

  // HLSL Change Starts
  assert(Initializer != nullptr &&
         "otherwise prior code changed and Args.size() == 1 no longer reads "
         "from first argument");
  // HLSL Change Ends

  //    - Otherwise, if the source type is a (possibly cv-qualified) class
  //      type, conversion functions are considered.
  if (!SourceType.isNull() && SourceType->isRecordType()) {
    // For a conversion to _Atomic(T) from either T or a class type derived
    // from T, initialize the T object then convert to _Atomic type.
    bool NeedAtomicConversion = false;
    if (const AtomicType *Atomic = DestType->getAs<AtomicType>()) {
      if (Context.hasSameUnqualifiedType(SourceType, Atomic->getValueType()) ||
          S.IsDerivedFrom(SourceType, Atomic->getValueType())) {
        DestType = Atomic->getValueType();
        NeedAtomicConversion = true;
      }
    }

    TryUserDefinedConversion(S, DestType, Kind, Initializer, *this,
                             TopLevelOfInitList);
    MaybeProduceObjCObject(S, *this, Entity);
    if (!Failed() && NeedAtomicConversion)
      AddAtomicConversionStep(Entity.getType());
    return;
  }

  //    - Otherwise, the initial value of the object being initialized is the
  //      (possibly converted) value of the initializer expression. Standard
  //      conversions (Clause 4) will be used, if necessary, to convert the
  //      initializer expression to the cv-unqualified version of the
  //      destination type; no user-defined conversions are considered.

  ImplicitConversionSequence ICS
    = S.TryImplicitConversion(Initializer, DestType,
                              /*SuppressUserConversions*/true,
                              /*AllowExplicitConversions*/ false,
                              /*InOverloadResolution*/ false,
                              /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
                              allowObjCWritebackConversion);

  if (ICS.isStandard() &&
      ICS.Standard.Second == ICK_Writeback_Conversion) {
    // Objective-C ARC writeback conversion.

    // We should copy unless we're passing to an argument explicitly
    // marked 'out'.
    bool ShouldCopy = true;
    if (ParmVarDecl *Param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
      ShouldCopy = (Param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);

    // If there was an lvalue adjustment, add it as a separate conversion.
    if (ICS.Standard.First == ICK_Array_To_Pointer ||
        ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
      ImplicitConversionSequence LvalueICS;
      LvalueICS.setStandard();
      LvalueICS.Standard.setAsIdentityConversion();
      LvalueICS.Standard.setAllToTypes(ICS.Standard.getToType(0));
      LvalueICS.Standard.First = ICS.Standard.First;
      AddConversionSequenceStep(LvalueICS, ICS.Standard.getToType(0));
    }

    AddPassByIndirectCopyRestoreStep(DestType, ShouldCopy);
  } else if (ICS.isBad()) {
    DeclAccessPair dap;
    if (isLibstdcxxPointerReturnFalseHack(S, Entity, Initializer)) {
      AddZeroInitializationStep(Entity.getType());
    } else if (Initializer->getType() == Context.OverloadTy &&
               !S.ResolveAddressOfOverloadedFunction(Initializer, DestType,
                                                     false, dap))
      SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
    else
      SetFailed(InitializationSequence::FK_ConversionFailed);
  } else {
    AddConversionSequenceStep(ICS, DestType, TopLevelOfInitList);

    MaybeProduceObjCObject(S, *this, Entity);
  }
}

InitializationSequence::~InitializationSequence() {
  for (auto &S : Steps)
    S.Destroy();
}

//===----------------------------------------------------------------------===//
// Perform initialization
//===----------------------------------------------------------------------===//
static Sema::AssignmentAction
getAssignmentAction(const InitializedEntity &Entity, bool Diagnose = false) {
  switch(Entity.getKind()) {
  case InitializedEntity::EK_Variable:
  case InitializedEntity::EK_New:
  case InitializedEntity::EK_Exception:
  case InitializedEntity::EK_Base:
  case InitializedEntity::EK_Delegating:
    return Sema::AA_Initializing;

  case InitializedEntity::EK_Parameter:
    if (Entity.getDecl() &&
        isa<ObjCMethodDecl>(Entity.getDecl()->getDeclContext()))
      return Sema::AA_Sending;

    return Sema::AA_Passing;

  case InitializedEntity::EK_Parameter_CF_Audited:
    if (Entity.getDecl() &&
        isa<ObjCMethodDecl>(Entity.getDecl()->getDeclContext()))
      return Sema::AA_Sending;

    return !Diagnose ? Sema::AA_Passing : Sema::AA_Passing_CFAudited;

  case InitializedEntity::EK_Result:
    return Sema::AA_Returning;

  case InitializedEntity::EK_Temporary:
  case InitializedEntity::EK_RelatedResult:
    // FIXME: Can we tell apart casting vs. converting?
    return Sema::AA_Casting;

  case InitializedEntity::EK_Member:
  case InitializedEntity::EK_ArrayElement:
  case InitializedEntity::EK_VectorElement:
  case InitializedEntity::EK_ComplexElement:
  case InitializedEntity::EK_BlockElement:
  case InitializedEntity::EK_LambdaCapture:
  case InitializedEntity::EK_CompoundLiteralInit:
    return Sema::AA_Initializing;
  }

  llvm_unreachable("Invalid EntityKind!");
}

/// \brief Whether we should bind a created object as a temporary when
/// initializing the given entity.
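/// For example, a temporary created to initialize a function parameter needs
/// a CXXBindTemporaryExpr so its destructor runs at the end of the
/// full-expression, while a named variable's initializer does not.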
static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
  switch (Entity.getKind()) {
  case InitializedEntity::EK_ArrayElement:
  case InitializedEntity::EK_Member:
  case InitializedEntity::EK_Result:
  case InitializedEntity::EK_New:
  case InitializedEntity::EK_Variable:
  case InitializedEntity::EK_Base:
  case InitializedEntity::EK_Delegating:
  case InitializedEntity::EK_VectorElement:
  case InitializedEntity::EK_ComplexElement:
  case InitializedEntity::EK_Exception:
  case InitializedEntity::EK_BlockElement:
  case InitializedEntity::EK_LambdaCapture:
  case InitializedEntity::EK_CompoundLiteralInit:
    return false;

  case InitializedEntity::EK_Parameter:
  case InitializedEntity::EK_Parameter_CF_Audited:
  case InitializedEntity::EK_Temporary:
  case InitializedEntity::EK_RelatedResult:
    return true;
  }

  llvm_unreachable("missed an InitializedEntity kind?");
}

/// \brief Whether the given entity, when initialized with an object
/// created for that initialization, requires destruction.
static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
  switch (Entity.getKind()) {
  case InitializedEntity::EK_Result:
  case InitializedEntity::EK_New:
  case InitializedEntity::EK_Base:
  case InitializedEntity::EK_Delegating:
  case InitializedEntity::EK_VectorElement:
  case InitializedEntity::EK_ComplexElement:
  case InitializedEntity::EK_BlockElement:
  case InitializedEntity::EK_LambdaCapture:
    return false;

  case InitializedEntity::EK_Member:
  case InitializedEntity::EK_Variable:
  case InitializedEntity::EK_Parameter:
  case InitializedEntity::EK_Parameter_CF_Audited:
  case InitializedEntity::EK_Temporary:
  case InitializedEntity::EK_ArrayElement:
  case InitializedEntity::EK_Exception:
  case InitializedEntity::EK_CompoundLiteralInit:
  case InitializedEntity::EK_RelatedResult:
    return true;
  }

  llvm_unreachable("missed an InitializedEntity kind?");
}

/// \brief Look for copy and move constructors and constructor templates, for
/// copying an object via direct-initialization (per C++11 [dcl.init]p16).
static void LookupCopyAndMoveConstructors(Sema &S,
                                          OverloadCandidateSet &CandidateSet,
                                          CXXRecordDecl *Class,
                                          Expr *CurInitExpr) {
  DeclContext::lookup_result R = S.LookupConstructors(Class);
  // The container holding the constructors can under certain conditions
  // be changed while iterating (e.g. because of deserialization).
  // To be safe we copy the lookup results to a new container.
  SmallVector<NamedDecl*, 16> Ctors(R.begin(), R.end());
  for (SmallVectorImpl<NamedDecl *>::iterator
         CI = Ctors.begin(), CE = Ctors.end(); CI != CE; ++CI) {
    NamedDecl *D = *CI;
    CXXConstructorDecl *Constructor = nullptr;

    if ((Constructor = dyn_cast<CXXConstructorDecl>(D))) {
      // Handle copy/move constructors, only.
      if (!Constructor || Constructor->isInvalidDecl() ||
          !Constructor->isCopyOrMoveConstructor() ||
          !Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
        continue;

      DeclAccessPair FoundDecl
        = DeclAccessPair::make(Constructor, Constructor->getAccess());
      S.AddOverloadCandidate(Constructor, FoundDecl,
                             CurInitExpr, CandidateSet);
      continue;
    }

    // Handle constructor templates.
    FunctionTemplateDecl *ConstructorTmpl = cast<FunctionTemplateDecl>(D);
    if (ConstructorTmpl->isInvalidDecl())
      continue;

    Constructor = cast<CXXConstructorDecl>(
                                         ConstructorTmpl->getTemplatedDecl());
    if (!Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
      continue;

    // FIXME: Do we need to limit this to copy-constructor-like
    // candidates?
    DeclAccessPair FoundDecl
      = DeclAccessPair::make(ConstructorTmpl, ConstructorTmpl->getAccess());
    S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, nullptr,
                                   CurInitExpr, CandidateSet, true);
  }
}

/// \brief Get the location at which initialization diagnostics should appear.
static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
                                           Expr *Initializer) {
  switch (Entity.getKind()) {
  case InitializedEntity::EK_Result:
    return Entity.getReturnLoc();

  case InitializedEntity::EK_Exception:
    return Entity.getThrowLoc();

  case InitializedEntity::EK_Variable:
    return Entity.getDecl()->getLocation();

  case InitializedEntity::EK_LambdaCapture:
    return Entity.getCaptureLoc();

  case InitializedEntity::EK_ArrayElement:
  case InitializedEntity::EK_Member:
  case InitializedEntity::EK_Parameter:
  case InitializedEntity::EK_Parameter_CF_Audited:
  case InitializedEntity::EK_Temporary:
  case InitializedEntity::EK_New:
  case InitializedEntity::EK_Base:
  case InitializedEntity::EK_Delegating:
  case InitializedEntity::EK_VectorElement:
  case InitializedEntity::EK_ComplexElement:
  case InitializedEntity::EK_BlockElement:
  case InitializedEntity::EK_CompoundLiteralInit:
  case InitializedEntity::EK_RelatedResult:
    return Initializer->getLocStart();
  }

  llvm_unreachable("missed an InitializedEntity kind?");
}

/// \brief Make a (potentially elidable) temporary copy of the object
/// provided by the given initializer by calling the appropriate copy
/// constructor.
///
/// \param S The Sema object used for type-checking.
///
/// \param T The type of the temporary object, which must either be
/// the type of the initializer expression or a superclass thereof.
///
/// \param Entity The entity being initialized.
///
/// \param CurInit The initializer expression.
///
/// \param IsExtraneousCopy Whether this is an "extraneous" copy that
/// is permitted in C++03 (but not C++0x) when binding a reference to
/// an rvalue.
///
/// \returns An expression that copies the initializer expression into
/// a temporary object, or an error expression if a copy could not be
/// created.
static ExprResult CopyObject(Sema &S,
                             QualType T,
                             const InitializedEntity &Entity,
                             ExprResult CurInit,
                             bool IsExtraneousCopy) {
  if (CurInit.isInvalid())
    return CurInit;
  // Determine which class type we're copying to.
  Expr *CurInitExpr = (Expr *)CurInit.get();
  CXXRecordDecl *Class = nullptr;
  if (const RecordType *Record = T->getAs<RecordType>())
    Class = cast<CXXRecordDecl>(Record->getDecl());
  if (!Class)
    return CurInit;

  // C++0x [class.copy]p32:
  //   When certain criteria are met, an implementation is allowed to
  //   omit the copy/move construction of a class object, even if the
  //   copy/move constructor and/or destructor for the object have
  //   side effects. [...]
  //     - when a temporary class object that has not been bound to a
  //       reference (12.2) would be copied/moved to a class object
  //       with the same cv-unqualified type, the copy/move operation
  //       can be omitted by constructing the temporary object
  //       directly into the target of the omitted copy/move
  //
  // Note that the other three bullets are handled elsewhere. Copy
  // elision for return statements and throw expressions are handled as part
  // of constructor initialization, while copy elision for exception handlers
  // is handled by the run-time.
  bool Elidable = CurInitExpr->isTemporaryObject(S.Context, Class);
  SourceLocation Loc = getInitializationLoc(Entity, CurInit.get());

  // Make sure that the type we are copying is complete.
  if (S.RequireCompleteType(Loc, T, diag::err_temp_copy_incomplete))
    return CurInit;

  // Perform overload resolution using the class's copy/move constructors.
  // Only consider constructors and constructor templates. Per
  // C++0x [dcl.init]p16, second bullet for class types, this initialization
  // is direct-initialization.
  OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
  LookupCopyAndMoveConstructors(S, CandidateSet, Class, CurInitExpr);

  bool HadMultipleCandidates = (CandidateSet.size() > 1);

  OverloadCandidateSet::iterator Best;
  switch (CandidateSet.BestViableFunction(S, Loc, Best)) {
  case OR_Success:
    break;

  case OR_No_Viable_Function:
    S.Diag(Loc, IsExtraneousCopy && !S.isSFINAEContext()
           ? diag::ext_rvalue_to_reference_temp_copy_no_viable
           : diag::err_temp_copy_no_viable)
      << (int)Entity.getKind() << CurInitExpr->getType()
      << CurInitExpr->getSourceRange();
    CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
    if (!IsExtraneousCopy || S.isSFINAEContext())
      return ExprError();
    return CurInit;

  case OR_Ambiguous:
    S.Diag(Loc, diag::err_temp_copy_ambiguous)
      << (int)Entity.getKind() << CurInitExpr->getType()
      << CurInitExpr->getSourceRange();
    CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
    return ExprError();

  case OR_Deleted:
    S.Diag(Loc, diag::err_temp_copy_deleted)
      << (int)Entity.getKind() << CurInitExpr->getType()
      << CurInitExpr->getSourceRange();
    S.NoteDeletedFunction(Best->Function);
    return ExprError();
  }

  CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
  SmallVector<Expr*, 8> ConstructorArgs;
  CurInit.get(); // Ownership transferred into MultiExprArg, below.

  S.CheckConstructorAccess(Loc, Constructor, Entity,
                           Best->FoundDecl.getAccess(), IsExtraneousCopy);

  if (IsExtraneousCopy) {
    // If this is a totally extraneous copy for C++03 reference
    // binding purposes, just return the original initialization
    // expression. We don't generate an (elided) copy operation here
    // because doing so would require us to pass down a flag to avoid
    // infinite recursion, where each step adds another extraneous,
    // elidable copy.

    // Instantiate the default arguments of any extra parameters in
    // the selected copy constructor, as if we were going to create a
    // proper call to the copy constructor.
    for (unsigned I = 1, N = Constructor->getNumParams(); I != N; ++I) {
      ParmVarDecl *Parm = Constructor->getParamDecl(I);
      if (S.RequireCompleteType(Loc, Parm->getType(),
                                diag::err_call_incomplete_argument))
        break;

      // Build the default argument expression; we don't actually care
      // if this succeeds or not, because this routine will complain
      // if there was a problem.
      S.BuildCXXDefaultArgExpr(Loc, Constructor, Parm);
    }

    return CurInitExpr;
  }

  // Determine the arguments required to actually perform the
  // constructor call (we might have derived-to-base conversions, or
  // the copy constructor may have default arguments).
  if (S.CompleteConstructorCall(Constructor, CurInitExpr, Loc,
                                ConstructorArgs))
    return ExprError();

  // Actually perform the constructor call.
  CurInit = S.BuildCXXConstructExpr(Loc, T, Constructor, Elidable,
                                    ConstructorArgs,
                                    HadMultipleCandidates,
                                    /*ListInit*/ false,
                                    /*StdInitListInit*/ false,
                                    /*ZeroInit*/ false,
                                    CXXConstructExpr::CK_Complete,
                                    SourceRange());

  // If we're supposed to bind temporaries, do so.
  if (!CurInit.isInvalid() && shouldBindAsTemporary(Entity))
    CurInit = S.MaybeBindToTemporary(CurInit.getAs<Expr>());
  return CurInit;
}

/// \brief Check whether elidable copy construction for binding a reference to
/// a temporary would have succeeded if we were building in C++98 mode, for
/// -Wc++98-compat.
static void CheckCXX98CompatAccessibleCopy(Sema &S,
                                           const InitializedEntity &Entity,
                                           Expr *CurInitExpr) {
  assert(S.getLangOpts().CPlusPlus11);

  const RecordType *Record = CurInitExpr->getType()->getAs<RecordType>();
  if (!Record)
    return;

  SourceLocation Loc = getInitializationLoc(Entity, CurInitExpr);
  if (S.Diags.isIgnored(diag::warn_cxx98_compat_temp_copy, Loc))
    return;

  // Find constructors which would have been considered.
  OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
  LookupCopyAndMoveConstructors(
      S, CandidateSet, cast<CXXRecordDecl>(Record->getDecl()), CurInitExpr);

  // Perform overload resolution.
  OverloadCandidateSet::iterator Best;
  OverloadingResult OR = CandidateSet.BestViableFunction(S, Loc, Best);

  PartialDiagnostic Diag = S.PDiag(diag::warn_cxx98_compat_temp_copy)
    << OR << (int)Entity.getKind() << CurInitExpr->getType()
    << CurInitExpr->getSourceRange();

  switch (OR) {
  case OR_Success:
    S.CheckConstructorAccess(Loc, cast<CXXConstructorDecl>(Best->Function),
                             Entity, Best->FoundDecl.getAccess(), Diag);
    // FIXME: Check default arguments as far as that's possible.
    break;

  case OR_No_Viable_Function:
    S.Diag(Loc, Diag);
    CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
    break;

  case OR_Ambiguous:
    S.Diag(Loc, Diag);
    CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
    break;

  case OR_Deleted:
    S.Diag(Loc, Diag);
    S.NoteDeletedFunction(Best->Function);
    break;
  }
}

void InitializationSequence::PrintInitLocationNote(Sema &S,
                                              const InitializedEntity &Entity) {
  if (Entity.isParameterKind() && Entity.getDecl()) {
    if (Entity.getDecl()->getLocation().isInvalid())
      return;

    if (Entity.getDecl()->getDeclName())
      S.Diag(Entity.getDecl()->getLocation(), diag::note_parameter_named_here)
        << Entity.getDecl()->getDeclName();
    else
      S.Diag(Entity.getDecl()->getLocation(), diag::note_parameter_here);
  }
  else if (Entity.getKind() == InitializedEntity::EK_RelatedResult &&
           Entity.getMethodDecl())
    S.Diag(Entity.getMethodDecl()->getLocation(),
           diag::note_method_return_type_change)
      << Entity.getMethodDecl()->getDeclName();
}

static bool isReferenceBinding(const InitializationSequence::Step &s) {
  return s.Kind == InitializationSequence::SK_BindReference ||
         s.Kind == InitializationSequence::SK_BindReferenceToTemporary;
}

/// Returns true if the parameters describe a constructor initialization of
/// an explicit temporary object, e.g. "Point(x, y)".
static bool isExplicitTemporary(const InitializedEntity &Entity,
                                const InitializationKind &Kind,
                                unsigned NumArgs) {
  switch (Entity.getKind()) {
  case InitializedEntity::EK_Temporary:
  case InitializedEntity::EK_CompoundLiteralInit:
  case InitializedEntity::EK_RelatedResult:
    break;
  default: return false;
  }

  switch (Kind.getKind()) {
  case InitializationKind::IK_DirectList:
    return true;
  // FIXME: Hack to work around cast weirdness.
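  // e.g., a functional cast such as "X(e)" is direct-initialization with
  // exactly one argument and is handled by the cast machinery instead.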
  case InitializationKind::IK_Direct:
  case InitializationKind::IK_Value:
    return NumArgs != 1;
  default: return false;
  }
}

static ExprResult
PerformConstructorInitialization(Sema &S,
                                 const InitializedEntity &Entity,
                                 const InitializationKind &Kind,
                                 MultiExprArg Args,
                                 const InitializationSequence::Step& Step,
                                 bool &ConstructorInitRequiresZeroInit,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 SourceLocation LBraceLoc,
                                 SourceLocation RBraceLoc) {
  unsigned NumArgs = Args.size();
  CXXConstructorDecl *Constructor
    = cast<CXXConstructorDecl>(Step.Function.Function);
  bool HadMultipleCandidates = Step.Function.HadMultipleCandidates;

  // Build a call to the selected constructor.
  SmallVector<Expr*, 8> ConstructorArgs;
  SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid())
                         ? Kind.getEqualLoc()
                         : Kind.getLocation();

  if (Kind.getKind() == InitializationKind::IK_Default) {
    // Force even a trivial, implicit default constructor to be
    // semantically checked. We do this explicitly because we don't build
    // the definition for completely trivial constructors.
    assert(Constructor->getParent() && "No parent class for constructor.");
    if (Constructor->isDefaulted() && Constructor->isDefaultConstructor() &&
        Constructor->isTrivial() && !Constructor->isUsed(false))
      S.DefineImplicitDefaultConstructor(Loc, Constructor);
  }

  ExprResult CurInit((Expr *)nullptr);

  // C++ [over.match.copy]p1:
  //   - When initializing a temporary to be bound to the first parameter
  //     of a constructor that takes a reference to possibly cv-qualified
  //     T as its first argument, called with a single argument in the
  //     context of direct-initialization, explicit conversion functions
  //     are also considered.
  bool AllowExplicitConv = Kind.AllowExplicit() && !Kind.isCopyInit() &&
                           Args.size() == 1 &&
                           Constructor->isCopyOrMoveConstructor();

  // Determine the arguments required to actually perform the constructor
  // call.
  if (S.CompleteConstructorCall(Constructor, Args,
                                Loc, ConstructorArgs,
                                AllowExplicitConv,
                                IsListInitialization))
    return ExprError();

  if (isExplicitTemporary(Entity, Kind, NumArgs)) {
    // An explicitly-constructed temporary, e.g., X(1, 2).
    S.MarkFunctionReferenced(Loc, Constructor);
    if (S.DiagnoseUseOfDecl(Constructor, Loc))
      return ExprError();

    TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
    if (!TSInfo)
      TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
    SourceRange ParenOrBraceRange =
      (Kind.getKind() == InitializationKind::IK_DirectList)
      ? SourceRange(LBraceLoc, RBraceLoc)
      : Kind.getParenRange();

    CurInit = new (S.Context) CXXTemporaryObjectExpr(
        S.Context, Constructor, TSInfo, ConstructorArgs, ParenOrBraceRange,
        HadMultipleCandidates, IsListInitialization,
        IsStdInitListInitialization, ConstructorInitRequiresZeroInit);
  } else {
    CXXConstructExpr::ConstructionKind ConstructKind =
      CXXConstructExpr::CK_Complete;

    if (Entity.getKind() == InitializedEntity::EK_Base) {
      ConstructKind = Entity.getBaseSpecifier()->isVirtual() ?
        CXXConstructExpr::CK_VirtualBase :
        CXXConstructExpr::CK_NonVirtualBase;
    } else if (Entity.getKind() == InitializedEntity::EK_Delegating) {
      ConstructKind = CXXConstructExpr::CK_Delegating;
    }

    // Only get the parenthesis or brace range if it is a list initialization
    // or direct construction.
    SourceRange ParenOrBraceRange;
    if (IsListInitialization)
      ParenOrBraceRange = SourceRange(LBraceLoc, RBraceLoc);
    else if (Kind.getKind() == InitializationKind::IK_Direct)
      ParenOrBraceRange = Kind.getParenRange();

    // If the entity allows NRVO, mark the construction as elidable
    // unconditionally.
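    // e.g., "X f() { X x; ...; return x; }" may construct 'x' directly in
    // the return slot; the elidable flag lets CodeGen perform NRVO.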
    if (Entity.allowsNRVO())
      CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
                                        Constructor, /*Elidable=*/true,
                                        ConstructorArgs,
                                        HadMultipleCandidates,
                                        IsListInitialization,
                                        IsStdInitListInitialization,
                                        ConstructorInitRequiresZeroInit,
                                        ConstructKind,
                                        ParenOrBraceRange);
    else
      CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
                                        Constructor,
                                        ConstructorArgs,
                                        HadMultipleCandidates,
                                        IsListInitialization,
                                        IsStdInitListInitialization,
                                        ConstructorInitRequiresZeroInit,
                                        ConstructKind,
                                        ParenOrBraceRange);
  }
  if (CurInit.isInvalid())
    return ExprError();

  // Only check access if all of that succeeded.
  S.CheckConstructorAccess(Loc, Constructor, Entity,
                           Step.Function.FoundDecl.getAccess());
  if (S.DiagnoseUseOfDecl(Step.Function.FoundDecl, Loc))
    return ExprError();

  if (shouldBindAsTemporary(Entity))
    CurInit = S.MaybeBindToTemporary(CurInit.get());

  return CurInit;
}

/// Determine whether the specified InitializedEntity definitely has a lifetime
/// longer than the current full-expression. Conservatively returns false if
/// it's unclear.
static bool
InitializedEntityOutlivesFullExpression(const InitializedEntity &Entity) {
  const InitializedEntity *Top = &Entity;
  while (Top->getParent())
    Top = Top->getParent();

  switch (Top->getKind()) {
  case InitializedEntity::EK_Variable:
  case InitializedEntity::EK_Result:
  case InitializedEntity::EK_Exception:
  case InitializedEntity::EK_Member:
  case InitializedEntity::EK_New:
  case InitializedEntity::EK_Base:
  case InitializedEntity::EK_Delegating:
    return true;

  case InitializedEntity::EK_ArrayElement:
  case InitializedEntity::EK_VectorElement:
  case InitializedEntity::EK_BlockElement:
  case InitializedEntity::EK_ComplexElement:
    // Could not determine what the full initialization is. Assume it might not
    // outlive the full-expression.
    return false;

  case InitializedEntity::EK_Parameter:
  case InitializedEntity::EK_Parameter_CF_Audited:
  case InitializedEntity::EK_Temporary:
  case InitializedEntity::EK_LambdaCapture:
  case InitializedEntity::EK_CompoundLiteralInit:
  case InitializedEntity::EK_RelatedResult:
    // The entity being initialized might not outlive the full-expression.
    return false;
  }

  llvm_unreachable("unknown entity kind");
}

/// Determine the declaration which an initialized entity ultimately refers to,
/// for the purpose of lifetime-extending a temporary bound to a reference in
/// the initialization of \p Entity.
static const InitializedEntity *getEntityForTemporaryLifetimeExtension(
    const InitializedEntity *Entity,
    const InitializedEntity *FallbackDecl = nullptr) {
  // C++11 [class.temporary]p5:
  switch (Entity->getKind()) {
  case InitializedEntity::EK_Variable:
    //   The temporary [...] persists for the lifetime of the reference
    return Entity;

  case InitializedEntity::EK_Member:
    // For subobjects, we look at the complete object.
    if (Entity->getParent())
      return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
                                                    Entity);

    //   except:
    //   -- A temporary bound to a reference member in a constructor's
    //      ctor-initializer persists until the constructor exits.
    return Entity;

  case InitializedEntity::EK_Parameter:
  case InitializedEntity::EK_Parameter_CF_Audited:
    //   -- A temporary bound to a reference parameter in a function call
    //      persists until the completion of the full-expression containing
    //      the call.
  case InitializedEntity::EK_Result:
    //   -- The lifetime of a temporary bound to the returned value in a
    //      function return statement is not extended; the temporary is
    //      destroyed at the end of the full-expression in the return
    //      statement.
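    //      e.g., "const X &f() { return X(); }" leaves the returned reference
    //      dangling; the temporary dies at the end of the return statement's
    //      full-expression.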
  case InitializedEntity::EK_New:
    //   -- A temporary bound to a reference in a new-initializer persists
    //      until the completion of the full-expression containing the
    //      new-initializer.
    return nullptr;

  case InitializedEntity::EK_Temporary:
  case InitializedEntity::EK_CompoundLiteralInit:
  case InitializedEntity::EK_RelatedResult:
    // We don't yet know the storage duration of the surrounding temporary.
    // Assume it's got full-expression duration for now; it will patch up our
    // storage duration if that's not correct.
    return nullptr;

  case InitializedEntity::EK_ArrayElement:
    // For subobjects, we look at the complete object.
    return getEntityForTemporaryLifetimeExtension(Entity->getParent(),
                                                  FallbackDecl);

  case InitializedEntity::EK_Base:
  case InitializedEntity::EK_Delegating:
    // We can reach this case for aggregate initialization in a constructor:
    //   struct A { int &&r; };
    //   struct B : A { B() : A{0} {} };
    // In this case, use the innermost field decl as the context.
    return FallbackDecl;

  case InitializedEntity::EK_BlockElement:
  case InitializedEntity::EK_LambdaCapture:
  case InitializedEntity::EK_Exception:
  case InitializedEntity::EK_VectorElement:
  case InitializedEntity::EK_ComplexElement:
    return nullptr;
  }
  llvm_unreachable("unknown entity kind");
}

static void performLifetimeExtension(Expr *Init,
                                     const InitializedEntity *ExtendingEntity);

/// Update a glvalue expression that is used as the initializer of a reference
/// to note that its lifetime is extended.
/// \return \c true if any temporary had its lifetime extended.
static bool performReferenceExtension(Expr *Init,
                                const InitializedEntity *ExtendingEntity) {
  // Walk past any constructs which we can lifetime-extend across.
  Expr *Old;
  do {
    Old = Init;

    if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
      if (ILE->getNumInits() == 1 && ILE->isGLValue()) {
        // This is just redundant braces around an initializer. Step over it.
        Init = ILE->getInit(0);
      }
    }

    // Step over any subobject adjustments; we may have a materialized
    // temporary inside them.
    SmallVector<const Expr *, 2> CommaLHSs;
    SmallVector<SubobjectAdjustment, 2> Adjustments;
    Init = const_cast<Expr *>(
        Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));

    // Per current approach for DR1376, look through casts to reference type
    // when performing lifetime extension.
    if (CastExpr *CE = dyn_cast<CastExpr>(Init))
      if (CE->getSubExpr()->isGLValue())
        Init = CE->getSubExpr();

    // FIXME: Per DR1213, subscripting on an array temporary produces an
    // xvalue. It's unclear if binding a reference to that xvalue extends the
    // array temporary.
  } while (Init != Old);

  if (MaterializeTemporaryExpr *ME = dyn_cast<MaterializeTemporaryExpr>(Init)) {
    // Update the storage duration of the materialized temporary.
    // FIXME: Rebuild the expression instead of mutating it.
    ME->setExtendingDecl(ExtendingEntity->getDecl(),
                         ExtendingEntity->allocateManglingNumber());
    performLifetimeExtension(ME->GetTemporaryExpr(), ExtendingEntity);
    return true;
  }

  return false;
}

/// Update a prvalue expression that is going to be materialized as a
/// lifetime-extended temporary.
static void performLifetimeExtension(Expr *Init,
                                     const InitializedEntity *ExtendingEntity) {
  // Dig out the expression which constructs the extended temporary.
SmallVector<const Expr *, 2> CommaLHSs; SmallVector<SubobjectAdjustment, 2> Adjustments; Init = const_cast<Expr *>( Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments)); if (CXXBindTemporaryExpr *BTE = dyn_cast<CXXBindTemporaryExpr>(Init)) Init = BTE->getSubExpr(); if (CXXStdInitializerListExpr *ILE = dyn_cast<CXXStdInitializerListExpr>(Init)) { performReferenceExtension(ILE->getSubExpr(), ExtendingEntity); return; } if (InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) { if (ILE->getType()->isArrayType()) { for (unsigned I = 0, N = ILE->getNumInits(); I != N; ++I) performLifetimeExtension(ILE->getInit(I), ExtendingEntity); return; } if (CXXRecordDecl *RD = ILE->getType()->getAsCXXRecordDecl()) { assert(RD->isAggregate() && "aggregate init on non-aggregate"); // If we lifetime-extend a braced initializer which is initializing an // aggregate, and that aggregate contains reference members which are // bound to temporaries, those temporaries are also lifetime-extended. if (RD->isUnion() && ILE->getInitializedFieldInUnion() && ILE->getInitializedFieldInUnion()->getType()->isReferenceType()) performReferenceExtension(ILE->getInit(0), ExtendingEntity); else { unsigned Index = 0; for (const auto *I : RD->fields()) { if (Index >= ILE->getNumInits()) break; if (I->isUnnamedBitfield()) continue; Expr *SubInit = ILE->getInit(Index); if (I->getType()->isReferenceType()) performReferenceExtension(SubInit, ExtendingEntity); else if (isa<InitListExpr>(SubInit) || isa<CXXStdInitializerListExpr>(SubInit)) // This may be either aggregate-initialization of a member or // initialization of a std::initializer_list object. Either way, // we should recursively lifetime-extend that initializer. performLifetimeExtension(SubInit, ExtendingEntity); ++Index; } } } } } static void warnOnLifetimeExtension(Sema &S, const InitializedEntity &Entity, const Expr *Init, bool IsInitializerList, const ValueDecl *ExtendingDecl) { // Warn if a field lifetime-extends a temporary. if (isa<FieldDecl>(ExtendingDecl)) { if (IsInitializerList) { S.Diag(Init->getExprLoc(), diag::warn_dangling_std_initializer_list) << /*at end of constructor*/true; return; } bool IsSubobjectMember = false; for (const InitializedEntity *Ent = Entity.getParent(); Ent; Ent = Ent->getParent()) { if (Ent->getKind() != InitializedEntity::EK_Base) { IsSubobjectMember = true; break; } } S.Diag(Init->getExprLoc(), diag::warn_bind_ref_member_to_temporary) << ExtendingDecl << Init->getSourceRange() << IsSubobjectMember << IsInitializerList; if (IsSubobjectMember) S.Diag(ExtendingDecl->getLocation(), diag::note_ref_subobject_of_member_declared_here); else S.Diag(ExtendingDecl->getLocation(), diag::note_ref_or_ptr_member_declared_here) << /*is pointer*/false; } } static void DiagnoseNarrowingInInitList(Sema &S, const ImplicitConversionSequence &ICS, QualType PreNarrowingType, QualType EntityType, const Expr *PostInit); /// Provide warnings when std::move is used on construction. static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr, bool IsReturnStmt) { if (!InitExpr) return; if (!S.ActiveTemplateInstantiations.empty()) return; QualType DestType = InitExpr->getType(); if (!DestType->isRecordType()) return; unsigned DiagID = 0; if (IsReturnStmt) { const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(InitExpr->IgnoreParens()); if (!CCE || CCE->getNumArgs() != 1) return; if (!CCE->getConstructor()->isCopyOrMoveConstructor()) return; InitExpr = CCE->getArg(0)->IgnoreImpCasts(); } // Find the std::move call and get the argument. 
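  // Editorial sketch of the patterns diagnosed below (not upstream code):
  //
  //   T f() {
  //     T t;
  //     return std::move(t);            // -Wpessimizing-move: defeats NRVO
  //   }
  //   T g(T p) { return std::move(p); } // -Wredundant-move: copy elision is
  //                                     // impossible for parameters, so the
  //                                     // move is merely redundant
  //   T v = std::move(T());             // -Wpessimizing-move on initialization
  //
  // The matcher requires a direct call to std::move with exactly one argument;
  // for return statements the argument must additionally be a local variable
  // whose type matches the function's return type.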
const CallExpr *CE = dyn_cast<CallExpr>(InitExpr->IgnoreParens()); if (!CE || CE->getNumArgs() != 1) return; const FunctionDecl *MoveFunction = CE->getDirectCallee(); if (!MoveFunction || !MoveFunction->isInStdNamespace() || !MoveFunction->getIdentifier() || !MoveFunction->getIdentifier()->isStr("move")) return; const Expr *Arg = CE->getArg(0)->IgnoreImplicit(); if (IsReturnStmt) { const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg->IgnoreParenImpCasts()); if (!DRE || DRE->refersToEnclosingVariableOrCapture()) return; const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()); if (!VD || !VD->hasLocalStorage()) return; QualType SourceType = VD->getType(); if (!SourceType->isRecordType()) return; if (!S.Context.hasSameUnqualifiedType(DestType, SourceType)) { return; } // If we're returning a function parameter, copy elision // is not possible. if (isa<ParmVarDecl>(VD)) DiagID = diag::warn_redundant_move_on_return; else DiagID = diag::warn_pessimizing_move_on_return; } else { DiagID = diag::warn_pessimizing_move_on_initialization; const Expr *ArgStripped = Arg->IgnoreImplicit()->IgnoreParens(); if (!ArgStripped->isRValue() || !ArgStripped->getType()->isRecordType()) return; } S.Diag(CE->getLocStart(), DiagID); // Get all the locations for a fix-it. Don't emit the fix-it if any location // is within a macro. SourceLocation CallBegin = CE->getCallee()->getLocStart(); if (CallBegin.isMacroID()) return; SourceLocation RParen = CE->getRParenLoc(); if (RParen.isMacroID()) return; SourceLocation LParen; SourceLocation ArgLoc = Arg->getLocStart(); // Special testing for the argument location. Since the fix-it needs the // location right before the argument, the argument location can be in a // macro only if it is at the beginning of the macro. while (ArgLoc.isMacroID() && S.getSourceManager().isAtStartOfImmediateMacroExpansion(ArgLoc)) { ArgLoc = S.getSourceManager().getImmediateExpansionRange(ArgLoc).first; } if (LParen.isMacroID()) return; LParen = ArgLoc.getLocWithOffset(-1); S.Diag(CE->getLocStart(), diag::note_remove_move) << FixItHint::CreateRemoval(SourceRange(CallBegin, LParen)) << FixItHint::CreateRemoval(SourceRange(RParen, RParen)); } ExprResult InitializationSequence::Perform(Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Args, QualType *ResultType) { if (Failed()) { Diagnose(S, Entity, Kind, Args); return ExprError(); } if (!ZeroInitializationFixit.empty()) { unsigned DiagID = diag::err_default_init_const; if (Decl *D = Entity.getDecl()) if (S.getLangOpts().MSVCCompat && D->hasAttr<SelectAnyAttr>()) DiagID = diag::ext_default_init_const; // The initialization would have succeeded with this fixit. Since the fixit // is on the error, we need to build a valid AST in this case, so this isn't // handled in the Failed() branch above. QualType DestType = Entity.getType(); S.Diag(Kind.getLocation(), DiagID) << DestType << (bool)DestType->getAs<RecordType>() << FixItHint::CreateInsertion(ZeroInitializationFixitLoc, ZeroInitializationFixit); } if (getKind() == DependentSequence) { // If the declaration is a non-dependent, incomplete array type // that has an initializer, then its type will be completed once // the initializer is instantiated. 
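    // Editorial example of the case described above (not upstream code):
    //
    //   template <typename T> void f() {
    //     int arr[] = { T::a, T::b };   // bound unknowable until instantiation
    //   }
    //
    // The branch below rewrites the incomplete array type 'int []' into a
    // dependently-sized array type so the bound can be computed per
    // instantiation.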
if (ResultType && !Entity.getType()->isDependentType() && Args.size() == 1) { QualType DeclType = Entity.getType(); if (const IncompleteArrayType *ArrayT = S.Context.getAsIncompleteArrayType(DeclType)) { // FIXME: We don't currently have the ability to accurately // compute the length of an initializer list without // performing full type-checking of the initializer list // (since we have to determine where braces are implicitly // introduced and such). So, we fall back to making the array // type a dependently-sized array type with no specified // bound. if (isa<InitListExpr>((Expr *)Args[0])) { SourceRange Brackets; // Scavange the location of the brackets from the entity, if we can. if (DeclaratorDecl *DD = Entity.getDecl()) { if (TypeSourceInfo *TInfo = DD->getTypeSourceInfo()) { TypeLoc TL = TInfo->getTypeLoc(); if (IncompleteArrayTypeLoc ArrayLoc = TL.getAs<IncompleteArrayTypeLoc>()) Brackets = ArrayLoc.getBracketsRange(); } } *ResultType = S.Context.getDependentSizedArrayType(ArrayT->getElementType(), /*NumElts=*/nullptr, ArrayT->getSizeModifier(), ArrayT->getIndexTypeCVRQualifiers(), Brackets); } } } if (Kind.getKind() == InitializationKind::IK_Direct && !Kind.isExplicitCast()) { // Rebuild the ParenListExpr. SourceRange ParenRange = Kind.getParenRange(); return S.ActOnParenListExpr(ParenRange.getBegin(), ParenRange.getEnd(), Args); } assert(Kind.getKind() == InitializationKind::IK_Copy || Kind.isExplicitCast() || Kind.getKind() == InitializationKind::IK_DirectList); return ExprResult(Args[0]); } // No steps means no initialization. if (Steps.empty()) return ExprResult((Expr *)nullptr); if (S.getLangOpts().CPlusPlus11 && Entity.getType()->isReferenceType() && Args.size() == 1 && isa<InitListExpr>(Args[0]) && !Entity.isParameterKind()) { // Produce a C++98 compatibility warning if we are initializing a reference // from an initializer list. For parameters, we produce a better warning // elsewhere. Expr *Init = Args[0]; S.Diag(Init->getLocStart(), diag::warn_cxx98_compat_reference_list_init) << Init->getSourceRange(); } // Diagnose cases where we initialize a pointer to an array temporary, and the // pointer obviously outlives the temporary. if (Args.size() == 1 && Args[0]->getType()->isArrayType() && Entity.getType()->isPointerType() && InitializedEntityOutlivesFullExpression(Entity)) { Expr *Init = Args[0]; Expr::LValueClassification Kind = Init->ClassifyLValue(S.Context); if (Kind == Expr::LV_ClassTemporary || Kind == Expr::LV_ArrayTemporary) S.Diag(Init->getLocStart(), diag::warn_temporary_array_to_pointer_decay) << Init->getSourceRange(); } QualType DestType = Entity.getType().getNonReferenceType(); // FIXME: Ugly hack around the fact that Entity.getType() is not // the same as Entity.getDecl()->getType() in cases involving type merging, // and we want latter when it makes sense. if (ResultType) *ResultType = Entity.getDecl() ? Entity.getDecl()->getType() : Entity.getType(); ExprResult CurInit((Expr *)nullptr); // For initialization steps that start with a single initializer, // grab the only argument out the Args and place it into the "current" // initializer. 
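  // Editorial sketch (not upstream code) of typical step sequences:
  //
  //   double d = 1;              // roughly: [SK_ConversionSequence]
  //   const std::string &r = ""; // roughly: [SK_UserConversion,
  //                              //           SK_BindReferenceToTemporary]
  //
  // The switch below seeds CurInit from the lone argument where appropriate;
  // the loop that follows then executes each step in order, threading the
  // partially converted expression through CurInit.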
switch (Steps.front().Kind) { // HLSL Change Starts case SK_ListInitialization: { // In vector constructor cases, we may synthesize an InitListExpr assert(Args.size() == 1 || S.getLangOpts().HLSL); CurInit = Args[0]; Expr* CurInitExpr = CurInit.get(); if (!CurInitExpr) return ExprError(); if (CurInitExpr->getStmtClass() != Stmt::StmtClass::InitListExprClass) { assert(S.getLangOpts().HLSL); for (unsigned i = 0; i < Args.size(); ++i) { if (Args[i]->isLValue()) { Args[i] = ImplicitCastExpr::Create(S.Context, Args[i]->getType(), CK_LValueToRValue, Args[i], /*BasePath=*/0, VK_RValue); } } InitListExpr *castInit = new (S.getASTContext()) InitListExpr(S.getASTContext(), Kind.getParenRange().getBegin(), Args, Kind.getParenRange().getEnd()); castInit->sawVectorInitWithCXXFunctionalCastExpr(true); CurInit = castInit; } break; } // HLSL Change Ends case SK_ResolveAddressOfOverloadedFunction: case SK_CastDerivedToBaseRValue: case SK_CastDerivedToBaseXValue: case SK_CastDerivedToBaseLValue: case SK_BindReference: case SK_BindReferenceToTemporary: case SK_ExtraneousCopyToTemporary: case SK_UserConversion: case SK_QualificationConversionLValue: case SK_QualificationConversionXValue: case SK_QualificationConversionRValue: case SK_AtomicConversion: case SK_LValueToRValue: case SK_ConversionSequence: case SK_ConversionSequenceNoNarrowing: case SK_UnwrapInitList: case SK_RewrapInitList: case SK_CAssignment: case SK_StringInit: case SK_ObjCObjectConversion: case SK_ArrayInit: case SK_ParenthesizedArrayInit: case SK_PassByIndirectCopyRestore: case SK_PassByIndirectRestore: case SK_ProduceObjCObject: case SK_StdInitializerList: case SK_OCLSamplerInit: case SK_OCLZeroEvent: { assert(Args.size() == 1); CurInit = Args[0]; if (!CurInit.get()) return ExprError(); break; } case SK_ConstructorInitialization: case SK_ConstructorInitializationFromList: case SK_StdInitializerListConstructorCall: case SK_ZeroInitialization: break; } // Walk through the computed steps for the initialization sequence, // performing the specified conversions along the way. bool ConstructorInitRequiresZeroInit = false; for (step_iterator Step = step_begin(), StepEnd = step_end(); Step != StepEnd; ++Step) { if (CurInit.isInvalid()) return ExprError(); QualType SourceType = CurInit.get() ? CurInit.get()->getType() : QualType(); switch (Step->Kind) { case SK_ResolveAddressOfOverloadedFunction: // Overload resolution determined which function invoke; update the // initializer to reflect that choice. S.CheckAddressOfMemberAccess(CurInit.get(), Step->Function.FoundDecl); if (S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Kind.getLocation())) return ExprError(); CurInit = S.FixOverloadedFunctionReference(CurInit, Step->Function.FoundDecl, Step->Function.Function); break; case SK_CastDerivedToBaseRValue: case SK_CastDerivedToBaseXValue: case SK_CastDerivedToBaseLValue: { // We have a derived-to-base cast that produces either an rvalue or an // lvalue. Perform that cast. CXXCastPath BasePath; // Casts to inaccessible base classes are allowed with C-style casts. bool IgnoreBaseAccess = Kind.isCStyleOrFunctionalCast(); if (S.CheckDerivedToBaseConversion(SourceType, Step->Type, CurInit.get()->getLocStart(), CurInit.get()->getSourceRange(), &BasePath, IgnoreBaseAccess)) return ExprError(); ExprValueKind VK = Step->Kind == SK_CastDerivedToBaseLValue ? VK_LValue : (Step->Kind == SK_CastDerivedToBaseXValue ? 
VK_XValue : VK_RValue); CurInit = ImplicitCastExpr::Create(S.Context, Step->Type, CK_DerivedToBase, CurInit.get(), &BasePath, VK); break; } case SK_BindReference: // References cannot bind to bit-fields (C++ [dcl.init.ref]p5). if (CurInit.get()->refersToBitField()) { // We don't necessarily have an unambiguous source bit-field. FieldDecl *BitField = CurInit.get()->getSourceBitField(); S.Diag(Kind.getLocation(), diag::err_reference_bind_to_bitfield) << Entity.getType().isVolatileQualified() << (BitField ? BitField->getDeclName() : DeclarationName()) << (BitField != nullptr) << CurInit.get()->getSourceRange(); if (BitField) S.Diag(BitField->getLocation(), diag::note_bitfield_decl); return ExprError(); } if (CurInit.get()->refersToVectorElement()) { // References cannot bind to vector elements. S.Diag(Kind.getLocation(), diag::err_reference_bind_to_vector_element) << Entity.getType().isVolatileQualified() << CurInit.get()->getSourceRange(); PrintInitLocationNote(S, Entity); return ExprError(); } // Reference binding does not have any corresponding ASTs. // Check exception specifications if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType)) return ExprError(); // Even though we didn't materialize a temporary, the binding may still // extend the lifetime of a temporary. This happens if we bind a reference // to the result of a cast to reference type. if (const InitializedEntity *ExtendingEntity = getEntityForTemporaryLifetimeExtension(&Entity)) if (performReferenceExtension(CurInit.get(), ExtendingEntity)) warnOnLifetimeExtension(S, Entity, CurInit.get(), /*IsInitializerList=*/false, ExtendingEntity->getDecl()); break; case SK_BindReferenceToTemporary: { // Make sure the "temporary" is actually an rvalue. assert(CurInit.get()->isRValue() && "not a temporary"); // Check exception specifications if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType)) return ExprError(); // Materialize the temporary into memory. MaterializeTemporaryExpr *MTE = new (S.Context) MaterializeTemporaryExpr( Entity.getType().getNonReferenceType(), CurInit.get(), Entity.getType()->isLValueReferenceType()); // Maybe lifetime-extend the temporary's subobjects to match the // entity's lifetime. if (const InitializedEntity *ExtendingEntity = getEntityForTemporaryLifetimeExtension(&Entity)) if (performReferenceExtension(MTE, ExtendingEntity)) warnOnLifetimeExtension(S, Entity, CurInit.get(), /*IsInitializerList=*/false, ExtendingEntity->getDecl()); // If we're binding to an Objective-C object that has lifetime, we // need cleanups. Likewise if we're extending this temporary to automatic // storage duration -- we need to register its cleanup during the // full-expression's cleanups. if ((S.getLangOpts().ObjCAutoRefCount && MTE->getType()->isObjCLifetimeType()) || (MTE->getStorageDuration() == SD_Automatic && MTE->getType().isDestructedType())) S.ExprNeedsCleanups = true; CurInit = MTE; break; } case SK_ExtraneousCopyToTemporary: CurInit = CopyObject(S, Step->Type, Entity, CurInit, /*IsExtraneousCopy=*/true); break; case SK_UserConversion: { // We have a user-defined conversion that invokes either a constructor // or a conversion function. CastKind CastKind; bool IsCopy = false; FunctionDecl *Fn = Step->Function.Function; DeclAccessPair FoundFn = Step->Function.FoundDecl; bool HadMultipleCandidates = Step->Function.HadMultipleCandidates; bool CreatedObject = false; if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Fn)) { // Build a call to the selected constructor. 
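      // Editorial example (not upstream code): this branch handles converting
      // constructors, the else branch conversion operators:
      //
      //   struct B { B(int); };
      //   B b = 42;        // here: CK_ConstructorConversion via B::B(int)
      //
      //   struct C { operator int() const; };
      //   int i = C();     // else branch: CK_UserDefinedConversion via
      //                    // C::operator int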
SmallVector<Expr*, 8> ConstructorArgs; SourceLocation Loc = CurInit.get()->getLocStart(); CurInit.get(); // Ownership transferred into MultiExprArg, below. // Determine the arguments required to actually perform the constructor // call. Expr *Arg = CurInit.get(); if (S.CompleteConstructorCall(Constructor, MultiExprArg(&Arg, 1), Loc, ConstructorArgs)) return ExprError(); // Build an expression that constructs a temporary. CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor, ConstructorArgs, HadMultipleCandidates, /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); if (CurInit.isInvalid()) return ExprError(); S.CheckConstructorAccess(Kind.getLocation(), Constructor, Entity, FoundFn.getAccess()); if (S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation())) return ExprError(); CastKind = CK_ConstructorConversion; QualType Class = S.Context.getTypeDeclType(Constructor->getParent()); if (S.Context.hasSameUnqualifiedType(SourceType, Class) || S.IsDerivedFrom(SourceType, Class)) IsCopy = true; CreatedObject = true; } else { // Build a call to the conversion function. CXXConversionDecl *Conversion = cast<CXXConversionDecl>(Fn); S.CheckMemberOperatorAccess(Kind.getLocation(), CurInit.get(), nullptr, FoundFn); if (S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation())) return ExprError(); // FIXME: Should we move this initialization into a separate // derived-to-base conversion? I believe the answer is "no", because // we don't want to turn off access control here for c-style casts. ExprResult CurInitExprRes = S.PerformObjectArgumentInitialization(CurInit.get(), /*Qualifier=*/nullptr, FoundFn, Conversion); if(CurInitExprRes.isInvalid()) return ExprError(); CurInit = CurInitExprRes; // Build the actual call to the conversion function. CurInit = S.BuildCXXMemberCallExpr(CurInit.get(), FoundFn, Conversion, HadMultipleCandidates); if (CurInit.isInvalid() || !CurInit.get()) return ExprError(); CastKind = CK_UserDefinedConversion; CreatedObject = Conversion->getReturnType()->isRecordType(); } bool RequiresCopy = !IsCopy && !isReferenceBinding(Steps.back()); bool MaybeBindToTemp = RequiresCopy || shouldBindAsTemporary(Entity); if (!MaybeBindToTemp && CreatedObject && shouldDestroyTemporary(Entity)) { QualType T = CurInit.get()->getType(); if (const RecordType *Record = T->getAs<RecordType>()) { CXXDestructorDecl *Destructor = S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl())); S.CheckDestructorAccess(CurInit.get()->getLocStart(), Destructor, S.PDiag(diag::err_access_dtor_temp) << T); S.MarkFunctionReferenced(CurInit.get()->getLocStart(), Destructor); if (S.DiagnoseUseOfDecl(Destructor, CurInit.get()->getLocStart())) return ExprError(); } } CurInit = ImplicitCastExpr::Create(S.Context, CurInit.get()->getType(), CastKind, CurInit.get(), nullptr, CurInit.get()->getValueKind()); if (MaybeBindToTemp) CurInit = S.MaybeBindToTemporary(CurInit.getAs<Expr>()); if (RequiresCopy) CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity, CurInit, /*IsExtraneousCopy=*/false); break; } case SK_QualificationConversionLValue: case SK_QualificationConversionXValue: case SK_QualificationConversionRValue: { // Perform a qualification conversion; these can never go wrong. ExprValueKind VK = Step->Kind == SK_QualificationConversionLValue ? VK_LValue : (Step->Kind == SK_QualificationConversionXValue ? 
VK_XValue : VK_RValue); CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type, CK_NoOp, VK); break; } case SK_AtomicConversion: { assert(CurInit.get()->isRValue() && "cannot convert glvalue to atomic"); CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type, CK_NonAtomicToAtomic, VK_RValue); break; } case SK_LValueToRValue: { assert(CurInit.get()->isGLValue() && "cannot load from a prvalue"); CurInit = ImplicitCastExpr::Create(S.Context, Step->Type, CK_LValueToRValue, CurInit.get(), /*BasePath=*/nullptr, VK_RValue); break; } case SK_ConversionSequence: case SK_ConversionSequenceNoNarrowing: { Sema::CheckedConversionKind CCK = Kind.isCStyleCast()? Sema::CCK_CStyleCast : Kind.isFunctionalCast()? Sema::CCK_FunctionalCast : Kind.isExplicitCast()? Sema::CCK_OtherCast : Sema::CCK_ImplicitConversion; ExprResult CurInitExprRes = S.PerformImplicitConversion(CurInit.get(), Step->Type, *Step->ICS, getAssignmentAction(Entity), CCK); if (CurInitExprRes.isInvalid()) return ExprError(); CurInit = CurInitExprRes; if (Step->Kind == SK_ConversionSequenceNoNarrowing && S.getLangOpts().CPlusPlus && !CurInit.get()->isValueDependent()) DiagnoseNarrowingInInitList(S, *Step->ICS, SourceType, Entity.getType(), CurInit.get()); break; } case SK_ListInitialization: { InitListExpr *InitList = cast<InitListExpr>(CurInit.get()); // If we're not initializing the top-level entity, we need to create an // InitializeTemporary entity for our target type. QualType Ty = Step->Type; bool IsTemporary = !S.Context.hasSameType(Entity.getType(), Ty); InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(Ty); InitializedEntity InitEntity = IsTemporary ? TempEntity : Entity; // HLSL Change Starts - all analysis done - TODO semantic changes for IR if (S.getLangOpts().HLSL) { CurInit.get(); InitList->setType(InitEntity.getType()); CurInit = shouldBindAsTemporary(InitEntity) ? S.MaybeBindToTemporary(InitList) : InitList; // Hack: We must update *ResultType if available in order to set the // bounds of arrays, e.g. in 'int ar[] = {1, 2, 3};'. // Worst case: 'const int (&arref)[] = {1, 2, 3};'. if (ResultType && ResultType->getNonReferenceType()->isIncompleteArrayType()) { const IncompleteArrayType *IncompleteAT = S.getASTContext().getAsIncompleteArrayType( ResultType->getNonReferenceType()); QualType EltTy = IncompleteAT->getElementType(); unsigned arraySize = hlsl::CaculateInitListArraySizeForHLSL(&S, InitList, EltTy); if (arraySize) { llvm::APInt Size( /*numBits=*/32, arraySize); QualType AT = S.getASTContext().getConstantArrayType( EltTy, Size, ArrayType::ArraySizeModifier::Normal, /*IndexTypeQuals=*/0); *ResultType = AT; InitList->setType(AT); } } } else { // HLSL Change Ends code below is conditional InitListChecker PerformInitList(S, InitEntity, Kind, // HLSL Change - added Kind InitList, Ty, /*VerifyOnly=*/false); if (PerformInitList.HadError()) return ExprError(); // Hack: We must update *ResultType if available in order to set the // bounds of arrays, e.g. in 'int ar[] = {1, 2, 3};'. // Worst case: 'const int (&arref)[] = {1, 2, 3};'. if (ResultType && ResultType->getNonReferenceType()->isIncompleteArrayType()) { if ((*ResultType)->isRValueReferenceType()) Ty = S.Context.getRValueReferenceType(Ty); else if ((*ResultType)->isLValueReferenceType()) Ty = S.Context.getLValueReferenceType(Ty, (*ResultType)->getAs<LValueReferenceType>()->isSpelledAsLValue()); *ResultType = Ty; } InitListExpr *StructuredInitList = PerformInitList.getFullyStructuredList(); CurInit.get(); CurInit = shouldBindAsTemporary(InitEntity) ? 
S.MaybeBindToTemporary(StructuredInitList) : StructuredInitList; } // HLSL Change - end conditional break; } case SK_ConstructorInitializationFromList: { // When an initializer list is passed for a parameter of type "reference // to object", we don't get an EK_Temporary entity, but instead an // EK_Parameter entity with reference type. // FIXME: This is a hack. What we really should do is create a user // conversion step for this case, but this makes it considerably more // complicated. For now, this will do. InitializedEntity TempEntity = InitializedEntity::InitializeTemporary( Entity.getType().getNonReferenceType()); bool UseTemporary = Entity.getType()->isReferenceType(); assert(Args.size() == 1 && "expected a single argument for list init"); InitListExpr *InitList = cast<InitListExpr>(Args[0]); S.Diag(InitList->getExprLoc(), diag::warn_cxx98_compat_ctor_list_init) << InitList->getSourceRange(); MultiExprArg Arg(InitList->getInits(), InitList->getNumInits()); CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity : Entity, Kind, Arg, *Step, ConstructorInitRequiresZeroInit, /*IsListInitialization*/true, /*IsStdInitListInit*/false, InitList->getLBraceLoc(), InitList->getRBraceLoc()); break; } case SK_UnwrapInitList: CurInit = cast<InitListExpr>(CurInit.get())->getInit(0); break; case SK_RewrapInitList: { Expr *E = CurInit.get(); InitListExpr *Syntactic = Step->WrappingSyntacticList; InitListExpr *ILE = new (S.Context) InitListExpr(S.Context, Syntactic->getLBraceLoc(), E, Syntactic->getRBraceLoc()); ILE->setSyntacticForm(Syntactic); ILE->setType(E->getType()); ILE->setValueKind(E->getValueKind()); CurInit = ILE; break; } case SK_ConstructorInitialization: case SK_StdInitializerListConstructorCall: { // When an initializer list is passed for a parameter of type "reference // to object", we don't get an EK_Temporary entity, but instead an // EK_Parameter entity with reference type. // FIXME: This is a hack. What we really should do is create a user // conversion step for this case, but this makes it considerably more // complicated. For now, this will do. InitializedEntity TempEntity = InitializedEntity::InitializeTemporary( Entity.getType().getNonReferenceType()); bool UseTemporary = Entity.getType()->isReferenceType(); bool IsStdInitListInit = Step->Kind == SK_StdInitializerListConstructorCall; CurInit = PerformConstructorInitialization( S, UseTemporary ? TempEntity : Entity, Kind, Args, *Step, ConstructorInitRequiresZeroInit, /*IsListInitialization*/IsStdInitListInit, /*IsStdInitListInitialization*/IsStdInitListInit, /*LBraceLoc*/SourceLocation(), /*RBraceLoc*/SourceLocation()); break; } case SK_ZeroInitialization: { step_iterator NextStep = Step; ++NextStep; if (NextStep != StepEnd && (NextStep->Kind == SK_ConstructorInitialization || NextStep->Kind == SK_ConstructorInitializationFromList)) { // The need for zero-initialization is recorded directly into // the call to the object's constructor within the next step. 
ConstructorInitRequiresZeroInit = true; } else if (Kind.getKind() == InitializationKind::IK_Value && S.getLangOpts().CPlusPlus && !Kind.isImplicitValueInit()) { TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo(); if (!TSInfo) TSInfo = S.Context.getTrivialTypeSourceInfo(Step->Type, Kind.getRange().getBegin()); CurInit = new (S.Context) CXXScalarValueInitExpr( TSInfo->getType().getNonLValueExprType(S.Context), TSInfo, Kind.getRange().getEnd()); } else { CurInit = new (S.Context) ImplicitValueInitExpr(Step->Type); } break; } case SK_CAssignment: { QualType SourceType = CurInit.get()->getType(); ExprResult Result = CurInit; Sema::AssignConvertType ConvTy = S.CheckSingleAssignmentConstraints(Step->Type, Result, true, Entity.getKind() == InitializedEntity::EK_Parameter_CF_Audited); if (Result.isInvalid()) return ExprError(); CurInit = Result; // If this is a call, allow conversion to a transparent union. ExprResult CurInitExprRes = CurInit; if (ConvTy != Sema::Compatible && Entity.isParameterKind() && S.CheckTransparentUnionArgumentConstraints(Step->Type, CurInitExprRes) == Sema::Compatible) ConvTy = Sema::Compatible; if (CurInitExprRes.isInvalid()) return ExprError(); CurInit = CurInitExprRes; bool Complained; if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(), Step->Type, SourceType, CurInit.get(), getAssignmentAction(Entity, true), &Complained)) { PrintInitLocationNote(S, Entity); return ExprError(); } else if (Complained) PrintInitLocationNote(S, Entity); break; } case SK_StringInit: { QualType Ty = Step->Type; CheckStringInit(CurInit.get(), ResultType ? *ResultType : Ty, S.Context.getAsArrayType(Ty), S); break; } case SK_ObjCObjectConversion: CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type, CK_ObjCObjectLValueCast, CurInit.get()->getValueKind()); break; case SK_ArrayInit: // Okay: we checked everything before creating this step. Note that // this is a GNU extension. S.Diag(Kind.getLocation(), diag::ext_array_init_copy) << Step->Type << CurInit.get()->getType() << CurInit.get()->getSourceRange(); // If the destination type is an incomplete array type, update the // type accordingly. if (ResultType) { if (const IncompleteArrayType *IncompleteDest = S.Context.getAsIncompleteArrayType(Step->Type)) { if (const ConstantArrayType *ConstantSource = S.Context.getAsConstantArrayType(CurInit.get()->getType())) { *ResultType = S.Context.getConstantArrayType( IncompleteDest->getElementType(), ConstantSource->getSize(), ArrayType::Normal, 0); } } } break; case SK_ParenthesizedArrayInit: // Okay: we checked everything before creating this step. Note that // this is a GNU extension. S.Diag(Kind.getLocation(), diag::ext_array_init_parens) << CurInit.get()->getSourceRange(); break; case SK_PassByIndirectCopyRestore: case SK_PassByIndirectRestore: checkIndirectCopyRestoreSource(S, CurInit.get()); CurInit = new (S.Context) ObjCIndirectCopyRestoreExpr( CurInit.get(), Step->Type, Step->Kind == SK_PassByIndirectCopyRestore); break; case SK_ProduceObjCObject: CurInit = ImplicitCastExpr::Create(S.Context, Step->Type, CK_ARCProduceObject, CurInit.get(), nullptr, VK_RValue); break; case SK_StdInitializerList: { S.Diag(CurInit.get()->getExprLoc(), diag::warn_cxx98_compat_initializer_list_init) << CurInit.get()->getSourceRange(); // Materialize the temporary into memory. 
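      // Editorial example (not upstream code): for
      //
      //   std::initializer_list<int> il = { 1, 2, 3 };
      //
      // the braced list first initializes a 'const int [3]' backing array;
      // that array temporary is materialized here and wrapped in a
      // CXXStdInitializerListExpr below, with its lifetime extended if the
      // surrounding entity extends it.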
MaterializeTemporaryExpr *MTE = new (S.Context) MaterializeTemporaryExpr(CurInit.get()->getType(), CurInit.get(), /*BoundToLvalueReference=*/false); // Maybe lifetime-extend the array temporary's subobjects to match the // entity's lifetime. if (const InitializedEntity *ExtendingEntity = getEntityForTemporaryLifetimeExtension(&Entity)) if (performReferenceExtension(MTE, ExtendingEntity)) warnOnLifetimeExtension(S, Entity, CurInit.get(), /*IsInitializerList=*/true, ExtendingEntity->getDecl()); // Wrap it in a construction of a std::initializer_list<T>. CurInit = new (S.Context) CXXStdInitializerListExpr(Step->Type, MTE); // Bind the result, in case the library has given initializer_list a // non-trivial destructor. if (shouldBindAsTemporary(Entity)) CurInit = S.MaybeBindToTemporary(CurInit.get()); break; } case SK_OCLSamplerInit: { assert(Step->Type->isSamplerT() && "Sampler initialization on non-sampler type."); QualType SourceType = CurInit.get()->getType(); if (Entity.isParameterKind()) { if (!SourceType->isSamplerT()) S.Diag(Kind.getLocation(), diag::err_sampler_argument_required) << SourceType; } else if (Entity.getKind() != InitializedEntity::EK_Variable) { llvm_unreachable("Invalid EntityKind!"); } break; } case SK_OCLZeroEvent: { assert(Step->Type->isEventT() && "Event initialization on non-event type."); CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type, CK_ZeroToOCLEvent, CurInit.get()->getValueKind()); break; } } } // Diagnose non-fatal problems with the completed initialization. if (Entity.getKind() == InitializedEntity::EK_Member && cast<FieldDecl>(Entity.getDecl())->isBitField()) S.CheckBitFieldInitialization(Kind.getLocation(), cast<FieldDecl>(Entity.getDecl()), CurInit.get()); // Check for std::move on construction. if (const Expr *E = CurInit.get()) { CheckMoveOnConstruction(S, E, Entity.getKind() == InitializedEntity::EK_Result); } return CurInit; } /// Somewhere within T there is an uninitialized reference subobject. /// Dig it out and diagnose it. static bool DiagnoseUninitializedReference(Sema &S, SourceLocation Loc, QualType T) { if (T->isReferenceType()) { S.Diag(Loc, diag::err_reference_without_init) << T.getNonReferenceType(); return true; } CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); if (!RD || !RD->hasUninitializedReferenceMember()) return false; for (const auto *FI : RD->fields()) { if (FI->isUnnamedBitfield()) continue; if (DiagnoseUninitializedReference(S, FI->getLocation(), FI->getType())) { S.Diag(Loc, diag::note_value_initialization_here) << RD; return true; } } for (const auto &BI : RD->bases()) { if (DiagnoseUninitializedReference(S, BI.getLocStart(), BI.getType())) { S.Diag(Loc, diag::note_value_initialization_here) << RD; return true; } } return false; } //===----------------------------------------------------------------------===// // Diagnose initialization failures //===----------------------------------------------------------------------===// /// Emit notes associated with an initialization that failed due to a /// "simple" conversion failure. static void emitBadConversionNotes(Sema &S, const InitializedEntity &entity, Expr *op) { QualType destType = entity.getType(); if (destType.getNonReferenceType()->isObjCObjectPointerType() && op->getType()->isObjCObjectPointerType()) { // Emit a possible note about the conversion failing because the // operand is a message send with a related result type. 
S.EmitRelatedResultTypeNote(op); // Emit a possible note about a return failing because we're // expecting a related result type. if (entity.getKind() == InitializedEntity::EK_Result) S.EmitRelatedResultTypeNoteForReturn(destType); } } static void diagnoseListInit(Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind, // HLSL Change - added Kind InitListExpr *InitList) { QualType DestType = Entity.getType(); QualType E; if (S.getLangOpts().CPlusPlus11 && S.isStdInitializerList(DestType, &E)) { QualType ArrayType = S.Context.getConstantArrayType( E.withConst(), llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()), InitList->getNumInits()), clang::ArrayType::Normal, 0); InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(ArrayType); return diagnoseListInit(S, HiddenArray, Kind, InitList); // HLSL Change - added Kind } if (DestType->isReferenceType()) { // A list-initialization failure for a reference means that we tried to // create a temporary of the inner type (per [dcl.init.list]p3.6) and the // inner initialization failed. QualType T = DestType->getAs<ReferenceType>()->getPointeeType(); diagnoseListInit(S, InitializedEntity::InitializeTemporary(T), Kind, InitList); SourceLocation Loc = InitList->getLocStart(); if (auto *D = Entity.getDecl()) Loc = D->getLocation(); S.Diag(Loc, diag::note_in_reference_temporary_list_initializer) << T; return; } InitListChecker DiagnoseInitList(S, Entity, Kind, InitList, DestType, // HLSL Change - added Kind /*VerifyOnly=*/false); assert(DiagnoseInitList.HadError() && "Inconsistent init list check result."); } bool InitializationSequence::Diagnose(Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind, ArrayRef<Expr *> Args) { if (!Failed()) return false; QualType DestType = Entity.getType(); switch (Failure) { case FK_TooManyInitsForReference: // FIXME: Customize for the initialized entity? if (Args.empty()) { // Dig out the reference subobject which is uninitialized and diagnose it. // If this is value-initialization, this could be nested some way within // the target type. assert(Kind.getKind() == InitializationKind::IK_Value || DestType->isReferenceType()); bool Diagnosed = DiagnoseUninitializedReference(S, Kind.getLocation(), DestType); assert(Diagnosed && "couldn't find uninitialized reference to diagnose"); (void)Diagnosed; } else // FIXME: diagnostic below could be better! S.Diag(Kind.getLocation(), diag::err_reference_has_multiple_inits) << SourceRange(Args.front()->getLocStart(), Args.back()->getLocEnd()); break; case FK_ArrayNeedsInitList: S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list) << 0; break; case FK_ArrayNeedsInitListOrStringLiteral: S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list) << 1; break; case FK_ArrayNeedsInitListOrWideStringLiteral: S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list) << 2; break; case FK_NarrowStringIntoWideCharArray: S.Diag(Kind.getLocation(), diag::err_array_init_narrow_string_into_wchar); break; case FK_WideStringIntoCharArray: S.Diag(Kind.getLocation(), diag::err_array_init_wide_string_into_char); break; case FK_IncompatWideStringIntoWideChar: S.Diag(Kind.getLocation(), diag::err_array_init_incompat_wide_string_into_wchar); break; case FK_ArrayTypeMismatch: case FK_NonConstantArrayInit: S.Diag(Kind.getLocation(), (Failure == FK_ArrayTypeMismatch ? 
diag::err_array_init_different_type : diag::err_array_init_non_constant_array)) << DestType.getNonReferenceType() << Args[0]->getType() << Args[0]->getSourceRange(); break; case FK_VariableLengthArrayHasInitializer: S.Diag(Kind.getLocation(), diag::err_variable_object_no_init) << Args[0]->getSourceRange(); break; case FK_AddressOfOverloadFailed: { DeclAccessPair Found; S.ResolveAddressOfOverloadedFunction(Args[0], DestType.getNonReferenceType(), true, Found); break; } case FK_ReferenceInitOverloadFailed: case FK_UserConversionOverloadFailed: switch (FailedOverloadResult) { case OR_Ambiguous: if (Failure == FK_UserConversionOverloadFailed) S.Diag(Kind.getLocation(), diag::err_typecheck_ambiguous_condition) << Args[0]->getType() << DestType << Args[0]->getSourceRange(); else S.Diag(Kind.getLocation(), diag::err_ref_init_ambiguous) << DestType << Args[0]->getType() << Args[0]->getSourceRange(); FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args); break; case OR_No_Viable_Function: if (!S.RequireCompleteType(Kind.getLocation(), DestType.getNonReferenceType(), diag::err_typecheck_nonviable_condition_incomplete, Args[0]->getType(), Args[0]->getSourceRange())) S.Diag(Kind.getLocation(), diag::err_typecheck_nonviable_condition) << Args[0]->getType() << Args[0]->getSourceRange() << DestType.getNonReferenceType(); FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args); break; case OR_Deleted: { S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function) << Args[0]->getType() << DestType.getNonReferenceType() << Args[0]->getSourceRange(); OverloadCandidateSet::iterator Best; OverloadingResult Ovl = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best, true); if (Ovl == OR_Deleted) { S.NoteDeletedFunction(Best->Function); } else { llvm_unreachable("Inconsistent overload resolution?"); } break; } case OR_Success: llvm_unreachable("Conversion did not fail!"); } break; case FK_NonConstLValueReferenceBindingToTemporary: if (isa<InitListExpr>(Args[0])) { S.Diag(Kind.getLocation(), diag::err_lvalue_reference_bind_to_initlist) << DestType.getNonReferenceType().isVolatileQualified() << DestType.getNonReferenceType() << Args[0]->getSourceRange(); break; } LLVM_FALLTHROUGH; // HLSL Change case FK_NonConstLValueReferenceBindingToUnrelated: S.Diag(Kind.getLocation(), Failure == FK_NonConstLValueReferenceBindingToTemporary ? 
diag::err_lvalue_reference_bind_to_temporary : diag::err_lvalue_reference_bind_to_unrelated) << DestType.getNonReferenceType().isVolatileQualified() << DestType.getNonReferenceType() << Args[0]->getType() << Args[0]->getSourceRange(); break; case FK_RValueReferenceBindingToLValue: S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref) << DestType.getNonReferenceType() << Args[0]->getType() << Args[0]->getSourceRange(); break; case FK_ReferenceInitDropsQualifiers: { QualType SourceType = Args[0]->getType(); QualType NonRefType = DestType.getNonReferenceType(); Qualifiers DroppedQualifiers = SourceType.getQualifiers() - NonRefType.getQualifiers(); S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals) << SourceType << NonRefType << DroppedQualifiers.getCVRQualifiers() << Args[0]->getSourceRange(); break; } case FK_ReferenceInitFailed: S.Diag(Kind.getLocation(), diag::err_reference_bind_failed) << DestType.getNonReferenceType() << Args[0]->isLValue() << Args[0]->getType() << Args[0]->getSourceRange(); emitBadConversionNotes(S, Entity, Args[0]); break; case FK_ConversionFailed: { QualType FromType = Args[0]->getType(); PartialDiagnostic PDiag = S.PDiag(diag::err_init_conversion_failed) << (int)Entity.getKind() << DestType << Args[0]->isLValue() << FromType << Args[0]->getSourceRange(); S.HandleFunctionTypeMismatch(PDiag, FromType, DestType); S.Diag(Kind.getLocation(), PDiag); emitBadConversionNotes(S, Entity, Args[0]); break; } case FK_ConversionFromPropertyFailed: // No-op. This error has already been reported. break; case FK_TooManyInitsForScalar: { SourceRange R; auto *InitList = dyn_cast<InitListExpr>(Args[0]); if (InitList && InitList->getNumInits() == 1) R = SourceRange(InitList->getInit(0)->getLocEnd(), InitList->getLocEnd()); else R = SourceRange(Args.front()->getLocEnd(), Args.back()->getLocEnd()); R.setBegin(S.getLocForEndOfToken(R.getBegin())); if (Kind.isCStyleOrFunctionalCast()) S.Diag(Kind.getLocation(), diag::err_builtin_func_cast_more_than_one_arg) << R; else S.Diag(Kind.getLocation(), diag::err_excess_initializers) << /*scalar=*/2 << R; break; } case FK_ReferenceBindingToInitList: S.Diag(Kind.getLocation(), diag::err_reference_bind_init_list) << DestType.getNonReferenceType() << Args[0]->getSourceRange(); break; case FK_InitListBadDestinationType: S.Diag(Kind.getLocation(), diag::err_init_list_bad_dest_type) << (DestType->isRecordType()) << DestType << Args[0]->getSourceRange(); break; case FK_ListConstructorOverloadFailed: case FK_ConstructorOverloadFailed: { SourceRange ArgsRange; if (Args.size()) ArgsRange = SourceRange(Args.front()->getLocStart(), Args.back()->getLocEnd()); if (Failure == FK_ListConstructorOverloadFailed) { assert(Args.size() == 1 && "List construction from other than 1 argument."); InitListExpr *InitList = cast<InitListExpr>(Args[0]); Args = MultiExprArg(InitList->getInits(), InitList->getNumInits()); } // FIXME: Using "DestType" for the entity we're printing is probably // bad. switch (FailedOverloadResult) { case OR_Ambiguous: S.Diag(Kind.getLocation(), diag::err_ovl_ambiguous_init) << DestType << ArgsRange; FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates, Args); break; case OR_No_Viable_Function: if (Kind.getKind() == InitializationKind::IK_Default && (Entity.getKind() == InitializedEntity::EK_Base || Entity.getKind() == InitializedEntity::EK_Member) && isa<CXXConstructorDecl>(S.CurContext)) { // This is implicit default initialization of a member or // base within a constructor. 
If no viable function was // found, notify the user that she needs to explicitly // initialize this base/member. CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(S.CurContext); if (Entity.getKind() == InitializedEntity::EK_Base) { S.Diag(Kind.getLocation(), diag::err_missing_default_ctor) << (Constructor->getInheritedConstructor() ? 2 : Constructor->isImplicit() ? 1 : 0) << S.Context.getTypeDeclType(Constructor->getParent()) << /*base=*/0 << Entity.getType(); RecordDecl *BaseDecl = Entity.getBaseSpecifier()->getType()->getAs<RecordType>() ->getDecl(); S.Diag(BaseDecl->getLocation(), diag::note_previous_decl) << S.Context.getTagDeclType(BaseDecl); } else { S.Diag(Kind.getLocation(), diag::err_missing_default_ctor) << (Constructor->getInheritedConstructor() ? 2 : Constructor->isImplicit() ? 1 : 0) << S.Context.getTypeDeclType(Constructor->getParent()) << /*member=*/1 << Entity.getName(); S.Diag(Entity.getDecl()->getLocation(), diag::note_member_declared_at); if (const RecordType *Record = Entity.getType()->getAs<RecordType>()) S.Diag(Record->getDecl()->getLocation(), diag::note_previous_decl) << S.Context.getTagDeclType(Record->getDecl()); } break; } S.Diag(Kind.getLocation(), diag::err_ovl_no_viable_function_in_init) << DestType << ArgsRange; FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates, Args); break; case OR_Deleted: { OverloadCandidateSet::iterator Best; OverloadingResult Ovl = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best); if (Ovl != OR_Deleted) { S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init) << true << DestType << ArgsRange; llvm_unreachable("Inconsistent overload resolution?"); break; } // If this is a defaulted or implicitly-declared function, then // it was implicitly deleted. Make it clear that the deletion was // implicit. if (S.isImplicitlyDeleted(Best->Function)) S.Diag(Kind.getLocation(), diag::err_ovl_deleted_special_init) << S.getSpecialMember(cast<CXXMethodDecl>(Best->Function)) << DestType << ArgsRange; else S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init) << true << DestType << ArgsRange; S.NoteDeletedFunction(Best->Function); break; } case OR_Success: llvm_unreachable("Conversion did not fail!"); } } break; case FK_DefaultInitOfConst: if (Entity.getKind() == InitializedEntity::EK_Member && isa<CXXConstructorDecl>(S.CurContext)) { // This is implicit default-initialization of a const member in // a constructor. Complain that it needs to be explicitly // initialized. CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(S.CurContext); S.Diag(Kind.getLocation(), diag::err_uninitialized_member_in_ctor) << (Constructor->getInheritedConstructor() ? 2 : Constructor->isImplicit() ? 1 : 0) << S.Context.getTypeDeclType(Constructor->getParent()) << /*const=*/1 << Entity.getName(); S.Diag(Entity.getDecl()->getLocation(), diag::note_previous_decl) << Entity.getName(); } else { S.Diag(Kind.getLocation(), diag::err_default_init_const) << DestType << (bool)DestType->getAs<RecordType>(); } break; case FK_Incomplete: S.RequireCompleteType(Kind.getLocation(), FailedIncompleteType, diag::err_init_incomplete_type); break; case FK_ListInitializationFailed: { // Run the init list checker again to emit diagnostics. // HLSL Change Starts: allow for the possibility of having to construct an InitListExpr. if (!S.getLangOpts().HLSL) { InitListExpr *InitList = cast<InitListExpr>(Args[0]); diagnoseListInit(S, Entity, Kind, InitList); // HLSL Change - added Kind } else { // For HLSL, InitializeInitSequenceForHLSL should report error messages. 
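      // Editorial note: in the non-HLSL branch above, a failure such as
      //
      //   int a[2] = { 1, 2, 3 };   // too many initializers
      //
      // was originally detected by InitListChecker running in VerifyOnly
      // mode; diagnoseListInit re-runs the checker with VerifyOnly=false
      // purely to emit the diagnostics that the verification pass suppressed.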
} // HLSL Change Ends break; } case FK_PlaceholderType: { // FIXME: Already diagnosed! break; } case FK_ExplicitConstructor: { S.Diag(Kind.getLocation(), diag::err_selected_explicit_constructor) << Args[0]->getSourceRange(); OverloadCandidateSet::iterator Best; OverloadingResult Ovl = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best); (void)Ovl; assert(Ovl == OR_Success && "Inconsistent overload resolution"); CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function); S.Diag(CtorDecl->getLocation(), diag::note_constructor_declared_here); break; } } PrintInitLocationNote(S, Entity); return true; } void InitializationSequence::dump(raw_ostream &OS) const { switch (SequenceKind) { case FailedSequence: { OS << "Failed sequence: "; switch (Failure) { case FK_TooManyInitsForReference: OS << "too many initializers for reference"; break; case FK_ArrayNeedsInitList: OS << "array requires initializer list"; break; case FK_ArrayNeedsInitListOrStringLiteral: OS << "array requires initializer list or string literal"; break; case FK_ArrayNeedsInitListOrWideStringLiteral: OS << "array requires initializer list or wide string literal"; break; case FK_NarrowStringIntoWideCharArray: OS << "narrow string into wide char array"; break; case FK_WideStringIntoCharArray: OS << "wide string into char array"; break; case FK_IncompatWideStringIntoWideChar: OS << "incompatible wide string into wide char array"; break; case FK_ArrayTypeMismatch: OS << "array type mismatch"; break; case FK_NonConstantArrayInit: OS << "non-constant array initializer"; break; case FK_AddressOfOverloadFailed: OS << "address of overloaded function failed"; break; case FK_ReferenceInitOverloadFailed: OS << "overload resolution for reference initialization failed"; break; case FK_NonConstLValueReferenceBindingToTemporary: OS << "non-const lvalue reference bound to temporary"; break; case FK_NonConstLValueReferenceBindingToUnrelated: OS << "non-const lvalue reference bound to unrelated type"; break; case FK_RValueReferenceBindingToLValue: OS << "rvalue reference bound to an lvalue"; break; case FK_ReferenceInitDropsQualifiers: OS << "reference initialization drops qualifiers"; break; case FK_ReferenceInitFailed: OS << "reference initialization failed"; break; case FK_ConversionFailed: OS << "conversion failed"; break; case FK_ConversionFromPropertyFailed: OS << "conversion from property failed"; break; case FK_TooManyInitsForScalar: OS << "too many initializers for scalar"; break; case FK_ReferenceBindingToInitList: OS << "referencing binding to initializer list"; break; case FK_InitListBadDestinationType: OS << "initializer list for non-aggregate, non-scalar type"; break; case FK_UserConversionOverloadFailed: OS << "overloading failed for user-defined conversion"; break; case FK_ConstructorOverloadFailed: OS << "constructor overloading failed"; break; case FK_DefaultInitOfConst: OS << "default initialization of a const variable"; break; case FK_Incomplete: OS << "initialization of incomplete type"; break; case FK_ListInitializationFailed: OS << "list initialization checker failure"; break; case FK_VariableLengthArrayHasInitializer: OS << "variable length array has an initializer"; break; case FK_PlaceholderType: OS << "initializer expression isn't contextually valid"; break; case FK_ListConstructorOverloadFailed: OS << "list constructor overloading failed"; break; case FK_ExplicitConstructor: OS << "list copy initialization chose explicit constructor"; break; } OS << '\n'; return; } case DependentSequence: OS << 
"Dependent sequence\n"; return; case NormalSequence: OS << "Normal sequence: "; break; } for (step_iterator S = step_begin(), SEnd = step_end(); S != SEnd; ++S) { if (S != step_begin()) { OS << " -> "; } switch (S->Kind) { case SK_ResolveAddressOfOverloadedFunction: OS << "resolve address of overloaded function"; break; case SK_CastDerivedToBaseRValue: OS << "derived-to-base case (rvalue" << S->Type.getAsString() << ")"; break; case SK_CastDerivedToBaseXValue: OS << "derived-to-base case (xvalue" << S->Type.getAsString() << ")"; break; case SK_CastDerivedToBaseLValue: OS << "derived-to-base case (lvalue" << S->Type.getAsString() << ")"; break; case SK_BindReference: OS << "bind reference to lvalue"; break; case SK_BindReferenceToTemporary: OS << "bind reference to a temporary"; break; case SK_ExtraneousCopyToTemporary: OS << "extraneous C++03 copy to temporary"; break; case SK_UserConversion: OS << "user-defined conversion via " << *S->Function.Function; break; case SK_QualificationConversionRValue: OS << "qualification conversion (rvalue)"; break; case SK_QualificationConversionXValue: OS << "qualification conversion (xvalue)"; break; case SK_QualificationConversionLValue: OS << "qualification conversion (lvalue)"; break; case SK_AtomicConversion: OS << "non-atomic-to-atomic conversion"; break; case SK_LValueToRValue: OS << "load (lvalue to rvalue)"; break; case SK_ConversionSequence: OS << "implicit conversion sequence ("; S->ICS->dump(); // FIXME: use OS OS << ")"; break; case SK_ConversionSequenceNoNarrowing: OS << "implicit conversion sequence with narrowing prohibited ("; S->ICS->dump(); // FIXME: use OS OS << ")"; break; case SK_ListInitialization: OS << "list aggregate initialization"; break; case SK_UnwrapInitList: OS << "unwrap reference initializer list"; break; case SK_RewrapInitList: OS << "rewrap reference initializer list"; break; case SK_ConstructorInitialization: OS << "constructor initialization"; break; case SK_ConstructorInitializationFromList: OS << "list initialization via constructor"; break; case SK_ZeroInitialization: OS << "zero initialization"; break; case SK_CAssignment: OS << "C assignment"; break; case SK_StringInit: OS << "string initialization"; break; case SK_ObjCObjectConversion: OS << "Objective-C object conversion"; break; case SK_ArrayInit: OS << "array initialization"; break; case SK_ParenthesizedArrayInit: OS << "parenthesized array initialization"; break; case SK_PassByIndirectCopyRestore: OS << "pass by indirect copy and restore"; break; case SK_PassByIndirectRestore: OS << "pass by indirect restore"; break; case SK_ProduceObjCObject: OS << "Objective-C object retension"; break; case SK_StdInitializerList: OS << "std::initializer_list from initializer list"; break; case SK_StdInitializerListConstructorCall: OS << "list initialization from std::initializer_list"; break; case SK_OCLSamplerInit: OS << "OpenCL sampler_t from integer constant"; break; case SK_OCLZeroEvent: OS << "OpenCL event_t from zero"; break; } OS << " [" << S->Type.getAsString() << ']'; } OS << '\n'; } void InitializationSequence::dump() const { dump(llvm::errs()); } static void DiagnoseNarrowingInInitList(Sema &S, const ImplicitConversionSequence &ICS, QualType PreNarrowingType, QualType EntityType, const Expr *PostInit) { const StandardConversionSequence *SCS = nullptr; switch (ICS.getKind()) { case ImplicitConversionSequence::StandardConversion: SCS = &ICS.Standard; break; case ImplicitConversionSequence::UserDefinedConversion: SCS = &ICS.UserDefined.After; break; case 
ImplicitConversionSequence::AmbiguousConversion: case ImplicitConversionSequence::EllipsisConversion: case ImplicitConversionSequence::BadConversion: return; } // C++11 [dcl.init.list]p7: Check whether this is a narrowing conversion. APValue ConstantValue; QualType ConstantType; switch (SCS->getNarrowingKind(S.Context, PostInit, ConstantValue, ConstantType)) { case NK_Not_Narrowing: // No narrowing occurred. return; case NK_Type_Narrowing: // This was a floating-to-integer conversion, which is always considered a // narrowing conversion even if the value is a constant and can be // represented exactly as an integer. S.Diag(PostInit->getLocStart(), (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11) ? diag::warn_init_list_type_narrowing : diag::ext_init_list_type_narrowing) << PostInit->getSourceRange() << PreNarrowingType.getLocalUnqualifiedType() << EntityType.getLocalUnqualifiedType(); break; case NK_Constant_Narrowing: // A constant value was narrowed. S.Diag(PostInit->getLocStart(), (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11) ? diag::warn_init_list_constant_narrowing : diag::ext_init_list_constant_narrowing) << PostInit->getSourceRange() << ConstantValue.getAsString(S.getASTContext(), ConstantType) << EntityType.getLocalUnqualifiedType(); break; case NK_Variable_Narrowing: // A variable's value may have been narrowed. S.Diag(PostInit->getLocStart(), (S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus11) ? diag::warn_init_list_variable_narrowing : diag::ext_init_list_variable_narrowing) << PostInit->getSourceRange() << PreNarrowingType.getLocalUnqualifiedType() << EntityType.getLocalUnqualifiedType(); break; } SmallString<128> StaticCast; llvm::raw_svector_ostream OS(StaticCast); OS << "static_cast<"; if (const TypedefType *TT = EntityType->getAs<TypedefType>()) { // It's important to use the typedef's name if there is one so that the // fixit doesn't break code using types like int64_t. // // FIXME: This will break if the typedef requires qualification. But // getQualifiedNameAsString() includes non-machine-parsable components. OS << *TT->getDecl(); } else if (const BuiltinType *BT = EntityType->getAs<BuiltinType>()) OS << BT->getName(S.getLangOpts()); else { // Oops, we didn't find the actual type of the variable. Don't emit a fixit // with a broken cast. 
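  // Editorial example (not upstream code): when the entity type is
  // recoverable, the fix-it assembled here rewrites, e.g.,
  //
  //   int64_t i{ 0.5 };   // narrowing: double to int64_t
  //
  // into
  //
  //   int64_t i{ static_cast<int64_t>(0.5) };
  //
  // preferring the typedef's name so the suggestion remains portable.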
return; } OS << ">("; S.Diag(PostInit->getLocStart(), diag::note_init_list_narrowing_silence) << PostInit->getSourceRange() << FixItHint::CreateInsertion(PostInit->getLocStart(), OS.str()) << FixItHint::CreateInsertion( S.getLocForEndOfToken(PostInit->getLocEnd()), ")"); } //===----------------------------------------------------------------------===// // Initialization helper functions //===----------------------------------------------------------------------===// bool Sema::CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init) { if (Init.isInvalid()) return false; Expr *InitE = Init.get(); assert(InitE && "No initialization expression"); InitializationKind Kind = InitializationKind::CreateCopy(InitE->getLocStart(), SourceLocation()); InitializationSequence Seq(*this, Entity, Kind, InitE); return !Seq.Failed(); } ExprResult Sema::PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList, bool AllowExplicit) { if (Init.isInvalid()) return ExprError(); Expr *InitE = Init.get(); assert(InitE && "No initialization expression?"); if (EqualLoc.isInvalid()) EqualLoc = InitE->getLocStart(); InitializationKind Kind = InitializationKind::CreateCopy(InitE->getLocStart(), EqualLoc, AllowExplicit); InitializationSequence Seq(*this, Entity, Kind, InitE, TopLevelOfInitList); ExprResult Result = Seq.Perform(*this, Entity, Kind, InitE); return Result; }
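// Editorial sketch (not upstream code): the client-facing pattern for this
// file's machinery is the Entity/Kind/Sequence triple, e.g.:
//
//   InitializedEntity Entity = InitializedEntity::InitializeVariable(VD);
//   InitializationKind Kind =
//       InitializationKind::CreateCopy(InitE->getLocStart(), EqualLoc);
//   InitializationSequence Seq(S, Entity, Kind, InitE);
//   if (!Seq.Failed())
//     ExprResult Result = Seq.Perform(S, Entity, Kind, InitE);
//
// PerformCopyInitialization above is a thin convenience wrapper around
// exactly this sequence of calls.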
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaTemplate.cpp
//===------- SemaTemplate.cpp - Semantic Analysis for C++ Templates -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for C++ templates.
//===----------------------------------------------------------------------===//

#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/SemaHLSL.h" // HLSL Change
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"

using namespace clang;
using namespace sema;

// Exported for use by Parser.
SourceRange
clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
                              unsigned N) {
  if (!N) return SourceRange();
  return SourceRange(Ps[0]->getTemplateLoc(), Ps[N-1]->getRAngleLoc());
}

/// \brief Determine whether the declaration found is acceptable as the name
/// of a template and, if so, return that template declaration. Otherwise,
/// returns NULL.
static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
                                           NamedDecl *Orig,
                                           bool AllowFunctionTemplates) {
  NamedDecl *D = Orig->getUnderlyingDecl();

  if (isa<TemplateDecl>(D)) {
    if (!AllowFunctionTemplates && isa<FunctionTemplateDecl>(D))
      return nullptr;

    return Orig;
  }

  if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
    // C++ [temp.local]p1:
    //   Like normal (non-template) classes, class templates have an
    //   injected-class-name (Clause 9). The injected-class-name
    //   can be used with or without a template-argument-list. When
    //   it is used without a template-argument-list, it is
    //   equivalent to the injected-class-name followed by the
    //   template-parameters of the class template enclosed in
    //   <>. When it is used with a template-argument-list, it
    //   refers to the specified class template specialization,
    //   which could be the current specialization or another
    //   specialization.
    if (Record->isInjectedClassName()) {
      Record = cast<CXXRecordDecl>(Record->getDeclContext());
      if (Record->getDescribedClassTemplate())
        return Record->getDescribedClassTemplate();

      if (ClassTemplateSpecializationDecl *Spec
            = dyn_cast<ClassTemplateSpecializationDecl>(Record))
        return Spec->getSpecializedTemplate();
    }

    return nullptr;
  }

  return nullptr;
}

void Sema::FilterAcceptableTemplateNames(LookupResult &R,
                                         bool AllowFunctionTemplates) {
  // The set of class templates we've already seen.
llvm::SmallPtrSet<ClassTemplateDecl *, 8> ClassTemplates; LookupResult::Filter filter = R.makeFilter(); while (filter.hasNext()) { NamedDecl *Orig = filter.next(); NamedDecl *Repl = isAcceptableTemplateName(Context, Orig, AllowFunctionTemplates); if (!Repl) filter.erase(); else if (Repl != Orig) { // C++ [temp.local]p3: // A lookup that finds an injected-class-name (10.2) can result in an // ambiguity in certain cases (for example, if it is found in more than // one base class). If all of the injected-class-names that are found // refer to specializations of the same class template, and if the name // is used as a template-name, the reference refers to the class // template itself and not a specialization thereof, and is not // ambiguous. if (ClassTemplateDecl *ClassTmpl = dyn_cast<ClassTemplateDecl>(Repl)) if (!ClassTemplates.insert(ClassTmpl).second) { filter.erase(); continue; } // FIXME: we promote access to public here as a workaround to // the fact that LookupResult doesn't let us remember that we // found this template through a particular injected class name, // which means we end up doing nasty things to the invariants. // Pretending that access is public is *much* safer. filter.replace(Repl, AS_public); } } filter.done(); } bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates) { for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) if (isAcceptableTemplateName(Context, *I, AllowFunctionTemplates)) return true; return false; } TemplateNameKind Sema::isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectTypePtr, bool EnteringContext, TemplateTy &TemplateResult, bool &MemberOfUnknownSpecialization) { assert(getLangOpts().CPlusPlus && "No template names in C!"); DeclarationName TName; MemberOfUnknownSpecialization = false; switch (Name.getKind()) { case UnqualifiedId::IK_Identifier: TName = DeclarationName(Name.Identifier); break; case UnqualifiedId::IK_OperatorFunctionId: TName = Context.DeclarationNames.getCXXOperatorName( Name.OperatorFunctionId.Operator); break; case UnqualifiedId::IK_LiteralOperatorId: TName = Context.DeclarationNames.getCXXLiteralOperatorName(Name.Identifier); break; default: return TNK_Non_template; } QualType ObjectType = ObjectTypePtr.get(); LookupResult R(*this, TName, Name.getLocStart(), LookupOrdinaryName); LookupTemplateName(R, S, SS, ObjectType, EnteringContext, MemberOfUnknownSpecialization); if (R.empty()) return TNK_Non_template; if (R.isAmbiguous()) { // Suppress diagnostics; we'll redo this lookup later. R.suppressDiagnostics(); // FIXME: we might have ambiguous templates, in which case we // should at least parse them properly! return TNK_Non_template; } TemplateName Template; TemplateNameKind TemplateKind; unsigned ResultCount = R.end() - R.begin(); if (ResultCount > 1) { // We assume that we'll preserve the qualifier from a function // template name in other ways. Template = Context.getOverloadedTemplateName(R.begin(), R.end()); TemplateKind = TNK_Function_template; // We'll do this lookup again later. R.suppressDiagnostics(); } else { TemplateDecl *TD = cast<TemplateDecl>((*R.begin())->getUnderlyingDecl()); if (SS.isSet() && !SS.isInvalid()) { NestedNameSpecifier *Qualifier = SS.getScopeRep(); Template = Context.getQualifiedTemplateName(Qualifier, hasTemplateKeyword, TD); } else { Template = TemplateName(TD); } if (isa<FunctionTemplateDecl>(TD)) { TemplateKind = TNK_Function_template; // We'll do this lookup again later. 
      R.suppressDiagnostics();
    } else {
      assert(isa<ClassTemplateDecl>(TD) || isa<TemplateTemplateParmDecl>(TD) ||
             isa<TypeAliasTemplateDecl>(TD) || isa<VarTemplateDecl>(TD));
      TemplateKind =
          isa<VarTemplateDecl>(TD) ? TNK_Var_template : TNK_Type_template;
    }
  }

  TemplateResult = TemplateTy::make(Template);
  return TemplateKind;
}

bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
                                       SourceLocation IILoc,
                                       Scope *S,
                                       const CXXScopeSpec *SS,
                                       TemplateTy &SuggestedTemplate,
                                       TemplateNameKind &SuggestedKind) {
  // We can't recover unless there's a dependent scope specifier preceding the
  // template name.
  // FIXME: Typo correction?
  if (!SS || !SS->isSet() || !isDependentScopeSpecifier(*SS) ||
      computeDeclContext(*SS))
    return false;

  // The code is missing a 'template' keyword prior to the dependent template
  // name.
  NestedNameSpecifier *Qualifier = (NestedNameSpecifier*)SS->getScopeRep();
  Diag(IILoc, diag::err_template_kw_missing)
    << Qualifier << II.getName()
    << FixItHint::CreateInsertion(IILoc, "template ");
  SuggestedTemplate
    = TemplateTy::make(Context.getDependentTemplateName(Qualifier, &II));
  SuggestedKind = TNK_Dependent_template_name;
  return true;
}

void Sema::LookupTemplateName(LookupResult &Found,
                              Scope *S, CXXScopeSpec &SS,
                              QualType ObjectType,
                              bool EnteringContext,
                              bool &MemberOfUnknownSpecialization) {
  // Determine where to perform name lookup
  MemberOfUnknownSpecialization = false;
  DeclContext *LookupCtx = nullptr;
  bool isDependent = false;
  if (!ObjectType.isNull()) {
    // This nested-name-specifier occurs in a member access expression, e.g.,
    // x->B::f, and we are looking into the type of the object.
    assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
    LookupCtx = computeDeclContext(ObjectType);
    isDependent = ObjectType->isDependentType();
    assert((isDependent || !ObjectType->isIncompleteType() ||
            ObjectType->castAs<TagType>()->isBeingDefined()) &&
           "Caller should have completed object type");

    // Template names cannot appear inside an Objective-C class or object type.
    if (ObjectType->isObjCObjectOrInterfaceType()) {
      Found.clear();
      return;
    }
  } else if (SS.isSet()) {
    // This nested-name-specifier occurs after another nested-name-specifier,
    // so look into the context associated with the prior
    // nested-name-specifier.
    LookupCtx = computeDeclContext(SS, EnteringContext);
    isDependent = isDependentScopeSpecifier(SS);

    // The declaration context must be complete.
    if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
      return;
  }

  bool ObjectTypeSearchedInScope = false;
  bool AllowFunctionTemplatesInLookup = true;
  if (LookupCtx) {
    // Perform "qualified" name lookup into the declaration context we
    // computed, which is either the type of the base of a member access
    // expression or the declaration context associated with a prior
    // nested-name-specifier.
    LookupQualifiedName(Found, LookupCtx);

    if (!ObjectType.isNull() && Found.empty()) {
      // C++ [basic.lookup.classref]p1:
      //   In a class member access expression (5.2.5), if the . or -> token is
      //   immediately followed by an identifier followed by a <, the
      //   identifier must be looked up to determine whether the < is the
      //   beginning of a template argument list (14.2) or a less-than
      //   operator. The identifier is first looked up in the class of the
      //   object expression. If the identifier is not found, it is then looked
      //   up in the context of the entire postfix-expression and shall name a
      //   class or function template.
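      // Editorial illustration (hypothetical code, not from the original
      // source): in 'x->set<int>()' where 'set' is not a member of x's class,
      // 'set' is looked up in the scope of the whole postfix-expression below
      // to decide whether the '<' begins a template argument list.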
      if (S) LookupName(Found, S);
      ObjectTypeSearchedInScope = true;
      AllowFunctionTemplatesInLookup = false;
    }
  } else if (isDependent && (!S || ObjectType.isNull())) {
    // We cannot look into a dependent object type or nested name
    // specifier.
    MemberOfUnknownSpecialization = true;
    return;
  } else {
    // Perform unqualified name lookup in the current scope.
    LookupName(Found, S);

    if (!ObjectType.isNull())
      AllowFunctionTemplatesInLookup = false;
  }

  if (Found.empty() && !isDependent) {
    // If we did not find any names, attempt to correct any typos.
    DeclarationName Name = Found.getLookupName();
    Found.clear();
    // Simple filter callback that, for keywords, only accepts the C++ *_cast
    auto FilterCCC = llvm::make_unique<CorrectionCandidateCallback>();
    FilterCCC->WantTypeSpecifiers = false;
    FilterCCC->WantExpressionKeywords = false;
    FilterCCC->WantRemainingKeywords = false;
    FilterCCC->WantCXXNamedCasts = true;
    if (TypoCorrection Corrected = CorrectTypo(
            Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
            std::move(FilterCCC), CTK_ErrorRecovery, LookupCtx)) {
      Found.setLookupName(Corrected.getCorrection());
      if (Corrected.getCorrectionDecl())
        Found.addDecl(Corrected.getCorrectionDecl());
      FilterAcceptableTemplateNames(Found);
      if (!Found.empty()) {
        if (LookupCtx) {
          std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
          bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
                                  Name.getAsString() == CorrectedStr;
          diagnoseTypo(Corrected, PDiag(diag::err_no_member_template_suggest)
                                    << Name << LookupCtx << DroppedSpecifier
                                    << SS.getRange());
        } else {
          diagnoseTypo(Corrected, PDiag(diag::err_no_template_suggest) << Name);
        }
      }
    } else {
      Found.setLookupName(Name);
    }
  }

  FilterAcceptableTemplateNames(Found, AllowFunctionTemplatesInLookup);
  if (Found.empty()) {
    if (isDependent)
      MemberOfUnknownSpecialization = true;
    return;
  }

  if (S && !ObjectType.isNull() && !ObjectTypeSearchedInScope &&
      !getLangOpts().CPlusPlus11) {
    // C++03 [basic.lookup.classref]p1:
    //   [...] If the lookup in the class of the object expression finds a
    //   template, the name is also looked up in the context of the entire
    //   postfix-expression and [...]
    //
    // Note: C++11 does not perform this second lookup.
    LookupResult FoundOuter(*this, Found.getLookupName(), Found.getNameLoc(),
                            LookupOrdinaryName);
    LookupName(FoundOuter, S);
    FilterAcceptableTemplateNames(FoundOuter, /*AllowFunctionTemplates=*/false);

    if (FoundOuter.empty()) {
      //   - if the name is not found, the name found in the class of the
      //     object expression is used, otherwise
    } else if (!FoundOuter.getAsSingle<ClassTemplateDecl>() ||
               FoundOuter.isAmbiguous()) {
      //   - if the name is found in the context of the entire
      //     postfix-expression and does not name a class template, the name
      //     found in the class of the object expression is used, otherwise
      FoundOuter.clear();
    } else if (!Found.isSuppressingDiagnostics()) {
      //   - if the name found is a class template, it must refer to the same
      //     entity as the one found in the class of the object expression,
      //     otherwise the program is ill-formed.
      if (!Found.isSingleResult() ||
          Found.getFoundDecl()->getCanonicalDecl()
            != FoundOuter.getFoundDecl()->getCanonicalDecl()) {
        Diag(Found.getNameLoc(),
             diag::ext_nested_name_member_ref_lookup_ambiguous)
          << Found.getLookupName()
          << ObjectType;
        Diag(Found.getRepresentativeDecl()->getLocation(),
             diag::note_ambig_member_ref_object_type)
          << ObjectType;
        Diag(FoundOuter.getFoundDecl()->getLocation(),
             diag::note_ambig_member_ref_scope);

        // Recover by taking the template that we found in the object
        // expression's type.
} } } } /// ActOnDependentIdExpression - Handle a dependent id-expression that /// was just parsed. This is only possible with an explicit scope /// specifier naming a dependent type. ExprResult Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs) { DeclContext *DC = getFunctionLevelDeclContext(); if (!isAddressOfOperand && isa<CXXMethodDecl>(DC) && cast<CXXMethodDecl>(DC)->isInstance()) { QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context); // Since the 'this' expression is synthesized, we don't need to // perform the double-lookup check. NamedDecl *FirstQualifierInScope = nullptr; // HLSL Change begin - This is a reference. return CXXDependentScopeMemberExpr::Create( Context, /*This*/ nullptr, ThisType, /*IsArrow*/ !getLangOpts().HLSL, /*Op*/ SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc, FirstQualifierInScope, NameInfo, TemplateArgs); // HLSL Change end - This is a reference. } return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs); } ExprResult Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs) { return DependentScopeDeclRefExpr::Create( Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo, TemplateArgs); } /// DiagnoseTemplateParameterShadow - Produce a diagnostic complaining /// that the template parameter 'PrevDecl' is being shadowed by a new /// declaration at location Loc. Returns true to indicate that this is /// an error, and false otherwise. void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) { assert(PrevDecl->isTemplateParameter() && "Not a template parameter"); // Microsoft Visual C++ permits template parameters to be shadowed. if (getLangOpts().MicrosoftExt) return; // C++ [temp.local]p4: // A template-parameter shall not be redeclared within its // scope (including nested scopes). Diag(Loc, diag::err_template_param_shadow) << cast<NamedDecl>(PrevDecl)->getDeclName(); Diag(PrevDecl->getLocation(), diag::note_template_param_here); return; } /// AdjustDeclIfTemplate - If the given decl happens to be a template, reset /// the parameter D to reference the templated declaration and return a pointer /// to the template declaration. Otherwise, do nothing to D and return null. 
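/// For illustration (editorial note, not in the original source): given a D
/// that is the ClassTemplateDecl for 'template<class T> struct S { };', this
/// resets D to the templated CXXRecordDecl for 'S' and returns the
/// ClassTemplateDecl; for a non-template declaration it returns null and
/// leaves D untouched.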
TemplateDecl *Sema::AdjustDeclIfTemplate(Decl *&D) { if (TemplateDecl *Temp = dyn_cast_or_null<TemplateDecl>(D)) { D = Temp->getTemplatedDecl(); return Temp; } return nullptr; } ParsedTemplateArgument ParsedTemplateArgument::getTemplatePackExpansion( SourceLocation EllipsisLoc) const { assert(Kind == Template && "Only template template arguments can be pack expansions here"); assert(getAsTemplate().get().containsUnexpandedParameterPack() && "Template template argument pack expansion without packs"); ParsedTemplateArgument Result(*this); Result.EllipsisLoc = EllipsisLoc; return Result; } static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef, const ParsedTemplateArgument &Arg) { switch (Arg.getKind()) { case ParsedTemplateArgument::Type: { TypeSourceInfo *DI; QualType T = SemaRef.GetTypeFromParser(Arg.getAsType(), &DI); if (!DI) DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getLocation()); return TemplateArgumentLoc(TemplateArgument(T), DI); } case ParsedTemplateArgument::NonType: { Expr *E = static_cast<Expr *>(Arg.getAsExpr()); return TemplateArgumentLoc(TemplateArgument(E), E); } case ParsedTemplateArgument::Template: { TemplateName Template = Arg.getAsTemplate().get(); TemplateArgument TArg; if (Arg.getEllipsisLoc().isValid()) TArg = TemplateArgument(Template, Optional<unsigned int>()); else TArg = Template; return TemplateArgumentLoc(TArg, Arg.getScopeSpec().getWithLocInContext( SemaRef.Context), Arg.getLocation(), Arg.getEllipsisLoc()); } } llvm_unreachable("Unhandled parsed template argument"); } /// \brief Translates template arguments as provided by the parser /// into template arguments used by semantic analysis. void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn, TemplateArgumentListInfo &TemplateArgs) { for (unsigned I = 0, Last = TemplateArgsIn.size(); I != Last; ++I) TemplateArgs.addArgument(translateTemplateArgument(*this, TemplateArgsIn[I])); } static void maybeDiagnoseTemplateParameterShadow(Sema &SemaRef, Scope *S, SourceLocation Loc, IdentifierInfo *Name) { NamedDecl *PrevDecl = SemaRef.LookupSingleName( S, Name, Loc, Sema::LookupOrdinaryName, Sema::ForRedeclaration); if (PrevDecl && PrevDecl->isTemplateParameter()) SemaRef.DiagnoseTemplateParameterShadow(Loc, PrevDecl); } /// ActOnTypeParameter - Called when a C++ template type parameter /// (e.g., "typename T") has been parsed. Typename specifies whether /// the keyword "typename" was used to declare the type parameter /// (otherwise, "class" was used), and KeyLoc is the location of the /// "class" or "typename" keyword. ParamName is the name of the /// parameter (NULL indicates an unnamed template parameter) and /// ParamNameLoc is the location of the parameter name (if any). /// If the type parameter has a default argument, it will be added /// later via ActOnTypeParameterDefault. 
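/// For illustration (editorial note, not in the original source): parsing
/// 'template<typename... Ts = int> struct S;' reaches this callback with a
/// valid EllipsisLoc, so the default argument is diagnosed with
/// err_template_param_pack_default_arg and dropped, per C++0x [temp.param]p9.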
Decl *Sema::ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg) { assert(S->isTemplateParamScope() && "Template type parameter not in template parameter scope!"); bool Invalid = false; SourceLocation Loc = ParamNameLoc; if (!ParamName) Loc = KeyLoc; bool IsParameterPack = EllipsisLoc.isValid(); TemplateTypeParmDecl *Param = TemplateTypeParmDecl::Create(Context, Context.getTranslationUnitDecl(), KeyLoc, Loc, Depth, Position, ParamName, Typename, IsParameterPack); Param->setAccess(AS_public); if (Invalid) Param->setInvalidDecl(); if (ParamName) { maybeDiagnoseTemplateParameterShadow(*this, S, ParamNameLoc, ParamName); // Add the template parameter into the current scope. S->AddDecl(Param); IdResolver.AddDecl(Param); } // C++0x [temp.param]p9: // A default template-argument may be specified for any kind of // template-parameter that is not a template parameter pack. if (DefaultArg && IsParameterPack) { Diag(EqualLoc, diag::err_template_param_pack_default_arg); DefaultArg = ParsedType(); } // Handle the default argument, if provided. if (DefaultArg) { TypeSourceInfo *DefaultTInfo; GetTypeFromParser(DefaultArg, &DefaultTInfo); assert(DefaultTInfo && "expected source information for type"); // Check for unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(Loc, DefaultTInfo, UPPC_DefaultArgument)) return Param; // Check the template argument itself. if (CheckTemplateArgument(Param, DefaultTInfo)) { Param->setInvalidDecl(); return Param; } Param->setDefaultArgument(DefaultTInfo); } return Param; } /// \brief Check that the type of a non-type template parameter is /// well-formed. /// /// \returns the (possibly-promoted) parameter type if valid; /// otherwise, produces a diagnostic and returns a NULL type. QualType Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) { // We don't allow variably-modified types as the type of non-type template // parameters. if (T->isVariablyModifiedType()) { Diag(Loc, diag::err_variably_modified_nontype_template_param) << T; return QualType(); } // C++ [temp.param]p4: // // A non-type template-parameter shall have one of the following // (optionally cv-qualified) types: // // -- integral or enumeration type, if (T->isIntegralOrEnumerationType() || // -- pointer to object or pointer to function, T->isPointerType() || // -- reference to object or reference to function, T->isReferenceType() || // -- pointer to member, T->isMemberPointerType() || // -- std::nullptr_t. T->isNullPtrType() || // If T is a dependent type, we can't do the check now, so we // assume that it is well-formed. T->isDependentType()) { // C++ [temp.param]p5: The top-level cv-qualifiers on the template-parameter // are ignored when determining its type. return T.getUnqualifiedType(); } // C++ [temp.param]p8: // // A non-type template-parameter of type "array of T" or // "function returning T" is adjusted to be of type "pointer to // T" or "pointer to function returning T", respectively. 
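  // Editorial illustration (not in the original source): under the adjustment
  // below, 'template<int A[4]> struct X;' gives A the adjusted type 'int *',
  // and 'template<void F()> struct Y;' gives F the type 'void (*)()'.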
else if (T->isArrayType() || T->isFunctionType()) return Context.getDecayedType(T); Diag(Loc, diag::err_template_nontype_parm_bad_type) << T; return QualType(); } Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *Default) { TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S); QualType T = TInfo->getType(); assert(S->isTemplateParamScope() && "Non-type template parameter not in template parameter scope!"); bool Invalid = false; T = CheckNonTypeTemplateParameterType(T, D.getIdentifierLoc()); if (T.isNull()) { T = Context.IntTy; // Recover with an 'int' type. Invalid = true; } IdentifierInfo *ParamName = D.getIdentifier(); bool IsParameterPack = D.hasEllipsis(); NonTypeTemplateParmDecl *Param = NonTypeTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(), D.getLocStart(), D.getIdentifierLoc(), Depth, Position, ParamName, T, IsParameterPack, TInfo); Param->setAccess(AS_public); if (Invalid) Param->setInvalidDecl(); if (ParamName) { maybeDiagnoseTemplateParameterShadow(*this, S, D.getIdentifierLoc(), ParamName); // Add the template parameter into the current scope. S->AddDecl(Param); IdResolver.AddDecl(Param); } // C++0x [temp.param]p9: // A default template-argument may be specified for any kind of // template-parameter that is not a template parameter pack. if (Default && IsParameterPack) { Diag(EqualLoc, diag::err_template_param_pack_default_arg); Default = nullptr; } // Check the well-formedness of the default template argument, if provided. if (Default) { // Check for unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument)) return Param; TemplateArgument Converted; ExprResult DefaultRes = CheckTemplateArgument(Param, Param->getType(), Default, Converted); if (DefaultRes.isInvalid()) { Param->setInvalidDecl(); return Param; } Default = DefaultRes.get(); Param->setDefaultArgument(Default); } return Param; } /// ActOnTemplateTemplateParameter - Called when a C++ template template /// parameter (e.g. T in template <template \<typename> class T> class array) /// has been parsed. S is the current scope. Decl *Sema::ActOnTemplateTemplateParameter(Scope* S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *Name, SourceLocation NameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument Default) { assert(S->isTemplateParamScope() && "Template template parameter not in template parameter scope!"); // Construct the parameter object. bool IsParameterPack = EllipsisLoc.isValid(); TemplateTemplateParmDecl *Param = TemplateTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(), NameLoc.isInvalid()? TmpLoc : NameLoc, Depth, Position, IsParameterPack, Name, Params); Param->setAccess(AS_public); // If the template template parameter has a name, then link the identifier // into the scope and lookup mechanisms. if (Name) { maybeDiagnoseTemplateParameterShadow(*this, S, NameLoc, Name); S->AddDecl(Param); IdResolver.AddDecl(Param); } if (Params->size() == 0) { Diag(Param->getLocation(), diag::err_template_template_parm_no_parms) << SourceRange(Params->getLAngleLoc(), Params->getRAngleLoc()); Param->setInvalidDecl(); } // C++0x [temp.param]p9: // A default template-argument may be specified for any kind of // template-parameter that is not a template parameter pack. 
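  // Editorial illustration (hypothetical code, not in the original source):
  // 'template<template<class> class... TT = Vec> struct S;' reaches the check
  // below with a parameter pack carrying a default, which is diagnosed and
  // dropped.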
if (IsParameterPack && !Default.isInvalid()) { Diag(EqualLoc, diag::err_template_param_pack_default_arg); Default = ParsedTemplateArgument(); } if (!Default.isInvalid()) { // Check only that we have a template template argument. We don't want to // try to check well-formedness now, because our template template parameter // might have dependent types in its template parameters, which we wouldn't // be able to match now. // // If none of the template template parameter's template arguments mention // other template parameters, we could actually perform more checking here. // However, it isn't worth doing. TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default); if (DefaultArg.getArgument().getAsTemplate().isNull()) { Diag(DefaultArg.getLocation(), diag::err_template_arg_not_class_template) << DefaultArg.getSourceRange(); return Param; } // Check for unexpanded parameter packs. if (DiagnoseUnexpandedParameterPack(DefaultArg.getLocation(), DefaultArg.getArgument().getAsTemplate(), UPPC_DefaultArgument)) return Param; Param->setDefaultArgument(Context, DefaultArg); } return Param; } /// ActOnTemplateParameterList - Builds a TemplateParameterList that /// contains the template parameters in Params/NumParams. TemplateParameterList * Sema::ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc) { if (ExportLoc.isValid()) Diag(ExportLoc, diag::warn_template_export_unsupported); return TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc, (NamedDecl**)Params, NumParams, RAngleLoc); } static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) { if (SS.isSet()) T->setQualifierInfo(SS.getWithLocInContext(T->getASTContext())); } DeclResult Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList** OuterTemplateParamLists, SkipBodyInfo *SkipBody) { assert(TemplateParams && TemplateParams->size() > 0 && "No template parameters"); assert(TUK != TUK_Reference && "Can only declare or define class templates"); bool Invalid = false; // Check that we can declare a template here. if (CheckTemplateDeclScope(S, TemplateParams)) return true; TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec); assert(Kind != TTK_Enum && "can't build template of enumerated type"); // There is no such thing as an unnamed class template. if (!Name) { Diag(KWLoc, diag::err_template_unnamed_class); return true; } // Find any previous declaration with this name. For a friend with no // scope explicitly specified, we only look for tag declarations (per // C++11 [basic.lookup.elab]p2). DeclContext *SemanticContext; LookupResult Previous(*this, Name, NameLoc, (SS.isEmpty() && TUK == TUK_Friend) ? LookupTagName : LookupOrdinaryName, ForRedeclaration); if (SS.isNotEmpty() && !SS.isInvalid()) { SemanticContext = computeDeclContext(SS, true); if (!SemanticContext) { // FIXME: Horrible, horrible hack! We can't currently represent this // in the AST, and historically we have just ignored such friend // class templates, so don't complain here. Diag(NameLoc, TUK == TUK_Friend ? 
diag::warn_template_qualified_friend_ignored : diag::err_template_qualified_declarator_no_match) << SS.getScopeRep() << SS.getRange(); return TUK != TUK_Friend; } if (RequireCompleteDeclContext(SS, SemanticContext)) return true; // If we're adding a template to a dependent context, we may need to // rebuilding some of the types used within the template parameter list, // now that we know what the current instantiation is. if (SemanticContext->isDependentContext()) { ContextRAII SavedContext(*this, SemanticContext); if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams)) Invalid = true; } else if (TUK != TUK_Friend && TUK != TUK_Reference) diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc); LookupQualifiedName(Previous, SemanticContext); } else { SemanticContext = CurContext; // C++14 [class.mem]p14: // If T is the name of a class, then each of the following shall have a // name different from T: // -- every member template of class T if (TUK != TUK_Friend && DiagnoseClassNameShadow(SemanticContext, DeclarationNameInfo(Name, NameLoc))) return true; LookupName(Previous, S); } if (Previous.isAmbiguous()) return true; NamedDecl *PrevDecl = nullptr; if (Previous.begin() != Previous.end()) PrevDecl = (*Previous.begin())->getUnderlyingDecl(); // If there is a previous declaration with the same name, check // whether this is a valid redeclaration. ClassTemplateDecl *PrevClassTemplate = dyn_cast_or_null<ClassTemplateDecl>(PrevDecl); // We may have found the injected-class-name of a class template, // class template partial specialization, or class template specialization. // In these cases, grab the template that is being defined or specialized. if (!PrevClassTemplate && PrevDecl && isa<CXXRecordDecl>(PrevDecl) && cast<CXXRecordDecl>(PrevDecl)->isInjectedClassName()) { PrevDecl = cast<CXXRecordDecl>(PrevDecl->getDeclContext()); PrevClassTemplate = cast<CXXRecordDecl>(PrevDecl)->getDescribedClassTemplate(); if (!PrevClassTemplate && isa<ClassTemplateSpecializationDecl>(PrevDecl)) { PrevClassTemplate = cast<ClassTemplateSpecializationDecl>(PrevDecl) ->getSpecializedTemplate(); } } if (TUK == TUK_Friend) { // C++ [namespace.memdef]p3: // [...] When looking for a prior declaration of a class or a function // declared as a friend, and when the name of the friend class or // function is neither a qualified name nor a template-id, scopes outside // the innermost enclosing namespace scope are not considered. if (!SS.isSet()) { DeclContext *OutermostContext = CurContext; while (!OutermostContext->isFileContext()) OutermostContext = OutermostContext->getLookupParent(); if (PrevDecl && (OutermostContext->Equals(PrevDecl->getDeclContext()) || OutermostContext->Encloses(PrevDecl->getDeclContext()))) { SemanticContext = PrevDecl->getDeclContext(); } else { // Declarations in outer scopes don't matter. However, the outermost // context we computed is the semantic context for our new // declaration. PrevDecl = PrevClassTemplate = nullptr; SemanticContext = OutermostContext; // Check that the chosen semantic context doesn't already contain a // declaration of this name as a non-tag type. 
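        // Editorial illustration (hypothetical, not in the original source):
        // for 'namespace N { int F; struct X { template<class T> friend
        // struct F; }; }', the re-lookup below records the non-tag 'F' in N
        // so the later redeclaration checks can diagnose the conflict, since
        // C++ [temp]p5 forbids a class template sharing its name with any
        // other entity in the same scope.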
Previous.clear(LookupOrdinaryName); DeclContext *LookupContext = SemanticContext; while (LookupContext->isTransparentContext()) LookupContext = LookupContext->getLookupParent(); LookupQualifiedName(Previous, LookupContext); if (Previous.isAmbiguous()) return true; if (Previous.begin() != Previous.end()) PrevDecl = (*Previous.begin())->getUnderlyingDecl(); } } } else if (PrevDecl && !isDeclInScope(Previous.getRepresentativeDecl(), SemanticContext, S, SS.isValid())) PrevDecl = PrevClassTemplate = nullptr; if (auto *Shadow = dyn_cast_or_null<UsingShadowDecl>( PrevDecl ? Previous.getRepresentativeDecl() : nullptr)) { if (SS.isEmpty() && !(PrevClassTemplate && PrevClassTemplate->getDeclContext()->getRedeclContext()->Equals( SemanticContext->getRedeclContext()))) { Diag(KWLoc, diag::err_using_decl_conflict_reverse); Diag(Shadow->getTargetDecl()->getLocation(), diag::note_using_decl_target); Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl) << 0; // Recover by ignoring the old declaration. PrevDecl = PrevClassTemplate = nullptr; } } if (PrevClassTemplate) { // Ensure that the template parameter lists are compatible. Skip this check // for a friend in a dependent context: the template parameter list itself // could be dependent. if (!(TUK == TUK_Friend && CurContext->isDependentContext()) && !TemplateParameterListsAreEqual(TemplateParams, PrevClassTemplate->getTemplateParameters(), /*Complain=*/true, TPL_TemplateMatch)) return true; // C++ [temp.class]p4: // In a redeclaration, partial specialization, explicit // specialization or explicit instantiation of a class template, // the class-key shall agree in kind with the original class // template declaration (7.1.5.3). RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl(); if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind, TUK == TUK_Definition, KWLoc, Name)) { Diag(KWLoc, diag::err_use_with_wrong_tag) << Name << FixItHint::CreateReplacement(KWLoc, PrevRecordDecl->getKindName()); Diag(PrevRecordDecl->getLocation(), diag::note_previous_use); Kind = PrevRecordDecl->getTagKind(); } // Check for redefinition of this class template. if (TUK == TUK_Definition) { if (TagDecl *Def = PrevRecordDecl->getDefinition()) { // If we have a prior definition that is not visible, treat this as // simply making that previous definition visible. NamedDecl *Hidden = nullptr; if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) { SkipBody->ShouldSkip = true; auto *Tmpl = cast<CXXRecordDecl>(Hidden)->getDescribedClassTemplate(); assert(Tmpl && "original definition of a class template is not a " "class template?"); makeMergedDefinitionVisible(Hidden, KWLoc); makeMergedDefinitionVisible(Tmpl, KWLoc); return Def; } Diag(NameLoc, diag::err_redefinition) << Name; Diag(Def->getLocation(), diag::note_previous_definition); // FIXME: Would it make sense to try to "forget" the previous // definition, as part of error recovery? return true; } } } else if (PrevDecl && PrevDecl->isTemplateParameter()) { // Maybe we will complain about the shadowed template parameter. DiagnoseTemplateParameterShadow(NameLoc, PrevDecl); // Just pretend that we didn't see the previous declaration. PrevDecl = nullptr; } else if (PrevDecl) { // C++ [temp]p5: // A class template shall not have the same name as any other // template, class, function, object, enumeration, enumerator, // namespace, or type in the same scope (3.3), except as specified // in (14.5.4). 
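    // Editorial illustration (not in the original source): declaring
    // 'int S;' and then 'template<class T> struct S;' in the same scope
    // lands here and is rejected with err_redefinition_different_kind.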
Diag(NameLoc, diag::err_redefinition_different_kind) << Name; Diag(PrevDecl->getLocation(), diag::note_previous_definition); return true; } // Check the template parameter list of this declaration, possibly // merging in the template parameter list from the previous class // template declaration. Skip this check for a friend in a dependent // context, because the template parameter list might be dependent. if (!(TUK == TUK_Friend && CurContext->isDependentContext()) && CheckTemplateParameterList( TemplateParams, PrevClassTemplate ? PrevClassTemplate->getTemplateParameters() : nullptr, (SS.isSet() && SemanticContext && SemanticContext->isRecord() && SemanticContext->isDependentContext()) ? TPC_ClassTemplateMember : TUK == TUK_Friend ? TPC_FriendClassTemplate : TPC_ClassTemplate)) Invalid = true; if (SS.isSet()) { // If the name of the template was qualified, we must be defining the // template out-of-line. if (!SS.isInvalid() && !Invalid && !PrevClassTemplate) { Diag(NameLoc, TUK == TUK_Friend ? diag::err_friend_decl_does_not_match : diag::err_member_decl_does_not_match) << Name << SemanticContext << /*IsDefinition*/true << SS.getRange(); Invalid = true; } } CXXRecordDecl *NewClass = CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name, PrevClassTemplate? PrevClassTemplate->getTemplatedDecl() : nullptr, /*DelayTypeCreation=*/true); SetNestedNameSpecifier(NewClass, SS); if (NumOuterTemplateParamLists > 0) NewClass->setTemplateParameterListsInfo(Context, NumOuterTemplateParamLists, OuterTemplateParamLists); // Add alignment attributes if necessary; these attributes are checked when // the ASTContext lays out the structure. if (TUK == TUK_Definition) { AddAlignmentAttributesForRecord(NewClass); AddMsStructLayoutForRecord(NewClass); } ClassTemplateDecl *NewTemplate = ClassTemplateDecl::Create(Context, SemanticContext, NameLoc, DeclarationName(Name), TemplateParams, NewClass, PrevClassTemplate); NewClass->setDescribedClassTemplate(NewTemplate); if (ModulePrivateLoc.isValid()) NewTemplate->setModulePrivate(); // Build the type for the class template declaration now. QualType T = NewTemplate->getInjectedClassNameSpecialization(); T = Context.getInjectedClassNameType(NewClass, T); assert(T->isDependentType() && "Class template type is not dependent?"); (void)T; // If we are providing an explicit specialization of a member that is a // class template, make a note of that. if (PrevClassTemplate && PrevClassTemplate->getInstantiatedFromMemberTemplate()) PrevClassTemplate->setMemberSpecialization(); // Set the access specifier. if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord()) SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS); // Set the lexical context of these templates NewClass->setLexicalDeclContext(CurContext); NewTemplate->setLexicalDeclContext(CurContext); if (TUK == TUK_Definition) NewClass->startDefinition(); if (Attr) ProcessDeclAttributeList(S, NewClass, Attr); if (PrevClassTemplate) mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl()); AddPushedVisibilityAttribute(NewClass); if (TUK != TUK_Friend) { // Per C++ [basic.scope.temp]p2, skip the template parameter scopes. 
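    // Editorial note (not in the original source): at this point S is the
    // scope holding the template parameters (e.g. 'T' in
    // 'template<class T> struct S;'); the template name itself must be
    // pushed into the enclosing declaration scope found by the loop below.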
Scope *Outer = S; while ((Outer->getFlags() & Scope::TemplateParamScope) != 0) Outer = Outer->getParent(); PushOnScopeChains(NewTemplate, Outer); } else { if (PrevClassTemplate && PrevClassTemplate->getAccess() != AS_none) { NewTemplate->setAccess(PrevClassTemplate->getAccess()); NewClass->setAccess(PrevClassTemplate->getAccess()); } NewTemplate->setObjectOfFriendDecl(); // Friend templates are visible in fairly strange ways. if (!CurContext->isDependentContext()) { DeclContext *DC = SemanticContext->getRedeclContext(); DC->makeDeclVisibleInContext(NewTemplate); if (Scope *EnclosingScope = getScopeForDeclContext(S, DC)) PushOnScopeChains(NewTemplate, EnclosingScope, /* AddToContext = */ false); } FriendDecl *Friend = FriendDecl::Create( Context, CurContext, NewClass->getLocation(), NewTemplate, FriendLoc); Friend->setAccess(AS_public); CurContext->addDecl(Friend); } if (Invalid) { NewTemplate->setInvalidDecl(); NewClass->setInvalidDecl(); } ActOnDocumentableDecl(NewTemplate); return NewTemplate; } /// \brief Diagnose the presence of a default template argument on a /// template parameter, which is ill-formed in certain contexts. /// /// \returns true if the default template argument should be dropped. static bool DiagnoseDefaultTemplateArgument(Sema &S, Sema::TemplateParamListContext TPC, SourceLocation ParamLoc, SourceRange DefArgRange) { switch (TPC) { case Sema::TPC_ClassTemplate: case Sema::TPC_VarTemplate: case Sema::TPC_TypeAliasTemplate: return false; case Sema::TPC_FunctionTemplate: case Sema::TPC_FriendFunctionTemplateDefinition: // C++ [temp.param]p9: // A default template-argument shall not be specified in a // function template declaration or a function template // definition [...] // If a friend function template declaration specifies a default // template-argument, that declaration shall be a definition and shall be // the only declaration of the function template in the translation unit. // (C++98/03 doesn't have this wording; see DR226). // HLSL Change Begin - Treat HLSL as C++11 here. This hides the C++11 // extension warning, and the C++98 compat warning is disabled unless // explicitly enabled. S.Diag(ParamLoc, S.getLangOpts().CPlusPlus11 || S.getLangOpts().HLSL ? diag::warn_cxx98_compat_template_parameter_default_in_function_template : diag::ext_template_parameter_default_in_function_template) << DefArgRange; return false; // HLSL Change End case Sema::TPC_ClassTemplateMember: // C++0x [temp.param]p9: // A default template-argument shall not be specified in the // template-parameter-lists of the definition of a member of a // class template that appears outside of the member's class. S.Diag(ParamLoc, diag::err_template_parameter_default_template_member) << DefArgRange; return true; case Sema::TPC_FriendClassTemplate: case Sema::TPC_FriendFunctionTemplate: // C++ [temp.param]p9: // A default template-argument shall not be specified in a // friend template declaration. S.Diag(ParamLoc, diag::err_template_parameter_default_friend_template) << DefArgRange; return true; // FIXME: C++0x [temp.param]p9 allows default template-arguments // for friend function templates if there is only a single // declaration (and it is a definition). Strange! } llvm_unreachable("Invalid TemplateParamListContext!"); } /// \brief Check for unexpanded parameter packs within the template parameters /// of a template template parameter, recursively. 
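///
/// For illustration (editorial note, hypothetical code): in
/// \code
///   template<typename... Ts> struct A {
///     template<template<Ts> class U> struct B;  // 'Ts' used unexpanded
///   };
/// \endcode
/// the inner template template parameter's parameter list names the pack
/// 'Ts' without expanding it, which this check diagnoses.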
static bool DiagnoseUnexpandedParameterPacks(Sema &S, TemplateTemplateParmDecl *TTP) { // A template template parameter which is a parameter pack is also a pack // expansion. if (TTP->isParameterPack()) return false; TemplateParameterList *Params = TTP->getTemplateParameters(); for (unsigned I = 0, N = Params->size(); I != N; ++I) { NamedDecl *P = Params->getParam(I); if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) { if (!NTTP->isParameterPack() && S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(), NTTP->getTypeSourceInfo(), Sema::UPPC_NonTypeTemplateParameterType)) return true; continue; } if (TemplateTemplateParmDecl *InnerTTP = dyn_cast<TemplateTemplateParmDecl>(P)) if (DiagnoseUnexpandedParameterPacks(S, InnerTTP)) return true; } return false; } /// \brief Checks the validity of a template parameter list, possibly /// considering the template parameter list from a previous /// declaration. /// /// If an "old" template parameter list is provided, it must be /// equivalent (per TemplateParameterListsAreEqual) to the "new" /// template parameter list. /// /// \param NewParams Template parameter list for a new template /// declaration. This template parameter list will be updated with any /// default arguments that are carried through from the previous /// template parameter list. /// /// \param OldParams If provided, template parameter list from a /// previous declaration of the same template. Default template /// arguments will be merged from the old template parameter list to /// the new template parameter list. /// /// \param TPC Describes the context in which we are checking the given /// template parameter list. /// /// \returns true if an error occurred, false otherwise. bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC) { bool Invalid = false; // C++ [temp.param]p10: // The set of default template-arguments available for use with a // template declaration or definition is obtained by merging the // default arguments from the definition (if in scope) and all // declarations in scope in the same way default function // arguments are (8.3.6). bool SawDefaultArgument = false; SourceLocation PreviousDefaultArgLoc; // Dummy initialization to avoid warnings. TemplateParameterList::iterator OldParam = NewParams->end(); if (OldParams) OldParam = OldParams->begin(); bool RemoveDefaultArguments = false; for (TemplateParameterList::iterator NewParam = NewParams->begin(), NewParamEnd = NewParams->end(); NewParam != NewParamEnd; ++NewParam) { // Variables used to diagnose redundant default arguments bool RedundantDefaultArg = false; SourceLocation OldDefaultLoc; SourceLocation NewDefaultLoc; // Variable used to diagnose missing default arguments bool MissingDefaultArg = false; // Variable used to diagnose non-final parameter packs bool SawParameterPack = false; if (TemplateTypeParmDecl *NewTypeParm = dyn_cast<TemplateTypeParmDecl>(*NewParam)) { // Check the presence of a default argument here. if (NewTypeParm->hasDefaultArgument() && DiagnoseDefaultTemplateArgument(*this, TPC, NewTypeParm->getLocation(), NewTypeParm->getDefaultArgumentInfo()->getTypeLoc() .getSourceRange())) NewTypeParm->removeDefaultArgument(); // Merge default arguments for template type parameters. TemplateTypeParmDecl *OldTypeParm = OldParams? 
cast<TemplateTypeParmDecl>(*OldParam) : nullptr; if (NewTypeParm->isParameterPack()) { assert(!NewTypeParm->hasDefaultArgument() && "Parameter packs can't have a default argument!"); SawParameterPack = true; } else if (OldTypeParm && hasVisibleDefaultArgument(OldTypeParm) && NewTypeParm->hasDefaultArgument()) { OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc(); NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc(); SawDefaultArgument = true; RedundantDefaultArg = true; PreviousDefaultArgLoc = NewDefaultLoc; } else if (OldTypeParm && OldTypeParm->hasDefaultArgument()) { // Merge the default argument from the old declaration to the // new declaration. NewTypeParm->setInheritedDefaultArgument(Context, OldTypeParm); PreviousDefaultArgLoc = OldTypeParm->getDefaultArgumentLoc(); } else if (NewTypeParm->hasDefaultArgument()) { SawDefaultArgument = true; PreviousDefaultArgLoc = NewTypeParm->getDefaultArgumentLoc(); } else if (SawDefaultArgument) MissingDefaultArg = true; } else if (NonTypeTemplateParmDecl *NewNonTypeParm = dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) { // Check for unexpanded parameter packs. if (!NewNonTypeParm->isParameterPack() && DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(), NewNonTypeParm->getTypeSourceInfo(), UPPC_NonTypeTemplateParameterType)) { Invalid = true; continue; } // Check the presence of a default argument here. if (NewNonTypeParm->hasDefaultArgument() && DiagnoseDefaultTemplateArgument(*this, TPC, NewNonTypeParm->getLocation(), NewNonTypeParm->getDefaultArgument()->getSourceRange())) { NewNonTypeParm->removeDefaultArgument(); } // Merge default arguments for non-type template parameters NonTypeTemplateParmDecl *OldNonTypeParm = OldParams? cast<NonTypeTemplateParmDecl>(*OldParam) : nullptr; if (NewNonTypeParm->isParameterPack()) { assert(!NewNonTypeParm->hasDefaultArgument() && "Parameter packs can't have a default argument!"); if (!NewNonTypeParm->isPackExpansion()) SawParameterPack = true; } else if (OldNonTypeParm && hasVisibleDefaultArgument(OldNonTypeParm) && NewNonTypeParm->hasDefaultArgument()) { OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc(); NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc(); SawDefaultArgument = true; RedundantDefaultArg = true; PreviousDefaultArgLoc = NewDefaultLoc; } else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument()) { // Merge the default argument from the old declaration to the // new declaration. NewNonTypeParm->setInheritedDefaultArgument(Context, OldNonTypeParm); PreviousDefaultArgLoc = OldNonTypeParm->getDefaultArgumentLoc(); } else if (NewNonTypeParm->hasDefaultArgument()) { SawDefaultArgument = true; PreviousDefaultArgLoc = NewNonTypeParm->getDefaultArgumentLoc(); } else if (SawDefaultArgument) MissingDefaultArg = true; } else { TemplateTemplateParmDecl *NewTemplateParm = cast<TemplateTemplateParmDecl>(*NewParam); // Check for unexpanded parameter packs, recursively. if (::DiagnoseUnexpandedParameterPacks(*this, NewTemplateParm)) { Invalid = true; continue; } // Check the presence of a default argument here. if (NewTemplateParm->hasDefaultArgument() && DiagnoseDefaultTemplateArgument(*this, TPC, NewTemplateParm->getLocation(), NewTemplateParm->getDefaultArgument().getSourceRange())) NewTemplateParm->removeDefaultArgument(); // Merge default arguments for template template parameters TemplateTemplateParmDecl *OldTemplateParm = OldParams? 
cast<TemplateTemplateParmDecl>(*OldParam) : nullptr; if (NewTemplateParm->isParameterPack()) { assert(!NewTemplateParm->hasDefaultArgument() && "Parameter packs can't have a default argument!"); if (!NewTemplateParm->isPackExpansion()) SawParameterPack = true; } else if (OldTemplateParm && hasVisibleDefaultArgument(OldTemplateParm) && NewTemplateParm->hasDefaultArgument()) { OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation(); NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation(); SawDefaultArgument = true; RedundantDefaultArg = true; PreviousDefaultArgLoc = NewDefaultLoc; } else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument()) { // Merge the default argument from the old declaration to the // new declaration. NewTemplateParm->setInheritedDefaultArgument(Context, OldTemplateParm); PreviousDefaultArgLoc = OldTemplateParm->getDefaultArgument().getLocation(); } else if (NewTemplateParm->hasDefaultArgument()) { SawDefaultArgument = true; PreviousDefaultArgLoc = NewTemplateParm->getDefaultArgument().getLocation(); } else if (SawDefaultArgument) MissingDefaultArg = true; } // C++11 [temp.param]p11: // If a template parameter of a primary class template or alias template // is a template parameter pack, it shall be the last template parameter. if (SawParameterPack && (NewParam + 1) != NewParamEnd && (TPC == TPC_ClassTemplate || TPC == TPC_VarTemplate || TPC == TPC_TypeAliasTemplate)) { Diag((*NewParam)->getLocation(), diag::err_template_param_pack_must_be_last_template_parameter); Invalid = true; } if (RedundantDefaultArg) { // C++ [temp.param]p12: // A template-parameter shall not be given default arguments // by two different declarations in the same scope. Diag(NewDefaultLoc, diag::err_template_param_default_arg_redefinition); Diag(OldDefaultLoc, diag::note_template_param_prev_default_arg); Invalid = true; } else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) { // C++ [temp.param]p11: // If a template-parameter of a class template has a default // template-argument, each subsequent template-parameter shall either // have a default template-argument supplied or be a template parameter // pack. Diag((*NewParam)->getLocation(), diag::err_template_param_default_arg_missing); Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg); Invalid = true; RemoveDefaultArguments = true; } // If we have an old template parameter list that we're merging // in, move on to the next parameter. if (OldParams) ++OldParam; } // We were missing some default arguments at the end of the list, so remove // all of the default arguments. if (RemoveDefaultArguments) { for (TemplateParameterList::iterator NewParam = NewParams->begin(), NewParamEnd = NewParams->end(); NewParam != NewParamEnd; ++NewParam) { if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*NewParam)) TTP->removeDefaultArgument(); else if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) NTTP->removeDefaultArgument(); else cast<TemplateTemplateParmDecl>(*NewParam)->removeDefaultArgument(); } } return Invalid; } namespace { /// A class which looks for a use of a certain level of template /// parameter. 
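/// For illustration (editorial note): seeded from the parameter list of
/// 'template<class T> struct Outer' (depth 0), the checker reports a match
/// when it visits any type or expression naming a template parameter at
/// depth 0 or deeper, such as the 'T' in 'T *'.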
struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> { typedef RecursiveASTVisitor<DependencyChecker> super; unsigned Depth; bool Match; SourceLocation MatchLoc; DependencyChecker(unsigned Depth) : Depth(Depth), Match(false) {} DependencyChecker(TemplateParameterList *Params) : Match(false) { NamedDecl *ND = Params->getParam(0); if (TemplateTypeParmDecl *PD = dyn_cast<TemplateTypeParmDecl>(ND)) { Depth = PD->getDepth(); } else if (NonTypeTemplateParmDecl *PD = dyn_cast<NonTypeTemplateParmDecl>(ND)) { Depth = PD->getDepth(); } else { Depth = cast<TemplateTemplateParmDecl>(ND)->getDepth(); } } bool Matches(unsigned ParmDepth, SourceLocation Loc = SourceLocation()) { if (ParmDepth >= Depth) { Match = true; MatchLoc = Loc; return true; } return false; } bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) { return !Matches(TL.getTypePtr()->getDepth(), TL.getNameLoc()); } bool VisitTemplateTypeParmType(const TemplateTypeParmType *T) { return !Matches(T->getDepth()); } bool TraverseTemplateName(TemplateName N) { if (TemplateTemplateParmDecl *PD = dyn_cast_or_null<TemplateTemplateParmDecl>(N.getAsTemplateDecl())) if (Matches(PD->getDepth())) return false; return super::TraverseTemplateName(N); } bool VisitDeclRefExpr(DeclRefExpr *E) { if (NonTypeTemplateParmDecl *PD = dyn_cast<NonTypeTemplateParmDecl>(E->getDecl())) if (Matches(PD->getDepth(), E->getExprLoc())) return false; return super::VisitDeclRefExpr(E); } bool VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) { return TraverseType(T->getReplacementType()); } bool VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) { return TraverseTemplateArgument(T->getArgumentPack()); } bool TraverseInjectedClassNameType(const InjectedClassNameType *T) { return TraverseType(T->getInjectedSpecializationType()); } }; } /// Determines whether a given type depends on the given parameter /// list. static bool DependsOnTemplateParameters(QualType T, TemplateParameterList *Params) { DependencyChecker Checker(Params); Checker.TraverseType(T); return Checker.Match; } // Find the source range corresponding to the named type in the given // nested-name-specifier, if any. static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context, QualType T, const CXXScopeSpec &SS) { NestedNameSpecifierLoc NNSLoc(SS.getScopeRep(), SS.location_data()); while (NestedNameSpecifier *NNS = NNSLoc.getNestedNameSpecifier()) { if (const Type *CurType = NNS->getAsType()) { if (Context.hasSameUnqualifiedType(T, QualType(CurType, 0))) return NNSLoc.getTypeLoc().getSourceRange(); } else break; NNSLoc = NNSLoc.getPrefix(); } return SourceRange(); } /// \brief Match the given template parameter lists to the given scope /// specifier, returning the template parameter list that applies to the /// name. /// /// \param DeclStartLoc the start of the declaration that has a scope /// specifier or a template parameter list. /// /// \param DeclLoc The location of the declaration itself. /// /// \param SS the scope specifier that will be matched to the given template /// parameter lists. This scope specifier precedes a qualified name that is /// being declared. /// /// \param TemplateId The template-id following the scope specifier, if there /// is one. Used to check for a missing 'template<>'. /// /// \param ParamLists the template parameter lists, from the outermost to the /// innermost template parameter lists. 
/// /// \param IsFriend Whether to apply the slightly different rules for /// matching template parameters to scope specifiers in friend /// declarations. /// /// \param IsExplicitSpecialization will be set true if the entity being /// declared is an explicit specialization, false otherwise. /// /// \returns the template parameter list, if any, that corresponds to the /// name that is preceded by the scope specifier @p SS. This template /// parameter list may have template parameters (if we're declaring a /// template) or may have no template parameters (if we're declaring a /// template specialization), or may be NULL (if what we're declaring isn't /// itself a template). TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid) { IsExplicitSpecialization = false; Invalid = false; // The sequence of nested types to which we will match up the template // parameter lists. We first build this list by starting with the type named // by the nested-name-specifier and walking out until we run out of types. SmallVector<QualType, 4> NestedTypes; QualType T; if (SS.getScopeRep()) { if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true))) T = Context.getTypeDeclType(Record); else T = QualType(SS.getScopeRep()->getAsType(), 0); } // If we found an explicit specialization that prevents us from needing // 'template<>' headers, this will be set to the location of that // explicit specialization. SourceLocation ExplicitSpecLoc; while (!T.isNull()) { NestedTypes.push_back(T); // Retrieve the parent of a record type. if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) { // If this type is an explicit specialization, we're done. if (ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Record)) { if (!isa<ClassTemplatePartialSpecializationDecl>(Spec) && Spec->getSpecializationKind() == TSK_ExplicitSpecialization) { ExplicitSpecLoc = Spec->getLocation(); break; } } else if (Record->getTemplateSpecializationKind() == TSK_ExplicitSpecialization) { ExplicitSpecLoc = Record->getLocation(); break; } if (TypeDecl *Parent = dyn_cast<TypeDecl>(Record->getParent())) T = Context.getTypeDeclType(Parent); else T = QualType(); continue; } if (const TemplateSpecializationType *TST = T->getAs<TemplateSpecializationType>()) { if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) { if (TypeDecl *Parent = dyn_cast<TypeDecl>(Template->getDeclContext())) T = Context.getTypeDeclType(Parent); else T = QualType(); continue; } } // Look one step prior in a dependent template specialization type. if (const DependentTemplateSpecializationType *DependentTST = T->getAs<DependentTemplateSpecializationType>()) { if (NestedNameSpecifier *NNS = DependentTST->getQualifier()) T = QualType(NNS->getAsType(), 0); else T = QualType(); continue; } // Look one step prior in a dependent name type. if (const DependentNameType *DependentName = T->getAs<DependentNameType>()){ if (NestedNameSpecifier *NNS = DependentName->getQualifier()) T = QualType(NNS->getAsType(), 0); else T = QualType(); continue; } // Retrieve the parent of an enumeration type. if (const EnumType *EnumT = T->getAs<EnumType>()) { // FIXME: Forward-declared enums require a TSK_ExplicitSpecialization // check here. EnumDecl *Enum = EnumT->getDecl(); // Get to the parent type. 
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Enum->getParent())) T = Context.getTypeDeclType(Parent); else T = QualType(); continue; } T = QualType(); } // Reverse the nested types list, since we want to traverse from the outermost // to the innermost while checking template-parameter-lists. std::reverse(NestedTypes.begin(), NestedTypes.end()); // C++0x [temp.expl.spec]p17: // A member or a member template may be nested within many // enclosing class templates. In an explicit specialization for // such a member, the member declaration shall be preceded by a // template<> for each enclosing class template that is // explicitly specialized. bool SawNonEmptyTemplateParameterList = false; auto CheckExplicitSpecialization = [&](SourceRange Range, bool Recovery) { if (SawNonEmptyTemplateParameterList) { Diag(DeclLoc, diag::err_specialize_member_of_template) << !Recovery << Range; Invalid = true; IsExplicitSpecialization = false; return true; } return false; }; auto DiagnoseMissingExplicitSpecialization = [&] (SourceRange Range) { // Check that we can have an explicit specialization here. if (CheckExplicitSpecialization(Range, true)) return true; // We don't have a template header, but we should. SourceLocation ExpectedTemplateLoc; if (!ParamLists.empty()) ExpectedTemplateLoc = ParamLists[0]->getTemplateLoc(); else ExpectedTemplateLoc = DeclStartLoc; Diag(DeclLoc, diag::err_template_spec_needs_header) << Range << FixItHint::CreateInsertion(ExpectedTemplateLoc, "template<> "); return false; }; unsigned ParamIdx = 0; for (unsigned TypeIdx = 0, NumTypes = NestedTypes.size(); TypeIdx != NumTypes; ++TypeIdx) { T = NestedTypes[TypeIdx]; // Whether we expect a 'template<>' header. bool NeedEmptyTemplateHeader = false; // Whether we expect a template header with parameters. bool NeedNonemptyTemplateHeader = false; // For a dependent type, the set of template parameters that we // expect to see. TemplateParameterList *ExpectedTemplateParams = nullptr; // C++0x [temp.expl.spec]p15: // A member or a member template may be nested within many enclosing // class templates. In an explicit specialization for such a member, the // member declaration shall be preceded by a template<> for each // enclosing class template that is explicitly specialized. if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) { if (ClassTemplatePartialSpecializationDecl *Partial = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) { ExpectedTemplateParams = Partial->getTemplateParameters(); NeedNonemptyTemplateHeader = true; } else if (Record->isDependentType()) { if (Record->getDescribedClassTemplate()) { ExpectedTemplateParams = Record->getDescribedClassTemplate() ->getTemplateParameters(); NeedNonemptyTemplateHeader = true; } } else if (ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Record)) { // C++0x [temp.expl.spec]p4: // Members of an explicitly specialized class template are defined // in the same manner as members of normal classes, and not using // the template<> syntax. 
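// For illustration, a hypothetical input exercising the rule above (not from
// this file):
//
//   template<typename T> struct S { void f(); };
//   template<> struct S<int> { void f(); };
//   void S<int>::f() {}  // OK: no 'template<>' header needed, since S<int>
//                        // is an explicit specialization.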
if (Spec->getSpecializationKind() != TSK_ExplicitSpecialization) NeedEmptyTemplateHeader = true; else continue; } else if (Record->getTemplateSpecializationKind()) { if (Record->getTemplateSpecializationKind() != TSK_ExplicitSpecialization && TypeIdx == NumTypes - 1) IsExplicitSpecialization = true; continue; } } else if (const TemplateSpecializationType *TST = T->getAs<TemplateSpecializationType>()) { if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) { ExpectedTemplateParams = Template->getTemplateParameters(); NeedNonemptyTemplateHeader = true; } } else if (T->getAs<DependentTemplateSpecializationType>()) { // FIXME: We actually could/should check the template arguments here // against the corresponding template parameter list. NeedNonemptyTemplateHeader = false; } // C++ [temp.expl.spec]p16: // In an explicit specialization declaration for a member of a class // template or a member template that appears in namespace scope, the // member template and some of its enclosing class templates may remain // unspecialized, except that the declaration shall not explicitly // specialize a class member template if its enclosing class templates // are not explicitly specialized as well. if (ParamIdx < ParamLists.size()) { if (ParamLists[ParamIdx]->size() == 0) { if (CheckExplicitSpecialization(ParamLists[ParamIdx]->getSourceRange(), false)) return nullptr; } else SawNonEmptyTemplateParameterList = true; } if (NeedEmptyTemplateHeader) { // If we're on the last of the types, and we need a 'template<>' header // here, then it's an explicit specialization. if (TypeIdx == NumTypes - 1) IsExplicitSpecialization = true; if (ParamIdx < ParamLists.size()) { if (ParamLists[ParamIdx]->size() > 0) { // The header has template parameters when it shouldn't. Complain. Diag(ParamLists[ParamIdx]->getTemplateLoc(), diag::err_template_param_list_matches_nontemplate) << T << SourceRange(ParamLists[ParamIdx]->getLAngleLoc(), ParamLists[ParamIdx]->getRAngleLoc()) << getRangeOfTypeInNestedNameSpecifier(Context, T, SS); Invalid = true; return nullptr; } // Consume this template header. ++ParamIdx; continue; } if (!IsFriend) if (DiagnoseMissingExplicitSpecialization( getRangeOfTypeInNestedNameSpecifier(Context, T, SS))) return nullptr; continue; } if (NeedNonemptyTemplateHeader) { // In friend declarations we can have template-ids which don't // depend on the corresponding template parameter lists. But // assume that empty parameter lists are supposed to match this // template-id. if (IsFriend && T->isDependentType()) { if (ParamIdx < ParamLists.size() && DependsOnTemplateParameters(T, ParamLists[ParamIdx])) ExpectedTemplateParams = nullptr; else continue; } if (ParamIdx < ParamLists.size()) { // Check the template parameter list, if we can. if (ExpectedTemplateParams && !TemplateParameterListsAreEqual(ParamLists[ParamIdx], ExpectedTemplateParams, true, TPL_TemplateMatch)) Invalid = true; if (!Invalid && CheckTemplateParameterList(ParamLists[ParamIdx], nullptr, TPC_ClassTemplateMember)) Invalid = true; ++ParamIdx; continue; } Diag(DeclLoc, diag::err_template_spec_needs_template_parameters) << T << getRangeOfTypeInNestedNameSpecifier(Context, T, SS); Invalid = true; continue; } } // If there were at least as many template-ids as there were template // parameter lists, then there are no template parameter lists remaining for // the declaration itself. if (ParamIdx >= ParamLists.size()) { if (TemplateId && !IsFriend) { // We don't have a template header for the declaration itself, but we // should.
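// For illustration, a hypothetical input that reaches this recovery path:
//
//   template<typename T> void f(T);
//   void f<int>(int) {}  // error: specialization requires 'template<>';
//                        // the fix-it inserts "template<> " here.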
IsExplicitSpecialization = true; DiagnoseMissingExplicitSpecialization(SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc)); // Fabricate an empty template parameter list for the invented header. return TemplateParameterList::Create(Context, SourceLocation(), SourceLocation(), nullptr, 0, SourceLocation()); } return nullptr; } // If there were too many template parameter lists, complain about that now. if (ParamIdx < ParamLists.size() - 1) { bool HasAnyExplicitSpecHeader = false; bool AllExplicitSpecHeaders = true; for (unsigned I = ParamIdx, E = ParamLists.size() - 1; I != E; ++I) { if (ParamLists[I]->size() == 0) HasAnyExplicitSpecHeader = true; else AllExplicitSpecHeaders = false; } Diag(ParamLists[ParamIdx]->getTemplateLoc(), AllExplicitSpecHeaders ? diag::warn_template_spec_extra_headers : diag::err_template_spec_extra_headers) << SourceRange(ParamLists[ParamIdx]->getTemplateLoc(), ParamLists[ParamLists.size() - 2]->getRAngleLoc()); // If there was a specialization somewhere, such that 'template<>' is // not required, and there were any 'template<>' headers, note where the // specialization occurred. if (ExplicitSpecLoc.isValid() && HasAnyExplicitSpecHeader) Diag(ExplicitSpecLoc, diag::note_explicit_template_spec_does_not_need_header) << NestedTypes.back(); // We have a template parameter list with no corresponding scope, which // means that the resulting template declaration can't be instantiated // properly (we'll end up with dependent nodes when we shouldn't). if (!AllExplicitSpecHeaders) Invalid = true; } // C++ [temp.expl.spec]p16: // In an explicit specialization declaration for a member of a class // template or a member template that appears in namespace scope, the // member template and some of its enclosing class templates may remain // unspecialized, except that the declaration shall not explicitly // specialize a class member template if its enclosing class templates // are not explicitly specialized as well. if (ParamLists.back()->size() == 0 && CheckExplicitSpecialization(ParamLists[ParamIdx]->getSourceRange(), false)) return nullptr; // Return the last template parameter list, which corresponds to the // entity being declared. return ParamLists.back(); } void Sema::NoteAllFoundTemplates(TemplateName Name) { if (TemplateDecl *Template = Name.getAsTemplateDecl()) { Diag(Template->getLocation(), diag::note_template_declared_here) << (isa<FunctionTemplateDecl>(Template) ? 0 : isa<ClassTemplateDecl>(Template) ? 1 : isa<VarTemplateDecl>(Template) ? 2 : isa<TypeAliasTemplateDecl>(Template) ? 3 : 4) << Template->getDeclName(); return; } if (OverloadedTemplateStorage *OST = Name.getAsOverloadedTemplate()) { for (OverloadedTemplateStorage::iterator I = OST->begin(), IEnd = OST->end(); I != IEnd; ++I) Diag((*I)->getLocation(), diag::note_template_declared_here) << 0 << (*I)->getDeclName(); return; } } QualType Sema::CheckTemplateIdType(TemplateName Name, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs) { DependentTemplateName *DTN = Name.getUnderlying().getAsDependentTemplateName(); if (DTN && DTN->isIdentifier()) // When building a template-id where the template-name is dependent, // assume the template is a type template. Either our assumption is // correct, or the code is ill-formed and will be diagnosed when the // dependent name is substituted.
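// For illustration, a hypothetical template-id of this form:
//
//   template<typename T> struct A {
//     typename T::template apply<int> member;  // 'T::apply' is dependent and
//                                              // assumed to name a type
//                                              // template here.
//   };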
return Context.getDependentTemplateSpecializationType(ETK_None, DTN->getQualifier(), DTN->getIdentifier(), TemplateArgs); TemplateDecl *Template = Name.getAsTemplateDecl(); if (!Template || isa<FunctionTemplateDecl>(Template) || isa<VarTemplateDecl>(Template)) { // We might have a substituted template template parameter pack. If so, // build a template specialization type for it. if (Name.getAsSubstTemplateTemplateParmPack()) return Context.getTemplateSpecializationType(Name, TemplateArgs); Diag(TemplateLoc, diag::err_template_id_not_a_type) << Name; NoteAllFoundTemplates(Name); return QualType(); } // Check that the template argument list is well-formed for this // template. SmallVector<TemplateArgument, 4> Converted; if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs, false, Converted)) return QualType(); // HLSL Change Starts - check template values for HLSL object/matrix/vector signatures if (getLangOpts().HLSL && Template->isImplicit() && hlsl::CheckTemplateArgumentListForHLSL(*this, Template, TemplateLoc, TemplateArgs)) return QualType(); // HLSL Change Ends QualType CanonType; bool InstantiationDependent = false; if (TypeAliasTemplateDecl *AliasTemplate = dyn_cast<TypeAliasTemplateDecl>(Template)) { // Find the canonical type for this type alias template specialization. TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl(); if (Pattern->isInvalidDecl()) return QualType(); TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); // Only substitute for the innermost template argument list. MultiLevelTemplateArgumentList TemplateArgLists; TemplateArgLists.addOuterTemplateArguments(&TemplateArgs); unsigned Depth = AliasTemplate->getTemplateParameters()->getDepth(); for (unsigned I = 0; I < Depth; ++I) TemplateArgLists.addOuterTemplateArguments(None); LocalInstantiationScope Scope(*this); InstantiatingTemplate Inst(*this, TemplateLoc, Template); if (Inst.isInvalid()) return QualType(); CanonType = SubstType(Pattern->getUnderlyingType(), TemplateArgLists, AliasTemplate->getLocation(), AliasTemplate->getDeclName()); if (CanonType.isNull()) return QualType(); } else if (Name.isDependent() || TemplateSpecializationType::anyDependentTemplateArguments( TemplateArgs, InstantiationDependent)) { // This class template specialization is a dependent // type. Therefore, its canonical type is another class template // specialization type that contains all of the converted // arguments in canonical form. This ensures that, e.g., A<T> and // A<T, T> have identical types when A is declared as: // // template<typename T, typename U = T> struct A; TemplateName CanonName = Context.getCanonicalTemplateName(Name); CanonType = Context.getTemplateSpecializationType(CanonName, Converted.data(), Converted.size()); // FIXME: CanonType is not actually the canonical type, and unfortunately // it is a TemplateSpecializationType that we will never use again. // In the future, we need to teach getTemplateSpecializationType to only // build the canonical type and return that to us. CanonType = Context.getCanonicalType(CanonType); // This might work out to be a current instantiation, in which // case the canonical type needs to be the InjectedClassNameType. // // TODO: in theory this could be a simple hashtable lookup; most // changes to CurContext don't change the set of current // instantiations. if (isa<ClassTemplateDecl>(Template)) { for (DeclContext *Ctx = CurContext; Ctx; Ctx = Ctx->getLookupParent()) { // If we get out to a namespace, we're done. 
if (Ctx->isFileContext()) break; // If this isn't a record, keep looking. CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx); if (!Record) continue; // Look for one of the two cases with InjectedClassNameTypes // and check whether it's the same template. if (!isa<ClassTemplatePartialSpecializationDecl>(Record) && !Record->getDescribedClassTemplate()) continue; // Fetch the injected class name type and check whether its // injected type is equal to the type we just built. QualType ICNT = Context.getTypeDeclType(Record); QualType Injected = cast<InjectedClassNameType>(ICNT) ->getInjectedSpecializationType(); if (CanonType != Injected->getCanonicalTypeInternal()) continue; // If so, the canonical type of this TST is the injected // class name type of the record we just found. assert(ICNT.isCanonical()); CanonType = ICNT; break; } } } else if (ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(Template)) { // Find the class template specialization declaration that // corresponds to these arguments. void *InsertPos = nullptr; ClassTemplateSpecializationDecl *Decl = ClassTemplate->findSpecialization(Converted, InsertPos); if (!Decl) { // This is the first time we have referenced this class template // specialization. Create the canonical declaration and add it to // the set of specializations. Decl = ClassTemplateSpecializationDecl::Create(Context, ClassTemplate->getTemplatedDecl()->getTagKind(), ClassTemplate->getDeclContext(), ClassTemplate->getTemplatedDecl()->getLocStart(), ClassTemplate->getLocation(), ClassTemplate, Converted.data(), Converted.size(), nullptr); ClassTemplate->AddSpecialization(Decl, InsertPos); if (ClassTemplate->isOutOfLine()) Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext()); } // Diagnose uses of this specialization. (void)DiagnoseUseOfDecl(Decl, TemplateLoc); CanonType = Context.getTypeDeclType(Decl); assert(isa<RecordType>(CanonType) && "type of non-dependent specialization is not a RecordType"); } // Build the fully-sugared type for this class template // specialization, which refers back to the class template // specialization we created or found. return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType); } TypeResult Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc, bool IsCtorOrDtorName) { if (SS.isInvalid()) return true; TemplateName Template = TemplateD.get(); // Translate the parser's template argument list in our AST format. TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc); translateTemplateArguments(TemplateArgsIn, TemplateArgs); if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) { QualType T = Context.getDependentTemplateSpecializationType(ETK_None, DTN->getQualifier(), DTN->getIdentifier(), TemplateArgs); // Build type-source information. 
TypeLocBuilder TLB; DependentTemplateSpecializationTypeLoc SpecTL = TLB.push<DependentTemplateSpecializationTypeLoc>(T); SpecTL.setElaboratedKeywordLoc(SourceLocation()); SpecTL.setQualifierLoc(SS.getWithLocInContext(Context)); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I) SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo()); return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T)); } QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs); if (Result.isNull()) return true; // Build type-source information. TypeLocBuilder TLB; TemplateSpecializationTypeLoc SpecTL = TLB.push<TemplateSpecializationTypeLoc>(Result); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i) SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo()); // NOTE: avoid constructing an ElaboratedTypeLoc if this is a // constructor or destructor name (in such a case, the scope specifier // will be attached to the enclosing Decl or Expr node). if (SS.isNotEmpty() && !IsCtorOrDtorName) { // Create an elaborated-type-specifier containing the nested-name-specifier. Result = Context.getElaboratedType(ETK_None, SS.getScopeRep(), Result); ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result); ElabTL.setElaboratedKeywordLoc(SourceLocation()); ElabTL.setQualifierLoc(SS.getWithLocInContext(Context)); } return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result)); } TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc) { TemplateName Template = TemplateD.get(); // Translate the parser's template argument list in our AST format. TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc); translateTemplateArguments(TemplateArgsIn, TemplateArgs); // Determine the tag kind TagTypeKind TagKind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec); ElaboratedTypeKeyword Keyword = TypeWithKeyword::getKeywordForTagTypeKind(TagKind); if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) { QualType T = Context.getDependentTemplateSpecializationType(Keyword, DTN->getQualifier(), DTN->getIdentifier(), TemplateArgs); // Build type-source information. TypeLocBuilder TLB; DependentTemplateSpecializationTypeLoc SpecTL = TLB.push<DependentTemplateSpecializationTypeLoc>(T); SpecTL.setElaboratedKeywordLoc(TagLoc); SpecTL.setQualifierLoc(SS.getWithLocInContext(Context)); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I) SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo()); return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T)); } if (TypeAliasTemplateDecl *TAT = dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) { // C++0x [dcl.type.elab]p2: // If the identifier resolves to a typedef-name or the simple-template-id // resolves to an alias template specialization, the // elaborated-type-specifier is ill-formed. 
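// For illustration, a hypothetical ill-formed input diagnosed below:
//
//   template<typename T> using Ptr = T*;
//   struct Ptr<int> p;  // error: the elaborated-type-specifier names an
//                       // alias template specialization.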
Diag(TemplateLoc, diag::err_tag_reference_non_tag) << 4; Diag(TAT->getLocation(), diag::note_declared_at); } QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs); if (Result.isNull()) return TypeResult(true); // Check the tag kind if (const RecordType *RT = Result->getAs<RecordType>()) { RecordDecl *D = RT->getDecl(); IdentifierInfo *Id = D->getIdentifier(); assert(Id && "templated class must have an identifier"); if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition, TagLoc, Id)) { Diag(TagLoc, diag::err_use_with_wrong_tag) << Result << FixItHint::CreateReplacement(SourceRange(TagLoc), D->getKindName()); Diag(D->getLocation(), diag::note_previous_use); } } // Provide source-location information for the template specialization. TypeLocBuilder TLB; TemplateSpecializationTypeLoc SpecTL = TLB.push<TemplateSpecializationTypeLoc>(Result); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i) SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo()); // Construct an elaborated type containing the nested-name-specifier (if any) // and tag keyword. Result = Context.getElaboratedType(Keyword, SS.getScopeRep(), Result); ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result); ElabTL.setElaboratedKeywordLoc(TagLoc); ElabTL.setQualifierLoc(SS.getWithLocInContext(Context)); return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result)); } static bool CheckTemplatePartialSpecializationArgs( Sema &S, SourceLocation NameLoc, TemplateParameterList *TemplateParams, unsigned ExplicitArgs, SmallVectorImpl<TemplateArgument> &TemplateArgs); static bool CheckTemplateSpecializationScope(Sema &S, NamedDecl *Specialized, NamedDecl *PrevDecl, SourceLocation Loc, bool IsPartialSpecialization); static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D); static bool isTemplateArgumentTemplateParameter( const TemplateArgument &Arg, unsigned Depth, unsigned Index) { switch (Arg.getKind()) { case TemplateArgument::Null: case TemplateArgument::NullPtr: case TemplateArgument::Integral: case TemplateArgument::Declaration: case TemplateArgument::Pack: case TemplateArgument::TemplateExpansion: return false; case TemplateArgument::Type: { QualType Type = Arg.getAsType(); const TemplateTypeParmType *TPT = Arg.getAsType()->getAs<TemplateTypeParmType>(); return TPT && !Type.hasQualifiers() && TPT->getDepth() == Depth && TPT->getIndex() == Index; } case TemplateArgument::Expression: { DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg.getAsExpr()); if (!DRE || !DRE->getDecl()) return false; const NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl()); return NTTP && NTTP->getDepth() == Depth && NTTP->getIndex() == Index; } case TemplateArgument::Template: const TemplateTemplateParmDecl *TTP = dyn_cast_or_null<TemplateTemplateParmDecl>( Arg.getAsTemplateOrTemplatePattern().getAsTemplateDecl()); return TTP && TTP->getDepth() == Depth && TTP->getIndex() == Index; } llvm_unreachable("unexpected kind of template argument"); } static bool isSameAsPrimaryTemplate(TemplateParameterList *Params, ArrayRef<TemplateArgument> Args) { if (Params->size() != Args.size()) return false; unsigned Depth = Params->getDepth(); for (unsigned I = 0, N = Args.size(); I != N; ++I) { TemplateArgument Arg = Args[I]; // If the parameter is a pack expansion, the argument must be a pack // whose only element is a pack expansion. 
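// For illustration, a hypothetical argument list that is identical to the
// primary template's implicit one:
//
//   template<typename ...Ts> struct X;
//   template<typename ...Ts> struct X<Ts...>;  // 'Ts...' is a pack holding a
//                                              // single pack expansion of Ts.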
if (Params->getParam(I)->isParameterPack()) { if (Arg.getKind() != TemplateArgument::Pack || Arg.pack_size() != 1 || !Arg.pack_begin()->isPackExpansion()) return false; Arg = Arg.pack_begin()->getPackExpansionPattern(); } if (!isTemplateArgumentTemplateParameter(Arg, Depth, I)) return false; } return true; } /// Convert the parser's template argument list representation into our form. static TemplateArgumentListInfo makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) { TemplateArgumentListInfo TemplateArgs(TemplateId.LAngleLoc, TemplateId.RAngleLoc); ASTTemplateArgsPtr TemplateArgsPtr(TemplateId.getTemplateArgs(), TemplateId.NumArgs); S.translateTemplateArguments(TemplateArgsPtr, TemplateArgs); return TemplateArgs; } DeclResult Sema::ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization) { // D must be a variable template-id. assert(D.getName().getKind() == UnqualifiedId::IK_TemplateId && "Variable template specialization is declared with a template id."); TemplateIdAnnotation *TemplateId = D.getName().TemplateId; TemplateArgumentListInfo TemplateArgs = makeTemplateArgumentListInfo(*this, *TemplateId); SourceLocation TemplateNameLoc = D.getIdentifierLoc(); SourceLocation LAngleLoc = TemplateId->LAngleLoc; SourceLocation RAngleLoc = TemplateId->RAngleLoc; TemplateName Name = TemplateId->Template.get(); // The template-id must name a variable template. VarTemplateDecl *VarTemplate = dyn_cast_or_null<VarTemplateDecl>(Name.getAsTemplateDecl()); if (!VarTemplate) { NamedDecl *FnTemplate; if (auto *OTS = Name.getAsOverloadedTemplate()) FnTemplate = *OTS->begin(); else FnTemplate = dyn_cast_or_null<FunctionTemplateDecl>(Name.getAsTemplateDecl()); if (FnTemplate) return Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template_but_method) << FnTemplate->getDeclName(); return Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template) << IsPartialSpecialization; } // Check for unexpanded parameter packs in any of the template arguments. for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) if (DiagnoseUnexpandedParameterPack(TemplateArgs[I], UPPC_PartialSpecialization)) return true; // Check that the template argument list is well-formed for this // template. SmallVector<TemplateArgument, 4> Converted; if (CheckTemplateArgumentList(VarTemplate, TemplateNameLoc, TemplateArgs, false, Converted)) return true; // Check that the type of this variable template specialization // matches the expected type. TypeSourceInfo *ExpectedDI; { // Do substitution on the type of the declaration TemplateArgumentList TemplateArgList(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); InstantiatingTemplate Inst(*this, TemplateKWLoc, VarTemplate); if (Inst.isInvalid()) return true; VarDecl *Templated = VarTemplate->getTemplatedDecl(); ExpectedDI = SubstType(Templated->getTypeSourceInfo(), MultiLevelTemplateArgumentList(TemplateArgList), Templated->getTypeSpecStartLoc(), Templated->getDeclName()); } if (!ExpectedDI) return true; // Find the variable template (partial) specialization declaration that // corresponds to these arguments.
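// For illustration, hypothetical variable template specializations of the
// kind this function processes:
//
//   template<typename T, typename U> int v = 0;        // primary
//   template<typename T> int v<T, int> = 1;            // partial spec
//   template<typename T, typename U> int v<T, U> = 2;  // error: identical to
//                                                      // the primary template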
if (IsPartialSpecialization) { if (CheckTemplatePartialSpecializationArgs( *this, TemplateNameLoc, VarTemplate->getTemplateParameters(), TemplateArgs.size(), Converted)) return true; bool InstantiationDependent; if (!Name.isDependent() && !TemplateSpecializationType::anyDependentTemplateArguments( TemplateArgs.getArgumentArray(), TemplateArgs.size(), InstantiationDependent)) { Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized) << VarTemplate->getDeclName(); IsPartialSpecialization = false; } if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(), Converted)) { // C++ [temp.class.spec]p9b3: // // -- The argument list of the specialization shall not be identical // to the implicit argument list of the primary template. Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template) << /*variable template*/ 1 << /*is definition*/(SC != SC_Extern && !CurContext->isRecord()) << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc)); // FIXME: Recover from this by treating the declaration as a redeclaration // of the primary template. return true; } } void *InsertPos = nullptr; VarTemplateSpecializationDecl *PrevDecl = nullptr; if (IsPartialSpecialization) // FIXME: Template parameter list matters too PrevDecl = VarTemplate->findPartialSpecialization(Converted, InsertPos); else PrevDecl = VarTemplate->findSpecialization(Converted, InsertPos); VarTemplateSpecializationDecl *Specialization = nullptr; // Check whether we can declare a variable template specialization in // the current scope. if (CheckTemplateSpecializationScope(*this, VarTemplate, PrevDecl, TemplateNameLoc, IsPartialSpecialization)) return true; if (PrevDecl && PrevDecl->getSpecializationKind() == TSK_Undeclared) { // Since the only prior variable template specialization with these // arguments was referenced but not declared, reuse that // declaration node as our own, updating its source location and // the list of outer template parameters to reflect our new declaration. Specialization = PrevDecl; Specialization->setLocation(TemplateNameLoc); PrevDecl = nullptr; } else if (IsPartialSpecialization) { // Create a new variable template partial specialization declaration node. VarTemplatePartialSpecializationDecl *PrevPartial = cast_or_null<VarTemplatePartialSpecializationDecl>(PrevDecl); VarTemplatePartialSpecializationDecl *Partial = VarTemplatePartialSpecializationDecl::Create( Context, VarTemplate->getDeclContext(), TemplateKWLoc, TemplateNameLoc, TemplateParams, VarTemplate, DI->getType(), DI, SC, Converted.data(), Converted.size(), TemplateArgs); if (!PrevPartial) VarTemplate->AddPartialSpecialization(Partial, InsertPos); Specialization = Partial; // If we are providing an explicit specialization of a member variable // template specialization, make a note of that. if (PrevPartial && PrevPartial->getInstantiatedFromMember()) PrevPartial->setMemberSpecialization(); // Check that all of the template parameters of the variable template // partial specialization are deducible from the template // arguments. If not, this variable template partial specialization // will never be used.
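// For illustration, a hypothetical partial specialization whose parameter is
// never deducible (it appears only in a non-deduced context):
//
//   template<typename T> struct Identity { typedef T type; };
//   template<typename T, typename U> int v = 0;
//   template<typename U> int v<typename Identity<U>::type, int> = 1;
//     // warning: 'U' cannot be deduced, so this partial specialization
//     // will never be used.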
llvm::SmallBitVector DeducibleParams(TemplateParams->size()); MarkUsedTemplateParameters(Partial->getTemplateArgs(), true, TemplateParams->getDepth(), DeducibleParams); if (!DeducibleParams.all()) { unsigned NumNonDeducible = DeducibleParams.size() - DeducibleParams.count(); Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible) << /*variable template*/ 1 << (NumNonDeducible > 1) << SourceRange(TemplateNameLoc, RAngleLoc); for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) { if (!DeducibleParams[I]) { NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I)); if (Param->getDeclName()) Diag(Param->getLocation(), diag::note_partial_spec_unused_parameter) << Param->getDeclName(); else Diag(Param->getLocation(), diag::note_partial_spec_unused_parameter) << "(anonymous)"; } } } } else { // Create a new variable template specialization declaration node for // this explicit specialization or friend declaration. Specialization = VarTemplateSpecializationDecl::Create( Context, VarTemplate->getDeclContext(), TemplateKWLoc, TemplateNameLoc, VarTemplate, DI->getType(), DI, SC, Converted.data(), Converted.size()); Specialization->setTemplateArgsInfo(TemplateArgs); if (!PrevDecl) VarTemplate->AddSpecialization(Specialization, InsertPos); } // C++ [temp.expl.spec]p6: // If a template, a member template or the member of a class template is // explicitly specialized then that specialization shall be declared // before the first use of that specialization that would cause an implicit // instantiation to take place, in every translation unit in which such a // use occurs; no diagnostic is required. if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) { bool Okay = false; for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) { // Is there any previous explicit specialization declaration? if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) { Okay = true; break; } } if (!Okay) { SourceRange Range(TemplateNameLoc, RAngleLoc); Diag(TemplateNameLoc, diag::err_specialization_after_instantiation) << Name << Range; Diag(PrevDecl->getPointOfInstantiation(), diag::note_instantiation_required_here) << (PrevDecl->getTemplateSpecializationKind() != TSK_ImplicitInstantiation); return true; } } Specialization->setTemplateKeywordLoc(TemplateKWLoc); Specialization->setLexicalDeclContext(CurContext); // Add the specialization into its lexical context, so that it can // be seen when iterating through the list of declarations in that // context. However, specializations are not found by name lookup. CurContext->addDecl(Specialization); // Note that this is an explicit specialization. Specialization->setSpecializationKind(TSK_ExplicitSpecialization); if (PrevDecl) { // Check that this isn't a redefinition of this specialization, // merging with previous declarations. LookupResult PrevSpec(*this, GetNameForDeclarator(D), LookupOrdinaryName, ForRedeclaration); PrevSpec.addDecl(PrevDecl); D.setRedeclaration(CheckVariableDeclaration(Specialization, PrevSpec)); } else if (Specialization->isStaticDataMember() && Specialization->isOutOfLine()) { Specialization->setAccess(VarTemplate->getAccess()); } // Link instantiations of static data members back to the template from // which they were instantiated.
if (Specialization->isStaticDataMember()) Specialization->setInstantiationOfStaticDataMember( VarTemplate->getTemplatedDecl(), Specialization->getSpecializationKind()); return Specialization; } namespace { /// \brief A partial specialization whose template arguments have matched /// a given template-id. struct PartialSpecMatchResult { VarTemplatePartialSpecializationDecl *Partial; TemplateArgumentList *Args; }; } DeclResult Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs) { assert(Template && "A variable template id without template?"); // Check that the template argument list is well-formed for this template. SmallVector<TemplateArgument, 4> Converted; if (CheckTemplateArgumentList( Template, TemplateNameLoc, const_cast<TemplateArgumentListInfo &>(TemplateArgs), false, Converted)) return true; // Find the variable template specialization declaration that // corresponds to these arguments. void *InsertPos = nullptr; if (VarTemplateSpecializationDecl *Spec = Template->findSpecialization( Converted, InsertPos)) // If we already have a variable template specialization, return it. return Spec; // This is the first time we have referenced this variable template // specialization. Create the canonical declaration and add it to // the set of specializations, based on the closest partial specialization // that it represents. That is, instantiate from the most specialized // matching partial specialization if one exists, and otherwise from the // primary template. VarDecl *InstantiationPattern = Template->getTemplatedDecl(); TemplateArgumentList TemplateArgList(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); TemplateArgumentList *InstantiationArgs = &TemplateArgList; bool AmbiguousPartialSpec = false; typedef PartialSpecMatchResult MatchResult; SmallVector<MatchResult, 4> Matched; SourceLocation PointOfInstantiation = TemplateNameLoc; TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation); // 1. Attempt to find the closest partial specialization that this // specializes, if any. // If any of the template arguments is dependent, then this is probably // a placeholder for an incomplete declarative context, which must be // complete by instantiation time. Thus, do not search through the partial // specializations yet. // TODO: Unify with InstantiateClassTemplateSpecialization()? // Perhaps better after unification of DeduceTemplateArguments() and // getMoreSpecializedPartialSpecialization(). bool InstantiationDependent = false; if (!TemplateSpecializationType::anyDependentTemplateArguments( TemplateArgs, InstantiationDependent)) { SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs; Template->getPartialSpecializations(PartialSpecs); for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) { VarTemplatePartialSpecializationDecl *Partial = PartialSpecs[I]; TemplateDeductionInfo Info(FailedCandidates.getLocation()); if (TemplateDeductionResult Result = DeduceTemplateArguments(Partial, TemplateArgList, Info)) { // Store the failed-deduction information for use in diagnostics later. // TODO: Actually use the failed-deduction info?
FailedCandidates.addCandidate() .set(Partial, MakeDeductionFailureInfo(Context, Result, Info)); (void)Result; } else { Matched.push_back(PartialSpecMatchResult()); Matched.back().Partial = Partial; Matched.back().Args = Info.take(); } } if (Matched.size() >= 1) { SmallVector<MatchResult, 4>::iterator Best = Matched.begin(); if (Matched.size() == 1) { // -- If exactly one matching specialization is found, the // instantiation is generated from that specialization. // We don't need to do anything for this. } else { // -- If more than one matching specialization is found, the // partial order rules (14.5.4.2) are used to determine // whether one of the specializations is more specialized // than the others. If none of the specializations is more // specialized than all of the other matching // specializations, then the use of the variable template is // ambiguous and the program is ill-formed. for (SmallVector<MatchResult, 4>::iterator P = Best + 1, PEnd = Matched.end(); P != PEnd; ++P) { if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial, PointOfInstantiation) == P->Partial) Best = P; } // Determine if the best partial specialization is more specialized than // the others. for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(), PEnd = Matched.end(); P != PEnd; ++P) { if (P != Best && getMoreSpecializedPartialSpecialization( P->Partial, Best->Partial, PointOfInstantiation) != Best->Partial) { AmbiguousPartialSpec = true; break; } } } // Instantiate using the best variable template partial specialization. InstantiationPattern = Best->Partial; InstantiationArgs = Best->Args; } else { // -- If no match is found, the instantiation is generated // from the primary template. // InstantiationPattern = Template->getTemplatedDecl(); } } // 2. Create the canonical declaration. // Note that we do not instantiate the variable just yet, since // instantiation is handled in DoMarkVarDeclReferenced(). // FIXME: LateAttrs et al.? VarTemplateSpecializationDecl *Decl = BuildVarTemplateInstantiation( Template, InstantiationPattern, *InstantiationArgs, TemplateArgs, Converted, TemplateNameLoc, InsertPos /*, LateAttrs, StartingScope*/); if (!Decl) return true; if (AmbiguousPartialSpec) { // Partial ordering did not produce a clear winner. Complain. Decl->setInvalidDecl(); Diag(PointOfInstantiation, diag::err_partial_spec_ordering_ambiguous) << Decl; // Print the matching partial specializations. for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(), PEnd = Matched.end(); P != PEnd; ++P) Diag(P->Partial->getLocation(), diag::note_partial_spec_match) << getTemplateArgumentBindingsText( P->Partial->getTemplateParameters(), *P->Args); return true; } if (VarTemplatePartialSpecializationDecl *D = dyn_cast<VarTemplatePartialSpecializationDecl>(InstantiationPattern)) Decl->setInstantiationOf(D, InstantiationArgs); assert(Decl && "No variable template specialization?"); return Decl; } ExprResult Sema::CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs) { DeclResult Decl = CheckVarTemplateId(Template, TemplateLoc, NameInfo.getLoc(), *TemplateArgs); if (Decl.isInvalid()) return ExprError(); VarDecl *Var = cast<VarDecl>(Decl.get()); if (!Var->getTemplateSpecializationKind()) Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation, NameInfo.getLoc()); // Build an ordinary singleton decl ref. 
return BuildDeclarationNameExpr(SS, NameInfo, Var, /*FoundD=*/nullptr, TemplateArgs); } ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs) { // FIXME: Can we do any checking at this point? I guess we could check the // template arguments that we have against the template name, if the template // name refers to a single template. That's not a terribly common case, // though. // foo<int> could identify a single function unambiguously // This approach does NOT work, since f<int>(1); // gets resolved prior to resorting to overload resolution // i.e., template<class T> void f(double); // vs template<class T, class U> void f(U); // These should be filtered out by our callers. assert(!R.empty() && "empty lookup results when building templateid"); assert(!R.isAmbiguous() && "ambiguous lookup when building templateid"); // In C++1y, check variable template ids. bool InstantiationDependent; if (R.getAsSingle<VarTemplateDecl>() && !TemplateSpecializationType::anyDependentTemplateArguments( *TemplateArgs, InstantiationDependent)) { return CheckVarTemplateId(SS, R.getLookupNameInfo(), R.getAsSingle<VarTemplateDecl>(), TemplateKWLoc, TemplateArgs); } // We don't want lookup warnings at this point. R.suppressDiagnostics(); UnresolvedLookupExpr *ULE = UnresolvedLookupExpr::Create(Context, R.getNamingClass(), SS.getWithLocInContext(Context), TemplateKWLoc, R.getLookupNameInfo(), RequiresADL, TemplateArgs, R.begin(), R.end()); return ULE; } // We actually only call this from template instantiation. ExprResult Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs) { assert(TemplateArgs || TemplateKWLoc.isValid()); DeclContext *DC; if (!(DC = computeDeclContext(SS, false)) || DC->isDependentContext() || RequireCompleteDeclContext(SS, DC)) return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs); bool MemberOfUnknownSpecialization; LookupResult R(*this, NameInfo, LookupOrdinaryName); LookupTemplateName(R, (Scope*)nullptr, SS, QualType(), /*Entering*/ false, MemberOfUnknownSpecialization); if (R.isAmbiguous()) return ExprError(); if (R.empty()) { Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_non_template) << NameInfo.getName() << SS.getRange(); return ExprError(); } if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>()) { Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_class_template) << SS.getScopeRep() << NameInfo.getName().getAsString() << SS.getRange(); Diag(Temp->getLocation(), diag::note_referenced_class_template); return ExprError(); } return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs); } /// \brief Form a dependent template name. /// /// This action forms a dependent template name given the template /// name and its (presumably dependent) scope specifier. For /// example, given "MetaFun::template apply", the scope specifier \p /// SS will be "MetaFun::", \p TemplateKWLoc contains the location /// of the "template" keyword, and "apply" is the \p Name. TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Result) { if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent()) Diag(TemplateKWLoc, getLangOpts().CPlusPlus11 ? 
diag::warn_cxx98_compat_template_outside_of_template : diag::ext_template_outside_of_template) << FixItHint::CreateRemoval(TemplateKWLoc); DeclContext *LookupCtx = nullptr; if (SS.isSet()) LookupCtx = computeDeclContext(SS, EnteringContext); if (!LookupCtx && ObjectType) LookupCtx = computeDeclContext(ObjectType.get()); if (LookupCtx) { // C++0x [temp.names]p5: // If a name prefixed by the keyword template is not the name of // a template, the program is ill-formed. [Note: the keyword // template may not be applied to non-template members of class // templates. -end note ] [ Note: as is the case with the // typename prefix, the template prefix is allowed in cases // where it is not strictly necessary; i.e., when the // nested-name-specifier or the expression on the left of the -> // or . is not dependent on a template-parameter, or the use // does not appear in the scope of a template. -end note] // // Note: C++03 was more strict here, because it banned the use of // the "template" keyword prior to a template-name that was not a // dependent name. C++ DR468 relaxed this requirement (the // "template" keyword is now permitted). We follow the C++0x // rules, even in C++03 mode with a warning, retroactively applying the DR. bool MemberOfUnknownSpecialization; TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name, ObjectType, EnteringContext, Result, MemberOfUnknownSpecialization); if (TNK == TNK_Non_template && LookupCtx->isDependentContext() && isa<CXXRecordDecl>(LookupCtx) && (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() || cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases())) { // This is a dependent template. Handle it below. } else if (TNK == TNK_Non_template) { Diag(Name.getLocStart(), diag::err_template_kw_refers_to_non_template) << GetNameFromUnqualifiedId(Name).getName() << Name.getSourceRange() << TemplateKWLoc; return TNK_Non_template; } else { // We found something; return it. return TNK; } } NestedNameSpecifier *Qualifier = SS.getScopeRep(); switch (Name.getKind()) { case UnqualifiedId::IK_Identifier: Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier, Name.Identifier)); return TNK_Dependent_template_name; case UnqualifiedId::IK_OperatorFunctionId: Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier, Name.OperatorFunctionId.Operator)); return TNK_Function_template; case UnqualifiedId::IK_LiteralOperatorId: llvm_unreachable("literal operator id cannot have a dependent scope"); default: break; } Diag(Name.getLocStart(), diag::err_template_kw_refers_to_non_template) << GetNameFromUnqualifiedId(Name).getName() << Name.getSourceRange() << TemplateKWLoc; return TNK_Non_template; } bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &AL, SmallVectorImpl<TemplateArgument> &Converted) { const TemplateArgument &Arg = AL.getArgument(); QualType ArgType; TypeSourceInfo *TSI = nullptr; // Check template type parameter. switch(Arg.getKind()) { case TemplateArgument::Type: // C++ [temp.arg.type]p1: // A template-argument for a template-parameter which is a // type shall be a type-id. ArgType = Arg.getAsType(); TSI = AL.getTypeSourceInfo(); break; case TemplateArgument::Template: { // We have a template type parameter but the template argument // is a template without any arguments. 
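// For illustration, in HLSL a bare built-in template name used where a type
// is expected is completed to its default specialization below, as if
// 'Texture2D<float4>' had been written instead of 'Texture2D'; in C++ the
// same spelling is diagnosed with err_template_missing_args instead.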
SourceRange SR = AL.getSourceRange(); TemplateName Name = Arg.getAsTemplate(); // HLSL Change Starts // HLSL allows omitting empty template argument lists TemplateDecl *Decl = Name.getAsTemplateDecl(); if (getLangOpts().HLSL && Decl) { if (TemplateDecl *TD = dyn_cast<TemplateDecl>(Decl)) { ArgType = getHLSLDefaultSpecialization(TD); if (!ArgType.isNull()) { CXXScopeSpec SS; TypeLocBuilder TLB; TemplateSpecializationTypeLoc TL = TLB.push<TemplateSpecializationTypeLoc>(ArgType); TL.setTemplateKeywordLoc(SourceLocation()); TL.setTemplateNameLoc(SR.getBegin()); TL.setLAngleLoc(SR.getEnd()); TL.setRAngleLoc(SR.getEnd()); TSI = TLB.getTypeSourceInfo(Context, ArgType); } } } if (ArgType.isNull()) { Diag(SR.getBegin(), diag::err_template_missing_args) << Name << SR; // HLSL Change - elide location notes for built-ins if (Decl && Decl->getLocation().isValid()) { Diag(Decl->getLocation(), diag::note_template_decl_here); } return true; } break; // HLSL Change Ends } case TemplateArgument::Expression: { // We have a template type parameter but the template argument is an // expression; see if maybe it is missing the "typename" keyword. CXXScopeSpec SS; DeclarationNameInfo NameInfo; if (DeclRefExpr *ArgExpr = dyn_cast<DeclRefExpr>(Arg.getAsExpr())) { SS.Adopt(ArgExpr->getQualifierLoc()); NameInfo = ArgExpr->getNameInfo(); } else if (DependentScopeDeclRefExpr *ArgExpr = dyn_cast<DependentScopeDeclRefExpr>(Arg.getAsExpr())) { SS.Adopt(ArgExpr->getQualifierLoc()); NameInfo = ArgExpr->getNameInfo(); } else if (CXXDependentScopeMemberExpr *ArgExpr = dyn_cast<CXXDependentScopeMemberExpr>(Arg.getAsExpr())) { if (ArgExpr->isImplicitAccess()) { SS.Adopt(ArgExpr->getQualifierLoc()); NameInfo = ArgExpr->getMemberNameInfo(); } } if (auto *II = NameInfo.getName().getAsIdentifierInfo()) { LookupResult Result(*this, NameInfo, LookupOrdinaryName); LookupParsedName(Result, CurScope, &SS); if (Result.getAsSingle<TypeDecl>() || Result.getResultKind() == LookupResult::NotFoundInCurrentInstantiation) { // Suggest that the user add 'typename' before the NNS. SourceLocation Loc = AL.getSourceRange().getBegin(); Diag(Loc, getLangOpts().MSVCCompat ? diag::ext_ms_template_type_arg_missing_typename : diag::err_template_arg_must_be_type_suggest) << FixItHint::CreateInsertion(Loc, "typename "); Diag(Param->getLocation(), diag::note_template_param_here); // Recover by synthesizing a type using the location information that we // already have. ArgType = Context.getDependentNameType(ETK_Typename, SS.getScopeRep(), II); TypeLocBuilder TLB; DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(ArgType); TL.setElaboratedKeywordLoc(SourceLocation(/*synthesized*/)); TL.setQualifierLoc(SS.getWithLocInContext(Context)); TL.setNameLoc(NameInfo.getLoc()); TSI = TLB.getTypeSourceInfo(Context, ArgType); // Overwrite our input TemplateArgumentLoc so that we can recover // properly. AL = TemplateArgumentLoc(TemplateArgument(ArgType), TemplateArgumentLocInfo(TSI)); break; } } // fallthrough LLVM_FALLTHROUGH; // HLSL Change } default: { // We have a template type parameter but the template argument // is not a type. SourceRange SR = AL.getSourceRange(); Diag(SR.getBegin(), diag::err_template_arg_must_be_type) << SR; Diag(Param->getLocation(), diag::note_template_param_here); return true; } } if (CheckTemplateArgument(Param, TSI)) return true; // Add the converted template type argument.
ArgType = Context.getCanonicalType(ArgType); // Objective-C ARC: // If an explicitly-specified template argument type is a lifetime type // with no lifetime qualifier, the __strong lifetime qualifier is inferred. if (getLangOpts().ObjCAutoRefCount && ArgType->isObjCLifetimeType() && !ArgType.getObjCLifetime()) { Qualifiers Qs; Qs.setObjCLifetime(Qualifiers::OCL_Strong); ArgType = Context.getQualifiedType(ArgType, Qs); } Converted.push_back(TemplateArgument(ArgType)); return false; } /// \brief Substitute template arguments into the default template argument for /// the given template type parameter. /// /// \param SemaRef the semantic analysis object for which we are performing /// the substitution. /// /// \param Template the template that we are synthesizing template arguments /// for. /// /// \param TemplateLoc the location of the template name that started the /// template-id we are checking. /// /// \param RAngleLoc the location of the right angle bracket ('>') that /// terminates the template-id. /// /// \param Param the template template parameter whose default we are /// substituting into. /// /// \param Converted the list of template arguments provided for template /// parameters that precede \p Param in the template parameter list. /// \returns the substituted template argument, or NULL if an error occurred. static TypeSourceInfo * SubstDefaultTemplateArgument(Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, TemplateTypeParmDecl *Param, SmallVectorImpl<TemplateArgument> &Converted) { TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo(); // If the argument type is dependent, instantiate it now based // on the previously-computed template arguments. if (ArgType->getType()->isDependentType()) { Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Template, Converted, SourceRange(TemplateLoc, RAngleLoc)); if (Inst.isInvalid()) return nullptr; TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); // Only substitute for the innermost template argument list. MultiLevelTemplateArgumentList TemplateArgLists; TemplateArgLists.addOuterTemplateArguments(&TemplateArgs); for (unsigned i = 0, e = Param->getDepth(); i != e; ++i) TemplateArgLists.addOuterTemplateArguments(None); Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext()); ArgType = SemaRef.SubstType(ArgType, TemplateArgLists, Param->getDefaultArgumentLoc(), Param->getDeclName()); } return ArgType; } /// \brief Substitute template arguments into the default template argument for /// the given non-type template parameter. /// /// \param SemaRef the semantic analysis object for which we are performing /// the substitution. /// /// \param Template the template that we are synthesizing template arguments /// for. /// /// \param TemplateLoc the location of the template name that started the /// template-id we are checking. /// /// \param RAngleLoc the location of the right angle bracket ('>') that /// terminates the template-id. /// /// \param Param the non-type template parameter whose default we are /// substituting into. /// /// \param Converted the list of template arguments provided for template /// parameters that precede \p Param in the template parameter list. /// /// \returns the substituted template argument, or NULL if an error occurred. 
static ExprResult SubstDefaultTemplateArgument(Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, NonTypeTemplateParmDecl *Param, SmallVectorImpl<TemplateArgument> &Converted) { Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Template, Converted, SourceRange(TemplateLoc, RAngleLoc)); if (Inst.isInvalid()) return ExprError(); TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); // Only substitute for the innermost template argument list. MultiLevelTemplateArgumentList TemplateArgLists; TemplateArgLists.addOuterTemplateArguments(&TemplateArgs); for (unsigned i = 0, e = Param->getDepth(); i != e; ++i) TemplateArgLists.addOuterTemplateArguments(None); Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext()); EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated); return SemaRef.SubstExpr(Param->getDefaultArgument(), TemplateArgLists); } /// \brief Substitute template arguments into the default template argument for /// the given template template parameter. /// /// \param SemaRef the semantic analysis object for which we are performing /// the substitution. /// /// \param Template the template that we are synthesizing template arguments /// for. /// /// \param TemplateLoc the location of the template name that started the /// template-id we are checking. /// /// \param RAngleLoc the location of the right angle bracket ('>') that /// terminates the template-id. /// /// \param Param the template template parameter whose default we are /// substituting into. /// /// \param Converted the list of template arguments provided for template /// parameters that precede \p Param in the template parameter list. /// /// \param QualifierLoc Will be set to the nested-name-specifier (with /// source-location information) that precedes the template name. /// /// \returns the substituted template argument, or NULL if an error occurred. static TemplateName SubstDefaultTemplateArgument(Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, TemplateTemplateParmDecl *Param, SmallVectorImpl<TemplateArgument> &Converted, NestedNameSpecifierLoc &QualifierLoc) { Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Template, Converted, SourceRange(TemplateLoc, RAngleLoc)); if (Inst.isInvalid()) return TemplateName(); TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); // Only substitute for the innermost template argument list. MultiLevelTemplateArgumentList TemplateArgLists; TemplateArgLists.addOuterTemplateArguments(&TemplateArgs); for (unsigned i = 0, e = Param->getDepth(); i != e; ++i) TemplateArgLists.addOuterTemplateArguments(None); Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext()); // Substitute into the nested-name-specifier first, QualifierLoc = Param->getDefaultArgument().getTemplateQualifierLoc(); if (QualifierLoc) { QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgLists); if (!QualifierLoc) return TemplateName(); } return SemaRef.SubstTemplateName( QualifierLoc, Param->getDefaultArgument().getArgument().getAsTemplate(), Param->getDefaultArgument().getTemplateNameLoc(), TemplateArgLists); } /// \brief If the given template parameter has a default template /// argument, substitute into that default template argument and /// return the corresponding template argument. 
TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg) { HasDefaultArg = false; if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) { if (!hasVisibleDefaultArgument(TypeParm)) return TemplateArgumentLoc(); HasDefaultArg = true; TypeSourceInfo *DI = SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc, TypeParm, Converted); if (DI) return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI); return TemplateArgumentLoc(); } if (NonTypeTemplateParmDecl *NonTypeParm = dyn_cast<NonTypeTemplateParmDecl>(Param)) { if (!hasVisibleDefaultArgument(NonTypeParm)) return TemplateArgumentLoc(); HasDefaultArg = true; ExprResult Arg = SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc, NonTypeParm, Converted); if (Arg.isInvalid()) return TemplateArgumentLoc(); Expr *ArgE = Arg.getAs<Expr>(); return TemplateArgumentLoc(TemplateArgument(ArgE), ArgE); } TemplateTemplateParmDecl *TempTempParm = cast<TemplateTemplateParmDecl>(Param); if (!hasVisibleDefaultArgument(TempTempParm)) return TemplateArgumentLoc(); HasDefaultArg = true; NestedNameSpecifierLoc QualifierLoc; TemplateName TName = SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc, TempTempParm, Converted, QualifierLoc); if (TName.isNull()) return TemplateArgumentLoc(); return TemplateArgumentLoc(TemplateArgument(TName), TempTempParm->getDefaultArgument().getTemplateQualifierLoc(), TempTempParm->getDefaultArgument().getTemplateNameLoc()); } /// \brief Check that the given template argument corresponds to the given /// template parameter. /// /// \param Param The template parameter against which the argument will be /// checked. /// /// \param Arg The template argument, which may be updated due to conversions. /// /// \param Template The template in which the template argument resides. /// /// \param TemplateLoc The location of the template name for the template /// whose argument list we're matching. /// /// \param RAngleLoc The location of the right angle bracket ('>') that closes /// the template argument list. /// /// \param ArgumentPackIndex The index into the argument pack where this /// argument will be placed. Only valid if the parameter is a parameter pack. /// /// \param Converted The checked, converted argument will be added to the /// end of this small vector. /// /// \param CTAK Describes how we arrived at this particular template argument: /// explicitly written, deduced, etc. /// /// \returns true on error, false otherwise. bool Sema::CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK) { // Check template type parameters. if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) return CheckTemplateTypeArgument(TTP, Arg, Converted); // Check non-type template parameters. if (NonTypeTemplateParmDecl *NTTP =dyn_cast<NonTypeTemplateParmDecl>(Param)) { // Do substitution on the type of the non-type template parameter // with the template arguments we've seen thus far. But if the // template has a dependent context then we cannot substitute yet. 
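// For illustration, a hypothetical case requiring this substitution:
//
//   template<typename T, T V> struct C {};
//   C<int, 3> c;  // the type of 'V' is dependent; after substituting T=int,
//                 // the argument '3' is checked against 'int'.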
QualType NTTPType = NTTP->getType(); if (NTTP->isParameterPack() && NTTP->isExpandedParameterPack()) NTTPType = NTTP->getExpansionType(ArgumentPackIndex); if (NTTPType->isDependentType() && !isa<TemplateTemplateParmDecl>(Template) && !Template->getDeclContext()->isDependentContext()) { // Do substitution on the type of the non-type template parameter. InstantiatingTemplate Inst(*this, TemplateLoc, Template, NTTP, Converted, SourceRange(TemplateLoc, RAngleLoc)); if (Inst.isInvalid()) return true; TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); NTTPType = SubstType(NTTPType, MultiLevelTemplateArgumentList(TemplateArgs), NTTP->getLocation(), NTTP->getDeclName()); // If that worked, check the non-type template parameter type // for validity. if (!NTTPType.isNull()) NTTPType = CheckNonTypeTemplateParameterType(NTTPType, NTTP->getLocation()); if (NTTPType.isNull()) return true; } switch (Arg.getArgument().getKind()) { case TemplateArgument::Null: llvm_unreachable("Should never see a NULL template argument here"); case TemplateArgument::Expression: { TemplateArgument Result; ExprResult Res = CheckTemplateArgument(NTTP, NTTPType, Arg.getArgument().getAsExpr(), Result, CTAK); if (Res.isInvalid()) return true; // If the resulting expression is new, then use it in place of the // old expression in the template argument. if (Res.get() != Arg.getArgument().getAsExpr()) { TemplateArgument TA(Res.get()); Arg = TemplateArgumentLoc(TA, Res.get()); } Converted.push_back(Result); break; } case TemplateArgument::Declaration: case TemplateArgument::Integral: case TemplateArgument::NullPtr: // We've already checked this template argument, so just copy // it to the list of converted arguments. Converted.push_back(Arg.getArgument()); break; case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: // We were given a template template argument. It may not be ill-formed; // see below. if (DependentTemplateName *DTN = Arg.getArgument().getAsTemplateOrTemplatePattern() .getAsDependentTemplateName()) { // We have a template argument such as \c T::template X, which we // parsed as a template template argument. However, since we now // know that we need a non-type template argument, convert this // template name into an expression. DeclarationNameInfo NameInfo(DTN->getIdentifier(), Arg.getTemplateNameLoc()); CXXScopeSpec SS; SS.Adopt(Arg.getTemplateQualifierLoc()); // FIXME: the template-template arg was a DependentTemplateName, // so it was provided with a template keyword. However, its source // location is not stored in the template argument structure. SourceLocation TemplateKWLoc; ExprResult E = DependentScopeDeclRefExpr::Create( Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo, nullptr); // If we parsed the template argument as a pack expansion, create a // pack expansion expression. if (Arg.getArgument().getKind() == TemplateArgument::TemplateExpansion){ E = ActOnPackExpansion(E.get(), Arg.getTemplateEllipsisLoc()); if (E.isInvalid()) return true; } TemplateArgument Result; E = CheckTemplateArgument(NTTP, NTTPType, E.get(), Result); if (E.isInvalid()) return true; Converted.push_back(Result); break; } // We have a template argument that actually does refer to a class // template, alias template, or template template parameter, and // therefore cannot be a non-type template argument. 
Diag(Arg.getLocation(), diag::err_template_arg_must_be_expr) << Arg.getSourceRange(); Diag(Param->getLocation(), diag::note_template_param_here); return true; case TemplateArgument::Type: { // We have a non-type template parameter but the template // argument is a type. // C++ [temp.arg]p2: // In a template-argument, an ambiguity between a type-id and // an expression is resolved to a type-id, regardless of the // form of the corresponding template-parameter. // // We warn specifically about this case, since it can be rather // confusing for users. QualType T = Arg.getArgument().getAsType(); SourceRange SR = Arg.getSourceRange(); if (T->isFunctionType()) Diag(SR.getBegin(), diag::err_template_arg_nontype_ambig) << SR << T; else Diag(SR.getBegin(), diag::err_template_arg_must_be_expr) << SR; Diag(Param->getLocation(), diag::note_template_param_here); return true; } case TemplateArgument::Pack: llvm_unreachable("Caller must expand template argument packs"); } return false; } // Check template template parameters. TemplateTemplateParmDecl *TempParm = cast<TemplateTemplateParmDecl>(Param); // Substitute into the template parameter list of the template // template parameter, since previously-supplied template arguments // may appear within the template template parameter. { // Set up a template instantiation context. LocalInstantiationScope Scope(*this); InstantiatingTemplate Inst(*this, TemplateLoc, Template, TempParm, Converted, SourceRange(TemplateLoc, RAngleLoc)); if (Inst.isInvalid()) return true; TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted.data(), Converted.size()); TempParm = cast_or_null<TemplateTemplateParmDecl>( SubstDecl(TempParm, CurContext, MultiLevelTemplateArgumentList(TemplateArgs))); if (!TempParm) return true; } switch (Arg.getArgument().getKind()) { case TemplateArgument::Null: llvm_unreachable("Should never see a NULL template argument here"); case TemplateArgument::Template: case TemplateArgument::TemplateExpansion: if (CheckTemplateArgument(TempParm, Arg, ArgumentPackIndex)) return true; Converted.push_back(Arg.getArgument()); break; case TemplateArgument::Expression: case TemplateArgument::Type: // We have a template template parameter but the template // argument does not refer to a template. Diag(Arg.getLocation(), diag::err_template_arg_must_be_template) << getLangOpts().CPlusPlus11; return true; case TemplateArgument::Declaration: llvm_unreachable("Declaration argument with template template parameter"); case TemplateArgument::Integral: llvm_unreachable("Integral argument with template template parameter"); case TemplateArgument::NullPtr: llvm_unreachable("Null pointer argument with template template parameter"); case TemplateArgument::Pack: llvm_unreachable("Caller must expand template argument packs"); } return false; } /// \brief Diagnose an arity mismatch in the static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs) { TemplateParameterList *Params = Template->getTemplateParameters(); unsigned NumParams = Params->size(); unsigned NumArgs = TemplateArgs.size(); SourceRange Range; if (NumArgs > NumParams) Range = SourceRange(TemplateArgs[NumParams].getLocation(), TemplateArgs.getRAngleLoc()); S.Diag(TemplateLoc, diag::err_template_arg_list_different_arity) << (NumArgs > NumParams) << (isa<ClassTemplateDecl>(Template)? 0 : isa<FunctionTemplateDecl>(Template)? 1 : isa<TemplateTemplateParmDecl>(Template)? 
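// Illustrative sketch (not part of the original source; 'Fixed' is an
// invented name). A kind mismatch caught by the dispatch above:
//
//   template <int N> struct Fixed {};
//   Fixed<int> F;  // error: the argument is a type, but an expression
//                  // (non-type argument) is required here.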
/// \brief Diagnose an arity mismatch in the number of template arguments
/// provided for the given template.
static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
                                  SourceLocation TemplateLoc,
                                  TemplateArgumentListInfo &TemplateArgs) {
  TemplateParameterList *Params = Template->getTemplateParameters();
  unsigned NumParams = Params->size();
  unsigned NumArgs = TemplateArgs.size();

  SourceRange Range;
  if (NumArgs > NumParams)
    Range = SourceRange(TemplateArgs[NumParams].getLocation(),
                        TemplateArgs.getRAngleLoc());
  S.Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
    << (NumArgs > NumParams)
    << (isa<ClassTemplateDecl>(Template)? 0 :
        isa<FunctionTemplateDecl>(Template)? 1 :
        isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
    << Template << Range;
  if (Template->getLocation().isValid()) { // HLSL Change - elide location notes for built-ins
    S.Diag(Template->getLocation(), diag::note_template_decl_here)
      << Params->getSourceRange();
  }
  return true;
}

/// \brief Check whether the template parameter is a pack expansion, and if so,
/// determine the number of parameters produced by that expansion. For instance:
///
/// \code
/// template<typename ...Ts> struct A {
///   template<Ts ...NTs, template<Ts> class ...TTs, typename ...Us> struct B;
/// };
/// \endcode
///
/// In \c A<int,int>::B, \c NTs and \c TTs have expanded pack size 2, and \c Us
/// is not a pack expansion, so returns an empty Optional.
static Optional<unsigned> getExpandedPackSize(NamedDecl *Param) {
  if (NonTypeTemplateParmDecl *NTTP
        = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
    if (NTTP->isExpandedParameterPack())
      return NTTP->getNumExpansionTypes();
  }

  if (TemplateTemplateParmDecl *TTP
        = dyn_cast<TemplateTemplateParmDecl>(Param)) {
    if (TTP->isExpandedParameterPack())
      return TTP->getNumExpansionTemplateParameters();
  }

  return None;
}

/// Diagnose a missing template argument.
template<typename TemplateParmDecl>
static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
                                    TemplateDecl *TD,
                                    const TemplateParmDecl *D,
                                    TemplateArgumentListInfo &Args) {
  // Dig out the most recent declaration of the template parameter; there may
  // be declarations of the template that are more recent than TD.
  D = cast<TemplateParmDecl>(cast<TemplateDecl>(TD->getMostRecentDecl())
                                 ->getTemplateParameters()
                                 ->getParam(D->getIndex()));

  // If there's a default argument that's not visible, diagnose that we're
  // missing a module import.
  llvm::SmallVector<Module*, 8> Modules;
  if (D->hasDefaultArgument() && !S.hasVisibleDefaultArgument(D, &Modules)) {
    S.diagnoseMissingImport(Loc, cast<NamedDecl>(TD),
                            D->getDefaultArgumentLoc(), Modules,
                            Sema::MissingImportKind::DefaultArgument,
                            /*Recover*/ true);
    return true;
  }

  // FIXME: If there's a more recent default argument that *is* visible,
  // diagnose that it was declared too late.

  return diagnoseArityMismatch(S, TD, Loc, Args);
}

/// \brief Check that the given template argument list is well-formed
/// for specializing the given template.
bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
                                     SourceLocation TemplateLoc,
                                     TemplateArgumentListInfo &TemplateArgs,
                                     bool PartialTemplateArgs,
                          SmallVectorImpl<TemplateArgument> &Converted) {
  // Make a copy of the template arguments for processing. Only make the
  // changes at the end when successful in matching the arguments to the
  // template.
  TemplateArgumentListInfo NewArgs = TemplateArgs;

  TemplateParameterList *Params = Template->getTemplateParameters();

  SourceLocation RAngleLoc = NewArgs.getRAngleLoc();

  // C++ [temp.arg]p1:
  //   [...] The type and form of each template-argument specified in
  //   a template-id shall match the type and form specified for the
  //   corresponding parameter declared by the template in its
  //   template-parameter-list.
  bool isTemplateTemplateParameter = isa<TemplateTemplateParmDecl>(Template);
  SmallVector<TemplateArgument, 2> ArgumentPack;
  unsigned ArgIdx = 0, NumArgs = NewArgs.size();
  LocalInstantiationScope InstScope(*this, true);
  for (TemplateParameterList::iterator Param = Params->begin(),
                                       ParamEnd = Params->end();
       Param != ParamEnd; /* increment in loop */) {
    // If we have an expanded parameter pack, make sure we don't have too
    // many arguments.
    if (Optional<unsigned> Expansions = getExpandedPackSize(*Param)) {
      if (*Expansions == ArgumentPack.size()) {
        // We're done with this parameter pack. Pack up its arguments and add
        // them to the list.
        Converted.push_back(
          TemplateArgument::CreatePackCopy(Context,
                                           ArgumentPack.data(),
                                           ArgumentPack.size()));
        ArgumentPack.clear();

        // This argument is assigned to the next parameter.
        ++Param;
        continue;
      } else if (ArgIdx == NumArgs && !PartialTemplateArgs) {
        // Not enough arguments for this parameter pack.
        Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
          << false
          << (isa<ClassTemplateDecl>(Template)? 0 :
              isa<FunctionTemplateDecl>(Template)? 1 :
              isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
          << Template;
        if (Template->getLocation().isValid()) { // HLSL Change - elide location notes for built-ins
          Diag(Template->getLocation(), diag::note_template_decl_here)
            << Params->getSourceRange();
        }
        return true;
      }
    }

    if (ArgIdx < NumArgs) {
      // Check the template argument we were given.
      if (CheckTemplateArgument(*Param, NewArgs[ArgIdx], Template,
                                TemplateLoc, RAngleLoc,
                                ArgumentPack.size(), Converted))
        return true;

      bool PackExpansionIntoNonPack =
          NewArgs[ArgIdx].getArgument().isPackExpansion() &&
          (!(*Param)->isTemplateParameterPack() || getExpandedPackSize(*Param));
      if (PackExpansionIntoNonPack && isa<TypeAliasTemplateDecl>(Template)) {
        // Core issue 1430: we have a pack expansion as an argument to an
        // alias template, and it's not part of a parameter pack. This
        // can't be canonicalized, so reject it now.
        Diag(NewArgs[ArgIdx].getLocation(),
             diag::err_alias_template_expansion_into_fixed_list)
          << NewArgs[ArgIdx].getSourceRange();
        Diag((*Param)->getLocation(), diag::note_template_param_here);
        return true;
      }

      // We're now done with this argument.
      ++ArgIdx;

      if ((*Param)->isTemplateParameterPack()) {
        // The template parameter was a template parameter pack, so take the
        // deduced argument and place it on the argument pack. Note that we
        // stay on the same template parameter so that we can deduce more
        // arguments.
        ArgumentPack.push_back(Converted.pop_back_val());
      } else {
        // Move to the next template parameter.
        ++Param;
      }

      // If we just saw a pack expansion into a non-pack, then directly convert
      // the remaining arguments, because we don't know what parameters they'll
      // match up with.
      if (PackExpansionIntoNonPack) {
        if (!ArgumentPack.empty()) {
          // If we were part way through filling in an expanded parameter pack,
          // fall back to just producing individual arguments.
          Converted.insert(Converted.end(),
                           ArgumentPack.begin(), ArgumentPack.end());
          ArgumentPack.clear();
        }

        while (ArgIdx < NumArgs) {
          Converted.push_back(NewArgs[ArgIdx].getArgument());
          ++ArgIdx;
        }

        return false;
      }

      continue;
    }

    // If we're checking a partial template argument list, we're done.
    if (PartialTemplateArgs) {
      if ((*Param)->isTemplateParameterPack() && !ArgumentPack.empty())
        Converted.push_back(TemplateArgument::CreatePackCopy(Context,
                                                        ArgumentPack.data(),
                                                        ArgumentPack.size()));

      return false;
    }

    // If we have a template parameter pack with no more corresponding
    // arguments, just break out now and we'll fill in the argument pack below.
    if ((*Param)->isTemplateParameterPack()) {
      assert(!getExpandedPackSize(*Param) &&
             "Should have dealt with this already");

      // A non-expanded parameter pack before the end of the parameter list
      // only occurs for an ill-formed template parameter list, unless we've
      // got a partial argument list for a function template, so just bail out.
      if (Param + 1 != ParamEnd)
        return true;

      Converted.push_back(TemplateArgument::CreatePackCopy(Context,
                                                      ArgumentPack.data(),
                                                      ArgumentPack.size()));
      ArgumentPack.clear();

      ++Param;
      continue;
    }

    // Check whether we have a default argument.
    TemplateArgumentLoc Arg;

    // Retrieve the default template argument from the template
    // parameter. For each kind of template parameter, we substitute the
    // template arguments provided thus far and any "outer" template arguments
    // (when the template parameter was part of a nested template) into
    // the default argument.
    if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
      if (!hasVisibleDefaultArgument(TTP))
        return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
                                       NewArgs);

      TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
                                                             Template,
                                                             TemplateLoc,
                                                             RAngleLoc,
                                                             TTP,
                                                             Converted);
      if (!ArgType)
        return true;

      Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
                                ArgType);
    } else if (NonTypeTemplateParmDecl *NTTP
                 = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
      if (!hasVisibleDefaultArgument(NTTP))
        return diagnoseMissingArgument(*this, TemplateLoc, Template, NTTP,
                                       NewArgs);

      ExprResult E = SubstDefaultTemplateArgument(*this, Template,
                                                  TemplateLoc,
                                                  RAngleLoc,
                                                  NTTP,
                                                  Converted);
      if (E.isInvalid())
        return true;

      Expr *Ex = E.getAs<Expr>();
      Arg = TemplateArgumentLoc(TemplateArgument(Ex), Ex);
    } else {
      TemplateTemplateParmDecl *TempParm
        = cast<TemplateTemplateParmDecl>(*Param);

      if (!hasVisibleDefaultArgument(TempParm))
        return diagnoseMissingArgument(*this, TemplateLoc, Template, TempParm,
                                       NewArgs);

      NestedNameSpecifierLoc QualifierLoc;
      TemplateName Name = SubstDefaultTemplateArgument(*this, Template,
                                                       TemplateLoc,
                                                       RAngleLoc,
                                                       TempParm,
                                                       Converted,
                                                       QualifierLoc);
      if (Name.isNull())
        return true;

      Arg = TemplateArgumentLoc(TemplateArgument(Name), QualifierLoc,
                          TempParm->getDefaultArgument().getTemplateNameLoc());
    }

    // Introduce an instantiation record that describes where we are using
    // the default template argument.
    InstantiatingTemplate Inst(*this, RAngleLoc, Template, *Param, Converted,
                               SourceRange(TemplateLoc, RAngleLoc));
    if (Inst.isInvalid())
      return true;

    // Check the default template argument.
    if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc,
                              RAngleLoc, 0, Converted))
      return true;

    // Core issue 150 (assumed resolution): if this is a template template
    // parameter, keep track of the default template arguments from the
    // template definition.
    if (isTemplateTemplateParameter)
      NewArgs.addArgument(Arg);

    // Move to the next template parameter and argument.
    ++Param;
    ++ArgIdx;
  }

  // If we're performing a partial argument substitution, allow any trailing
  // pack expansions; they might be empty. This can happen even if
  // PartialTemplateArgs is false (the list of arguments is complete but
  // still dependent).
  if (ArgIdx < NumArgs && CurrentInstantiationScope &&
      CurrentInstantiationScope->getPartiallySubstitutedPack()) {
    while (ArgIdx < NumArgs && NewArgs[ArgIdx].getArgument().isPackExpansion())
      Converted.push_back(NewArgs[ArgIdx++].getArgument());
  }

  // If we have any leftover arguments, then there were too many arguments.
  // Complain and fail.
  if (ArgIdx < NumArgs)
    return diagnoseArityMismatch(*this, Template, TemplateLoc, NewArgs);

  // No problems found with the new argument list, propagate changes back
  // to caller.
  TemplateArgs = NewArgs;
  return false;
}
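// Illustrative sketch (not part of the original source; 'Pair' is an
// invented name). For a template-id that omits defaulted parameters,
// CheckTemplateArgumentList substitutes the defaults into Converted:
//
//   template <typename T, typename U = T*> struct Pair {};
//   Pair<int> P;  // Converted holds {int, int*} after default substitution.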
namespace {
class UnnamedLocalNoLinkageFinder
  : public TypeVisitor<UnnamedLocalNoLinkageFinder, bool>
{
  Sema &S;
  SourceRange SR;

  typedef TypeVisitor<UnnamedLocalNoLinkageFinder, bool> inherited;

public:
  UnnamedLocalNoLinkageFinder(Sema &S, SourceRange SR) : S(S), SR(SR) { }

  bool Visit(QualType T) {
    return inherited::Visit(T.getTypePtr());
  }

#define TYPE(Class, Parent) \
  bool Visit##Class##Type(const Class##Type *);
#define ABSTRACT_TYPE(Class, Parent) \
  bool Visit##Class##Type(const Class##Type *) { return false; }
#define NON_CANONICAL_TYPE(Class, Parent) \
  bool Visit##Class##Type(const Class##Type *) { return false; }
#include "clang/AST/TypeNodes.def"

  bool VisitTagDecl(const TagDecl *Tag);
  bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
};
}

bool UnnamedLocalNoLinkageFinder::VisitBuiltinType(const BuiltinType*) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitComplexType(const ComplexType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitPointerType(const PointerType* T) {
  return Visit(T->getPointeeType());
}

bool UnnamedLocalNoLinkageFinder::VisitBlockPointerType(
                                                   const BlockPointerType* T) {
  return Visit(T->getPointeeType());
}

bool UnnamedLocalNoLinkageFinder::VisitLValueReferenceType(
                                                const LValueReferenceType* T) {
  return Visit(T->getPointeeType());
}

bool UnnamedLocalNoLinkageFinder::VisitRValueReferenceType(
                                                const RValueReferenceType* T) {
  return Visit(T->getPointeeType());
}

bool UnnamedLocalNoLinkageFinder::VisitMemberPointerType(
                                                  const MemberPointerType* T) {
  return Visit(T->getPointeeType()) || Visit(QualType(T->getClass(), 0));
}

bool UnnamedLocalNoLinkageFinder::VisitConstantArrayType(
                                                  const ConstantArrayType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitIncompleteArrayType(
                                                const IncompleteArrayType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitVariableArrayType(
                                                  const VariableArrayType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitDependentSizedArrayType(
                                            const DependentSizedArrayType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitDependentSizedExtVectorType(
                                        const DependentSizedExtVectorType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitVectorType(const VectorType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
  return Visit(T->getElementType());
}

bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
                                                  const FunctionProtoType* T) {
  for (const auto &A : T->param_types()) {
    if (Visit(A))
      return true;
  }

  return Visit(T->getReturnType());
}

bool UnnamedLocalNoLinkageFinder::VisitFunctionNoProtoType(
                                                const FunctionNoProtoType* T) {
  return Visit(T->getReturnType());
}

bool UnnamedLocalNoLinkageFinder::VisitUnresolvedUsingType(
                                                  const UnresolvedUsingType*) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitTypeOfExprType(const TypeOfExprType*) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitTypeOfType(const TypeOfType* T) {
  return Visit(T->getUnderlyingType());
}

bool UnnamedLocalNoLinkageFinder::VisitDecltypeType(const DecltypeType*) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitUnaryTransformType(
                                                   const UnaryTransformType*) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitAutoType(const AutoType *T) {
  return Visit(T->getDeducedType());
}

bool UnnamedLocalNoLinkageFinder::VisitRecordType(const RecordType* T) {
  return VisitTagDecl(T->getDecl());
}

bool UnnamedLocalNoLinkageFinder::VisitEnumType(const EnumType* T) {
  return VisitTagDecl(T->getDecl());
}

bool UnnamedLocalNoLinkageFinder::VisitTemplateTypeParmType(
                                                 const TemplateTypeParmType*) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitSubstTemplateTypeParmPackType(
                                       const SubstTemplateTypeParmPackType *) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
                                           const TemplateSpecializationType*) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitInjectedClassNameType(
                                              const InjectedClassNameType* T) {
  return VisitTagDecl(T->getDecl());
}

bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
                                                  const DependentNameType* T) {
  return VisitNestedNameSpecifier(T->getQualifier());
}

bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
                                const DependentTemplateSpecializationType* T) {
  return VisitNestedNameSpecifier(T->getQualifier());
}

bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
                                                  const PackExpansionType* T) {
  return Visit(T->getPattern());
}

bool UnnamedLocalNoLinkageFinder::VisitObjCObjectType(const ObjCObjectType *) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitObjCInterfaceType(
                                                   const ObjCInterfaceType *) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitObjCObjectPointerType(
                                               const ObjCObjectPointerType *) {
  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitAtomicType(const AtomicType* T) {
  return Visit(T->getValueType());
}

bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
  if (Tag->getDeclContext()->isFunctionOrMethod()) {
    S.Diag(SR.getBegin(),
           S.getLangOpts().CPlusPlus11 ?
             diag::warn_cxx98_compat_template_arg_local_type :
             diag::ext_template_arg_local_type)
      << S.Context.getTypeDeclType(Tag) << SR;
    return true;
  }

  if (!Tag->hasNameForLinkage()) {
    S.Diag(SR.getBegin(),
           S.getLangOpts().CPlusPlus11 ?
             diag::warn_cxx98_compat_template_arg_unnamed_type :
             diag::ext_template_arg_unnamed_type) << SR;
    S.Diag(Tag->getLocation(), diag::note_template_unnamed_type_here);
    return true;
  }

  return false;
}

bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
                                                   NestedNameSpecifier *NNS) {
  if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
    return true;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
  case NestedNameSpecifier::Namespace:
  case NestedNameSpecifier::NamespaceAlias:
  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    return false;

  case NestedNameSpecifier::TypeSpec:
  case NestedNameSpecifier::TypeSpecWithTemplate:
    return Visit(QualType(NNS->getAsType(), 0));
  }
  llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
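// Illustrative sketch (not part of the original source; 'Local' is an
// invented name). The finder above flags arguments such as:
//
//   void f() {
//     struct Local {};
//     // Using 'Local' as a template argument is an extension in C++98/03
//     // (ext_template_arg_local_type) and a compat warning in C++11.
//   }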
/// \brief Check a template argument against its corresponding
/// template type parameter.
///
/// This routine implements the semantics of C++ [temp.arg.type]. It
/// returns true if an error occurred, and false otherwise.
bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
                                 TypeSourceInfo *ArgInfo) {
  assert(ArgInfo && "invalid TypeSourceInfo");
  QualType Arg = ArgInfo->getType();
  SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();

  if (Arg->isVariablyModifiedType()) {
    return Diag(SR.getBegin(), diag::err_variably_modified_template_arg)
      << Arg;
  } else if (Context.hasSameUnqualifiedType(Arg, Context.OverloadTy)) {
    return Diag(SR.getBegin(), diag::err_template_arg_overload_type) << SR;
  }

  // C++03 [temp.arg.type]p2:
  //   A local type, a type with no linkage, an unnamed type or a type
  //   compounded from any of these types shall not be used as a
  //   template-argument for a template type-parameter.
  //
  // C++11 allows these, and even in C++03 we allow them as an extension with
  // a warning.
  bool NeedsCheck;
  if (LangOpts.CPlusPlus11)
    NeedsCheck =
        !Diags.isIgnored(diag::warn_cxx98_compat_template_arg_unnamed_type,
                         SR.getBegin()) ||
        !Diags.isIgnored(diag::warn_cxx98_compat_template_arg_local_type,
                         SR.getBegin());
  else
    NeedsCheck = Arg->hasUnnamedOrLocalType();

  if (NeedsCheck) {
    UnnamedLocalNoLinkageFinder Finder(*this, SR);
    (void)Finder.Visit(Context.getCanonicalType(Arg));
  }

  return false;
}

enum NullPointerValueKind {
  NPV_NotNullPointer,
  NPV_NullPointer,
  NPV_Error
};

/// \brief Determine whether the given template argument is a null pointer
/// value of the appropriate type.
static NullPointerValueKind
isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
                                   QualType ParamType, Expr *Arg) {
  if (Arg->isValueDependent() || Arg->isTypeDependent())
    return NPV_NotNullPointer;

  if (!S.getLangOpts().CPlusPlus11)
    return NPV_NotNullPointer;

  // Determine whether we have a constant expression.
  ExprResult ArgRV = S.DefaultFunctionArrayConversion(Arg);
  if (ArgRV.isInvalid())
    return NPV_Error;
  Arg = ArgRV.get();

  Expr::EvalResult EvalResult;
  SmallVector<PartialDiagnosticAt, 8> Notes;
  EvalResult.Diag = &Notes;
  if (!Arg->EvaluateAsRValue(EvalResult, S.Context) ||
      EvalResult.HasSideEffects) {
    SourceLocation DiagLoc = Arg->getExprLoc();

    // If our only note is the usual "invalid subexpression" note, just point
    // the caret at its location rather than producing an essentially
    // redundant note.
    if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
        diag::note_invalid_subexpr_in_const_expr) {
      DiagLoc = Notes[0].first;
      Notes.clear();
    }

    S.Diag(DiagLoc, diag::err_template_arg_not_address_constant)
      << Arg->getType() << Arg->getSourceRange();
    for (unsigned I = 0, N = Notes.size(); I != N; ++I)
      S.Diag(Notes[I].first, Notes[I].second);

    S.Diag(Param->getLocation(), diag::note_template_param_here);
    return NPV_Error;
  }

  // C++11 [temp.arg.nontype]p1:
  //   - an address constant expression of type std::nullptr_t
  if (Arg->getType()->isNullPtrType())
    return NPV_NullPointer;

  //   - a constant expression that evaluates to a null pointer value (4.10);
  //     or
  //   - a constant expression that evaluates to a null member pointer value
  //     (4.11); or
  if ((EvalResult.Val.isLValue() && !EvalResult.Val.getLValueBase()) ||
      (EvalResult.Val.isMemberPointer() &&
       !EvalResult.Val.getMemberPointerDecl())) {
    // If our expression has an appropriate type, we've succeeded.
    bool ObjCLifetimeConversion;
    if (S.Context.hasSameUnqualifiedType(Arg->getType(), ParamType) ||
        S.IsQualificationConversion(Arg->getType(), ParamType, false,
                                    ObjCLifetimeConversion))
      return NPV_NullPointer;

    // The types didn't match, but we know we got a null pointer; complain,
    // then recover as if the types were correct.
    S.Diag(Arg->getExprLoc(), diag::err_template_arg_wrongtype_null_constant)
      << Arg->getType() << ParamType << Arg->getSourceRange();
    S.Diag(Param->getLocation(), diag::note_template_param_here);
    return NPV_NullPointer;
  }

  // If we don't have a null pointer value, but we do have a NULL pointer
  // constant, suggest a cast to the appropriate type.
  if (Arg->isNullPointerConstant(S.Context, Expr::NPC_NeverValueDependent)) {
    std::string Code = "static_cast<" + ParamType.getAsString() + ">(";
    S.Diag(Arg->getExprLoc(), diag::err_template_arg_untyped_null_constant)
        << ParamType << FixItHint::CreateInsertion(Arg->getLocStart(), Code)
        << FixItHint::CreateInsertion(S.getLocForEndOfToken(Arg->getLocEnd()),
                                      ")");
    S.Diag(Param->getLocation(), diag::note_template_param_here);
    return NPV_NullPointer;
  }

  // FIXME: If we ever want to support general, address-constant expressions
  // as non-type template arguments, we should return the ExprResult here to
  // be interpreted by the caller.
  return NPV_NotNullPointer;
}

/// \brief Checks whether the given template argument is compatible with its
/// template parameter.
static bool CheckTemplateArgumentIsCompatibleWithParameter(
    Sema &S, NonTypeTemplateParmDecl *Param, QualType ParamType, Expr *ArgIn,
    Expr *Arg, QualType ArgType) {
  bool ObjCLifetimeConversion;
  if (ParamType->isPointerType() &&
      !ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType() &&
      S.IsQualificationConversion(ArgType, ParamType, false,
                                  ObjCLifetimeConversion)) {
    // For pointer-to-object types, qualification conversions are
    // permitted.
  } else {
    if (const ReferenceType *ParamRef = ParamType->getAs<ReferenceType>()) {
      if (!ParamRef->getPointeeType()->isFunctionType()) {
        // C++ [temp.arg.nontype]p5b3:
        //   For a non-type template-parameter of type reference to
        //   object, no conversions apply. The type referred to by the
        //   reference may be more cv-qualified than the (otherwise
        //   identical) type of the template-argument. The
        //   template-parameter is bound directly to the
        //   template-argument, which shall be an lvalue.

        // FIXME: Other qualifiers?
        unsigned ParamQuals = ParamRef->getPointeeType().getCVRQualifiers();
        unsigned ArgQuals = ArgType.getCVRQualifiers();

        if ((ParamQuals | ArgQuals) != ParamQuals) {
          S.Diag(Arg->getLocStart(),
                 diag::err_template_arg_ref_bind_ignores_quals)
            << ParamType << Arg->getType() << Arg->getSourceRange();
          S.Diag(Param->getLocation(), diag::note_template_param_here);
          return true;
        }
      }
    }

    // At this point, the template argument refers to an object or
    // function with external linkage. We now need to check whether the
    // argument and parameter types are compatible.
    if (!S.Context.hasSameUnqualifiedType(ArgType,
                                          ParamType.getNonReferenceType())) {
      // We can't perform this conversion or binding.
      if (ParamType->isReferenceType())
        S.Diag(Arg->getLocStart(), diag::err_template_arg_no_ref_bind)
          << ParamType << ArgIn->getType() << Arg->getSourceRange();
      else
        S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
          << ArgIn->getType() << ParamType << Arg->getSourceRange();
      S.Diag(Param->getLocation(), diag::note_template_param_here);
      return true;
    }
  }

  return false;
}
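// Illustrative sketch (not part of the original source; 'Holder' is an
// invented name). In C++11, the null-pointer check above accepts:
//
//   template <int *P> struct Holder {};
//   Holder<nullptr> H0;  // null pointer value of matching type
//   Holder<0> H1;        // untyped NULL constant: diagnosed with a fix-it
//                        // suggesting static_cast<int *>(0)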
/// \brief Checks whether the given template argument is the address
/// of an object or function according to C++ [temp.arg.nontype]p1.
static bool
CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
                                               NonTypeTemplateParmDecl *Param,
                                               QualType ParamType,
                                               Expr *ArgIn,
                                               TemplateArgument &Converted) {
  bool Invalid = false;
  Expr *Arg = ArgIn;
  QualType ArgType = Arg->getType();

  bool AddressTaken = false;
  SourceLocation AddrOpLoc;
  if (S.getLangOpts().MicrosoftExt) {
    // Microsoft Visual C++ strips all casts, allows an arbitrary number of
    // dereference and address-of operators.
    Arg = Arg->IgnoreParenCasts();

    bool ExtWarnMSTemplateArg = false;
    UnaryOperatorKind FirstOpKind;
    SourceLocation FirstOpLoc;
    while (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
      UnaryOperatorKind UnOpKind = UnOp->getOpcode();
      if (UnOpKind == UO_Deref)
        ExtWarnMSTemplateArg = true;
      if (UnOpKind == UO_AddrOf || UnOpKind == UO_Deref) {
        Arg = UnOp->getSubExpr()->IgnoreParenCasts();
        if (!AddrOpLoc.isValid()) {
          FirstOpKind = UnOpKind;
          FirstOpLoc = UnOp->getOperatorLoc();
        }
      } else
        break;
    }
    if (FirstOpLoc.isValid()) {
      if (ExtWarnMSTemplateArg)
        S.Diag(ArgIn->getLocStart(), diag::ext_ms_deref_template_argument)
          << ArgIn->getSourceRange();

      if (FirstOpKind == UO_AddrOf)
        AddressTaken = true;
      else if (Arg->getType()->isPointerType()) {
        // We cannot let pointers get dereferenced here, that is obviously not
        // a constant expression.
        assert(FirstOpKind == UO_Deref);
        S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
          << Arg->getSourceRange();
      }
    }
  } else {
    // See through any implicit casts we added to fix the type.
    Arg = Arg->IgnoreImpCasts();

    // C++ [temp.arg.nontype]p1:
    //
    //   A template-argument for a non-type, non-template
    //   template-parameter shall be one of: [...]
    //
    //     -- the address of an object or function with external
    //        linkage, including function templates and function
    //        template-ids but excluding non-static class members,
    //        expressed as & id-expression where the & is optional if
    //        the name refers to a function or array, or if the
    //        corresponding template-parameter is a reference; or

    // In C++98/03 mode, give an extension warning on any extra parentheses.
    // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
    bool ExtraParens = false;
    while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
      if (!Invalid && !ExtraParens) {
        S.Diag(Arg->getLocStart(),
               S.getLangOpts().CPlusPlus11
                   ? diag::warn_cxx98_compat_template_arg_extra_parens
                   : diag::ext_template_arg_extra_parens)
            << Arg->getSourceRange();
        ExtraParens = true;
      }

      Arg = Parens->getSubExpr();
    }

    while (SubstNonTypeTemplateParmExpr *subst =
               dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
      Arg = subst->getReplacement()->IgnoreImpCasts();

    if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
      if (UnOp->getOpcode() == UO_AddrOf) {
        Arg = UnOp->getSubExpr();
        AddressTaken = true;
        AddrOpLoc = UnOp->getOperatorLoc();
      }
    }

    while (SubstNonTypeTemplateParmExpr *subst =
               dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
      Arg = subst->getReplacement()->IgnoreImpCasts();
  }

  DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
  ValueDecl *Entity = DRE ? DRE->getDecl() : nullptr;

  // If our parameter has pointer type, check for a null template value.
  if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
    NullPointerValueKind NPV;
    // dllimport'd entities aren't constant but are available inside of
    // template arguments.
    if (Entity && Entity->hasAttr<DLLImportAttr>())
      NPV = NPV_NotNullPointer;
    else
      NPV = isNullPointerValueTemplateArgument(S, Param, ParamType, ArgIn);
    switch (NPV) {
    case NPV_NullPointer:
      S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
      Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
                                   /*isNullPtr=*/true);
      return false;

    case NPV_Error:
      return true;

    case NPV_NotNullPointer:
      break;
    }
  }

  // Stop checking the precise nature of the argument if it is value
  // dependent, it should be checked when instantiated.
  if (Arg->isValueDependent()) {
    Converted = TemplateArgument(ArgIn);
    return false;
  }

  if (isa<CXXUuidofExpr>(Arg)) {
    if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType,
                                                       ArgIn, Arg, ArgType))
      return true;

    Converted = TemplateArgument(ArgIn);
    return false;
  }

  if (!DRE) {
    S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
      << Arg->getSourceRange();
    S.Diag(Param->getLocation(), diag::note_template_param_here);
    return true;
  }

  // Cannot refer to non-static data members
  if (isa<FieldDecl>(Entity) || isa<IndirectFieldDecl>(Entity)) {
    S.Diag(Arg->getLocStart(), diag::err_template_arg_field)
      << Entity << Arg->getSourceRange();
    S.Diag(Param->getLocation(), diag::note_template_param_here);
    return true;
  }

  // Cannot refer to non-static member functions
  if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Entity)) {
    if (!Method->isStatic()) {
      S.Diag(Arg->getLocStart(), diag::err_template_arg_method)
        << Method << Arg->getSourceRange();
      S.Diag(Param->getLocation(), diag::note_template_param_here);
      return true;
    }
  }

  FunctionDecl *Func = dyn_cast<FunctionDecl>(Entity);
  VarDecl *Var = dyn_cast<VarDecl>(Entity);

  // A non-type template argument must refer to an object or function.
  if (!Func && !Var) {
    // We found something, but we don't know specifically what it is.
    S.Diag(Arg->getLocStart(), diag::err_template_arg_not_object_or_func)
      << Arg->getSourceRange();
    S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
    return true;
  }

  // Address / reference template args must have external linkage in C++98.
  if (Entity->getFormalLinkage() == InternalLinkage) {
    S.Diag(Arg->getLocStart(), S.getLangOpts().CPlusPlus11 ?
             diag::warn_cxx98_compat_template_arg_object_internal :
             diag::ext_template_arg_object_internal)
      << !Func << Entity << Arg->getSourceRange();
    S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
      << !Func;
  } else if (!Entity->hasLinkage()) {
    S.Diag(Arg->getLocStart(), diag::err_template_arg_object_no_linkage)
      << !Func << Entity << Arg->getSourceRange();
    S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
      << !Func;
    return true;
  }

  if (Func) {
    // If the template parameter has pointer type, the function decays.
    if (ParamType->isPointerType() && !AddressTaken)
      ArgType = S.Context.getPointerType(Func->getType());
    else if (AddressTaken && ParamType->isReferenceType()) {
      // If we originally had an address-of operator, but the
      // parameter has reference type, complain and (if things look
      // like they will work) drop the address-of operator.
      if (!S.Context.hasSameUnqualifiedType(Func->getType(),
                                            ParamType.getNonReferenceType())) {
        S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
          << ParamType;
        S.Diag(Param->getLocation(), diag::note_template_param_here);
        return true;
      }

      S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
        << ParamType
        << FixItHint::CreateRemoval(AddrOpLoc);
      S.Diag(Param->getLocation(), diag::note_template_param_here);

      ArgType = Func->getType();
    }
  } else {
    // A value of reference type is not an object.
    if (Var->getType()->isReferenceType()) {
      S.Diag(Arg->getLocStart(), diag::err_template_arg_reference_var)
        << Var->getType() << Arg->getSourceRange();
      S.Diag(Param->getLocation(), diag::note_template_param_here);
      return true;
    }

    // A template argument must have static storage duration.
    if (Var->getTLSKind()) {
      S.Diag(Arg->getLocStart(), diag::err_template_arg_thread_local)
        << Arg->getSourceRange();
      S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
      return true;
    }

    // If the template parameter has pointer type, we must have taken
    // the address of this object.
    if (ParamType->isReferenceType()) {
      if (AddressTaken) {
        // If we originally had an address-of operator, but the
        // parameter has reference type, complain and (if things look
        // like they will work) drop the address-of operator.
        if (!S.Context.hasSameUnqualifiedType(Var->getType(),
                                           ParamType.getNonReferenceType())) {
          S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
            << ParamType;
          S.Diag(Param->getLocation(), diag::note_template_param_here);
          return true;
        }

        S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
          << ParamType
          << FixItHint::CreateRemoval(AddrOpLoc);
        S.Diag(Param->getLocation(), diag::note_template_param_here);

        ArgType = Var->getType();
      }
    } else if (!AddressTaken && ParamType->isPointerType()) {
      if (Var->getType()->isArrayType()) {
        // Array-to-pointer decay.
        ArgType = S.Context.getArrayDecayedType(Var->getType());
      } else {
        // If the template parameter has pointer type but the address of
        // this object was not taken, complain and (possibly) recover by
        // taking the address of the entity.
        ArgType = S.Context.getPointerType(Var->getType());
        if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
          S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
            << ParamType;
          S.Diag(Param->getLocation(), diag::note_template_param_here);
          return true;
        }

        S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
          << ParamType
          << FixItHint::CreateInsertion(Arg->getLocStart(), "&");
        S.Diag(Param->getLocation(), diag::note_template_param_here);
      }
    }
  }

  if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType,
                                                     ArgIn, Arg, ArgType))
    return true;

  // Create the template argument.
  Converted =
      TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()), ParamType);
  S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity, false);
  return false;
}
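// Illustrative sketch (not part of the original source; 'obj' and 'Ptr' are
// invented names). The address check above accepts an entity with linkage:
//
//   int obj;
//   template <int *P> struct Ptr {};
//   Ptr<&obj> X;  // ok: address of an object with external linkage
//   Ptr<obj> Y;   // diagnosed; a fix-it suggests inserting '&'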
/// \brief Checks whether the given template argument is a pointer to
/// member constant according to C++ [temp.arg.nontype]p1.
static bool CheckTemplateArgumentPointerToMember(Sema &S,
                                                 NonTypeTemplateParmDecl *Param,
                                                 QualType ParamType,
                                                 Expr *&ResultArg,
                                                 TemplateArgument &Converted) {
  bool Invalid = false;

  // Check for a null pointer value.
  Expr *Arg = ResultArg;
  switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
  case NPV_Error:
    return true;
  case NPV_NullPointer:
    S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
    Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
                                 /*isNullPtr*/true);
    if (S.Context.getTargetInfo().getCXXABI().isMicrosoft())
      S.RequireCompleteType(Arg->getExprLoc(), ParamType, 0);
    return false;
  case NPV_NotNullPointer:
    break;
  }

  bool ObjCLifetimeConversion;
  if (S.IsQualificationConversion(Arg->getType(),
                                  ParamType.getNonReferenceType(),
                                  false, ObjCLifetimeConversion)) {
    Arg = S.ImpCastExprToType(Arg, ParamType, CK_NoOp,
                              Arg->getValueKind()).get();
    ResultArg = Arg;
  } else if (!S.Context.hasSameUnqualifiedType(Arg->getType(),
                ParamType.getNonReferenceType())) {
    // We can't perform this conversion.
    S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
      << Arg->getType() << ParamType << Arg->getSourceRange();
    S.Diag(Param->getLocation(), diag::note_template_param_here);
    return true;
  }

  // See through any implicit casts we added to fix the type.
  while (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
    Arg = Cast->getSubExpr();

  // C++ [temp.arg.nontype]p1:
  //
  //   A template-argument for a non-type, non-template
  //   template-parameter shall be one of: [...]
  //
  //     -- a pointer to member expressed as described in 5.3.1.
  DeclRefExpr *DRE = nullptr;

  // In C++98/03 mode, give an extension warning on any extra parentheses.
  // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
  bool ExtraParens = false;
  while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
    if (!Invalid && !ExtraParens) {
      S.Diag(Arg->getLocStart(),
             S.getLangOpts().CPlusPlus11 ?
               diag::warn_cxx98_compat_template_arg_extra_parens :
               diag::ext_template_arg_extra_parens)
        << Arg->getSourceRange();
      ExtraParens = true;
    }

    Arg = Parens->getSubExpr();
  }

  while (SubstNonTypeTemplateParmExpr *subst =
           dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
    Arg = subst->getReplacement()->IgnoreImpCasts();

  // A pointer-to-member constant written &Class::member.
  if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
    if (UnOp->getOpcode() == UO_AddrOf) {
      DRE = dyn_cast<DeclRefExpr>(UnOp->getSubExpr());
      if (DRE && !DRE->getQualifier())
        DRE = nullptr;
    }
  }
  // A constant of pointer-to-member type.
  else if ((DRE = dyn_cast<DeclRefExpr>(Arg))) {
    if (ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl())) {
      if (VD->getType()->isMemberPointerType()) {
        if (isa<NonTypeTemplateParmDecl>(VD)) {
          if (Arg->isTypeDependent() || Arg->isValueDependent()) {
            Converted = TemplateArgument(Arg);
          } else {
            VD = cast<ValueDecl>(VD->getCanonicalDecl());
            Converted = TemplateArgument(VD, ParamType);
          }
          return Invalid;
        }
      }
    }

    DRE = nullptr;
  }

  if (!DRE)
    return S.Diag(Arg->getLocStart(),
                  diag::err_template_arg_not_pointer_to_member_form)
      << Arg->getSourceRange();

  if (isa<FieldDecl>(DRE->getDecl()) ||
      isa<IndirectFieldDecl>(DRE->getDecl()) ||
      isa<CXXMethodDecl>(DRE->getDecl())) {
    assert((isa<FieldDecl>(DRE->getDecl()) ||
            isa<IndirectFieldDecl>(DRE->getDecl()) ||
            !cast<CXXMethodDecl>(DRE->getDecl())->isStatic()) &&
           "Only non-static member pointers can make it here");

    // Okay: this is the address of a non-static member, and therefore
    // a member pointer constant.
    if (Arg->isTypeDependent() || Arg->isValueDependent()) {
      Converted = TemplateArgument(Arg);
    } else {
      ValueDecl *D = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
      Converted = TemplateArgument(D, ParamType);
    }
    return Invalid;
  }

  // We found something else, but we don't know specifically what it is.
  S.Diag(Arg->getLocStart(),
         diag::err_template_arg_not_pointer_to_member_form)
    << Arg->getSourceRange();
  S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
  return true;
}
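// Illustrative sketch (not part of the original source; 'S' and 'Mem' are
// invented names). The pointer-to-member form recognized above:
//
//   struct S { int m; };
//   template <int S::*MP> struct Mem {};
//   Mem<&S::m> M;  // '&' applied to a qualified-id names a member pointer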
/// \brief Check a template argument against its corresponding
/// non-type template parameter.
///
/// This routine implements the semantics of C++ [temp.arg.nontype].
/// If an error occurred, it returns ExprError(); otherwise, it
/// returns the converted template argument. \p ParamType is the
/// type of the non-type template parameter after it has been instantiated.
ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                       QualType ParamType, Expr *Arg,
                                       TemplateArgument &Converted,
                                       CheckTemplateArgumentKind CTAK) {
  SourceLocation StartLoc = Arg->getLocStart();

  // If either the parameter has a dependent type or the argument is
  // type-dependent, there's nothing we can check now.
  if (ParamType->isDependentType() || Arg->isTypeDependent()) {
    // FIXME: Produce a cloned, canonical expression?
    Converted = TemplateArgument(Arg);
    return Arg;
  }

  // We should have already dropped all cv-qualifiers by now.
  assert(!ParamType.hasQualifiers() &&
         "non-type template parameter type cannot be qualified");

  if (CTAK == CTAK_Deduced &&
      !Context.hasSameUnqualifiedType(ParamType, Arg->getType())) {
    // C++ [temp.deduct.type]p17:
    //   If, in the declaration of a function template with a non-type
    //   template-parameter, the non-type template-parameter is used
    //   in an expression in the function parameter-list and, if the
    //   corresponding template-argument is deduced, the
    //   template-argument type shall match the type of the
    //   template-parameter exactly, except that a template-argument
    //   deduced from an array bound may be of any integral type.
    Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
      << Arg->getType().getUnqualifiedType()
      << ParamType.getUnqualifiedType();
    Diag(Param->getLocation(), diag::note_template_param_here);
    return ExprError();
  }

  if (getLangOpts().CPlusPlus1z) {
    // FIXME: We can do some limited checking for a value-dependent but not
    // type-dependent argument.
    if (Arg->isValueDependent()) {
      Converted = TemplateArgument(Arg);
      return Arg;
    }

    // C++1z [temp.arg.nontype]p1:
    //   A template-argument for a non-type template parameter shall be
    //   a converted constant expression of the type of the
    //   template-parameter.
    APValue Value;
    ExprResult ArgResult = CheckConvertedConstantExpression(
        Arg, ParamType, Value, CCEK_TemplateArg);
    if (ArgResult.isInvalid())
      return ExprError();

    QualType CanonParamType = Context.getCanonicalType(ParamType);

    // Convert the APValue to a TemplateArgument.
    switch (Value.getKind()) {
    case APValue::Uninitialized:
      assert(ParamType->isNullPtrType());
      Converted = TemplateArgument(CanonParamType, /*isNullPtr*/true);
      break;
    case APValue::Int:
      assert(ParamType->isIntegralOrEnumerationType());
      Converted = TemplateArgument(Context, Value.getInt(), CanonParamType);
      break;
    case APValue::MemberPointer: {
      assert(ParamType->isMemberPointerType());

      // FIXME: We need TemplateArgument representation and mangling for
      // these.
      if (!Value.getMemberPointerPath().empty()) {
        Diag(Arg->getLocStart(),
             diag::err_template_arg_member_ptr_base_derived_not_supported)
            << Value.getMemberPointerDecl() << ParamType
            << Arg->getSourceRange();
        return ExprError();
      }

      auto *VD = const_cast<ValueDecl*>(Value.getMemberPointerDecl());
      Converted = VD ? TemplateArgument(VD, CanonParamType)
                     : TemplateArgument(CanonParamType, /*isNullPtr*/true);
      break;
    }
    case APValue::LValue: {
      //   For a non-type template-parameter of pointer or reference type,
      //   the value of the constant expression shall not refer to
      assert(ParamType->isPointerType() || ParamType->isReferenceType() ||
             ParamType->isNullPtrType());
      // -- a temporary object
      // -- a string literal
      // -- the result of a typeid expression, or
      // -- a predefined __func__ variable
      if (auto *E = Value.getLValueBase().dyn_cast<const Expr*>()) {
        if (isa<CXXUuidofExpr>(E)) {
          Converted = TemplateArgument(const_cast<Expr*>(E));
          break;
        }
        Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
          << Arg->getSourceRange();
        return ExprError();
      }
      auto *VD = const_cast<ValueDecl *>(
          Value.getLValueBase().dyn_cast<const ValueDecl *>());
      // -- a subobject
      if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 &&
          VD && VD->getType()->isArrayType() &&
          Value.getLValuePath()[0].ArrayIndex == 0 &&
          !Value.isLValueOnePastTheEnd() && ParamType->isPointerType()) {
        // Per defect report (no number yet):
        //   ... other than a pointer to the first element of a complete
        //       array object.
      } else if (!Value.hasLValuePath() || Value.getLValuePath().size() ||
                 Value.isLValueOnePastTheEnd()) {
        Diag(StartLoc, diag::err_non_type_template_arg_subobject)
          << Value.getAsString(Context, ParamType);
        return ExprError();
      }
      assert((VD || !ParamType->isReferenceType()) &&
             "null reference should not be a constant expression");
      assert((!VD || !ParamType->isNullPtrType()) &&
             "non-null value of type nullptr_t?");
      Converted = VD ? TemplateArgument(VD, CanonParamType)
                     : TemplateArgument(CanonParamType, /*isNullPtr*/true);
      break;
    }
    case APValue::AddrLabelDiff:
      return Diag(StartLoc, diag::err_non_type_template_arg_addr_label_diff);
    case APValue::Float:
    case APValue::ComplexInt:
    case APValue::ComplexFloat:
    case APValue::Vector:
    case APValue::Array:
    case APValue::Struct:
    case APValue::Union:
      llvm_unreachable("invalid kind for template argument");
    }

    return ArgResult.get();
  }

  // C++ [temp.arg.nontype]p5:
  //   The following conversions are performed on each expression used
  //   as a non-type template-argument. If a non-type
  //   template-argument cannot be converted to the type of the
  //   corresponding template-parameter then the program is
  //   ill-formed.
  if (ParamType->isIntegralOrEnumerationType()) {
    // C++11:
    //   -- for a non-type template-parameter of integral or
    //      enumeration type, conversions permitted in a converted
    //      constant expression are applied.
    //
    // C++98:
    //   -- for a non-type template-parameter of integral or
    //      enumeration type, integral promotions (4.5) and integral
    //      conversions (4.7) are applied.

    if (getLangOpts().CPlusPlus11) {
      // We can't check arbitrary value-dependent arguments.
      // FIXME: If there's no viable conversion to the template parameter
      // type, we should be able to diagnose that prior to instantiation.
      if (Arg->isValueDependent()) {
        Converted = TemplateArgument(Arg);
        return Arg;
      }

      // C++ [temp.arg.nontype]p1:
      //   A template-argument for a non-type, non-template
      //   template-parameter shall be one of:
      //
      //     -- for a non-type template-parameter of integral or enumeration
      //        type, a converted constant expression of the type of the
      //        template-parameter; or
      llvm::APSInt Value;
      ExprResult ArgResult =
        CheckConvertedConstantExpression(Arg, ParamType, Value,
                                         CCEK_TemplateArg);
      if (ArgResult.isInvalid())
        return ExprError();

      // Widen the argument value to sizeof(parameter type). This is almost
      // always a no-op, except when the parameter type is bool. In
      // that case, this may extend the argument from 1 bit to 8 bits.
      QualType IntegerType = ParamType;
      if (const EnumType *Enum = IntegerType->getAs<EnumType>())
        IntegerType = Enum->getDecl()->getIntegerType();
      Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));

      Converted = TemplateArgument(Context, Value,
                                   Context.getCanonicalType(ParamType));
      return ArgResult;
    }

    ExprResult ArgResult = DefaultLvalueConversion(Arg);
    if (ArgResult.isInvalid())
      return ExprError();
    Arg = ArgResult.get();

    QualType ArgType = Arg->getType();

    // C++ [temp.arg.nontype]p1:
    //   A template-argument for a non-type, non-template
    //   template-parameter shall be one of:
    //
    //     -- an integral constant-expression of integral or enumeration
    //        type; or
    //     -- the name of a non-type template-parameter; or
    SourceLocation NonConstantLoc;
    llvm::APSInt Value;
    if (!ArgType->isIntegralOrEnumerationType()) {
      Diag(Arg->getLocStart(),
           diag::err_template_arg_not_integral_or_enumeral)
        << ArgType << Arg->getSourceRange();
      Diag(Param->getLocation(), diag::note_template_param_here);
      return ExprError();
    } else if (!Arg->isValueDependent()) {
      class TmplArgICEDiagnoser : public VerifyICEDiagnoser {
        QualType T;

      public:
        TmplArgICEDiagnoser(QualType T) : T(T) { }

        void diagnoseNotICE(Sema &S, SourceLocation Loc,
                            SourceRange SR) override {
          S.Diag(Loc, diag::err_template_arg_not_ice) << T << SR;
        }
      } Diagnoser(ArgType);

      Arg = VerifyIntegerConstantExpression(Arg, &Value, Diagnoser,
                                            false).get();
      if (!Arg)
        return ExprError();
    }

    // From here on out, all we care about is the unqualified form
    // of the argument type.
    ArgType = ArgType.getUnqualifiedType();

    // Try to convert the argument to the parameter's type.
    if (Context.hasSameType(ParamType, ArgType)) {
      // Okay: no conversion necessary
    } else if (ParamType->isBooleanType()) {
      // This is an integral-to-boolean conversion.
      Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralToBoolean).get();
    } else if (IsIntegralPromotion(Arg, ArgType, ParamType) ||
               !ParamType->isEnumeralType()) {
      // This is an integral promotion or conversion.
      Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralCast).get();
    } else {
      // We can't perform this conversion.
      Diag(Arg->getLocStart(),
           diag::err_template_arg_not_convertible)
        << Arg->getType() << ParamType << Arg->getSourceRange();
      Diag(Param->getLocation(), diag::note_template_param_here);
      return ExprError();
    }

    // Add the value of this argument to the list of converted
    // arguments. We use the bitwidth and signedness of the template
    // parameter.
    if (Arg->isValueDependent()) {
      // The argument is value-dependent. Create a new
      // TemplateArgument with the converted expression.
      Converted = TemplateArgument(Arg);
      return Arg;
    }

    QualType IntegerType = Context.getCanonicalType(ParamType);
    if (const EnumType *Enum = IntegerType->getAs<EnumType>())
      IntegerType =
        Context.getCanonicalType(Enum->getDecl()->getIntegerType());

    if (ParamType->isBooleanType()) {
      // Value must be zero or one.
      Value = Value != 0;
      unsigned AllowedBits = Context.getTypeSize(IntegerType);
      if (Value.getBitWidth() != AllowedBits)
        Value = Value.extOrTrunc(AllowedBits);
      Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
    } else {
      llvm::APSInt OldValue = Value;

      // Coerce the template argument's value to the value it will have
      // based on the template parameter's type.
      unsigned AllowedBits = Context.getTypeSize(IntegerType);
      if (Value.getBitWidth() != AllowedBits)
        Value = Value.extOrTrunc(AllowedBits);
      Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());

      // Complain if an unsigned parameter received a negative value.
      if (IntegerType->isUnsignedIntegerOrEnumerationType()
               && (OldValue.isSigned() && OldValue.isNegative())) {
        Diag(Arg->getLocStart(), diag::warn_template_arg_negative)
          << OldValue.toString(10) << Value.toString(10) << Param->getType()
          << Arg->getSourceRange();
        Diag(Param->getLocation(), diag::note_template_param_here);
      }

      // Complain if we overflowed the template parameter's type.
      unsigned RequiredBits;
      if (IntegerType->isUnsignedIntegerOrEnumerationType())
        RequiredBits = OldValue.getActiveBits();
      else if (OldValue.isUnsigned())
        RequiredBits = OldValue.getActiveBits() + 1;
      else
        RequiredBits = OldValue.getMinSignedBits();
      if (RequiredBits > AllowedBits) {
        Diag(Arg->getLocStart(),
             diag::warn_template_arg_too_large)
          << OldValue.toString(10) << Value.toString(10) << Param->getType()
          << Arg->getSourceRange();
        Diag(Param->getLocation(), diag::note_template_param_here);
      }
    }

    Converted = TemplateArgument(Context, Value,
                                 ParamType->isEnumeralType()
                                   ? Context.getCanonicalType(ParamType)
                                   : IntegerType);
    return Arg;
  }

  QualType ArgType = Arg->getType();
  DeclAccessPair FoundResult; // temporary for ResolveOverloadedFunction

  // Handle pointer-to-function, reference-to-function, and
  // pointer-to-member-function all in (roughly) the same way.
  if (// -- For a non-type template-parameter of type pointer to
      //    function, only the function-to-pointer conversion (4.3) is
      //    applied. If the template-argument represents a set of
      //    overloaded functions (or a pointer to such), the matching
      //    function is selected from the set (13.4).
      (ParamType->isPointerType() &&
       ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType()) ||
      // -- For a non-type template-parameter of type reference to
      //    function, no conversions apply. If the template-argument
      //    represents a set of overloaded functions, the matching
      //    function is selected from the set (13.4).
      (ParamType->isReferenceType() &&
       ParamType->getAs<ReferenceType>()->getPointeeType()
         ->isFunctionType()) ||
      // -- For a non-type template-parameter of type pointer to
      //    member function, no conversions apply. If the
      //    template-argument represents a set of overloaded member
      //    functions, the matching member function is selected from
      //    the set (13.4).
      (ParamType->isMemberPointerType() &&
       ParamType->getAs<MemberPointerType>()->getPointeeType()
         ->isFunctionType())) {

    if (Arg->getType() == Context.OverloadTy) {
      if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
                                                                true,
                                                                FoundResult)) {
        if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
          return ExprError();

        Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
        ArgType = Arg->getType();
      } else
        return ExprError();
    }

    if (!ParamType->isMemberPointerType()) {
      if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
                                                         ParamType,
                                                         Arg, Converted))
        return ExprError();
      return Arg;
    }

    if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
                                             Converted))
      return ExprError();
    return Arg;
  }

  if (ParamType->isPointerType()) {
    //   -- for a non-type template-parameter of type pointer to
    //      object, qualification conversions (4.4) and the
    //      array-to-pointer conversion (4.2) are applied.
    // C++0x also allows a value of std::nullptr_t.
    assert(ParamType->getPointeeType()->isIncompleteOrObjectType() &&
           "Only object pointers allowed here");

    if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
                                                       ParamType,
                                                       Arg, Converted))
      return ExprError();
    return Arg;
  }

  if (const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>()) {
    //   -- For a non-type template-parameter of type reference to
    //      object, no conversions apply. The type referred to by the
    //      reference may be more cv-qualified than the (otherwise
    //      identical) type of the template-argument. The
    //      template-parameter is bound directly to the
    //      template-argument, which must be an lvalue.
    assert(ParamRefType->getPointeeType()->isIncompleteOrObjectType() &&
           "Only object references allowed here");

    if (Arg->getType() == Context.OverloadTy) {
      if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg,
                                                ParamRefType->getPointeeType(),
                                                                true,
                                                                FoundResult)) {
        if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
          return ExprError();

        Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
        ArgType = Arg->getType();
      } else
        return ExprError();
    }

    if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
                                                       ParamType,
                                                       Arg, Converted))
      return ExprError();
    return Arg;
  }

  // Deal with parameters of type std::nullptr_t.
  if (ParamType->isNullPtrType()) {
    if (Arg->isTypeDependent() || Arg->isValueDependent()) {
      Converted = TemplateArgument(Arg);
      return Arg;
    }

    switch (isNullPointerValueTemplateArgument(*this, Param, ParamType,
                                               Arg)) {
    case NPV_NotNullPointer:
      Diag(Arg->getExprLoc(), diag::err_template_arg_not_convertible)
        << Arg->getType() << ParamType;
      Diag(Param->getLocation(), diag::note_template_param_here);
      return ExprError();

    case NPV_Error:
      return ExprError();

    case NPV_NullPointer:
      Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
      Converted = TemplateArgument(Context.getCanonicalType(ParamType),
                                   /*isNullPtr*/true);
      return Arg;
    }
  }

  //     -- For a non-type template-parameter of type pointer to data
  //        member, qualification conversions (4.4) are applied.
  assert(ParamType->isMemberPointerType() &&
         "Only pointers to members remain");

  if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
                                           Converted))
    return ExprError();
  return Arg;
}
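// Illustrative sketch (not part of the original source; 'Impl' and 'User'
// are invented names). The template template parameter check that follows
// matches parameter lists, not partial specializations:
//
//   template <typename> struct Impl {};
//   template <template <typename> class TT> struct User {};
//   User<Impl> U;  // ok: Impl's parameter list matches TT's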
/// \brief Check a template argument against its corresponding
/// template template parameter.
///
/// This routine implements the semantics of C++ [temp.arg.template].
/// It returns true if an error occurred, and false otherwise.
bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
                                 TemplateArgumentLoc &Arg,
                                 unsigned ArgumentPackIndex) {
  TemplateName Name = Arg.getArgument().getAsTemplateOrTemplatePattern();
  TemplateDecl *Template = Name.getAsTemplateDecl();
  if (!Template) {
    // Any dependent template name is fine.
    assert(Name.isDependent() && "Non-dependent template isn't a declaration?");
    return false;
  }

  // C++0x [temp.arg.template]p1:
  //   A template-argument for a template template-parameter shall be
  //   the name of a class template or an alias template, expressed as an
  //   id-expression. When the template-argument names a class template, only
  //   primary class templates are considered when matching the
  //   template template argument with the corresponding parameter;
  //   partial specializations are not considered even if their
  //   parameter lists match that of the template template parameter.
  //
  // Note that we also allow template template parameters here, which
  // will happen when we are dealing with, e.g., class template
  // partial specializations.
  if (!isa<ClassTemplateDecl>(Template) &&
      !isa<TemplateTemplateParmDecl>(Template) &&
      !isa<TypeAliasTemplateDecl>(Template)) {
    assert(isa<FunctionTemplateDecl>(Template) &&
           "Only function templates are possible here");
    Diag(Arg.getLocation(), diag::err_template_arg_not_class_template);
    Diag(Template->getLocation(), diag::note_template_arg_refers_here_func)
      << Template;
  }

  TemplateParameterList *Params = Param->getTemplateParameters();
  if (Param->isExpandedParameterPack())
    Params = Param->getExpansionTemplateParameters(ArgumentPackIndex);

  return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
                                         Params,
                                         true,
                                         TPL_TemplateTemplateArgumentMatch,
                                         Arg.getLocation());
}

/// \brief Given a non-type template argument that refers to a
/// declaration and the type of its corresponding non-type template
/// parameter, produce an expression that properly refers to that
/// declaration.
ExprResult
Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                              QualType ParamType,
                                              SourceLocation Loc) {
  // C++ [temp.param]p8:
  //
  //   A non-type template-parameter of type "array of T" or
  //   "function returning T" is adjusted to be of type "pointer to
  //   T" or "pointer to function returning T", respectively.
  if (ParamType->isArrayType())
    ParamType = Context.getArrayDecayedType(ParamType);
  else if (ParamType->isFunctionType())
    ParamType = Context.getPointerType(ParamType);

  // For a NULL non-type template argument, return nullptr casted to the
  // parameter's type.
  if (Arg.getKind() == TemplateArgument::NullPtr) {
    return ImpCastExprToType(
             new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc),
                             ParamType,
                             ParamType->getAs<MemberPointerType>()
                               ? CK_NullToMemberPointer
                               : CK_NullToPointer);
  }
  assert(Arg.getKind() == TemplateArgument::Declaration &&
         "Only declaration template arguments permitted here");

  ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());

  if (VD->getDeclContext()->isRecord() &&
      (isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
       isa<IndirectFieldDecl>(VD))) {
    // If the value is a class member, we might have a pointer-to-member.
    // Determine whether the non-type template parameter is of
    // pointer-to-member type. If so, we need to build an appropriate
    // expression for a pointer-to-member, since a "normal" DeclRefExpr
    // would refer to the member itself.
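    //
    // Illustrative example (hypothetical names; not from the original source):
    //
    //   struct S { int x; };
    //   template <int S::*P> struct M {};
    //   M<&S::x> m;  // the argument must be built as '&S::x', not as 'x'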
    if (ParamType->isMemberPointerType()) {
      QualType ClassType
        = Context.getTypeDeclType(cast<RecordDecl>(VD->getDeclContext()));
      NestedNameSpecifier *Qualifier
        = NestedNameSpecifier::Create(Context, nullptr, false,
                                      ClassType.getTypePtr());
      CXXScopeSpec SS;
      SS.MakeTrivial(Context, Qualifier, Loc);

      // The actual value-ness of this is unimportant, but for
      // internal consistency's sake, references to instance methods
      // are r-values.
      ExprValueKind VK = VK_LValue;
      if (isa<CXXMethodDecl>(VD) && cast<CXXMethodDecl>(VD)->isInstance())
        VK = VK_RValue;

      ExprResult RefExpr = BuildDeclRefExpr(VD,
                                            VD->getType().getNonReferenceType(),
                                            VK,
                                            Loc,
                                            &SS);
      if (RefExpr.isInvalid())
        return ExprError();

      RefExpr = CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());

      // We might need to perform a trailing qualification conversion, since
      // the element type on the parameter could be more qualified than the
      // element type in the expression we constructed.
      bool ObjCLifetimeConversion;
      if (IsQualificationConversion(((Expr*) RefExpr.get())->getType(),
                                    ParamType.getUnqualifiedType(), false,
                                    ObjCLifetimeConversion))
        RefExpr = ImpCastExprToType(RefExpr.get(),
                                    ParamType.getUnqualifiedType(), CK_NoOp);

      assert(!RefExpr.isInvalid() &&
             Context.hasSameType(((Expr*) RefExpr.get())->getType(),
                                 ParamType.getUnqualifiedType()));
      return RefExpr;
    }
  }

  QualType T = VD->getType().getNonReferenceType();

  if (ParamType->isPointerType()) {
    // When the non-type template parameter is a pointer, take the
    // address of the declaration.
    ExprResult RefExpr = BuildDeclRefExpr(VD, T, VK_LValue, Loc);
    if (RefExpr.isInvalid())
      return ExprError();

    if (T->isFunctionType() || T->isArrayType()) {
      // Decay functions and arrays.
      RefExpr = DefaultFunctionArrayConversion(RefExpr.get());
      if (RefExpr.isInvalid())
        return ExprError();

      return RefExpr;
    }

    // Take the address of everything else.
    return CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
  }

  ExprValueKind VK = VK_RValue;

  // If the non-type template parameter has reference type, qualify the
  // resulting declaration reference with the extra qualifiers on the
  // type that the reference refers to.
  if (const ReferenceType *TargetRef = ParamType->getAs<ReferenceType>()) {
    VK = VK_LValue;
    T = Context.getQualifiedType(T,
                                 TargetRef->getPointeeType().getQualifiers());
  } else if (isa<FunctionDecl>(VD)) {
    // References to functions are always lvalues.
    VK = VK_LValue;
  }

  return BuildDeclRefExpr(VD, T, VK, Loc);
}

/// \brief Construct a new expression that refers to the given
/// integral template argument with the given source-location
/// information.
///
/// This routine takes care of the mapping from an integral template
/// argument (which may have any integral type) to the appropriate
/// literal value.
ExprResult
Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                                  SourceLocation Loc) {
  assert(Arg.getKind() == TemplateArgument::Integral &&
         "Operation is only valid for integral template arguments");
  QualType OrigT = Arg.getIntegralType();

  // If this is an enum type that we're instantiating, we need to use an integer
  // type the same size as the enumerator. We don't want to build an
  // IntegerLiteral with enum type. The integer type of an enum type can be of
  // any integral type with C++11 enum classes, so make sure we create the right
  // type of literal for it.
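  //
  // Illustrative example (hypothetical names; not from the original source):
  //
  //   enum class Color : unsigned char { Red = 1 };
  //   template <Color C> struct Pixel {};
  //   Pixel<Color::Red> p;  // substituted as an unsigned char literal cast
  //                         // back to Color (the C-style cast below)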
  QualType T = OrigT;
  if (const EnumType *ET = OrigT->getAs<EnumType>())
    T = ET->getDecl()->getIntegerType();

  Expr *E;
  if (T->isAnyCharacterType()) {
    CharacterLiteral::CharacterKind Kind;
    if (T->isWideCharType())
      Kind = CharacterLiteral::Wide;
    else if (T->isChar16Type())
      Kind = CharacterLiteral::UTF16;
    else if (T->isChar32Type())
      Kind = CharacterLiteral::UTF32;
    else
      Kind = CharacterLiteral::Ascii;

    E = new (Context) CharacterLiteral(Arg.getAsIntegral().getZExtValue(),
                                       Kind, T, Loc);
  } else if (T->isBooleanType()) {
    E = new (Context) CXXBoolLiteralExpr(Arg.getAsIntegral().getBoolValue(),
                                         T, Loc);
  } else if (T->isNullPtrType()) {
    E = new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
  } else {
    E = IntegerLiteral::Create(Context, Arg.getAsIntegral(), T, Loc);
  }

  if (OrigT->isEnumeralType()) {
    // FIXME: This is a hack. We need a better way to handle substituted
    // non-type template parameters.
    E = CStyleCastExpr::Create(Context, OrigT, VK_RValue, CK_IntegralCast, E,
                               nullptr,
                               Context.getTrivialTypeSourceInfo(OrigT, Loc),
                               Loc, Loc);
  }

  return E;
}

/// \brief Match two template parameters within template parameter lists.
static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
                                       bool Complain,
                                     Sema::TemplateParameterListEqualKind Kind,
                                       SourceLocation TemplateArgLoc) {
  // Check the actual kind (type, non-type, template).
  if (Old->getKind() != New->getKind()) {
    if (Complain) {
      unsigned NextDiag = diag::err_template_param_different_kind;
      if (TemplateArgLoc.isValid()) {
        S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
        NextDiag = diag::note_template_param_different_kind;
      }
      S.Diag(New->getLocation(), NextDiag)
        << (Kind != Sema::TPL_TemplateMatch);
      S.Diag(Old->getLocation(), diag::note_template_prev_declaration)
        << (Kind != Sema::TPL_TemplateMatch);
    }

    return false;
  }

  // Check that both are parameter packs or neither are parameter packs.
  // However, if we are matching a template template argument to a
  // template template parameter, the template template parameter can have
  // a parameter pack where the template template argument does not.
  if (Old->isTemplateParameterPack() != New->isTemplateParameterPack() &&
      !(Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
        Old->isTemplateParameterPack())) {
    if (Complain) {
      unsigned NextDiag = diag::err_template_parameter_pack_non_pack;
      if (TemplateArgLoc.isValid()) {
        S.Diag(TemplateArgLoc,
               diag::err_template_arg_template_params_mismatch);
        NextDiag = diag::note_template_parameter_pack_non_pack;
      }

      unsigned ParamKind = isa<TemplateTypeParmDecl>(New)? 0
                      : isa<NonTypeTemplateParmDecl>(New)? 1
                      : 2;
      S.Diag(New->getLocation(), NextDiag)
        << ParamKind << New->isParameterPack();
      S.Diag(Old->getLocation(), diag::note_template_parameter_pack_here)
        << ParamKind << Old->isParameterPack();
    }

    return false;
  }

  // For non-type template parameters, check the type of the parameter.
  if (NonTypeTemplateParmDecl *OldNTTP
                                    = dyn_cast<NonTypeTemplateParmDecl>(Old)) {
    NonTypeTemplateParmDecl *NewNTTP = cast<NonTypeTemplateParmDecl>(New);

    // If we are matching a template template argument to a template
    // template parameter and one of the non-type template parameter types
    // is dependent, then we must wait until template instantiation time
    // to actually compare the arguments.
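    //
    // Illustrative example (hypothetical names; not from the original source):
    //
    //   template <typename T, template <T N> class TT> struct A {};
    //   template <int N> struct B {};
    //   A<int, B> a;  // TT's parameter type depends on T; the types can only
    //                 // be compared once T is known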
    if (Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
        (OldNTTP->getType()->isDependentType() ||
         NewNTTP->getType()->isDependentType()))
      return true;

    if (!S.Context.hasSameType(OldNTTP->getType(), NewNTTP->getType())) {
      if (Complain) {
        unsigned NextDiag = diag::err_template_nontype_parm_different_type;
        if (TemplateArgLoc.isValid()) {
          S.Diag(TemplateArgLoc,
                 diag::err_template_arg_template_params_mismatch);
          NextDiag = diag::note_template_nontype_parm_different_type;
        }
        S.Diag(NewNTTP->getLocation(), NextDiag)
          << NewNTTP->getType()
          << (Kind != Sema::TPL_TemplateMatch);
        S.Diag(OldNTTP->getLocation(),
               diag::note_template_nontype_parm_prev_declaration)
          << OldNTTP->getType();
      }

      return false;
    }

    return true;
  }

  // For template template parameters, check the template parameter types.
  // The template parameter lists of template template
  // parameters must agree.
  if (TemplateTemplateParmDecl *OldTTP
                                    = dyn_cast<TemplateTemplateParmDecl>(Old)) {
    TemplateTemplateParmDecl *NewTTP = cast<TemplateTemplateParmDecl>(New);
    return S.TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
                                            OldTTP->getTemplateParameters(),
                                            Complain,
                                        (Kind == Sema::TPL_TemplateMatch
                                           ? Sema::TPL_TemplateTemplateParmMatch
                                           : Kind),
                                            TemplateArgLoc);
  }

  return true;
}

/// \brief Diagnose a known arity mismatch when comparing template argument
/// lists.
static
void DiagnoseTemplateParameterListArityMismatch(Sema &S,
                                                TemplateParameterList *New,
                                                TemplateParameterList *Old,
                                      Sema::TemplateParameterListEqualKind Kind,
                                                SourceLocation TemplateArgLoc) {
  unsigned NextDiag = diag::err_template_param_list_different_arity;
  if (TemplateArgLoc.isValid()) {
    S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
    NextDiag = diag::note_template_param_list_different_arity;
  }
  S.Diag(New->getTemplateLoc(), NextDiag)
    << (New->size() > Old->size())
    << (Kind != Sema::TPL_TemplateMatch)
    << SourceRange(New->getTemplateLoc(), New->getRAngleLoc());
  S.Diag(Old->getTemplateLoc(), diag::note_template_prev_declaration)
    << (Kind != Sema::TPL_TemplateMatch)
    << SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
}
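
// Illustrative example of the pack-matching rule handled by the routine
// below (hypothetical names; not from the original source):
//
//   template <template <typename...> class TT> struct W {};
//   template <typename T> struct One {};
//   W<One> w;  // OK in C++11: the parameter pack in TT matches One's single
//              // template parameter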
/// \brief Determine whether the given template parameter lists are
/// equivalent.
///
/// \param New  The new template parameter list, typically written in the
/// source code as part of a new template declaration.
///
/// \param Old  The old template parameter list, typically found via
/// name lookup of the template declared with this template parameter
/// list.
///
/// \param Complain  If true, this routine will produce a diagnostic if
/// the template parameter lists are not equivalent.
///
/// \param Kind describes how we are to match the template parameter lists.
///
/// \param TemplateArgLoc If this source location is valid, then we
/// are actually checking the template parameter list of a template
/// argument (New) against the template parameter list of its
/// corresponding template template parameter (Old). We produce
/// slightly different diagnostics in this scenario.
///
/// \returns True if the template parameter lists are equal, false
/// otherwise.
bool
Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
                                     TemplateParameterList *Old,
                                     bool Complain,
                                     TemplateParameterListEqualKind Kind,
                                     SourceLocation TemplateArgLoc) {
  if (Old->size() != New->size() && Kind != TPL_TemplateTemplateArgumentMatch) {
    if (Complain)
      DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
                                                 TemplateArgLoc);

    return false;
  }

  // C++0x [temp.arg.template]p3:
  //   A template-argument matches a template template-parameter (call it P)
  //   when each of the template parameters in the template-parameter-list of
  //   the template-argument's corresponding class template or alias template
  //   (call it A) matches the corresponding template parameter in the
  //   template-parameter-list of P. [...]
  TemplateParameterList::iterator NewParm = New->begin();
  TemplateParameterList::iterator NewParmEnd = New->end();
  for (TemplateParameterList::iterator OldParm = Old->begin(),
                                    OldParmEnd = Old->end();
       OldParm != OldParmEnd; ++OldParm) {
    if (Kind != TPL_TemplateTemplateArgumentMatch ||
        !(*OldParm)->isTemplateParameterPack()) {
      if (NewParm == NewParmEnd) {
        if (Complain)
          DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
                                                     TemplateArgLoc);

        return false;
      }

      if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
                                      Kind, TemplateArgLoc))
        return false;

      ++NewParm;
      continue;
    }

    // C++0x [temp.arg.template]p3:
    //   [...] When P's template-parameter-list contains a template parameter
    //   pack (14.5.3), the template parameter pack will match zero or more
    //   template parameters or template parameter packs in the
    //   template-parameter-list of A with the same type and form as the
    //   template parameter pack in P (ignoring whether those template
    //   parameters are template parameter packs).
    for (; NewParm != NewParmEnd; ++NewParm) {
      if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
                                      Kind, TemplateArgLoc))
        return false;
    }
  }

  // Make sure we exhausted all of the arguments.
  if (NewParm != NewParmEnd) {
    if (Complain)
      DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
                                                 TemplateArgLoc);

    return false;
  }

  return true;
}

/// \brief Check whether a template can be declared within this scope.
///
/// If the template declaration is valid in this scope, returns
/// false. Otherwise, issues a diagnostic and returns true.
bool
Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
  if (!S)
    return false;

  // Find the nearest enclosing declaration scope.
  while ((S->getFlags() & Scope::DeclScope) == 0 ||
         (S->getFlags() & Scope::TemplateParamScope) != 0)
    S = S->getParent();

  // C++ [temp]p4:
  //   A template [...] shall not have C linkage.
  DeclContext *Ctx = S->getEntity();
  if (Ctx && Ctx->isExternCContext())
    return Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
             << TemplateParams->getSourceRange();

  while (Ctx && isa<LinkageSpecDecl>(Ctx))
    Ctx = Ctx->getParent();

  // C++ [temp]p2:
  //   A template-declaration can appear only as a namespace scope or
  //   class scope declaration.
  if (Ctx) {
    if (Ctx->isFileContext())
      return false;
    if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Ctx)) {
      // C++ [temp.mem]p2:
      //   A local class shall not have member templates.
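      //
      // Illustrative example (not from the original source):
      //
      //   void g() {
      //     struct Local {
      //       template <typename T> void f(T);  // error: member template
      //     };                                  // in a local class
      //   }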
      if (RD->isLocalClass())
        return Diag(TemplateParams->getTemplateLoc(),
                    diag::err_template_inside_local_class)
          << TemplateParams->getSourceRange();
      else
        return false;
    }
  }

  return Diag(TemplateParams->getTemplateLoc(),
              diag::err_template_outside_namespace_or_class_scope)
    << TemplateParams->getSourceRange();
}

/// \brief Determine what kind of template specialization the given declaration
/// is.
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D) {
  if (!D)
    return TSK_Undeclared;

  if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D))
    return Record->getTemplateSpecializationKind();
  if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
    return Function->getTemplateSpecializationKind();
  if (VarDecl *Var = dyn_cast<VarDecl>(D))
    return Var->getTemplateSpecializationKind();

  return TSK_Undeclared;
}

/// \brief Check whether a specialization is well-formed in the current
/// context.
///
/// This routine determines whether a template specialization can be declared
/// in the current context (C++ [temp.expl.spec]p2).
///
/// \param S the semantic analysis object for which this check is being
/// performed.
///
/// \param Specialized the entity being specialized or instantiated, which
/// may be a kind of template (class template, function template, etc.) or
/// a member of a class template (member function, static data member,
/// member class).
///
/// \param PrevDecl the previous declaration of this entity, if any.
///
/// \param Loc the location of the explicit specialization or instantiation of
/// this entity.
///
/// \param IsPartialSpecialization whether this is a partial specialization of
/// a class template.
///
/// \returns true if there was an error that we cannot recover from, false
/// otherwise.
static bool CheckTemplateSpecializationScope(Sema &S,
                                             NamedDecl *Specialized,
                                             NamedDecl *PrevDecl,
                                             SourceLocation Loc,
                                             bool IsPartialSpecialization) {
  // Keep these "kind" numbers in sync with the %select statements in the
  // various diagnostics emitted by this routine.
  int EntityKind = 0;
  if (isa<ClassTemplateDecl>(Specialized))
    EntityKind = IsPartialSpecialization? 1 : 0;
  else if (isa<VarTemplateDecl>(Specialized))
    EntityKind = IsPartialSpecialization ? 3 : 2;
  else if (isa<FunctionTemplateDecl>(Specialized))
    EntityKind = 4;
  else if (isa<CXXMethodDecl>(Specialized))
    EntityKind = 5;
  else if (isa<VarDecl>(Specialized))
    EntityKind = 6;
  else if (isa<RecordDecl>(Specialized))
    EntityKind = 7;
  else if (isa<EnumDecl>(Specialized) && S.getLangOpts().CPlusPlus11)
    EntityKind = 8;
  else {
    S.Diag(Loc, diag::err_template_spec_unknown_kind)
      << S.getLangOpts().CPlusPlus11;
    S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
    return true;
  }

  // C++ [temp.expl.spec]p2:
  //   An explicit specialization shall be declared in the namespace
  //   of which the template is a member, or, for member templates, in
  //   the namespace of which the enclosing class or enclosing class
  //   template is a member. An explicit specialization of a member
  //   function, member class or static data member of a class
  //   template shall be declared in the namespace of which the class
  //   template is a member. Such a declaration may also be a
  //   definition. If the declaration is not a definition, the
  //   specialization may be defined later in the namespace in which
  //   the explicit specialization was declared, or in a namespace
  //   that encloses the one in which the explicit specialization was
  //   declared.
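  //
  // Illustrative example (hypothetical names; not from the original source):
  //
  //   namespace N { template <typename T> struct S {}; }
  //   namespace N { template <> struct S<int> {}; }  // OK: namespace of S
  //   template <> struct N::S<char> {};              // OK in C++11,
  //                                                  // extension in C++98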
  if (S.CurContext->getRedeclContext()->isFunctionOrMethod()) {
    S.Diag(Loc, diag::err_template_spec_decl_function_scope)
      << Specialized;
    return true;
  }

  if (S.CurContext->isRecord() && !IsPartialSpecialization) {
    if (S.getLangOpts().MicrosoftExt) {
      // Do not warn for class scope explicit specialization during
      // instantiation; the warning was already emitted during pattern
      // semantic analysis.
      if (!S.ActiveTemplateInstantiations.size())
        S.Diag(Loc, diag::ext_function_specialization_in_class)
          << Specialized;
    } else {
      S.Diag(Loc, diag::err_template_spec_decl_class_scope)
        << Specialized;
      return true;
    }
  }

  if (S.CurContext->isRecord() &&
      !S.CurContext->Equals(Specialized->getDeclContext())) {
    // Make sure that we're specializing in the right record context.
    // Otherwise, things can go horribly wrong.
    S.Diag(Loc, diag::err_template_spec_decl_class_scope)
      << Specialized;
    return true;
  }

  // C++ [temp.class.spec]p6:
  //   A class template partial specialization may be declared or redeclared
  //   in any namespace scope in which its definition may be defined (14.5.1
  //   and 14.5.2).
  DeclContext *SpecializedContext
    = Specialized->getDeclContext()->getEnclosingNamespaceContext();
  DeclContext *DC = S.CurContext->getEnclosingNamespaceContext();

  // Make sure that this redeclaration (or definition) occurs in an enclosing
  // namespace.
  // Note that HandleDeclarator() performs this check for explicit
  // specializations of function templates, static data members, and member
  // functions, so we skip the check here for those kinds of entities.
  // FIXME: HandleDeclarator's diagnostics aren't quite as good, though.
  // Should we refactor that check, so that it occurs later?
  if (!DC->Encloses(SpecializedContext) &&
      !(isa<FunctionTemplateDecl>(Specialized) ||
        isa<FunctionDecl>(Specialized) ||
        isa<VarTemplateDecl>(Specialized) ||
        isa<VarDecl>(Specialized))) {
    if (isa<TranslationUnitDecl>(SpecializedContext))
      S.Diag(Loc, diag::err_template_spec_redecl_global_scope)
        << EntityKind << Specialized;
    else if (isa<NamespaceDecl>(SpecializedContext)) {
      int Diag = diag::err_template_spec_redecl_out_of_scope;
      if (S.getLangOpts().MicrosoftExt)
        Diag = diag::ext_ms_template_spec_redecl_out_of_scope;
      S.Diag(Loc, Diag) << EntityKind << Specialized
                        << cast<NamedDecl>(SpecializedContext);
    } else
      llvm_unreachable("unexpected namespace context for specialization");

    S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
  } else if ((!PrevDecl ||
              getTemplateSpecializationKind(PrevDecl) == TSK_Undeclared ||
              getTemplateSpecializationKind(PrevDecl) ==
                TSK_ImplicitInstantiation)) {
    // C++ [temp.exp.spec]p2:
    //   An explicit specialization shall be declared in the namespace of which
    //   the template is a member, or, for member templates, in the namespace
    //   of which the enclosing class or enclosing class template is a member.
    //   An explicit specialization of a member function, member class or
    //   static data member of a class template shall be declared in the
    //   namespace of which the class template is a member.
    //
    // C++11 [temp.expl.spec]p2:
    //   An explicit specialization shall be declared in a namespace enclosing
    //   the specialized template.
    // C++11 [temp.explicit]p3:
    //   An explicit instantiation shall appear in an enclosing namespace of its
    //   template.
    if (!DC->InEnclosingNamespaceSetOf(SpecializedContext)) {
      bool IsCPlusPlus11Extension = DC->Encloses(SpecializedContext);
      if (isa<TranslationUnitDecl>(SpecializedContext)) {
        assert(!IsCPlusPlus11Extension &&
               "DC encloses TU but isn't in enclosing namespace set");
        S.Diag(Loc, diag::err_template_spec_decl_out_of_scope_global)
          << EntityKind << Specialized;
      } else if (isa<NamespaceDecl>(SpecializedContext)) {
        int Diag;
        if (!IsCPlusPlus11Extension)
          Diag = diag::err_template_spec_decl_out_of_scope;
        else if (!S.getLangOpts().CPlusPlus11)
          Diag = diag::ext_template_spec_decl_out_of_scope;
        else
          Diag = diag::warn_cxx98_compat_template_spec_decl_out_of_scope;
        S.Diag(Loc, Diag)
          << EntityKind << Specialized << cast<NamedDecl>(SpecializedContext);
      }

      S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
    }
  }

  return false;
}

static SourceRange findTemplateParameter(unsigned Depth, Expr *E) {
  if (!E->isInstantiationDependent())
    return SourceLocation();
  DependencyChecker Checker(Depth);
  Checker.TraverseStmt(E);
  if (Checker.Match && Checker.MatchLoc.isInvalid())
    return E->getSourceRange();
  return Checker.MatchLoc;
}

static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) {
  if (!TL.getType()->isDependentType())
    return SourceLocation();
  DependencyChecker Checker(Depth);
  Checker.TraverseTypeLoc(TL);
  if (Checker.Match && Checker.MatchLoc.isInvalid())
    return TL.getSourceRange();
  return Checker.MatchLoc;
}

/// \brief Subroutine of Sema::CheckTemplatePartialSpecializationArgs
/// that checks non-type template partial specialization arguments.
static bool CheckNonTypeTemplatePartialSpecializationArgs(
    Sema &S, SourceLocation TemplateNameLoc, NonTypeTemplateParmDecl *Param,
    const TemplateArgument *Args, unsigned NumArgs, bool IsDefaultArgument) {
  for (unsigned I = 0; I != NumArgs; ++I) {
    if (Args[I].getKind() == TemplateArgument::Pack) {
      if (CheckNonTypeTemplatePartialSpecializationArgs(
              S, TemplateNameLoc, Param, Args[I].pack_begin(),
              Args[I].pack_size(), IsDefaultArgument))
        return true;

      continue;
    }

    if (Args[I].getKind() != TemplateArgument::Expression)
      continue;

    Expr *ArgExpr = Args[I].getAsExpr();

    // We can have a pack expansion of any of the bullets below.
    if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(ArgExpr))
      ArgExpr = Expansion->getPattern();

    // Strip off any implicit casts we added as part of type checking.
    while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
      ArgExpr = ICE->getSubExpr();

    // C++ [temp.class.spec]p8:
    //   A non-type argument is non-specialized if it is the name of a
    //   non-type parameter. All other non-type arguments are
    //   specialized.
    //
    // Below, we check the two conditions that only apply to
    // specialized non-type arguments, so skip any non-specialized
    // arguments.
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr))
      if (isa<NonTypeTemplateParmDecl>(DRE->getDecl()))
        continue;

    // C++ [temp.class.spec]p9:
    //   Within the argument list of a class template partial
    //   specialization, the following restrictions apply:
    //     -- A partially specialized non-type argument expression
    //        shall not involve a template parameter of the partial
    //        specialization except when the argument expression is a
    //        simple identifier.
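    //
    // Illustrative example (hypothetical names; not from the original source):
    //
    //   template <int I, int J> struct A {};
    //   template <int I> struct A<I + 5, I * 2> {};  // error: the expressions
    //                                                // involve parameter I
    //   template <int I> struct A<I, 2> {};          // OK: simple identifier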
    SourceRange ParamUseRange =
        findTemplateParameter(Param->getDepth(), ArgExpr);
    if (ParamUseRange.isValid()) {
      if (IsDefaultArgument) {
        S.Diag(TemplateNameLoc,
               diag::err_dependent_non_type_arg_in_partial_spec);
        S.Diag(ParamUseRange.getBegin(),
               diag::note_dependent_non_type_default_arg_in_partial_spec)
          << ParamUseRange;
      } else {
        S.Diag(ParamUseRange.getBegin(),
               diag::err_dependent_non_type_arg_in_partial_spec)
          << ParamUseRange;
      }
      return true;
    }

    //     -- The type of a template parameter corresponding to a
    //        specialized non-type argument shall not be dependent on a
    //        parameter of the specialization.
    //
    // FIXME: We need to delay this check until instantiation in some cases:
    //
    //   template<template<typename> class X> struct A {
    //     template<typename T, X<T> N> struct B;
    //     template<typename T> struct B<T, 0>;
    //   };
    //   template<typename> using X = int;
    //   A<X>::B<int, 0> b;
    ParamUseRange = findTemplateParameter(
        Param->getDepth(), Param->getTypeSourceInfo()->getTypeLoc());
    if (ParamUseRange.isValid()) {
      S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getLocStart(),
             diag::err_dependent_typed_non_type_arg_in_partial_spec)
        << Param->getType() << ParamUseRange;
      S.Diag(Param->getLocation(), diag::note_template_param_here)
        << (IsDefaultArgument ? ParamUseRange : SourceRange());
      return true;
    }
  }

  return false;
}

/// \brief Check the non-type template arguments of a class template
/// partial specialization according to C++ [temp.class.spec]p9.
///
/// \param TemplateNameLoc the location of the template name.
/// \param TemplateParams the template parameters of the primary class
/// template.
/// \param NumExplicit the number of explicitly-specified template arguments.
/// \param TemplateArgs the template arguments of the class template
/// partial specialization.
///
/// \returns \c true if there was an error, \c false otherwise.
static bool CheckTemplatePartialSpecializationArgs(
    Sema &S, SourceLocation TemplateNameLoc,
    TemplateParameterList *TemplateParams, unsigned NumExplicit,
    SmallVectorImpl<TemplateArgument> &TemplateArgs) {
  const TemplateArgument *ArgList = TemplateArgs.data();

  for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
    NonTypeTemplateParmDecl *Param
      = dyn_cast<NonTypeTemplateParmDecl>(TemplateParams->getParam(I));
    if (!Param)
      continue;

    if (CheckNonTypeTemplatePartialSpecializationArgs(
            S, TemplateNameLoc, Param, &ArgList[I], 1, I >= NumExplicit))
      return true;
  }

  return false;
}

DeclResult
Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
                                       TagUseKind TUK,
                                       SourceLocation KWLoc,
                                       SourceLocation ModulePrivateLoc,
                                       TemplateIdAnnotation &TemplateId,
                                       AttributeList *Attr,
                               MultiTemplateParamsArg TemplateParameterLists,
                                       SkipBodyInfo *SkipBody) {
  assert(TUK != TUK_Reference && "References are not specializations");

  CXXScopeSpec &SS = TemplateId.SS;

  // NOTE: KWLoc is the location of the tag keyword. This will instead
  // store the location of the outermost template keyword in the declaration.
  SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0 ?
    TemplateParameterLists[0]->getTemplateLoc() : KWLoc;
  SourceLocation TemplateNameLoc = TemplateId.TemplateNameLoc;
  SourceLocation LAngleLoc = TemplateId.LAngleLoc;
  SourceLocation RAngleLoc = TemplateId.RAngleLoc;

  // Find the class template we're specializing.
  TemplateName Name = TemplateId.Template.get();
  ClassTemplateDecl *ClassTemplate
    = dyn_cast_or_null<ClassTemplateDecl>(Name.getAsTemplateDecl());

  if (!ClassTemplate) {
    Diag(TemplateNameLoc, diag::err_not_class_template_specialization)
      << (Name.getAsTemplateDecl() &&
          isa<TemplateTemplateParmDecl>(Name.getAsTemplateDecl()));
    return true;
  }

  bool isExplicitSpecialization = false;
  bool isPartialSpecialization = false;

  // Check the validity of the template headers that introduce this
  // template.
  // FIXME: We probably shouldn't complain about these headers for
  // friend declarations.
  bool Invalid = false;
  TemplateParameterList *TemplateParams =
      MatchTemplateParametersToScopeSpecifier(
          KWLoc, TemplateNameLoc, SS, &TemplateId,
          TemplateParameterLists, TUK == TUK_Friend, isExplicitSpecialization,
          Invalid);
  if (Invalid)
    return true;

  if (TemplateParams && TemplateParams->size() > 0) {
    isPartialSpecialization = true;

    if (TUK == TUK_Friend) {
      Diag(KWLoc, diag::err_partial_specialization_friend)
        << SourceRange(LAngleLoc, RAngleLoc);
      return true;
    }

    // C++ [temp.class.spec]p10:
    //   The template parameter list of a specialization shall not
    //   contain default template argument values.
    for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
      Decl *Param = TemplateParams->getParam(I);
      if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
        if (TTP->hasDefaultArgument()) {
          Diag(TTP->getDefaultArgumentLoc(),
               diag::err_default_arg_in_partial_spec);
          TTP->removeDefaultArgument();
        }
      } else if (NonTypeTemplateParmDecl *NTTP
                   = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
        if (Expr *DefArg = NTTP->getDefaultArgument()) {
          Diag(NTTP->getDefaultArgumentLoc(),
               diag::err_default_arg_in_partial_spec)
            << DefArg->getSourceRange();
          NTTP->removeDefaultArgument();
        }
      } else {
        TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(Param);
        if (TTP->hasDefaultArgument()) {
          Diag(TTP->getDefaultArgument().getLocation(),
               diag::err_default_arg_in_partial_spec)
            << TTP->getDefaultArgument().getSourceRange();
          TTP->removeDefaultArgument();
        }
      }
    }
  } else if (TemplateParams) {
    if (TUK == TUK_Friend)
      Diag(KWLoc, diag::err_template_spec_friend)
        << FixItHint::CreateRemoval(
                                SourceRange(TemplateParams->getTemplateLoc(),
                                            TemplateParams->getRAngleLoc()))
        << SourceRange(LAngleLoc, RAngleLoc);
    else
      isExplicitSpecialization = true;
  } else {
    assert(TUK == TUK_Friend && "should have a 'template<>' for this decl");
  }

  // Check that the specialization uses the same tag kind as the
  // original template.
  TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
  assert(Kind != TTK_Enum && "Invalid enum tag in class template spec!");
  if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
                                    Kind, TUK == TUK_Definition, KWLoc,
                                    ClassTemplate->getIdentifier())) {
    Diag(KWLoc, diag::err_use_with_wrong_tag)
      << ClassTemplate
      << FixItHint::CreateReplacement(KWLoc,
                            ClassTemplate->getTemplatedDecl()->getKindName());
    Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
         diag::note_previous_use);
    Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
  }
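
  // Illustrative example of the [temp.class.spec]p10 rule enforced above
  // (hypothetical names; not from the original source):
  //
  //   template <typename T, typename U> struct P {};
  //   template <typename T = int> struct P<T, T*> {};  // error: default
  //                                   // argument in a partial specialization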
  // Translate the parser's template argument list into our AST format.
  TemplateArgumentListInfo TemplateArgs =
      makeTemplateArgumentListInfo(*this, TemplateId);

  // Check for unexpanded parameter packs in any of the template arguments.
  for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
    if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
                                        UPPC_PartialSpecialization))
      return true;

  // Check that the template argument list is well-formed for this
  // template.
  SmallVector<TemplateArgument, 4> Converted;
  if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
                                TemplateArgs, false, Converted))
    return true;

  // Find the class template (partial) specialization declaration that
  // corresponds to these arguments.
  if (isPartialSpecialization) {
    if (CheckTemplatePartialSpecializationArgs(
            *this, TemplateNameLoc, ClassTemplate->getTemplateParameters(),
            TemplateArgs.size(), Converted))
      return true;

    bool InstantiationDependent;
    if (!Name.isDependent() &&
        !TemplateSpecializationType::anyDependentTemplateArguments(
                                             TemplateArgs.getArgumentArray(),
                                             TemplateArgs.size(),
                                             InstantiationDependent)) {
      Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
        << ClassTemplate->getDeclName();
      isPartialSpecialization = false;
    }
  }

  void *InsertPos = nullptr;
  ClassTemplateSpecializationDecl *PrevDecl = nullptr;

  if (isPartialSpecialization)
    // FIXME: Template parameter list matters, too
    PrevDecl = ClassTemplate->findPartialSpecialization(Converted, InsertPos);
  else
    PrevDecl = ClassTemplate->findSpecialization(Converted, InsertPos);

  ClassTemplateSpecializationDecl *Specialization = nullptr;

  // Check whether we can declare a class template specialization in
  // the current scope.
  if (TUK != TUK_Friend &&
      CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
                                       TemplateNameLoc,
                                       isPartialSpecialization))
    return true;

  // The canonical type
  QualType CanonType;
  if (isPartialSpecialization) {
    // Build the canonical type that describes the converted template
    // arguments of the class template partial specialization.
    TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
    CanonType = Context.getTemplateSpecializationType(CanonTemplate,
                                                      Converted.data(),
                                                      Converted.size());

    if (Context.hasSameType(CanonType,
                        ClassTemplate->getInjectedClassNameSpecialization())) {
      // C++ [temp.class.spec]p9b3:
      //
      //   -- The argument list of the specialization shall not be identical
      //      to the implicit argument list of the primary template.
      Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
        << /*class template*/0 << (TUK == TUK_Definition)
        << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
      return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
                                ClassTemplate->getIdentifier(),
                                TemplateNameLoc,
                                Attr,
                                TemplateParams,
                                AS_none, /*ModulePrivateLoc=*/SourceLocation(),
                                /*FriendLoc*/SourceLocation(),
                                TemplateParameterLists.size() - 1,
                                TemplateParameterLists.data());
    }
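    // Illustrative example of the p9b3 rule diagnosed above (hypothetical
    // names; not from the original source):
    //
    //   template <typename T> struct X {};
    //   template <typename T> struct X<T> {};  // error: argument list is
    //                                          // identical to the primary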
    // Create a new class template partial specialization declaration node.
    ClassTemplatePartialSpecializationDecl *PrevPartial
      = cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
    ClassTemplatePartialSpecializationDecl *Partial
      = ClassTemplatePartialSpecializationDecl::Create(Context, Kind,
                                             ClassTemplate->getDeclContext(),
                                                       KWLoc, TemplateNameLoc,
                                                       TemplateParams,
                                                       ClassTemplate,
                                                       Converted.data(),
                                                       Converted.size(),
                                                       TemplateArgs,
                                                       CanonType,
                                                       PrevPartial);
    SetNestedNameSpecifier(Partial, SS);
    if (TemplateParameterLists.size() > 1 && SS.isSet()) {
      Partial->setTemplateParameterListsInfo(Context,
                                             TemplateParameterLists.size() - 1,
                                             TemplateParameterLists.data());
    }

    if (!PrevPartial)
      ClassTemplate->AddPartialSpecialization(Partial, InsertPos);
    Specialization = Partial;

    // If we are providing an explicit specialization of a member class
    // template specialization, make a note of that.
    if (PrevPartial && PrevPartial->getInstantiatedFromMember())
      PrevPartial->setMemberSpecialization();

    // Check that all of the template parameters of the class template
    // partial specialization are deducible from the template
    // arguments. If not, this class template partial specialization
    // will never be used.
    llvm::SmallBitVector DeducibleParams(TemplateParams->size());
    MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
                               TemplateParams->getDepth(),
                               DeducibleParams);

    if (!DeducibleParams.all()) {
      unsigned NumNonDeducible = DeducibleParams.size()-DeducibleParams.count();
      Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible)
        << /*class template*/0 << (NumNonDeducible > 1)
        << SourceRange(TemplateNameLoc, RAngleLoc);
      for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
        if (!DeducibleParams[I]) {
          NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
          if (Param->getDeclName())
            Diag(Param->getLocation(),
                 diag::note_partial_spec_unused_parameter)
              << Param->getDeclName();
          else
            Diag(Param->getLocation(),
                 diag::note_partial_spec_unused_parameter)
              << "(anonymous)";
        }
      }
    }
  } else {
    // Create a new class template specialization declaration node for
    // this explicit specialization or friend declaration.
    Specialization
      = ClassTemplateSpecializationDecl::Create(Context, Kind,
                                             ClassTemplate->getDeclContext(),
                                                KWLoc, TemplateNameLoc,
                                                ClassTemplate,
                                                Converted.data(),
                                                Converted.size(),
                                                PrevDecl);
    SetNestedNameSpecifier(Specialization, SS);
    if (TemplateParameterLists.size() > 0) {
      Specialization->setTemplateParameterListsInfo(Context,
                                              TemplateParameterLists.size(),
                                              TemplateParameterLists.data());
    }

    if (!PrevDecl)
      ClassTemplate->AddSpecialization(Specialization, InsertPos);

    CanonType = Context.getTypeDeclType(Specialization);
  }

  // C++ [temp.expl.spec]p6:
  //   If a template, a member template or the member of a class template is
  //   explicitly specialized then that specialization shall be declared
  //   before the first use of that specialization that would cause an implicit
  //   instantiation to take place, in every translation unit in which such a
  //   use occurs; no diagnostic is required.
  if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
    bool Okay = false;
    for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
      // Is there any previous explicit specialization declaration?
      if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
        Okay = true;
        break;
      }
    }

    if (!Okay) {
      SourceRange Range(TemplateNameLoc, RAngleLoc);
      Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
        << Context.getTypeDeclType(Specialization) << Range;

      Diag(PrevDecl->getPointOfInstantiation(),
           diag::note_instantiation_required_here)
        << (PrevDecl->getTemplateSpecializationKind()
                                                != TSK_ImplicitInstantiation);
      return true;
    }
  }

  // If this is not a friend, note that this is an explicit specialization.
  if (TUK != TUK_Friend)
    Specialization->setSpecializationKind(TSK_ExplicitSpecialization);

  // Check that this isn't a redefinition of this specialization.
  if (TUK == TUK_Definition) {
    RecordDecl *Def = Specialization->getDefinition();
    NamedDecl *Hidden = nullptr;
    if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
      SkipBody->ShouldSkip = true;
      makeMergedDefinitionVisible(Hidden, KWLoc);
      // From here on out, treat this as just a redeclaration.
      TUK = TUK_Declaration;
    } else if (Def) {
      SourceRange Range(TemplateNameLoc, RAngleLoc);
      Diag(TemplateNameLoc, diag::err_redefinition)
        << Context.getTypeDeclType(Specialization) << Range;
      Diag(Def->getLocation(), diag::note_previous_definition);
      Specialization->setInvalidDecl();
      return true;
    }
  }

  if (Attr)
    ProcessDeclAttributeList(S, Specialization, Attr);

  // Add alignment attributes if necessary; these attributes are checked when
  // the ASTContext lays out the structure.
  if (TUK == TUK_Definition) {
    AddAlignmentAttributesForRecord(Specialization);
    AddMsStructLayoutForRecord(Specialization);
  }

  if (ModulePrivateLoc.isValid())
    Diag(Specialization->getLocation(), diag::err_module_private_specialization)
      << (isPartialSpecialization? 1 : 0)
      << FixItHint::CreateRemoval(ModulePrivateLoc);

  // Build the fully-sugared type for this class template
  // specialization as the user wrote in the specialization
  // itself. This means that we'll pretty-print the type retrieved
  // from the specialization's declaration the way that the user
  // actually wrote the specialization, rather than formatting the
  // name based on the "canonical" representation used to store the
  // template arguments in the specialization.
  TypeSourceInfo *WrittenTy
    = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
                                                TemplateArgs, CanonType);
  if (TUK != TUK_Friend) {
    Specialization->setTypeAsWritten(WrittenTy);
    Specialization->setTemplateKeywordLoc(TemplateKWLoc);
  }

  // C++ [temp.expl.spec]p9:
  //   A template explicit specialization is in the scope of the
  //   namespace in which the template was defined.
  //
  // We actually implement this paragraph where we set the semantic
  // context (in the creation of the ClassTemplateSpecializationDecl),
  // but we also maintain the lexical context where the actual
  // definition occurs.
  Specialization->setLexicalDeclContext(CurContext);

  // We may be starting the definition of this specialization.
  if (TUK == TUK_Definition)
    Specialization->startDefinition();

  if (TUK == TUK_Friend) {
    FriendDecl *Friend = FriendDecl::Create(Context, CurContext,
                                            TemplateNameLoc,
                                            WrittenTy,
                                            /*FIXME:*/KWLoc);
    Friend->setAccess(AS_public);
    CurContext->addDecl(Friend);
  } else {
    // Add the specialization into its lexical context, so that it can
    // be seen when iterating through the list of declarations in that
    // context. However, specializations are not found by name lookup.
    CurContext->addDecl(Specialization);
  }
  return Specialization;
}

Decl *Sema::ActOnTemplateDeclarator(Scope *S,
                              MultiTemplateParamsArg TemplateParameterLists,
                                    Declarator &D) {
  Decl *NewDecl = HandleDeclarator(S, D, TemplateParameterLists);
  ActOnDocumentableDecl(NewDecl);
  return NewDecl;
}

Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
                               MultiTemplateParamsArg TemplateParameterLists,
                                            Declarator &D) {
  assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
  DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();

  if (FTI.hasPrototype) {
    // FIXME: Diagnose arguments without names in C.
  }

  Scope *ParentScope = FnBodyScope->getParent();

  D.setFunctionDefinitionKind(FDK_Definition);
  Decl *DP = HandleDeclarator(ParentScope, D,
                              TemplateParameterLists);
  return ActOnStartOfFunctionDef(FnBodyScope, DP);
}

/// \brief Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
  D->dropAttr<DLLImportAttr>();
  D->dropAttr<DLLExportAttr>();

  if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
    FD->setInlineSpecified(false);
}

/// \brief Compute the diagnostic location for an explicit instantiation
/// declaration or definition.
static SourceLocation DiagLocForExplicitInstantiation(
    NamedDecl* D, SourceLocation PointOfInstantiation) {
  // Explicit instantiations following a specialization have no effect and
  // hence no PointOfInstantiation. In that case, walk decl backwards
  // until a valid name loc is found.
  SourceLocation PrevDiagLoc = PointOfInstantiation;
  for (Decl *Prev = D; Prev && !PrevDiagLoc.isValid();
       Prev = Prev->getPreviousDecl()) {
    PrevDiagLoc = Prev->getLocation();
  }
  assert(PrevDiagLoc.isValid() &&
         "Explicit instantiation without point of instantiation?");
  return PrevDiagLoc;
}
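
// Illustrative example of the situation handled by the routine below
// (hypothetical names; not from the original source):
//
//   template <typename T> struct S { T t; };
//   S<int> s;                       // point of implicit instantiation
//   template <> struct S<int> {};  // error: specialization after first use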
/// \brief Diagnose cases where we have an explicit template specialization
/// before/after an explicit template instantiation, producing diagnostics
/// for those cases where they are required and determining whether the
/// new specialization/instantiation will have any effect.
///
/// \param NewLoc the location of the new explicit specialization or
/// instantiation.
///
/// \param NewTSK the kind of the new explicit specialization or instantiation.
///
/// \param PrevDecl the previous declaration of the entity.
///
/// \param PrevTSK the kind of the old explicit specialization or
/// instantiation.
///
/// \param PrevPointOfInstantiation if valid, indicates where the previous
/// declaration was instantiated (either implicitly or explicitly).
///
/// \param HasNoEffect will be set to true to indicate that the new
/// specialization or instantiation has no effect and should be ignored.
///
/// \returns true if there was an error that should prevent the introduction of
/// the new declaration into the AST, false otherwise.
bool
Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
                                             TemplateSpecializationKind NewTSK,
                                             NamedDecl *PrevDecl,
                                             TemplateSpecializationKind PrevTSK,
                                        SourceLocation PrevPointOfInstantiation,
                                             bool &HasNoEffect) {
  HasNoEffect = false;

  switch (NewTSK) {
  case TSK_Undeclared:
  case TSK_ImplicitInstantiation:
    assert(
        (PrevTSK == TSK_Undeclared || PrevTSK == TSK_ImplicitInstantiation) &&
        "previous declaration must be implicit!");
    return false;

  case TSK_ExplicitSpecialization:
    switch (PrevTSK) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:
      // Okay, we're just specializing something that is either already
      // explicitly specialized or has merely been mentioned without any
      // instantiation.
      return false;

    case TSK_ImplicitInstantiation:
      if (PrevPointOfInstantiation.isInvalid()) {
        // The declaration itself has not actually been instantiated, so it is
        // still okay to specialize it.
        StripImplicitInstantiation(PrevDecl);
        return false;
      }
      LLVM_FALLTHROUGH; // HLSL Change

    case TSK_ExplicitInstantiationDeclaration:
    case TSK_ExplicitInstantiationDefinition:
      assert((PrevTSK == TSK_ImplicitInstantiation ||
              PrevPointOfInstantiation.isValid()) &&
             "Explicit instantiation without point of instantiation?");

      // C++ [temp.expl.spec]p6:
      //   If a template, a member template or the member of a class template
      //   is explicitly specialized then that specialization shall be declared
      //   before the first use of that specialization that would cause an
      //   implicit instantiation to take place, in every translation unit in
      //   which such a use occurs; no diagnostic is required.
      for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
        // Is there any previous explicit specialization declaration?
        if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization)
          return false;
      }

      Diag(NewLoc, diag::err_specialization_after_instantiation)
        << PrevDecl;
      Diag(PrevPointOfInstantiation, diag::note_instantiation_required_here)
        << (PrevTSK != TSK_ImplicitInstantiation);

      return true;
    }
    llvm_unreachable("Unrecognized PrevTSK!");

  case TSK_ExplicitInstantiationDeclaration:
    switch (PrevTSK) {
    case TSK_ExplicitInstantiationDeclaration:
      // This explicit instantiation declaration is redundant (that's okay).
      HasNoEffect = true;
      return false;

    case TSK_Undeclared:
    case TSK_ImplicitInstantiation:
      // We're explicitly instantiating something that may have already been
      // implicitly instantiated; that's fine.
      return false;

    case TSK_ExplicitSpecialization:
      // C++0x [temp.explicit]p4:
      //   For a given set of template parameters, if an explicit instantiation
      //   of a template appears after a declaration of an explicit
      //   specialization for that template, the explicit instantiation has no
      //   effect.
      HasNoEffect = true;
      return false;

    case TSK_ExplicitInstantiationDefinition:
      // C++0x [temp.explicit]p10:
      //   If an entity is the subject of both an explicit instantiation
      //   declaration and an explicit instantiation definition in the same
      //   translation unit, the definition shall follow the declaration.
      Diag(NewLoc,
           diag::err_explicit_instantiation_declaration_after_definition);

      // Explicit instantiations following a specialization have no effect and
      // hence no PrevPointOfInstantiation. In that case, walk decl backwards
      // until a valid name loc is found.
      Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
           diag::note_explicit_instantiation_definition_here);
      HasNoEffect = true;
      return false;
    }
    llvm_unreachable("Unrecognized PrevTSK!");

  case TSK_ExplicitInstantiationDefinition:
    switch (PrevTSK) {
    case TSK_Undeclared:
    case TSK_ImplicitInstantiation:
      // We're explicitly instantiating something that may have already been
      // implicitly instantiated; that's fine.
      return false;

    case TSK_ExplicitSpecialization:
      // C++ DR 259, C++0x [temp.explicit]p4:
      //   For a given set of template parameters, if an explicit
      //   instantiation of a template appears after a declaration of
      //   an explicit specialization for that template, the explicit
      //   instantiation has no effect.
      //
      // In C++98/03 mode, we only give an extension warning here, because it
      // is not harmful to try to explicitly instantiate something that
      // has been explicitly specialized.
      Diag(NewLoc, getLangOpts().CPlusPlus11 ?
           diag::warn_cxx98_compat_explicit_instantiation_after_specialization :
           diag::ext_explicit_instantiation_after_specialization)
        << PrevDecl;
      Diag(PrevDecl->getLocation(),
           diag::note_previous_template_specialization);
      HasNoEffect = true;
      return false;

    case TSK_ExplicitInstantiationDeclaration:
      // We're explicitly instantiating a definition for something for which we
      // were previously asked to suppress instantiations. That's fine.

      // C++0x [temp.explicit]p4:
      //   For a given set of template parameters, if an explicit instantiation
      //   of a template appears after a declaration of an explicit
      //   specialization for that template, the explicit instantiation has no
      //   effect.
      for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
        // Is there any previous explicit specialization declaration?
        if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
          HasNoEffect = true;
          break;
        }
      }

      return false;

    case TSK_ExplicitInstantiationDefinition:
      // C++0x [temp.spec]p5:
      //   For a given template and a given set of template-arguments,
      //     - an explicit instantiation definition shall appear at most once
      //       in a program,
      // MSVCCompat: MSVC silently ignores duplicate explicit instantiations.
      Diag(NewLoc, (getLangOpts().MSVCCompat)
                       ? diag::ext_explicit_instantiation_duplicate
                       : diag::err_explicit_instantiation_duplicate)
          << PrevDecl;
      Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
           diag::note_previous_explicit_instantiation);
      HasNoEffect = true;
      return false;
    }
  }

  llvm_unreachable("Missing specialization/instantiation case?");
}

/// \brief Perform semantic analysis for the given dependent function
/// template specialization.
///
/// The only possible way to get a dependent function template specialization
/// is with a friend declaration, like so:
///
/// \code
///   template \<class T> void foo(T);
///   template \<class T> class A {
///     friend void foo<>(T);
///   };
/// \endcode
///
/// There really isn't any useful analysis we can do here, so we
/// just store the information.
bool
Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
                   const TemplateArgumentListInfo &ExplicitTemplateArgs,
                                                   LookupResult &Previous) {
  // Remove anything from Previous that isn't a function template in
  // the correct context.
  DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
  LookupResult::Filter F = Previous.makeFilter();
  while (F.hasNext()) {
    NamedDecl *D = F.next()->getUnderlyingDecl();
    if (!isa<FunctionTemplateDecl>(D) ||
        !FDLookupContext->InEnclosingNamespaceSetOf(
                              D->getDeclContext()->getRedeclContext()))
      F.erase();
  }
  F.done();

  // Should this be diagnosed here?
  if (Previous.empty())
    return true;

  FD->setDependentTemplateSpecialization(Context, Previous.asUnresolvedSet(),
                                         ExplicitTemplateArgs);
  return false;
}

/// \brief Perform semantic analysis for the given function template
/// specialization.
///
/// This routine performs all of the semantic analysis required for an
/// explicit function template specialization. On successful completion,
/// the function declaration \p FD will become a function template
/// specialization.
///
/// \param FD the function declaration, which will be updated to become a
/// function template specialization.
///
/// \param ExplicitTemplateArgs the explicitly-provided template arguments,
/// if any. Note that this may be valid info even when 0 arguments are
/// explicitly provided as in, e.g., \c void sort<>(char*, char*);
/// as it anyway contains info on the angle bracket locations.
///
/// \param Previous the set of declarations that may be specialized by
/// this function specialization.
bool Sema::CheckFunctionTemplateSpecialization(
    FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
    LookupResult &Previous) {
  // The set of function template specializations that could match this
  // explicit function template specialization.
  UnresolvedSet<8> Candidates;
  TemplateSpecCandidateSet FailedCandidates(FD->getLocation());

  DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
  for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
       I != E; ++I) {
    NamedDecl *Ovl = (*I)->getUnderlyingDecl();
    if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Ovl)) {
      // Only consider templates found within the same semantic lookup scope as
      // FD.
      if (!FDLookupContext->InEnclosingNamespaceSetOf(
                                Ovl->getDeclContext()->getRedeclContext()))
        continue;

      // When matching a constexpr member function template specialization
      // against the primary template, we don't yet know whether the
      // specialization has an implicit 'const' (because we don't know whether
      // it will be a static member function until we know which template it
      // specializes), so adjust it now assuming it specializes this template.
      QualType FT = FD->getType();
      if (FD->isConstexpr()) {
        CXXMethodDecl *OldMD =
          dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
        if (OldMD && OldMD->isConst()) {
          const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
          FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
          EPI.TypeQuals |= Qualifiers::Const;
          FT = Context.getFunctionType(FPT->getReturnType(),
                                       FPT->getParamTypes(), EPI,
                 None); // HLSL - assume in (although constexpr not enabled yet)
        }
      }

      // C++ [temp.expl.spec]p11:
      //   A trailing template-argument can be left unspecified in the
      //   template-id naming an explicit function template specialization
      //   provided it can be deduced from the function argument type.
      // Perform template argument deduction to determine whether we may be
      // specializing this template.
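      //
      // Illustrative example (hypothetical names; not from the original
      // source):
      //
      //   template <typename T> void sort(T *first, T *last);
      //   template <> void sort(char *first, char *last);  // T deduced: char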
      // FIXME: It is somewhat wasteful to build
      TemplateDeductionInfo Info(FailedCandidates.getLocation());
      FunctionDecl *Specialization = nullptr;
      if (TemplateDeductionResult TDK = DeduceTemplateArguments(
              cast<FunctionTemplateDecl>(FunTmpl->getFirstDecl()),
              ExplicitTemplateArgs, FT, Specialization, Info)) {
        // Template argument deduction failed; record why it failed, so
        // that we can provide nifty diagnostics.
        FailedCandidates.addCandidate()
            .set(FunTmpl->getTemplatedDecl(),
                 MakeDeductionFailureInfo(Context, TDK, Info));
        (void)TDK;
        continue;
      }

      // Record this candidate.
      Candidates.addDecl(Specialization, I.getAccess());
    }
  }

  // Find the most specialized function template.
  UnresolvedSetIterator Result = getMostSpecialized(
      Candidates.begin(), Candidates.end(), FailedCandidates,
      FD->getLocation(),
      PDiag(diag::err_function_template_spec_no_match) << FD->getDeclName(),
      PDiag(diag::err_function_template_spec_ambiguous)
          << FD->getDeclName() << (ExplicitTemplateArgs != nullptr),
      PDiag(diag::note_function_template_spec_matched));

  if (Result == Candidates.end())
    return true;

  // Ignore access information; it doesn't figure into redeclaration checking.
  FunctionDecl *Specialization = cast<FunctionDecl>(*Result);

  FunctionTemplateSpecializationInfo *SpecInfo
    = Specialization->getTemplateSpecializationInfo();
  assert(SpecInfo && "Function template specialization info missing?");

  // Note: do not overwrite location info if previous template
  // specialization kind was explicit.
  TemplateSpecializationKind TSK = SpecInfo->getTemplateSpecializationKind();
  if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation) {
    Specialization->setLocation(FD->getLocation());
    // C++11 [dcl.constexpr]p1: An explicit specialization of a constexpr
    // function can differ from the template declaration with respect to
    // the constexpr specifier.
    Specialization->setConstexpr(FD->isConstexpr());
  }

  // FIXME: Check if the prior specialization has a point of instantiation.
  // If so, we have run afoul of .

  // If this is a friend declaration, then we're not really declaring
  // an explicit specialization.
  bool isFriend = (FD->getFriendObjectKind() != Decl::FOK_None);

  // Check the scope of this explicit specialization.
  if (!isFriend &&
      CheckTemplateSpecializationScope(*this,
                                       Specialization->getPrimaryTemplate(),
                                       Specialization, FD->getLocation(),
                                       false))
    return true;

  // C++ [temp.expl.spec]p6:
  //   If a template, a member template or the member of a class template is
  //   explicitly specialized then that specialization shall be declared
  //   before the first use of that specialization that would cause an implicit
  //   instantiation to take place, in every translation unit in which such a
  //   use occurs; no diagnostic is required.
  bool HasNoEffect = false;
  if (!isFriend &&
      CheckSpecializationInstantiationRedecl(FD->getLocation(),
                                             TSK_ExplicitSpecialization,
                                             Specialization,
                                   SpecInfo->getTemplateSpecializationKind(),
                                   SpecInfo->getPointOfInstantiation(),
                                             HasNoEffect))
    return true;

  // Mark the prior declaration as an explicit specialization, so that later
  // clients know that this is an explicit specialization.
  if (!isFriend) {
    SpecInfo->setTemplateSpecializationKind(TSK_ExplicitSpecialization);
    MarkUnusedFileScopedDecl(Specialization);
  }

  // Turn the given function declaration into a function template
  // specialization, with the template arguments from the previous
  // specialization.
  // Take copies of (semantic and syntactic) template argument lists.
const TemplateArgumentList* TemplArgs = new (Context) TemplateArgumentList(Specialization->getTemplateSpecializationArgs()); FD->setFunctionTemplateSpecialization(Specialization->getPrimaryTemplate(), TemplArgs, /*InsertPos=*/nullptr, SpecInfo->getTemplateSpecializationKind(), ExplicitTemplateArgs); // The "previous declaration" for this function template specialization is // the prior function template specialization. Previous.clear(); Previous.addDecl(Specialization); return false; } /// \brief Perform semantic analysis for the given non-template member /// specialization. /// /// This routine performs all of the semantic analysis required for an /// explicit member function specialization. On successful completion, /// the function declaration \p FD will become a member function /// specialization. /// /// \param Member the member declaration, which will be updated to become a /// specialization. /// /// \param Previous the set of declarations, one of which may be specialized /// by this function specialization; the set will be modified to contain the /// redeclared member. bool Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) { assert(!isa<TemplateDecl>(Member) && "Only for non-template members"); // Try to find the member we are instantiating. NamedDecl *Instantiation = nullptr; NamedDecl *InstantiatedFrom = nullptr; MemberSpecializationInfo *MSInfo = nullptr; if (Previous.empty()) { // Nowhere to look anyway. } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Member)) { for (LookupResult::iterator I = Previous.begin(), E = Previous.end(); I != E; ++I) { NamedDecl *D = (*I)->getUnderlyingDecl(); if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) { QualType Adjusted = Function->getType(); if (!hasExplicitCallingConv(Adjusted)) Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType()); if (Context.hasSameType(Adjusted, Method->getType())) { Instantiation = Method; InstantiatedFrom = Method->getInstantiatedFromMemberFunction(); MSInfo = Method->getMemberSpecializationInfo(); break; } } } } else if (isa<VarDecl>(Member)) { VarDecl *PrevVar; if (Previous.isSingleResult() && (PrevVar = dyn_cast<VarDecl>(Previous.getFoundDecl()))) if (PrevVar->isStaticDataMember()) { Instantiation = PrevVar; InstantiatedFrom = PrevVar->getInstantiatedFromStaticDataMember(); MSInfo = PrevVar->getMemberSpecializationInfo(); } } else if (isa<RecordDecl>(Member)) { CXXRecordDecl *PrevRecord; if (Previous.isSingleResult() && (PrevRecord = dyn_cast<CXXRecordDecl>(Previous.getFoundDecl()))) { Instantiation = PrevRecord; InstantiatedFrom = PrevRecord->getInstantiatedFromMemberClass(); MSInfo = PrevRecord->getMemberSpecializationInfo(); } } else if (isa<EnumDecl>(Member)) { EnumDecl *PrevEnum; if (Previous.isSingleResult() && (PrevEnum = dyn_cast<EnumDecl>(Previous.getFoundDecl()))) { Instantiation = PrevEnum; InstantiatedFrom = PrevEnum->getInstantiatedFromMemberEnum(); MSInfo = PrevEnum->getMemberSpecializationInfo(); } } if (!Instantiation) { // There is no previous declaration that matches. Since member // specializations are always out-of-line, the caller will complain about // this mismatch later. return false; } // If this is a friend, just bail out here before we start turning // things into explicit specializations. if (Member->getFriendObjectKind() != Decl::FOK_None) { // Preserve instantiation information. 
    if (InstantiatedFrom && isa<CXXMethodDecl>(Member)) {
      cast<CXXMethodDecl>(Member)->setInstantiationOfMemberFunction(
          cast<CXXMethodDecl>(InstantiatedFrom),
          cast<CXXMethodDecl>(Instantiation)->getTemplateSpecializationKind());
    } else if (InstantiatedFrom && isa<CXXRecordDecl>(Member)) {
      cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
          cast<CXXRecordDecl>(InstantiatedFrom),
          cast<CXXRecordDecl>(Instantiation)->getTemplateSpecializationKind());
    }

    Previous.clear();
    Previous.addDecl(Instantiation);
    return false;
  }

  // Make sure that this is a specialization of a member.
  if (!InstantiatedFrom) {
    Diag(Member->getLocation(), diag::err_spec_member_not_instantiated)
      << Member;
    Diag(Instantiation->getLocation(), diag::note_specialized_decl);
    return true;
  }

  // C++ [temp.expl.spec]p6:
  //   If a template, a member template or the member of a class template is
  //   explicitly specialized then that specialization shall be declared
  //   before the first use of that specialization that would cause an implicit
  //   instantiation to take place, in every translation unit in which such a
  //   use occurs; no diagnostic is required.
  assert(MSInfo && "Member specialization info missing?");

  bool HasNoEffect = false;
  if (CheckSpecializationInstantiationRedecl(Member->getLocation(),
                                             TSK_ExplicitSpecialization,
                                             Instantiation,
                                     MSInfo->getTemplateSpecializationKind(),
                                           MSInfo->getPointOfInstantiation(),
                                             HasNoEffect))
    return true;

  // Check the scope of this explicit specialization.
  if (CheckTemplateSpecializationScope(*this,
                                       InstantiatedFrom,
                                       Instantiation, Member->getLocation(),
                                       false))
    return true;

  // Note that this is an explicit specialization of a member, and mark
  // the original declaration to note that it is an explicit specialization
  // (if it was previously an implicit instantiation). This latter step
  // makes bookkeeping easier.
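// (Editorial illustration added for clarity; not part of the original
// Clang source. 'A' is hypothetical.) A member specialization that reaches
// the marking code below looks like:
//
//   template<typename T> struct A { static int n; void f(); };
//   template<> int A<int>::n = 42; // static data member
//   template<> void A<int>::f() {} // member function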
if (isa<FunctionDecl>(Member)) { FunctionDecl *InstantiationFunction = cast<FunctionDecl>(Instantiation); if (InstantiationFunction->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) { InstantiationFunction->setTemplateSpecializationKind( TSK_ExplicitSpecialization); InstantiationFunction->setLocation(Member->getLocation()); } cast<FunctionDecl>(Member)->setInstantiationOfMemberFunction( cast<CXXMethodDecl>(InstantiatedFrom), TSK_ExplicitSpecialization); MarkUnusedFileScopedDecl(InstantiationFunction); } else if (isa<VarDecl>(Member)) { VarDecl *InstantiationVar = cast<VarDecl>(Instantiation); if (InstantiationVar->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) { InstantiationVar->setTemplateSpecializationKind( TSK_ExplicitSpecialization); InstantiationVar->setLocation(Member->getLocation()); } cast<VarDecl>(Member)->setInstantiationOfStaticDataMember( cast<VarDecl>(InstantiatedFrom), TSK_ExplicitSpecialization); MarkUnusedFileScopedDecl(InstantiationVar); } else if (isa<CXXRecordDecl>(Member)) { CXXRecordDecl *InstantiationClass = cast<CXXRecordDecl>(Instantiation); if (InstantiationClass->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) { InstantiationClass->setTemplateSpecializationKind( TSK_ExplicitSpecialization); InstantiationClass->setLocation(Member->getLocation()); } cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass( cast<CXXRecordDecl>(InstantiatedFrom), TSK_ExplicitSpecialization); } else { assert(isa<EnumDecl>(Member) && "Only member enums remain"); EnumDecl *InstantiationEnum = cast<EnumDecl>(Instantiation); if (InstantiationEnum->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) { InstantiationEnum->setTemplateSpecializationKind( TSK_ExplicitSpecialization); InstantiationEnum->setLocation(Member->getLocation()); } cast<EnumDecl>(Member)->setInstantiationOfMemberEnum( cast<EnumDecl>(InstantiatedFrom), TSK_ExplicitSpecialization); } // Save the caller the trouble of having to figure out which declaration // this specialization matches. Previous.clear(); Previous.addDecl(Instantiation); return false; } /// \brief Check the scope of an explicit instantiation. /// /// \returns true if a serious error occurs, false otherwise. static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D, SourceLocation InstLoc, bool WasQualifiedName) { DeclContext *OrigContext= D->getDeclContext()->getEnclosingNamespaceContext(); DeclContext *CurContext = S.CurContext->getRedeclContext(); if (CurContext->isRecord()) { S.Diag(InstLoc, diag::err_explicit_instantiation_in_class) << D; return true; } // C++11 [temp.explicit]p3: // An explicit instantiation shall appear in an enclosing namespace of its // template. If the name declared in the explicit instantiation is an // unqualified name, the explicit instantiation shall appear in the // namespace where its template is declared or, if that namespace is inline // (7.3.1), any namespace from its enclosing namespace set. // // This is DR275, which we do not retroactively apply to C++98/03. if (WasQualifiedName) { if (CurContext->Encloses(OrigContext)) return false; } else { if (CurContext->InEnclosingNamespaceSetOf(OrigContext)) return false; } if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(OrigContext)) { if (WasQualifiedName) S.Diag(InstLoc, S.getLangOpts().CPlusPlus11? diag::err_explicit_instantiation_out_of_scope : diag::warn_explicit_instantiation_out_of_scope_0x) << D << NS; else S.Diag(InstLoc, S.getLangOpts().CPlusPlus11? 
diag::err_explicit_instantiation_unqualified_wrong_namespace : diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x) << D << NS; } else S.Diag(InstLoc, S.getLangOpts().CPlusPlus11? diag::err_explicit_instantiation_must_be_global : diag::warn_explicit_instantiation_must_be_global_0x) << D; S.Diag(D->getLocation(), diag::note_explicit_instantiation_here); return false; } /// \brief Determine whether the given scope specifier has a template-id in it. static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) { if (!SS.isSet()) return false; // C++11 [temp.explicit]p3: // If the explicit instantiation is for a member function, a member class // or a static data member of a class template specialization, the name of // the class template specialization in the qualified-id for the member // name shall be a simple-template-id. // // C++98 has the same restriction, just worded differently. for (NestedNameSpecifier *NNS = SS.getScopeRep(); NNS; NNS = NNS->getPrefix()) if (const Type *T = NNS->getAsType()) if (isa<TemplateSpecializationType>(T)) return true; return false; } // Explicit instantiation of a class template specialization DeclResult Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy TemplateD, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc, AttributeList *Attr) { // Find the class template we're specializing TemplateName Name = TemplateD.get(); TemplateDecl *TD = Name.getAsTemplateDecl(); // Check that the specialization uses the same tag kind as the // original template. TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec); assert(Kind != TTK_Enum && "Invalid enum tag in class template explicit instantiation!"); if (isa<TypeAliasTemplateDecl>(TD)) { Diag(KWLoc, diag::err_tag_reference_non_tag) << Kind; Diag(TD->getTemplatedDecl()->getLocation(), diag::note_previous_use); return true; } ClassTemplateDecl *ClassTemplate = cast<ClassTemplateDecl>(TD); if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(), Kind, /*isDefinition*/false, KWLoc, ClassTemplate->getIdentifier())) { Diag(KWLoc, diag::err_use_with_wrong_tag) << ClassTemplate << FixItHint::CreateReplacement(KWLoc, ClassTemplate->getTemplatedDecl()->getKindName()); Diag(ClassTemplate->getTemplatedDecl()->getLocation(), diag::note_previous_use); Kind = ClassTemplate->getTemplatedDecl()->getTagKind(); } // C++0x [temp.explicit]p2: // There are two forms of explicit instantiation: an explicit instantiation // definition and an explicit instantiation declaration. An explicit // instantiation declaration begins with the extern keyword. [...] TemplateSpecializationKind TSK = ExternLoc.isInvalid() ? TSK_ExplicitInstantiationDefinition : TSK_ExplicitInstantiationDeclaration; if (TSK == TSK_ExplicitInstantiationDeclaration) { // Check for dllexport class template instantiation declarations. for (AttributeList *A = Attr; A; A = A->getNext()) { if (A->getKind() == AttributeList::AT_DLLExport) { Diag(ExternLoc, diag::warn_attribute_dllexport_explicit_instantiation_decl); Diag(A->getLoc(), diag::note_attribute); break; } } if (auto *A = ClassTemplate->getTemplatedDecl()->getAttr<DLLExportAttr>()) { Diag(ExternLoc, diag::warn_attribute_dllexport_explicit_instantiation_decl); Diag(A->getLocation(), diag::note_attribute); } } // Translate the parser's template argument list in our AST format. 
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc); translateTemplateArguments(TemplateArgsIn, TemplateArgs); // Check that the template argument list is well-formed for this // template. SmallVector<TemplateArgument, 4> Converted; if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, TemplateArgs, false, Converted)) return true; // Find the class template specialization declaration that // corresponds to these arguments. void *InsertPos = nullptr; ClassTemplateSpecializationDecl *PrevDecl = ClassTemplate->findSpecialization(Converted, InsertPos); TemplateSpecializationKind PrevDecl_TSK = PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared; // C++0x [temp.explicit]p2: // [...] An explicit instantiation shall appear in an enclosing // namespace of its template. [...] // // This is C++ DR 275. if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc, SS.isSet())) return true; ClassTemplateSpecializationDecl *Specialization = nullptr; bool HasNoEffect = false; if (PrevDecl) { if (CheckSpecializationInstantiationRedecl(TemplateNameLoc, TSK, PrevDecl, PrevDecl_TSK, PrevDecl->getPointOfInstantiation(), HasNoEffect)) return PrevDecl; // Even though HasNoEffect == true means that this explicit instantiation // has no effect on semantics, we go on to put its syntax in the AST. if (PrevDecl_TSK == TSK_ImplicitInstantiation || PrevDecl_TSK == TSK_Undeclared) { // Since the only prior class template specialization with these // arguments was referenced but not declared, reuse that // declaration node as our own, updating the source location // for the template name to reflect our new declaration. // (Other source locations will be updated later.) Specialization = PrevDecl; Specialization->setLocation(TemplateNameLoc); PrevDecl = nullptr; } } if (!Specialization) { // Create a new class template specialization declaration node for // this explicit specialization. Specialization = ClassTemplateSpecializationDecl::Create(Context, Kind, ClassTemplate->getDeclContext(), KWLoc, TemplateNameLoc, ClassTemplate, Converted.data(), Converted.size(), PrevDecl); SetNestedNameSpecifier(Specialization, SS); if (!HasNoEffect && !PrevDecl) { // Insert the new specialization. ClassTemplate->AddSpecialization(Specialization, InsertPos); } } // Build the fully-sugared type for this explicit instantiation as // the user wrote in the explicit instantiation itself. This means // that we'll pretty-print the type retrieved from the // specialization's declaration the way that the user actually wrote // the explicit instantiation, rather than formatting the name based // on the "canonical" representation used to store the template // arguments in the specialization. TypeSourceInfo *WrittenTy = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc, TemplateArgs, Context.getTypeDeclType(Specialization)); Specialization->setTypeAsWritten(WrittenTy); // Set source locations for keywords. Specialization->setExternLoc(ExternLoc); Specialization->setTemplateKeywordLoc(TemplateLoc); Specialization->setRBraceLoc(SourceLocation()); if (Attr) ProcessDeclAttributeList(S, Specialization, Attr); // Add the explicit instantiation into its lexical context. However, // since explicit instantiations are never found by name lookup, we // just put it into the declaration context directly. Specialization->setLexicalDeclContext(CurContext); CurContext->addDecl(Specialization); // Syntax is now OK, so return if it has no other effect on semantics. 
if (HasNoEffect) { // Set the template specialization kind. Specialization->setTemplateSpecializationKind(TSK); return Specialization; } // C++ [temp.explicit]p3: // A definition of a class template or class member template // shall be in scope at the point of the explicit instantiation of // the class template or class member template. // // This check comes when we actually try to perform the // instantiation. ClassTemplateSpecializationDecl *Def = cast_or_null<ClassTemplateSpecializationDecl>( Specialization->getDefinition()); if (!Def) InstantiateClassTemplateSpecialization(TemplateNameLoc, Specialization, TSK); else if (TSK == TSK_ExplicitInstantiationDefinition) { MarkVTableUsed(TemplateNameLoc, Specialization, true); Specialization->setPointOfInstantiation(Def->getPointOfInstantiation()); } // Instantiate the members of this class template specialization. Def = cast_or_null<ClassTemplateSpecializationDecl>( Specialization->getDefinition()); if (Def) { TemplateSpecializationKind Old_TSK = Def->getTemplateSpecializationKind(); // Fix a TSK_ExplicitInstantiationDeclaration followed by a // TSK_ExplicitInstantiationDefinition if (Old_TSK == TSK_ExplicitInstantiationDeclaration && TSK == TSK_ExplicitInstantiationDefinition) { // FIXME: Need to notify the ASTMutationListener that we did this. Def->setTemplateSpecializationKind(TSK); if (!getDLLAttr(Def) && getDLLAttr(Specialization) && Context.getTargetInfo().getCXXABI().isMicrosoft()) { // In the MS ABI, an explicit instantiation definition can add a dll // attribute to a template with a previous instantiation declaration. // MinGW doesn't allow this. auto *A = cast<InheritableAttr>( getDLLAttr(Specialization)->clone(getASTContext())); A->setInherited(true); Def->addAttr(A); checkClassLevelDLLAttribute(Def); // Propagate attribute to base class templates. for (auto &B : Def->bases()) { if (auto *BT = dyn_cast_or_null<ClassTemplateSpecializationDecl>( B.getType()->getAsCXXRecordDecl())) propagateDLLAttrToBaseClassTemplate(Def, A, BT, B.getLocStart()); } } } InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK); } // Set the template specialization kind. Specialization->setTemplateSpecializationKind(TSK); return Specialization; } // Explicit instantiation of a member class of a class template. 
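// (Editorial illustration added for clarity; not part of the original
// Clang source. 'Outer' is hypothetical.) An explicit instantiation of a
// member class, as handled by the function below, looks like:
//
//   template<typename T> struct Outer { struct Inner { T x; }; };
//   template struct Outer<int>::Inner; // instantiates the member class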
DeclResult Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr) { bool Owned = false; bool IsDependent = false; Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name, NameLoc, Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(), MultiTemplateParamsArg(), Owned, IsDependent, SourceLocation(), false, TypeResult(), /*IsTypeSpecifier*/false); assert(!IsDependent && "explicit instantiation of dependent name not yet handled"); if (!TagD) return true; TagDecl *Tag = cast<TagDecl>(TagD); assert(!Tag->isEnum() && "shouldn't see enumerations here"); if (Tag->isInvalidDecl()) return true; CXXRecordDecl *Record = cast<CXXRecordDecl>(Tag); CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass(); if (!Pattern) { Diag(TemplateLoc, diag::err_explicit_instantiation_nontemplate_type) << Context.getTypeDeclType(Record); Diag(Record->getLocation(), diag::note_nontemplate_decl_here); return true; } // C++0x [temp.explicit]p2: // If the explicit instantiation is for a class or member class, the // elaborated-type-specifier in the declaration shall include a // simple-template-id. // // C++98 has the same restriction, just worded differently. if (!ScopeSpecifierHasTemplateId(SS)) Diag(TemplateLoc, diag::ext_explicit_instantiation_without_qualified_id) << Record << SS.getRange(); // C++0x [temp.explicit]p2: // There are two forms of explicit instantiation: an explicit instantiation // definition and an explicit instantiation declaration. An explicit // instantiation declaration begins with the extern keyword. [...] TemplateSpecializationKind TSK = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition : TSK_ExplicitInstantiationDeclaration; // C++0x [temp.explicit]p2: // [...] An explicit instantiation shall appear in an enclosing // namespace of its template. [...] // // This is C++ DR 275. CheckExplicitInstantiationScope(*this, Record, NameLoc, true); // Verify that it is okay to explicitly instantiate here. CXXRecordDecl *PrevDecl = cast_or_null<CXXRecordDecl>(Record->getPreviousDecl()); if (!PrevDecl && Record->getDefinition()) PrevDecl = Record; if (PrevDecl) { MemberSpecializationInfo *MSInfo = PrevDecl->getMemberSpecializationInfo(); bool HasNoEffect = false; assert(MSInfo && "No member specialization information?"); if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK, PrevDecl, MSInfo->getTemplateSpecializationKind(), MSInfo->getPointOfInstantiation(), HasNoEffect)) return true; if (HasNoEffect) return TagD; } CXXRecordDecl *RecordDef = cast_or_null<CXXRecordDecl>(Record->getDefinition()); if (!RecordDef) { // C++ [temp.explicit]p3: // A definition of a member class of a class template shall be in scope // at the point of an explicit instantiation of the member class. CXXRecordDecl *Def = cast_or_null<CXXRecordDecl>(Pattern->getDefinition()); if (!Def) { Diag(TemplateLoc, diag::err_explicit_instantiation_undefined_member) << 0 << Record->getDeclName() << Record->getDeclContext(); Diag(Pattern->getLocation(), diag::note_forward_declaration) << Pattern; return true; } else { if (InstantiateClass(NameLoc, Record, Def, getTemplateInstantiationArgs(Record), TSK)) return true; RecordDef = cast_or_null<CXXRecordDecl>(Record->getDefinition()); if (!RecordDef) return true; } } // Instantiate all of the members of the class. 
InstantiateClassMembers(NameLoc, RecordDef, getTemplateInstantiationArgs(Record), TSK); if (TSK == TSK_ExplicitInstantiationDefinition) MarkVTableUsed(NameLoc, RecordDef, true); // FIXME: We don't have any representation for explicit instantiations of // member classes. Such a representation is not needed for compilation, but it // should be available for clients that want to see all of the declarations in // the source code. return TagD; } DeclResult Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D) { // Explicit instantiations always require a name. // TODO: check if/when DNInfo should replace Name. DeclarationNameInfo NameInfo = GetNameForDeclarator(D); DeclarationName Name = NameInfo.getName(); if (!Name) { if (!D.isInvalidType()) Diag(D.getDeclSpec().getLocStart(), diag::err_explicit_instantiation_requires_name) << D.getDeclSpec().getSourceRange() << D.getSourceRange(); return true; } // The scope passed in may not be a decl scope. Zip up the scope tree until // we find one that is. while ((S->getFlags() & Scope::DeclScope) == 0 || (S->getFlags() & Scope::TemplateParamScope) != 0) S = S->getParent(); // Determine the type of the declaration. TypeSourceInfo *T = GetTypeForDeclarator(D, S); QualType R = T->getType(); if (R.isNull()) return true; // C++ [dcl.stc]p1: // A storage-class-specifier shall not be specified in [...] an explicit // instantiation (14.7.2) directive. if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) { Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_of_typedef) << Name; return true; } else if (D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_unspecified) { // Complain about then remove the storage class specifier. Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_storage_class) << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc()); D.getMutableDeclSpec().ClearStorageClassSpecs(); } // C++0x [temp.explicit]p1: // [...] An explicit instantiation of a function template shall not use the // inline or constexpr specifiers. // Presumably, this also applies to member functions of class templates as // well. if (D.getDeclSpec().isInlineSpecified()) Diag(D.getDeclSpec().getInlineSpecLoc(), getLangOpts().CPlusPlus11 ? diag::err_explicit_instantiation_inline : diag::warn_explicit_instantiation_inline_0x) << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc()); if (D.getDeclSpec().isConstexprSpecified() && R->isFunctionType()) // FIXME: Add a fix-it to remove the 'constexpr' and add a 'const' if one is // not already specified. Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_explicit_instantiation_constexpr); // C++0x [temp.explicit]p2: // There are two forms of explicit instantiation: an explicit instantiation // definition and an explicit instantiation declaration. An explicit // instantiation declaration begins with the extern keyword. [...] TemplateSpecializationKind TSK = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition : TSK_ExplicitInstantiationDeclaration; LookupResult Previous(*this, NameInfo, LookupOrdinaryName); LookupParsedName(Previous, S, &D.getCXXScopeSpec()); if (!R->isFunctionType()) { // C++ [temp.explicit]p1: // A [...] static data member of a class template can be explicitly // instantiated from the member definition associated with its class // template. // C++1y [temp.explicit]p1: // A [...] variable [...] template specialization can be explicitly // instantiated from its template. 
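// (Editorial illustration added for clarity; not part of the original
// Clang source. 'C' and 'zero' are hypothetical.) The non-function cases
// accepted below cover both of these forms:
//
//   template<typename T> struct C { static T value; };
//   template<typename T> T C<T>::value = T();
//   template int C<int>::value;        // static data member
//
//   template<typename T> T zero = T(); // C++1y variable template
//   template int zero<int>;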
    if (Previous.isAmbiguous())
      return true;

    VarDecl *Prev = Previous.getAsSingle<VarDecl>();
    VarTemplateDecl *PrevTemplate = Previous.getAsSingle<VarTemplateDecl>();

    if (!PrevTemplate) {
      if (!Prev || !Prev->isStaticDataMember()) {
        // We expect to see a static data member here.
        Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_not_known)
            << Name;
        for (LookupResult::iterator P = Previous.begin(),
                                 PEnd = Previous.end();
             P != PEnd; ++P)
          Diag((*P)->getLocation(), diag::note_explicit_instantiation_here);
        return true;
      }

      if (!Prev->getInstantiatedFromStaticDataMember()) {
        // FIXME: Check for explicit specialization?
        Diag(D.getIdentifierLoc(),
             diag::err_explicit_instantiation_data_member_not_instantiated)
            << Prev;
        Diag(Prev->getLocation(), diag::note_explicit_instantiation_here);
        // FIXME: Can we provide a note showing where this was declared?
        return true;
      }
    } else {
      // Explicitly instantiate a variable template.

      // C++1y [dcl.spec.auto]p6:
      //   ... A program that uses auto or decltype(auto) in a context not
      //   explicitly allowed in this section is ill-formed.
      //
      // This includes auto-typed variable template instantiations.
      if (R->isUndeducedType()) {
        Diag(T->getTypeLoc().getLocStart(),
             diag::err_auto_not_allowed_var_inst);
        return true;
      }

      if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
        // C++1y [temp.explicit]p3:
        //   If the explicit instantiation is for a variable, the
        //   unqualified-id in the declaration shall be a template-id.
        Diag(D.getIdentifierLoc(),
             diag::err_explicit_instantiation_without_template_id)
            << PrevTemplate;
        Diag(PrevTemplate->getLocation(),
             diag::note_explicit_instantiation_here);
        return true;
      }

      // Translate the parser's template argument list into our AST format.
      TemplateArgumentListInfo TemplateArgs =
          makeTemplateArgumentListInfo(*this, *D.getName().TemplateId);

      DeclResult Res = CheckVarTemplateId(PrevTemplate, TemplateLoc,
                                          D.getIdentifierLoc(), TemplateArgs);
      if (Res.isInvalid())
        return true;

      // Ignore access control bits, we don't need them for redeclaration
      // checking.
      Prev = cast<VarDecl>(Res.get());
    }

    // C++0x [temp.explicit]p2:
    //   If the explicit instantiation is for a member function, a member
    //   class or a static data member of a class template specialization,
    //   the name of the class template specialization in the qualified-id
    //   for the member name shall be a simple-template-id.
    //
    // C++98 has the same restriction, just worded differently.
    //
    // This does not apply to variable template specializations, where the
    // template-id is in the unqualified-id instead.
    if (!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()) && !PrevTemplate)
      Diag(D.getIdentifierLoc(),
           diag::ext_explicit_instantiation_without_qualified_id)
          << Prev << D.getCXXScopeSpec().getRange();

    // Check the scope of this explicit instantiation.
    CheckExplicitInstantiationScope(*this, Prev, D.getIdentifierLoc(), true);

    // Verify that it is okay to explicitly instantiate here.
    TemplateSpecializationKind PrevTSK = Prev->getTemplateSpecializationKind();
    SourceLocation POI = Prev->getPointOfInstantiation();
    bool HasNoEffect = false;
    if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, Prev,
                                               PrevTSK, POI, HasNoEffect))
      return true;

    if (!HasNoEffect) {
      // Instantiate static data member or variable template.
      Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
      if (PrevTemplate) {
        // Merge attributes.
if (AttributeList *Attr = D.getDeclSpec().getAttributes().getList()) ProcessDeclAttributeList(S, Prev, Attr); } if (TSK == TSK_ExplicitInstantiationDefinition) InstantiateVariableDefinition(D.getIdentifierLoc(), Prev); } // Check the new variable specialization against the parsed input. if (PrevTemplate && Prev && !Context.hasSameType(Prev->getType(), R)) { Diag(T->getTypeLoc().getLocStart(), diag::err_invalid_var_template_spec_type) << 0 << PrevTemplate << R << Prev->getType(); Diag(PrevTemplate->getLocation(), diag::note_template_declared_here) << 2 << PrevTemplate->getDeclName(); return true; } // FIXME: Create an ExplicitInstantiation node? return (Decl*) nullptr; } // If the declarator is a template-id, translate the parser's template // argument list into our AST format. bool HasExplicitTemplateArgs = false; TemplateArgumentListInfo TemplateArgs; if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) { TemplateArgs = makeTemplateArgumentListInfo(*this, *D.getName().TemplateId); HasExplicitTemplateArgs = true; } // C++ [temp.explicit]p1: // A [...] function [...] can be explicitly instantiated from its template. // A member function [...] of a class template can be explicitly // instantiated from the member definition associated with its class // template. UnresolvedSet<8> Matches; TemplateSpecCandidateSet FailedCandidates(D.getIdentifierLoc()); for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end(); P != PEnd; ++P) { NamedDecl *Prev = *P; if (!HasExplicitTemplateArgs) { if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Prev)) { QualType Adjusted = adjustCCAndNoReturn(R, Method->getType()); if (Context.hasSameUnqualifiedType(Method->getType(), Adjusted)) { Matches.clear(); Matches.addDecl(Method, P.getAccess()); if (Method->getTemplateSpecializationKind() == TSK_Undeclared) break; } } } FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Prev); if (!FunTmpl) continue; TemplateDeductionInfo Info(FailedCandidates.getLocation()); FunctionDecl *Specialization = nullptr; if (TemplateDeductionResult TDK = DeduceTemplateArguments(FunTmpl, (HasExplicitTemplateArgs ? &TemplateArgs : nullptr), R, Specialization, Info)) { // Keep track of almost-matches. FailedCandidates.addCandidate() .set(FunTmpl->getTemplatedDecl(), MakeDeductionFailureInfo(Context, TDK, Info)); (void)TDK; continue; } Matches.addDecl(Specialization, P.getAccess()); } // Find the most specialized function template specialization. UnresolvedSetIterator Result = getMostSpecialized( Matches.begin(), Matches.end(), FailedCandidates, D.getIdentifierLoc(), PDiag(diag::err_explicit_instantiation_not_known) << Name, PDiag(diag::err_explicit_instantiation_ambiguous) << Name, PDiag(diag::note_explicit_instantiation_candidate)); if (Result == Matches.end()) return true; // Ignore access control bits, we don't need them for redeclaration checking. FunctionDecl *Specialization = cast<FunctionDecl>(*Result); // C++11 [except.spec]p4 // In an explicit instantiation an exception-specification may be specified, // but is not required. // If an exception-specification is specified in an explicit instantiation // directive, it shall be compatible with the exception-specifications of // other declarations of that function. 
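// (Editorial illustration added for clarity; not part of the original
// Clang source. 'g' is hypothetical.) Under the paragraph quoted above,
// both of these explicit instantiations are valid:
//
//   template<typename T> void g(T) noexcept;
//   template void g<int>(int) noexcept; // repeated: must be compatible
//   template void g<long>(long);        // omitted: also permitted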
if (auto *FPT = R->getAs<FunctionProtoType>()) if (FPT->hasExceptionSpec()) { unsigned DiagID = diag::err_mismatched_exception_spec_explicit_instantiation; if (getLangOpts().MicrosoftExt) DiagID = diag::ext_mismatched_exception_spec_explicit_instantiation; bool Result = CheckEquivalentExceptionSpec( PDiag(DiagID) << Specialization->getType(), PDiag(diag::note_explicit_instantiation_here), Specialization->getType()->getAs<FunctionProtoType>(), Specialization->getLocation(), FPT, D.getLocStart()); // In Microsoft mode, mismatching exception specifications just cause a // warning. if (!getLangOpts().MicrosoftExt && Result) return true; } if (Specialization->getTemplateSpecializationKind() == TSK_Undeclared) { Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_member_function_not_instantiated) << Specialization << (Specialization->getTemplateSpecializationKind() == TSK_ExplicitSpecialization); Diag(Specialization->getLocation(), diag::note_explicit_instantiation_here); return true; } FunctionDecl *PrevDecl = Specialization->getPreviousDecl(); if (!PrevDecl && Specialization->isThisDeclarationADefinition()) PrevDecl = Specialization; if (PrevDecl) { bool HasNoEffect = false; if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, PrevDecl, PrevDecl->getTemplateSpecializationKind(), PrevDecl->getPointOfInstantiation(), HasNoEffect)) return true; // FIXME: We may still want to build some representation of this // explicit specialization. if (HasNoEffect) return (Decl*) nullptr; } Specialization->setTemplateSpecializationKind(TSK, D.getIdentifierLoc()); AttributeList *Attr = D.getDeclSpec().getAttributes().getList(); if (Attr) ProcessDeclAttributeList(S, Specialization, Attr); if (Specialization->isDefined()) { // Let the ASTConsumer know that this function has been explicitly // instantiated now, and its linkage might have changed. Consumer.HandleTopLevelDecl(DeclGroupRef(Specialization)); } else if (TSK == TSK_ExplicitInstantiationDefinition) InstantiateFunctionDefinition(D.getIdentifierLoc(), Specialization); // C++0x [temp.explicit]p2: // If the explicit instantiation is for a member function, a member class // or a static data member of a class template specialization, the name of // the class template specialization in the qualified-id for the member // name shall be a simple-template-id. // // C++98 has the same restriction, just worded differently. FunctionTemplateDecl *FunTmpl = Specialization->getPrimaryTemplate(); if (D.getName().getKind() != UnqualifiedId::IK_TemplateId && !FunTmpl && D.getCXXScopeSpec().isSet() && !ScopeSpecifierHasTemplateId(D.getCXXScopeSpec())) Diag(D.getIdentifierLoc(), diag::ext_explicit_instantiation_without_qualified_id) << Specialization << D.getCXXScopeSpec().getRange(); CheckExplicitInstantiationScope(*this, FunTmpl? (NamedDecl *)FunTmpl : Specialization->getInstantiatedFromMemberFunction(), D.getIdentifierLoc(), D.getCXXScopeSpec().isSet()); // FIXME: Create some kind of ExplicitInstantiationDecl here. return (Decl*) nullptr; } TypeResult Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc) { // This has to hold, because SS is expected to be defined. 
assert(Name && "Expected a name in a dependent tag"); NestedNameSpecifier *NNS = SS.getScopeRep(); if (!NNS) return true; TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec); if (TUK == TUK_Declaration || TUK == TUK_Definition) { Diag(NameLoc, diag::err_dependent_tag_decl) << (TUK == TUK_Definition) << Kind << SS.getRange(); return true; } // Create the resulting type. ElaboratedTypeKeyword Kwd = TypeWithKeyword::getKeywordForTagTypeKind(Kind); QualType Result = Context.getDependentNameType(Kwd, NNS, Name); // Create type-source location information for this type. TypeLocBuilder TLB; DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(Result); TL.setElaboratedKeywordLoc(TagLoc); TL.setQualifierLoc(SS.getWithLocInContext(Context)); TL.setNameLoc(NameLoc); return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result)); } TypeResult Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc) { if (SS.isInvalid()) return true; if (TypenameLoc.isValid() && S && !S->getTemplateParamParent()) Diag(TypenameLoc, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_typename_outside_of_template : diag::ext_typename_outside_of_template) << FixItHint::CreateRemoval(TypenameLoc); NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context); QualType T = CheckTypenameType(TypenameLoc.isValid()? ETK_Typename : ETK_None, TypenameLoc, QualifierLoc, II, IdLoc); if (T.isNull()) return true; TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T); if (isa<DependentNameType>(T)) { DependentNameTypeLoc TL = TSI->getTypeLoc().castAs<DependentNameTypeLoc>(); TL.setElaboratedKeywordLoc(TypenameLoc); TL.setQualifierLoc(QualifierLoc); TL.setNameLoc(IdLoc); } else { ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>(); TL.setElaboratedKeywordLoc(TypenameLoc); TL.setQualifierLoc(QualifierLoc); TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc); } return CreateParsedType(T, TSI); } TypeResult Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateIn, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc) { if (TypenameLoc.isValid() && S && !S->getTemplateParamParent()) Diag(TypenameLoc, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_typename_outside_of_template : diag::ext_typename_outside_of_template) << FixItHint::CreateRemoval(TypenameLoc); // Translate the parser's template argument list in our AST format. TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc); translateTemplateArguments(TemplateArgsIn, TemplateArgs); TemplateName Template = TemplateIn.get(); if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) { // Construct a dependent template specialization type. assert(DTN && "dependent template has non-dependent name?"); assert(DTN->getQualifier() == SS.getScopeRep()); QualType T = Context.getDependentTemplateSpecializationType(ETK_Typename, DTN->getQualifier(), DTN->getIdentifier(), TemplateArgs); // Create source-location information for this type. 
TypeLocBuilder Builder; DependentTemplateSpecializationTypeLoc SpecTL = Builder.push<DependentTemplateSpecializationTypeLoc>(T); SpecTL.setElaboratedKeywordLoc(TypenameLoc); SpecTL.setQualifierLoc(SS.getWithLocInContext(Context)); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateNameLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo()); return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T)); } QualType T = CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs); if (T.isNull()) return true; // Provide source-location information for the template specialization type. TypeLocBuilder Builder; TemplateSpecializationTypeLoc SpecTL = Builder.push<TemplateSpecializationTypeLoc>(T); SpecTL.setTemplateKeywordLoc(TemplateKWLoc); SpecTL.setTemplateNameLoc(TemplateNameLoc); SpecTL.setLAngleLoc(LAngleLoc); SpecTL.setRAngleLoc(RAngleLoc); for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I) SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo()); T = Context.getElaboratedType(ETK_Typename, SS.getScopeRep(), T); ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T); TL.setElaboratedKeywordLoc(TypenameLoc); TL.setQualifierLoc(SS.getWithLocInContext(Context)); TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T); return CreateParsedType(T, TSI); } /// Determine whether this failed name lookup should be treated as being /// disabled by a usage of std::enable_if. static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II, SourceRange &CondRange) { // We must be looking for a ::type... if (!II.isStr("type")) return false; // ... within an explicitly-written template specialization... if (!NNS || !NNS.getNestedNameSpecifier()->getAsType()) return false; TypeLoc EnableIfTy = NNS.getTypeLoc(); TemplateSpecializationTypeLoc EnableIfTSTLoc = EnableIfTy.getAs<TemplateSpecializationTypeLoc>(); if (!EnableIfTSTLoc || EnableIfTSTLoc.getNumArgs() == 0) return false; const TemplateSpecializationType *EnableIfTST = cast<TemplateSpecializationType>(EnableIfTSTLoc.getTypePtr()); // ... which names a complete class template declaration... const TemplateDecl *EnableIfDecl = EnableIfTST->getTemplateName().getAsTemplateDecl(); if (!EnableIfDecl || EnableIfTST->isIncompleteType()) return false; // ... called "enable_if". const IdentifierInfo *EnableIfII = EnableIfDecl->getDeclName().getAsIdentifierInfo(); if (!EnableIfII || !EnableIfII->isStr("enable_if")) return false; // Assume the first template argument is the condition. CondRange = EnableIfTSTLoc.getArgLoc(0).getSourceRange(); return true; } /// \brief Build the type that describes a C++ typename specifier, /// e.g., "typename T::type". QualType Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc) { CXXScopeSpec SS; SS.Adopt(QualifierLoc); DeclContext *Ctx = computeDeclContext(SS); if (!Ctx) { // If the nested-name-specifier is dependent and couldn't be // resolved to a type, build a typename type. assert(QualifierLoc.getNestedNameSpecifier()->isDependent()); return Context.getDependentNameType(Keyword, QualifierLoc.getNestedNameSpecifier(), &II); } // If the nested-name-specifier refers to the current instantiation, // the "typename" keyword itself is superfluous. In C++03, the // program is actually ill-formed. 
However, DR 382 (in C++0x CD1) // allows such extraneous "typename" keywords, and we retroactively // apply this DR to C++03 code with only a warning. In any case we continue. if (RequireCompleteDeclContext(SS, Ctx)) return QualType(); DeclarationName Name(&II); LookupResult Result(*this, Name, IILoc, LookupOrdinaryName); LookupQualifiedName(Result, Ctx, SS); unsigned DiagID = 0; Decl *Referenced = nullptr; switch (Result.getResultKind()) { case LookupResult::NotFound: { // If we're looking up 'type' within a template named 'enable_if', produce // a more specific diagnostic. SourceRange CondRange; if (isEnableIf(QualifierLoc, II, CondRange)) { Diag(CondRange.getBegin(), diag::err_typename_nested_not_found_enable_if) << Ctx << CondRange; return QualType(); } DiagID = diag::err_typename_nested_not_found; break; } case LookupResult::FoundUnresolvedValue: { // We found a using declaration that is a value. Most likely, the using // declaration itself is meant to have the 'typename' keyword. SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(), IILoc); Diag(IILoc, diag::err_typename_refers_to_using_value_decl) << Name << Ctx << FullRange; if (UnresolvedUsingValueDecl *Using = dyn_cast<UnresolvedUsingValueDecl>(Result.getRepresentativeDecl())){ SourceLocation Loc = Using->getQualifierLoc().getBeginLoc(); Diag(Loc, diag::note_using_value_decl_missing_typename) << FixItHint::CreateInsertion(Loc, "typename "); } } // Fall through to create a dependent typename type, from which we can recover // better. LLVM_FALLTHROUGH; // HLSL Change case LookupResult::NotFoundInCurrentInstantiation: // Okay, it's a member of an unknown instantiation. return Context.getDependentNameType(Keyword, QualifierLoc.getNestedNameSpecifier(), &II); case LookupResult::Found: if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) { // We found a type. Build an ElaboratedType, since the // typename-specifier was just sugar. MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false); return Context.getElaboratedType(ETK_Typename, QualifierLoc.getNestedNameSpecifier(), Context.getTypeDeclType(Type)); } DiagID = diag::err_typename_nested_not_type; Referenced = Result.getFoundDecl(); break; case LookupResult::FoundOverloaded: DiagID = diag::err_typename_nested_not_type; Referenced = *Result.begin(); break; case LookupResult::Ambiguous: return QualType(); } // If we get here, it's because name lookup did not find a // type. Emit an appropriate diagnostic and return an error. SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(), IILoc); Diag(IILoc, DiagID) << FullRange << Name << Ctx; if (Referenced) Diag(Referenced->getLocation(), diag::note_typename_refers_here) << Name; return QualType(); } namespace { // See Sema::RebuildTypeInCurrentInstantiation class CurrentInstantiationRebuilder : public TreeTransform<CurrentInstantiationRebuilder> { SourceLocation Loc; DeclarationName Entity; public: typedef TreeTransform<CurrentInstantiationRebuilder> inherited; CurrentInstantiationRebuilder(Sema &SemaRef, SourceLocation Loc, DeclarationName Entity) : TreeTransform<CurrentInstantiationRebuilder>(SemaRef), Loc(Loc), Entity(Entity) { } /// \brief Determine whether the given type \p T has already been /// transformed. /// /// For the purposes of type reconstruction, a type has already been /// transformed if it is NULL or if it is not dependent. 
bool AlreadyTransformed(QualType T) { return T.isNull() || !T->isDependentType(); } /// \brief Returns the location of the entity whose type is being /// rebuilt. SourceLocation getBaseLocation() { return Loc; } /// \brief Returns the name of the entity whose type is being rebuilt. DeclarationName getBaseEntity() { return Entity; } /// \brief Sets the "base" location and entity when that /// information is known based on another transformation. void setBase(SourceLocation Loc, DeclarationName Entity) { this->Loc = Loc; this->Entity = Entity; } ExprResult TransformLambdaExpr(LambdaExpr *E) { // Lambdas never need to be transformed. return E; } }; } /// \brief Rebuilds a type within the context of the current instantiation. /// /// The type \p T is part of the type of an out-of-line member definition of /// a class template (or class template partial specialization) that was parsed /// and constructed before we entered the scope of the class template (or /// partial specialization thereof). This routine will rebuild that type now /// that we have entered the declarator's scope, which may produce different /// canonical types, e.g., /// /// \code /// template<typename T> /// struct X { /// typedef T* pointer; /// pointer data(); /// }; /// /// template<typename T> /// typename X<T>::pointer X<T>::data() { ... } /// \endcode /// /// Here, the type "typename X<T>::pointer" will be created as a DependentNameType, /// since we do not know that we can look into X<T> when we parsed the type. /// This function will rebuild the type, performing the lookup of "pointer" /// in X<T> and returning an ElaboratedType whose canonical type is the same /// as the canonical type of T*, allowing the return types of the out-of-line /// definition and the declaration to match. TypeSourceInfo *Sema::RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name) { if (!T || !T->getType()->isDependentType()) return T; CurrentInstantiationRebuilder Rebuilder(*this, Loc, Name); return Rebuilder.TransformType(T); } ExprResult Sema::RebuildExprInCurrentInstantiation(Expr *E) { CurrentInstantiationRebuilder Rebuilder(*this, E->getExprLoc(), DeclarationName()); return Rebuilder.TransformExpr(E); } bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) { if (SS.isInvalid()) return true; NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context); CurrentInstantiationRebuilder Rebuilder(*this, SS.getRange().getBegin(), DeclarationName()); NestedNameSpecifierLoc Rebuilt = Rebuilder.TransformNestedNameSpecifierLoc(QualifierLoc); if (!Rebuilt) return true; SS.Adopt(Rebuilt); return false; } /// \brief Rebuild the template parameters now that we know we're in a current /// instantiation. bool Sema::RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params) { for (unsigned I = 0, N = Params->size(); I != N; ++I) { Decl *Param = Params->getParam(I); // There is nothing to rebuild in a type parameter. if (isa<TemplateTypeParmDecl>(Param)) continue; // Rebuild the template parameter list of a template template parameter. if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(Param)) { if (RebuildTemplateParamsInCurrentInstantiation( TTP->getTemplateParameters())) return true; continue; } // Rebuild the type of a non-type template parameter. 
    NonTypeTemplateParmDecl *NTTP = cast<NonTypeTemplateParmDecl>(Param);
    TypeSourceInfo *NewTSI
      = RebuildTypeInCurrentInstantiation(NTTP->getTypeSourceInfo(),
                                          NTTP->getLocation(),
                                          NTTP->getDeclName());
    if (!NewTSI)
      return true;

    if (NewTSI != NTTP->getTypeSourceInfo()) {
      NTTP->setTypeSourceInfo(NewTSI);
      NTTP->setType(NewTSI->getType());
    }
  }

  return false;
}

/// \brief Produces a formatted string that describes the binding of
/// template parameters to template arguments.
std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                      const TemplateArgumentList &Args) {
  return getTemplateArgumentBindingsText(Params, Args.data(), Args.size());
}

std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                      const TemplateArgument *Args,
                                      unsigned NumArgs) {
  SmallString<128> Str;
  llvm::raw_svector_ostream Out(Str);

  if (!Params || Params->size() == 0 || NumArgs == 0)
    return std::string();

  for (unsigned I = 0, N = Params->size(); I != N; ++I) {
    if (I >= NumArgs)
      break;

    if (I == 0)
      Out << "[with ";
    else
      Out << ", ";

    if (const IdentifierInfo *Id = Params->getParam(I)->getIdentifier()) {
      Out << Id->getName();
    } else {
      Out << '$' << I;
    }

    Out << " = ";
    Args[I].print(getPrintingPolicy(), Out);
  }

  Out << ']';
  return Out.str();
}

void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                                    CachedTokens &Toks) {
  if (!FD)
    return;

  LateParsedTemplate *LPT = new LateParsedTemplate;

  // Take tokens to avoid allocations
  LPT->Toks.swap(Toks);
  LPT->D = FnD;
  LateParsedTemplateMap.insert(std::make_pair(FD, LPT));

  FD->setLateTemplateParsed(true);
}

void Sema::UnmarkAsLateParsedTemplate(FunctionDecl *FD) {
  if (!FD)
    return;
  FD->setLateTemplateParsed(false);
}

bool Sema::IsInsideALocalClassWithinATemplateFunction() {
  DeclContext *DC = CurContext;
  while (DC) {
    // Test the context being walked (DC), not CurContext; otherwise the
    // record check never advances along with the loop.
    if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
      const FunctionDecl *FD = RD->isLocalClass();
      return (FD && FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate);
    } else if (DC->isTranslationUnit() || DC->isNamespace())
      return false;

    DC = DC->getParent();
  }
  return false;
}
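// (Editorial illustration added for clarity; not part of the original
// Clang source. 'Buf' is hypothetical.) For a parameter list <T, N> bound
// to the arguments of, say,
//
//   template<typename T, int N> struct Buf { T data[N]; };
//   Buf<float, 4> b;
//
// getTemplateArgumentBindingsText above renders "[with T = float, N = 4]",
// the form used in template diagnostics.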
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaExprCXX.cpp
//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// /// /// \file /// \brief Implements semantic analysis for C++ expressions. /// //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "TreeTransform.h" #include "TypeLocBuilder.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTLambda.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/CharUnits.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/PartialDiagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Initialization.h" #include "clang/Sema/Lookup.h" #include "clang/Sema/ParsedTemplate.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/SemaLambda.h" #include "clang/Sema/TemplateDeduction.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/ErrorHandling.h" #include "clang/Sema/SemaHLSL.h" // HLSL Change using namespace clang; using namespace sema; /// \brief Handle the result of the special case name lookup for inheriting /// constructor declarations. 'NS::X::X' and 'NS::X<...>::X' are treated as /// constructor names in member using declarations, even if 'X' is not the /// name of the corresponding type. ParsedType Sema::getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name) { NestedNameSpecifier *NNS = SS.getScopeRep(); // Convert the nested-name-specifier into a type. QualType Type; switch (NNS->getKind()) { case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: Type = QualType(NNS->getAsType(), 0); break; case NestedNameSpecifier::Identifier: // Strip off the last layer of the nested-name-specifier and build a // typename type for it. assert(NNS->getAsIdentifier() == &Name && "not a constructor name"); Type = Context.getDependentNameType(ETK_None, NNS->getPrefix(), NNS->getAsIdentifier()); break; case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: llvm_unreachable("Nested name specifier is not a type for inheriting ctor"); } // This reference to the type is located entirely at the location of the // final identifier in the qualified-id. return CreateParsedType(Type, Context.getTrivialTypeSourceInfo(Type, NameLoc)); } ParsedType Sema::getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectTypePtr, bool EnteringContext) { // Determine where to perform name lookup. // FIXME: This area of the standard is very messy, and the current // wording is rather unclear about which scopes we search for the // destructor name; see core issues 399 and 555. 
Issue 399 in // particular shows where the current description of destructor name // lookup is completely out of line with existing practice, e.g., // this appears to be ill-formed: // // namespace N { // template <typename T> struct S { // ~S(); // }; // } // // void f(N::S<int>* s) { // s->N::S<int>::~S(); // } // // See also PR6358 and PR6359. // For this reason, we're currently only doing the C++03 version of this // code; the C++0x version has to wait until we get a proper spec. QualType SearchType; DeclContext *LookupCtx = nullptr; bool isDependent = false; bool LookInScope = false; if (SS.isInvalid()) return ParsedType(); // If we have an object type, it's because we are in a // pseudo-destructor-expression or a member access expression, and // we know what type we're looking for. if (ObjectTypePtr) SearchType = GetTypeFromParser(ObjectTypePtr); if (SS.isSet()) { NestedNameSpecifier *NNS = SS.getScopeRep(); bool AlreadySearched = false; bool LookAtPrefix = true; // C++11 [basic.lookup.qual]p6: // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier, // the type-names are looked up as types in the scope designated by the // nested-name-specifier. Similarly, in a qualified-id of the form: // // nested-name-specifier[opt] class-name :: ~ class-name // // the second class-name is looked up in the same scope as the first. // // Here, we determine whether the code below is permitted to look at the // prefix of the nested-name-specifier. DeclContext *DC = computeDeclContext(SS, EnteringContext); if (DC && DC->isFileContext()) { AlreadySearched = true; LookupCtx = DC; isDependent = false; } else if (DC && isa<CXXRecordDecl>(DC)) { LookAtPrefix = false; LookInScope = true; } // The second case from the C++03 rules quoted further above. NestedNameSpecifier *Prefix = nullptr; if (AlreadySearched) { // Nothing left to do. } else if (LookAtPrefix && (Prefix = NNS->getPrefix())) { CXXScopeSpec PrefixSS; PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data())); LookupCtx = computeDeclContext(PrefixSS, EnteringContext); isDependent = isDependentScopeSpecifier(PrefixSS); } else if (ObjectTypePtr) { LookupCtx = computeDeclContext(SearchType); isDependent = SearchType->isDependentType(); } else { LookupCtx = computeDeclContext(SS, EnteringContext); isDependent = LookupCtx && LookupCtx->isDependentContext(); } } else if (ObjectTypePtr) { // C++ [basic.lookup.classref]p3: // If the unqualified-id is ~type-name, the type-name is looked up // in the context of the entire postfix-expression. If the type T // of the object expression is of a class type C, the type-name is // also looked up in the scope of class C. At least one of the // lookups shall find a name that refers to (possibly // cv-qualified) T. LookupCtx = computeDeclContext(SearchType); isDependent = SearchType->isDependentType(); assert((isDependent || !SearchType->isIncompleteType()) && "Caller should have completed object type"); LookInScope = true; } else { // Perform lookup into the current scope (only). LookInScope = true; } TypeDecl *NonMatchingTypeDecl = nullptr; LookupResult Found(*this, &II, NameLoc, LookupOrdinaryName); for (unsigned Step = 0; Step != 2; ++Step) { // Look for the name first in the computed lookup context (if we // have one) and, if that fails to find a match, in the scope (if // we're allowed to look there). 
Found.clear(); if (Step == 0 && LookupCtx) LookupQualifiedName(Found, LookupCtx); else if (Step == 1 && LookInScope && S) LookupName(Found, S); else continue; // FIXME: Should we be suppressing ambiguities here? if (Found.isAmbiguous()) return ParsedType(); if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) { QualType T = Context.getTypeDeclType(Type); MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false); if (SearchType.isNull() || SearchType->isDependentType() || Context.hasSameUnqualifiedType(T, SearchType)) { // We found our type! return CreateParsedType(T, Context.getTrivialTypeSourceInfo(T, NameLoc)); } if (!SearchType.isNull()) NonMatchingTypeDecl = Type; } // If the name that we found is a class template name, and it is // the same name as the template name in the last part of the // nested-name-specifier (if present) or the object type, then // this is the destructor for that class. // FIXME: This is a workaround until we get real drafting for core // issue 399, for which there isn't even an obvious direction. if (ClassTemplateDecl *Template = Found.getAsSingle<ClassTemplateDecl>()) { QualType MemberOfType; if (SS.isSet()) { if (DeclContext *Ctx = computeDeclContext(SS, EnteringContext)) { // Figure out the type of the context, if it has one. if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx)) MemberOfType = Context.getTypeDeclType(Record); } } if (MemberOfType.isNull()) MemberOfType = SearchType; if (MemberOfType.isNull()) continue; // We're referring into a class template specialization. If the // class template we found is the same as the template being // specialized, we found what we are looking for. if (const RecordType *Record = MemberOfType->getAs<RecordType>()) { if (ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) { if (Spec->getSpecializedTemplate()->getCanonicalDecl() == Template->getCanonicalDecl()) return CreateParsedType( MemberOfType, Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc)); } continue; } // We're referring to an unresolved class template // specialization. Determine whether we class template we found // is the same as the template being specialized or, if we don't // know which template is being specialized, that it at least // has the same name. if (const TemplateSpecializationType *SpecType = MemberOfType->getAs<TemplateSpecializationType>()) { TemplateName SpecName = SpecType->getTemplateName(); // The class template we found is the same template being // specialized. if (TemplateDecl *SpecTemplate = SpecName.getAsTemplateDecl()) { if (SpecTemplate->getCanonicalDecl() == Template->getCanonicalDecl()) return CreateParsedType( MemberOfType, Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc)); continue; } // The class template we found has the same name as the // (dependent) template name being specialized. if (DependentTemplateName *DepTemplate = SpecName.getAsDependentTemplateName()) { if (DepTemplate->isIdentifier() && DepTemplate->getIdentifier() == Template->getIdentifier()) return CreateParsedType( MemberOfType, Context.getTrivialTypeSourceInfo(MemberOfType, NameLoc)); continue; } } } } if (isDependent) { // We didn't find our type, but that's okay: it's dependent // anyway. // FIXME: What if we have no nested-name-specifier? 
QualType T = CheckTypenameType(ETK_None, SourceLocation(), SS.getWithLocInContext(Context), II, NameLoc); return ParsedType::make(T); } if (NonMatchingTypeDecl) { QualType T = Context.getTypeDeclType(NonMatchingTypeDecl); Diag(NameLoc, diag::err_destructor_expr_type_mismatch) << T << SearchType; Diag(NonMatchingTypeDecl->getLocation(), diag::note_destructor_type_here) << T; } else if (ObjectTypePtr) Diag(NameLoc, diag::err_ident_in_dtor_not_a_type) << &II; else { SemaDiagnosticBuilder DtorDiag = Diag(NameLoc, diag::err_destructor_class_name); if (S) { const DeclContext *Ctx = S->getEntity(); if (const CXXRecordDecl *Class = dyn_cast_or_null<CXXRecordDecl>(Ctx)) DtorDiag << FixItHint::CreateReplacement(SourceRange(NameLoc), Class->getNameAsString()); } } return ParsedType(); } ParsedType Sema::getDestructorType(const DeclSpec& DS, ParsedType ObjectType) { if (DS.getTypeSpecType() == DeclSpec::TST_error || !ObjectType) return ParsedType(); assert(DS.getTypeSpecType() == DeclSpec::TST_decltype && "only get destructor types from declspecs"); QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc()); QualType SearchType = GetTypeFromParser(ObjectType); if (SearchType->isDependentType() || Context.hasSameUnqualifiedType(SearchType, T)) { return ParsedType::make(T); } Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch) << T << SearchType; return ParsedType(); } bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Name) { assert(Name.getKind() == UnqualifiedId::IK_LiteralOperatorId); if (!SS.isValid()) return false; switch (SS.getScopeRep()->getKind()) { case NestedNameSpecifier::Identifier: case NestedNameSpecifier::TypeSpec: case NestedNameSpecifier::TypeSpecWithTemplate: // Per C++11 [over.literal]p2, literal operators can only be declared at // namespace scope. Therefore, this unqualified-id cannot name anything. // Reject it early, because we have no AST representation for this in the // case where the scope is dependent. Diag(Name.getLocStart(), diag::err_literal_operator_id_outside_namespace) << SS.getScopeRep(); return true; case NestedNameSpecifier::Global: case NestedNameSpecifier::Super: case NestedNameSpecifier::Namespace: case NestedNameSpecifier::NamespaceAlias: return false; } llvm_unreachable("unknown nested name specifier kind"); } /// \brief Build a C++ typeid expression with a type operand. ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc) { // C++ [expr.typeid]p4: // The top-level cv-qualifiers of the lvalue expression or the type-id // that is the operand of typeid are always ignored. // If the type of the type-id is a class type or a reference to a class // type, the class shall be completely-defined. Qualifiers Quals; QualType T = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(), Quals); if (T->getAs<RecordType>() && RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid)) return ExprError(); if (T->isVariablyModifiedType()) return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << T); return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), Operand, SourceRange(TypeidLoc, RParenLoc)); } /// \brief Build a C++ typeid expression with an expression operand. 
ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *E, SourceLocation RParenLoc) { bool WasEvaluated = false; if (E && !E->isTypeDependent()) { if (E->getType()->isPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return ExprError(); E = result.get(); } QualType T = E->getType(); if (const RecordType *RecordT = T->getAs<RecordType>()) { CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl()); // C++ [expr.typeid]p3: // [...] If the type of the expression is a class type, the class // shall be completely-defined. if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid)) return ExprError(); // C++ [expr.typeid]p3: // When typeid is applied to an expression other than an glvalue of a // polymorphic class type [...] [the] expression is an unevaluated // operand. [...] if (RecordD->isPolymorphic() && E->isGLValue()) { // The subexpression is potentially evaluated; switch the context // and recheck the subexpression. ExprResult Result = TransformToPotentiallyEvaluated(E); if (Result.isInvalid()) return ExprError(); E = Result.get(); // We require a vtable to query the type at run time. MarkVTableUsed(TypeidLoc, RecordD); WasEvaluated = true; } } // C++ [expr.typeid]p4: // [...] If the type of the type-id is a reference to a possibly // cv-qualified type, the result of the typeid expression refers to a // std::type_info object representing the cv-unqualified referenced // type. Qualifiers Quals; QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals); if (!Context.hasSameType(T, UnqualT)) { T = UnqualT; E = ImpCastExprToType(E, UnqualT, CK_NoOp, E->getValueKind()).get(); } } if (E->getType()->isVariablyModifiedType()) return ExprError(Diag(TypeidLoc, diag::err_variably_modified_typeid) << E->getType()); else if (ActiveTemplateInstantiations.empty() && E->HasSideEffects(Context, WasEvaluated)) { // The expression operand for typeid is in an unevaluated expression // context, so side effects could result in unintended consequences. Diag(E->getExprLoc(), WasEvaluated ? diag::warn_side_effects_typeid : diag::warn_side_effects_unevaluated_context); } return new (Context) CXXTypeidExpr(TypeInfoType.withConst(), E, SourceRange(TypeidLoc, RParenLoc)); } /// ActOnCXXTypeidOfType - Parse typeid( type-id ) or typeid (expression); ExprResult Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc) { // Find the std::type_info type. if (!getStdNamespace()) return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid)); if (!CXXTypeInfoDecl) { IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info"); LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName); LookupQualifiedName(R, getStdNamespace()); CXXTypeInfoDecl = R.getAsSingle<RecordDecl>(); // Microsoft's typeinfo doesn't have type_info in std but in the global // namespace if _HAS_EXCEPTIONS is defined to 0. See PR13153. if (!CXXTypeInfoDecl && LangOpts.MSVCCompat) { LookupQualifiedName(R, Context.getTranslationUnitDecl()); CXXTypeInfoDecl = R.getAsSingle<RecordDecl>(); } if (!CXXTypeInfoDecl) return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid)); } if (!getLangOpts().RTTI) { return ExprError(Diag(OpLoc, diag::err_no_typeid_with_fno_rtti)); } QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl); if (isType) { // The operand is a type; handle it as such. 
TypeSourceInfo *TInfo = nullptr; QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr), &TInfo); if (T.isNull()) return ExprError(); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc); return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc); } // The operand is an expression. return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc); } /// \brief Build a Microsoft __uuidof expression with a type operand. ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc) { if (!Operand->getType()->isDependentType()) { bool HasMultipleGUIDs = false; if (!CXXUuidofExpr::GetUuidAttrOfType(Operand->getType(), &HasMultipleGUIDs)) { if (HasMultipleGUIDs) return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids)); else return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid)); } } return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), Operand, SourceRange(TypeidLoc, RParenLoc)); } /// \brief Build a Microsoft __uuidof expression with an expression operand. ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *E, SourceLocation RParenLoc) { if (!E->getType()->isDependentType()) { bool HasMultipleGUIDs = false; if (!CXXUuidofExpr::GetUuidAttrOfType(E->getType(), &HasMultipleGUIDs) && !E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) { if (HasMultipleGUIDs) return ExprError(Diag(TypeidLoc, diag::err_uuidof_with_multiple_guids)); else return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid)); } } return new (Context) CXXUuidofExpr(TypeInfoType.withConst(), E, SourceRange(TypeidLoc, RParenLoc)); } /// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression); ExprResult Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc) { // If MSVCGuidDecl has not been cached, do the lookup. if (!MSVCGuidDecl) { IdentifierInfo *GuidII = &PP.getIdentifierTable().get("_GUID"); LookupResult R(*this, GuidII, SourceLocation(), LookupTagName); LookupQualifiedName(R, Context.getTranslationUnitDecl()); MSVCGuidDecl = R.getAsSingle<RecordDecl>(); if (!MSVCGuidDecl) return ExprError(Diag(OpLoc, diag::err_need_header_before_ms_uuidof)); } QualType GuidType = Context.getTypeDeclType(MSVCGuidDecl); if (isType) { // The operand is a type; handle it as such. TypeSourceInfo *TInfo = nullptr; QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr), &TInfo); if (T.isNull()) return ExprError(); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc); return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc); } // The operand is an expression. return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc); } /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) { assert((Kind == tok::kw_true || Kind == tok::kw_false) && "Unknown C++ Boolean value!"); return new (Context) CXXBoolLiteralExpr(Kind == tok::kw_true, Context.BoolTy, OpLoc); } /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) { return new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc); } /// ActOnCXXThrow - Parse throw expressions. 
ExprResult Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) { bool IsThrownVarInScope = false; if (Ex) { // C++0x [class.copymove]p31: // When certain criteria are met, an implementation is allowed to omit the // copy/move construction of a class object [...] // // - in a throw-expression, when the operand is the name of a // non-volatile automatic object (other than a function or catch- // clause parameter) whose scope does not extend beyond the end of the // innermost enclosing try-block (if there is one), the copy/move // operation from the operand to the exception object (15.1) can be // omitted by constructing the automatic object directly into the // exception object if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens())) if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) { if (Var->hasLocalStorage() && !Var->getType().isVolatileQualified()) { for( ; S; S = S->getParent()) { if (S->isDeclScope(Var)) { IsThrownVarInScope = true; break; } if (S->getFlags() & (Scope::FnScope | Scope::ClassScope | Scope::BlockScope | Scope::FunctionPrototypeScope | Scope::ObjCMethodScope | Scope::TryScope)) break; } } } } return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope); } ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope) { // Don't report an error if 'throw' is used in system headers. if (!getLangOpts().CXXExceptions && !getSourceManager().isInSystemHeader(OpLoc)) Diag(OpLoc, diag::err_exceptions_disabled) << "throw"; if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope()) Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw"; if (Ex && !Ex->isTypeDependent()) { QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType()); if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex)) return ExprError(); // Initialize the exception result. This implicitly weeds out // abstract types or types with inaccessible copy constructors. // C++0x [class.copymove]p31: // When certain criteria are met, an implementation is allowed to omit the // copy/move construction of a class object [...] // // - in a throw-expression, when the operand is the name of a // non-volatile automatic object (other than a function or // catch-clause // parameter) whose scope does not extend beyond the end of the // innermost enclosing try-block (if there is one), the copy/move // operation from the operand to the exception object (15.1) can be // omitted by constructing the automatic object directly into the // exception object const VarDecl *NRVOVariable = nullptr; if (IsThrownVarInScope) NRVOVariable = getCopyElisionCandidate(QualType(), Ex, false); InitializedEntity Entity = InitializedEntity::InitializeException( OpLoc, ExceptionObjectTy, /*NRVO=*/NRVOVariable != nullptr); ExprResult Res = PerformMoveOrCopyInitialization( Entity, NRVOVariable, QualType(), Ex, IsThrownVarInScope); if (Res.isInvalid()) return ExprError(); Ex = Res.get(); } return new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc, IsThrownVarInScope); } static void collectPublicBases(CXXRecordDecl *RD, llvm::DenseMap<CXXRecordDecl *, unsigned> &SubobjectsSeen, llvm::SmallPtrSetImpl<CXXRecordDecl *> &VBases, llvm::SetVector<CXXRecordDecl *> &PublicSubobjectsSeen, bool ParentIsPublic) { for (const CXXBaseSpecifier &BS : RD->bases()) { CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl(); bool NewSubobject; // Virtual bases constitute the same subobject. Non-virtual bases are // always distinct subobjects. 
if (BS.isVirtual()) NewSubobject = VBases.insert(BaseDecl).second; else NewSubobject = true; if (NewSubobject) ++SubobjectsSeen[BaseDecl]; // Only add subobjects which have public access throughout the entire chain. bool PublicPath = ParentIsPublic && BS.getAccessSpecifier() == AS_public; if (PublicPath) PublicSubobjectsSeen.insert(BaseDecl); // Recurse on to each base subobject. collectPublicBases(BaseDecl, SubobjectsSeen, VBases, PublicSubobjectsSeen, PublicPath); } } static void getUnambiguousPublicSubobjects( CXXRecordDecl *RD, llvm::SmallVectorImpl<CXXRecordDecl *> &Objects) { llvm::DenseMap<CXXRecordDecl *, unsigned> SubobjectsSeen; llvm::SmallSet<CXXRecordDecl *, 2> VBases; llvm::SetVector<CXXRecordDecl *> PublicSubobjectsSeen; SubobjectsSeen[RD] = 1; PublicSubobjectsSeen.insert(RD); collectPublicBases(RD, SubobjectsSeen, VBases, PublicSubobjectsSeen, /*ParentIsPublic=*/true); for (CXXRecordDecl *PublicSubobject : PublicSubobjectsSeen) { // Skip ambiguous objects. if (SubobjectsSeen[PublicSubobject] > 1) continue; Objects.push_back(PublicSubobject); } } /// CheckCXXThrowOperand - Validate the operand of a throw. bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ExceptionObjectTy, Expr *E) { // If the type of the exception would be an incomplete type or a pointer // to an incomplete type other than (cv) void the program is ill-formed. QualType Ty = ExceptionObjectTy; bool isPointer = false; if (const PointerType* Ptr = Ty->getAs<PointerType>()) { Ty = Ptr->getPointeeType(); isPointer = true; } if (!isPointer || !Ty->isVoidType()) { if (RequireCompleteType(ThrowLoc, Ty, isPointer ? diag::err_throw_incomplete_ptr : diag::err_throw_incomplete, E->getSourceRange())) return true; if (RequireNonAbstractType(ThrowLoc, ExceptionObjectTy, diag::err_throw_abstract_type, E)) return true; } // If the exception has class type, we need additional handling. CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); if (!RD) return false; // If we are throwing a polymorphic class type or pointer thereof, // exception handling will make use of the vtable. MarkVTableUsed(ThrowLoc, RD); // If a pointer is thrown, the referenced object will not be destroyed. if (isPointer) return false; // If the class has a destructor, we must be able to call it. if (!RD->hasIrrelevantDestructor()) { if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) { MarkFunctionReferenced(E->getExprLoc(), Destructor); CheckDestructorAccess(E->getExprLoc(), Destructor, PDiag(diag::err_access_dtor_exception) << Ty); if (DiagnoseUseOfDecl(Destructor, E->getExprLoc())) return true; } } // The MSVC ABI creates a list of all types which can catch the exception // object. This list also references the appropriate copy constructor to call // if the object is caught by value and has a non-trivial copy constructor. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { // We are only interested in the public, unambiguous bases contained within // the exception object. Bases which are ambiguous or otherwise // inaccessible are not catchable types. llvm::SmallVector<CXXRecordDecl *, 2> UnambiguousPublicSubobjects; getUnambiguousPublicSubobjects(RD, UnambiguousPublicSubobjects); for (CXXRecordDecl *Subobject : UnambiguousPublicSubobjects) { // Attempt to lookup the copy constructor. Various pieces of machinery // will spring into action, like template instantiation, which means this // cannot be a simple walk of the class's decls. Instead, we must perform // lookup and overload resolution. 
CXXConstructorDecl *CD = LookupCopyingConstructor(Subobject, 0); if (!CD) continue; // Mark the constructor referenced as it is used by this throw expression. MarkFunctionReferenced(E->getExprLoc(), CD); // Skip this copy constructor if it is trivial, we don't need to record it // in the catchable type data. if (CD->isTrivial()) continue; // The copy constructor is non-trivial, create a mapping from this class // type to this constructor. // N.B. The selection of copy constructor is not sensitive to this // particular throw-site. Lookup will be performed at the catch-site to // ensure that the copy constructor is, in fact, accessible (via // friendship or any other means). Context.addCopyConstructorForExceptionObject(Subobject, CD); // We don't keep the instantiated default argument expressions around so // we must rebuild them here. for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) { // Skip any default arguments that we've already instantiated. if (Context.getDefaultArgExprForConstructor(CD, I)) continue; Expr *DefaultArg = BuildCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)).get(); Context.addDefaultArgExprForConstructor(CD, I, DefaultArg); } } } return false; } QualType Sema::getCurrentThisType() { DeclContext *DC = getFunctionLevelDeclContext(); QualType ThisTy = CXXThisTypeOverride; if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) { if (method && method->isInstance()) ThisTy = method->getThisType(Context); } if (ThisTy.isNull()) { if (isGenericLambdaCallOperatorSpecialization(CurContext) && CurContext->getParent()->getParent()->isRecord()) { // This is a generic lambda call operator that is being instantiated // within a default initializer - so use the enclosing class as 'this'. // There is no enclosing member function to retrieve the 'this' pointer // from. QualType ClassTy = Context.getTypeDeclType( cast<CXXRecordDecl>(CurContext->getParent()->getParent())); // There are no cv-qualifiers for 'this' within default initializers, // per [expr.prim.general]p4. return Context.getPointerType(ClassTy); } } return ThisTy; } Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled) : S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false) { if (!Enabled || !ContextDecl) return; CXXRecordDecl *Record = nullptr; if (ClassTemplateDecl *Template = dyn_cast<ClassTemplateDecl>(ContextDecl)) Record = Template->getTemplatedDecl(); else Record = cast<CXXRecordDecl>(ContextDecl); S.CXXThisTypeOverride = S.Context.getPointerType( S.Context.getRecordType(Record).withCVRQualifiers(CXXThisTypeQuals)); this->Enabled = true; } Sema::CXXThisScopeRAII::~CXXThisScopeRAII() { if (Enabled) { S.CXXThisTypeOverride = OldCXXThisTypeOverride; } } static Expr *captureThis(ASTContext &Context, RecordDecl *RD, QualType ThisTy, SourceLocation Loc) { FieldDecl *Field = FieldDecl::Create(Context, RD, Loc, Loc, nullptr, ThisTy, Context.getTrivialTypeSourceInfo(ThisTy, Loc), nullptr, false, ICIS_NoInit); Field->setImplicit(true); Field->setAccess(AS_private); RD->addDecl(Field); return new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit*/true); } bool Sema::CheckCXXThisCapture(SourceLocation Loc, bool Explicit, bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt) { // We don't need to capture this in an unevaluated context. if (isUnevaluatedContext() && !Explicit) return true; const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt ? 
*FunctionScopeIndexToStopAt : FunctionScopes.size() - 1; // Otherwise, check that we can capture 'this'. unsigned NumClosures = 0; for (unsigned idx = MaxFunctionScopesIndex; idx != 0; idx--) { if (CapturingScopeInfo *CSI = dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) { if (CSI->CXXThisCaptureIndex != 0) { // 'this' is already being captured; there isn't anything more to do. break; } LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI); if (LSI && isGenericLambdaCallOperatorSpecialization(LSI->CallOperator)) { // This context can't implicitly capture 'this'; fail out. if (BuildAndDiagnose) Diag(Loc, diag::err_this_capture) << Explicit; return true; } if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref || CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval || CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block || CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_CapturedRegion || Explicit) { // This closure can capture 'this'; continue looking upwards. NumClosures++; Explicit = false; continue; } // This context can't implicitly capture 'this'; fail out. if (BuildAndDiagnose) Diag(Loc, diag::err_this_capture) << Explicit; return true; } break; } if (!BuildAndDiagnose) return false; // Mark that we're implicitly capturing 'this' in all the scopes we skipped. // FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated // contexts. for (unsigned idx = MaxFunctionScopesIndex; NumClosures; --idx, --NumClosures) { CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]); Expr *ThisExpr = nullptr; QualType ThisTy = getCurrentThisType(); if (LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI)) // For lambda expressions, build a field and an initializing expression. ThisExpr = captureThis(Context, LSI->Lambda, ThisTy, Loc); else if (CapturedRegionScopeInfo *RSI = dyn_cast<CapturedRegionScopeInfo>(FunctionScopes[idx])) ThisExpr = captureThis(Context, RSI->TheRecordDecl, ThisTy, Loc); bool isNested = NumClosures > 1; CSI->addThisCapture(isNested, Loc, ThisTy, ThisExpr); } return false; } ExprResult Sema::ActOnCXXThis(SourceLocation Loc) { /// C++ 9.3.2: In the body of a non-static member function, the keyword this /// is a non-lvalue expression whose value is the address of the object for /// which the function is called. QualType ThisTy = getCurrentThisType(); if (ThisTy.isNull()) return Diag(Loc, diag::err_invalid_this_use); CheckCXXThisCapture(Loc); // HLSL Change Starts - adjust this from T* to T&-like if (getLangOpts().HLSL) { return genereateHLSLThis(Loc, ThisTy, /*isImplicit=*/false); } // HLSL Change Ends return new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/false); } // HLSL Change Begin - adjust this from T* to T&-like CXXThisExpr *Sema::genereateHLSLThis(SourceLocation Loc, QualType ThisType, bool isImplicit) { // Expressions cannot be of reference type - instead, they yield // an lvalue on the underlying type. if (ThisType->isPointerType() || ThisType->isReferenceType()) ThisType = ThisType->getPointeeType(); CXXThisExpr *ResultExpr = new (Context) CXXThisExpr(Loc, ThisType, isImplicit); ResultExpr->setValueKind(ExprValueKind::VK_LValue); return ResultExpr; } // HLSL Change End - adjust this from T* to T&-like bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) { // If we're outside the body of a member function, then we'll have a specified // type for 'this'. if (CXXThisTypeOverride.isNull()) return false; // Determine whether we're looking into a class that's currently being // defined. 
CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl(); return Class && Class->isBeingDefined(); } ExprResult Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg exprs, SourceLocation RParenLoc) { if (!TypeRep) return ExprError(); TypeSourceInfo *TInfo; QualType Ty = GetTypeFromParser(TypeRep, &TInfo); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation()); // HLSL Change Begin - Check embedded typos for CXXUnresolvedConstructExpr. ExprResult Result = BuildCXXTypeConstructExpr(TInfo, LParenLoc, exprs, RParenLoc); if (!Result.isInvalid() && isa<CXXUnresolvedConstructExpr>(Result.get())) Result = CorrectDelayedTyposInExpr(Result.get()); return Result; // HLSL Change End. } /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc) { QualType Ty = TInfo->getType(); SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc(); if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) { return CXXUnresolvedConstructExpr::Create(Context, TInfo, LParenLoc, Exprs, RParenLoc); } bool ListInitialization = LParenLoc.isInvalid(); assert((!ListInitialization || (Exprs.size() == 1 && isa<InitListExpr>(Exprs[0]))) && "List initialization must have initializer list as expression."); SourceRange FullRange = SourceRange(TyBeginLoc, ListInitialization ? Exprs[0]->getSourceRange().getEnd() : RParenLoc); // C++ [expr.type.conv]p1: // If the expression list is a single expression, the type conversion // expression is equivalent (in definedness, and if defined in meaning) to the // corresponding cast expression. if (Exprs.size() == 1 && !ListInitialization) { Expr *Arg = Exprs[0]; return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc); } QualType ElemTy = Ty; if (Ty->isArrayType()) { if (!ListInitialization) return ExprError(Diag(TyBeginLoc, diag::err_value_init_for_array_type) << FullRange); ElemTy = Context.getBaseElementType(Ty); } if (!Ty->isVoidType() && RequireCompleteType(TyBeginLoc, ElemTy, diag::err_invalid_incomplete_type_use, FullRange)) return ExprError(); if (RequireNonAbstractType(TyBeginLoc, Ty, diag::err_allocation_of_abstract_type)) return ExprError(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo); InitializationKind Kind = Exprs.size() ? ListInitialization ? InitializationKind::CreateDirectList(TyBeginLoc) : InitializationKind::CreateDirect(TyBeginLoc, LParenLoc, RParenLoc) : InitializationKind::CreateValue(TyBeginLoc, LParenLoc, RParenLoc); InitializationSequence InitSeq(*this, Entity, Kind, Exprs); ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Exprs); // HLSL Change: ListInitialization may be false here, but we may produce an expression // of that type for vector constructors bool HLSLException = getLangOpts().HLSL && !Result.isInvalid() && isa<InitListExpr>(Result.get()); if (Result.isInvalid() || (!ListInitialization && !HLSLException)) return Result; Expr *Inner = Result.get(); if (CXXBindTemporaryExpr *BTE = dyn_cast_or_null<CXXBindTemporaryExpr>(Inner)) Inner = BTE->getSubExpr(); if (!isa<CXXTemporaryObjectExpr>(Inner)) { // If we created a CXXTemporaryObjectExpr, that node also represents the // functional cast. 
Otherwise, create an explicit cast to represent // the syntactic form of a functional-style cast that was used here. // // FIXME: Creating a CXXFunctionalCastExpr around a CXXConstructExpr // would give a more consistent AST representation than using a // CXXTemporaryObjectExpr. It's also weird that the functional cast // is sometimes handled by initialization and sometimes not. QualType ResultType = Result.get()->getType(); Result = CXXFunctionalCastExpr::Create( Context, ResultType, Expr::getValueKindForType(TInfo->getType()), TInfo, CK_NoOp, Result.get(), /*Path=*/nullptr, LParenLoc, RParenLoc); } return Result; } /// doesUsualArrayDeleteWantSize - Answers whether the usual /// operator delete[] for the given type has a size_t parameter. static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc, QualType allocType) { const RecordType *record = allocType->getBaseElementTypeUnsafe()->getAs<RecordType>(); if (!record) return false; // Try to find an operator delete[] in class scope. DeclarationName deleteName = S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete); LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName); S.LookupQualifiedName(ops, record->getDecl()); // We're just doing this for information. ops.suppressDiagnostics(); // Very likely: there's no operator delete[]. if (ops.empty()) return false; // If it's ambiguous, it should be illegal to call operator delete[] // on this thing, so it doesn't matter if we allocate extra space or not. if (ops.isAmbiguous()) return false; LookupResult::Filter filter = ops.makeFilter(); while (filter.hasNext()) { NamedDecl *del = filter.next()->getUnderlyingDecl(); // C++0x [basic.stc.dynamic.deallocation]p2: // A template instance is never a usual deallocation function, // regardless of its signature. if (isa<FunctionTemplateDecl>(del)) { filter.erase(); continue; } // C++0x [basic.stc.dynamic.deallocation]p2: // If class T does not declare [an operator delete[] with one // parameter] but does declare a member deallocation function // named operator delete[] with exactly two parameters, the // second of which has type std::size_t, then this function // is a usual deallocation function. if (!cast<CXXMethodDecl>(del)->isUsualDeallocationFunction()) { filter.erase(); continue; } } filter.done(); if (!ops.isSingleResult()) return false; const FunctionDecl *del = cast<FunctionDecl>(ops.getFoundDecl()); return (del->getNumParams() == 2); } /// \brief Parsed a C++ 'new' expression (C++ 5.3.4). /// /// E.g.: /// @code new (memory) int[size][4] @endcode /// or /// @code ::new Foo(23, "hello") @endcode /// /// \param StartLoc The first location of the expression. /// \param UseGlobal True if 'new' was prefixed with '::'. /// \param PlacementLParen Opening paren of the placement arguments. /// \param PlacementArgs Placement new arguments. /// \param PlacementRParen Closing paren of the placement arguments. /// \param TypeIdParens If the type is in parens, the source range. /// \param D The type to be allocated, as well as array dimensions. /// \param Initializer The initializing expression or initializer-list, or null /// if there is none. ExprResult Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer) { bool TypeContainsAuto = D.getDeclSpec().containsPlaceholderType(); Expr *ArraySize = nullptr; // If the specified type is an array, unwrap it and save the expression. 
if (D.getNumTypeObjects() > 0 && D.getTypeObject(0).Kind == DeclaratorChunk::Array) { DeclaratorChunk &Chunk = D.getTypeObject(0); if (TypeContainsAuto) return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto) << D.getSourceRange()); if (Chunk.Arr.hasStatic) return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new) << D.getSourceRange()); if (!Chunk.Arr.NumElts) return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size) << D.getSourceRange()); ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts); D.DropFirstTypeObject(); } // Every dimension shall be of constant size. if (ArraySize) { for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) { if (D.getTypeObject(I).Kind != DeclaratorChunk::Array) break; DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr; if (Expr *NumElts = (Expr *)Array.NumElts) { if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) { if (getLangOpts().CPlusPlus14) { // C++1y [expr.new]p6: Every constant-expression in a noptr-new-declarator // shall be a converted constant expression (5.19) of type std::size_t // and shall evaluate to a strictly positive value. unsigned IntWidth = Context.getTargetInfo().getIntWidth(); assert(IntWidth && "Builtin type of size 0?"); llvm::APSInt Value(IntWidth); Array.NumElts = CheckConvertedConstantExpression(NumElts, Context.getSizeType(), Value, CCEK_NewExpr) .get(); } else { Array.NumElts = VerifyIntegerConstantExpression(NumElts, nullptr, diag::err_new_array_nonconst) .get(); } if (!Array.NumElts) return ExprError(); } } } } TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/nullptr); QualType AllocType = TInfo->getType(); if (D.isInvalidType()) return ExprError(); SourceRange DirectInitRange; if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) DirectInitRange = List->getSourceRange(); return BuildCXXNew(SourceRange(StartLoc, D.getLocEnd()), UseGlobal, PlacementLParen, PlacementArgs, PlacementRParen, TypeIdParens, AllocType, TInfo, ArraySize, DirectInitRange, Initializer, TypeContainsAuto); } static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style, Expr *Init) { if (!Init) return true; if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) return PLE->getNumExprs() == 0; if (isa<ImplicitValueInitExpr>(Init)) return true; else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) return !CCE->isListInitialization() && CCE->getConstructor()->isDefaultConstructor(); else if (Style == CXXNewExpr::ListInit) { assert(isa<InitListExpr>(Init) && "Shouldn't create list CXXConstructExprs for arrays."); return true; } return false; } ExprResult Sema::BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto) { SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange(); SourceLocation StartLoc = Range.getBegin(); CXXNewExpr::InitializationStyle initStyle; if (DirectInitRange.isValid()) { assert(Initializer && "Have parens but no initializer."); initStyle = CXXNewExpr::CallInit; } else if (Initializer && isa<InitListExpr>(Initializer)) initStyle = CXXNewExpr::ListInit; else { assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) || isa<CXXConstructExpr>(Initializer)) && "Initializer expression that cannot have been implicitly created."); initStyle = CXXNewExpr::NoInit; } Expr **Inits = 
&Initializer; unsigned NumInits = Initializer ? 1 : 0; if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) { assert(initStyle == CXXNewExpr::CallInit && "paren init for non-call init"); Inits = List->getExprs(); NumInits = List->getNumExprs(); } // C++11 [dcl.spec.auto]p6. Deduce the type which 'auto' stands in for. if (TypeMayContainAuto && AllocType->isUndeducedType()) { if (initStyle == CXXNewExpr::NoInit || NumInits == 0) return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg) << AllocType << TypeRange); if (initStyle == CXXNewExpr::ListInit || (NumInits == 1 && isa<InitListExpr>(Inits[0]))) return ExprError(Diag(Inits[0]->getLocStart(), diag::err_auto_new_list_init) << AllocType << TypeRange); if (NumInits > 1) { Expr *FirstBad = Inits[1]; return ExprError(Diag(FirstBad->getLocStart(), diag::err_auto_new_ctor_multiple_expressions) << AllocType << TypeRange); } Expr *Deduce = Inits[0]; QualType DeducedType; if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed) return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure) << AllocType << Deduce->getType() << TypeRange << Deduce->getSourceRange()); if (DeducedType.isNull()) return ExprError(); AllocType = DeducedType; } // Per C++0x [expr.new]p5, the type being constructed may be a // typedef of an array type. if (!ArraySize) { if (const ConstantArrayType *Array = Context.getAsConstantArrayType(AllocType)) { ArraySize = IntegerLiteral::Create(Context, Array->getSize(), Context.getSizeType(), TypeRange.getEnd()); AllocType = Array->getElementType(); } } if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange)) return ExprError(); if (initStyle == CXXNewExpr::ListInit && isStdInitializerList(AllocType, nullptr)) { Diag(AllocTypeInfo->getTypeLoc().getBeginLoc(), diag::warn_dangling_std_initializer_list) << /*at end of FE*/0 << Inits[0]->getSourceRange(); } // In ARC, infer 'retaining' for the allocated if (getLangOpts().ObjCAutoRefCount && AllocType.getObjCLifetime() == Qualifiers::OCL_None && AllocType->isObjCLifetimeType()) { AllocType = Context.getLifetimeQualifiedType(AllocType, AllocType->getObjCARCImplicitLifetime()); } QualType ResultType = Context.getPointerType(AllocType); if (ArraySize && ArraySize->getType()->isNonOverloadPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(ArraySize); if (result.isInvalid()) return ExprError(); ArraySize = result.get(); } // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have // integral or enumeration type with a non-negative value." // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped // enumeration type, or a class type for which a single non-explicit // conversion function to integral or unscoped enumeration type exists. // C++1y [expr.new]p6: The expression [...] is implicitly converted to // std::size_t. if (ArraySize && !ArraySize->isTypeDependent()) { ExprResult ConvertedSize; if (getLangOpts().CPlusPlus14) { assert(Context.getTargetInfo().getIntWidth() && "Builtin type of size 0?"); ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(), AA_Converting); if (!ConvertedSize.isInvalid() && ArraySize->getType()->getAs<RecordType>()) // Diagnose the compatibility of this conversion. 
Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion) << ArraySize->getType() << 0 << "'size_t'"; } else { class SizeConvertDiagnoser : public ICEConvertDiagnoser { protected: Expr *ArraySize; public: SizeConvertDiagnoser(Expr *ArraySize) : ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false), ArraySize(ArraySize) {} SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_array_size_not_integral) << S.getLangOpts().CPlusPlus11 << T; } SemaDiagnosticBuilder diagnoseIncomplete( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_array_size_incomplete_type) << T << ArraySize->getSourceRange(); } SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, diag::err_array_size_explicit_conversion) << T << ConvTy; } SemaDiagnosticBuilder noteExplicitConv( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_array_size_conversion) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseAmbiguous( Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_array_size_ambiguous_conversion) << T; } SemaDiagnosticBuilder noteAmbiguous( Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_array_size_conversion) << ConvTy->isEnumeralType() << ConvTy; } SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, S.getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_array_size_conversion : diag::ext_array_size_conversion) << T << ConvTy->isEnumeralType() << ConvTy; } } SizeDiagnoser(ArraySize); ConvertedSize = PerformContextualImplicitConversion(StartLoc, ArraySize, SizeDiagnoser); } if (ConvertedSize.isInvalid()) return ExprError(); ArraySize = ConvertedSize.get(); QualType SizeType = ArraySize->getType(); if (!SizeType->isIntegralOrUnscopedEnumerationType()) return ExprError(); // C++98 [expr.new]p7: // The expression in a direct-new-declarator shall have integral type // with a non-negative value. // // Let's see if this is a constant < 0. If so, we reject it out of // hand. Otherwise, if it's not a constant, we must have an unparenthesized // array type. // // Note: such a construct has well-defined semantics in C++11: it throws // std::bad_array_new_length. if (!ArraySize->isValueDependent()) { llvm::APSInt Value; // We've already performed any required implicit conversion to integer or // unscoped enumeration type. 
if (ArraySize->isIntegerConstantExpr(Value, Context)) { if (Value < llvm::APSInt( llvm::APInt::getNullValue(Value.getBitWidth()), Value.isUnsigned())) { if (getLangOpts().CPlusPlus11) Diag(ArraySize->getLocStart(), diag::warn_typecheck_negative_array_new_size) << ArraySize->getSourceRange(); else return ExprError(Diag(ArraySize->getLocStart(), diag::err_typecheck_negative_array_size) << ArraySize->getSourceRange()); } else if (!AllocType->isDependentType()) { unsigned ActiveSizeBits = ConstantArrayType::getNumAddressingBits(Context, AllocType, Value); if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) { if (getLangOpts().CPlusPlus11) Diag(ArraySize->getLocStart(), diag::warn_array_new_too_large) << Value.toString(10) << ArraySize->getSourceRange(); else return ExprError(Diag(ArraySize->getLocStart(), diag::err_array_too_large) << Value.toString(10) << ArraySize->getSourceRange()); } } } else if (TypeIdParens.isValid()) { // Can't have dynamic array size when the type-id is in parentheses. Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst) << ArraySize->getSourceRange() << FixItHint::CreateRemoval(TypeIdParens.getBegin()) << FixItHint::CreateRemoval(TypeIdParens.getEnd()); TypeIdParens = SourceRange(); } } // Note that we do *not* convert the argument in any way. It can // be signed, larger than size_t, whatever. } FunctionDecl *OperatorNew = nullptr; FunctionDecl *OperatorDelete = nullptr; if (!AllocType->isDependentType() && !Expr::hasAnyTypeDependentArguments(PlacementArgs) && FindAllocationFunctions(StartLoc, SourceRange(PlacementLParen, PlacementRParen), UseGlobal, AllocType, ArraySize, PlacementArgs, OperatorNew, OperatorDelete)) return ExprError(); // If this is an array allocation, compute whether the usual array // deallocation function for the type has a size_t parameter. bool UsualArrayDeleteWantsSize = false; if (ArraySize && !AllocType->isDependentType()) UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType); SmallVector<Expr *, 8> AllPlaceArgs; if (OperatorNew) { const FunctionProtoType *Proto = OperatorNew->getType()->getAs<FunctionProtoType>(); VariadicCallType CallType = Proto->isVariadic() ? VariadicFunction : VariadicDoesNotApply; // We've already converted the placement args, just fill in any default // arguments. Skip the first parameter because we don't have a corresponding // argument. if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto, 1, PlacementArgs, AllPlaceArgs, CallType)) return ExprError(); if (!AllPlaceArgs.empty()) PlacementArgs = AllPlaceArgs; // FIXME: This is wrong: PlacementArgs misses out the first (size) argument. DiagnoseSentinelCalls(OperatorNew, PlacementLParen, PlacementArgs); // FIXME: Missing call to CheckFunctionCall or equivalent } // Warn if the type is over-aligned and is being allocated by global operator // new. if (PlacementArgs.empty() && OperatorNew && (OperatorNew->isImplicit() || getSourceManager().isInSystemHeader(OperatorNew->getLocStart()))) { if (unsigned Align = Context.getPreferredTypeAlign(AllocType.getTypePtr())){ unsigned SuitableAlign = Context.getTargetInfo().getSuitableAlign(); if (Align > SuitableAlign) Diag(StartLoc, diag::warn_overaligned_type) << AllocType << unsigned(Align / Context.getCharWidth()) << unsigned(SuitableAlign / Context.getCharWidth()); } } QualType InitType = AllocType; // Array 'new' can't have any initializers except empty parentheses. // Initializer lists are also allowed, in C++11. 
Rely on the parser for the // dialect distinction. if (ResultType->isArrayType() || ArraySize) { if (!isLegalArrayNewInitializer(initStyle, Initializer)) { SourceRange InitRange(Inits[0]->getLocStart(), Inits[NumInits - 1]->getLocEnd()); Diag(StartLoc, diag::err_new_array_init_args) << InitRange; return ExprError(); } if (InitListExpr *ILE = dyn_cast_or_null<InitListExpr>(Initializer)) { // We do the initialization typechecking against the array type // corresponding to the number of initializers + 1 (to also check // default-initialization). unsigned NumElements = ILE->getNumInits() + 1; InitType = Context.getConstantArrayType(AllocType, llvm::APInt(Context.getTypeSize(Context.getSizeType()), NumElements), ArrayType::Normal, 0); } } // If we can perform the initialization, and we've not already done so, // do it now. if (!AllocType->isDependentType() && !Expr::hasAnyTypeDependentArguments( llvm::makeArrayRef(Inits, NumInits))) { // C++11 [expr.new]p15: // A new-expression that creates an object of type T initializes that // object as follows: InitializationKind Kind // - If the new-initializer is omitted, the object is default- // initialized (8.5); if no initialization is performed, // the object has indeterminate value = initStyle == CXXNewExpr::NoInit ? InitializationKind::CreateDefault(TypeRange.getBegin()) // - Otherwise, the new-initializer is interpreted according to the // initialization rules of 8.5 for direct-initialization. : initStyle == CXXNewExpr::ListInit ? InitializationKind::CreateDirectList(TypeRange.getBegin()) : InitializationKind::CreateDirect(TypeRange.getBegin(), DirectInitRange.getBegin(), DirectInitRange.getEnd()); InitializedEntity Entity = InitializedEntity::InitializeNew(StartLoc, InitType); InitializationSequence InitSeq(*this, Entity, Kind, MultiExprArg(Inits, NumInits)); ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind, MultiExprArg(Inits, NumInits)); if (FullInit.isInvalid()) return ExprError(); // FullInit is our initializer; strip off CXXBindTemporaryExprs, because // we don't want the initialized object to be destructed. if (CXXBindTemporaryExpr *Binder = dyn_cast_or_null<CXXBindTemporaryExpr>(FullInit.get())) FullInit = Binder->getSubExpr(); Initializer = FullInit.get(); } // Mark the new and delete operators as referenced. if (OperatorNew) { if (DiagnoseUseOfDecl(OperatorNew, StartLoc)) return ExprError(); MarkFunctionReferenced(StartLoc, OperatorNew); } if (OperatorDelete) { if (DiagnoseUseOfDecl(OperatorDelete, StartLoc)) return ExprError(); MarkFunctionReferenced(StartLoc, OperatorDelete); } // C++0x [expr.new]p17: // If the new expression creates an array of objects of class type, // access and ambiguity control are done for the destructor. QualType BaseAllocType = Context.getBaseElementType(AllocType); if (ArraySize && !BaseAllocType->isDependentType()) { if (const RecordType *BaseRecordType = BaseAllocType->getAs<RecordType>()) { if (CXXDestructorDecl *dtor = LookupDestructor( cast<CXXRecordDecl>(BaseRecordType->getDecl()))) { MarkFunctionReferenced(StartLoc, dtor); CheckDestructorAccess(StartLoc, dtor, PDiag(diag::err_access_dtor) << BaseAllocType); if (DiagnoseUseOfDecl(dtor, StartLoc)) return ExprError(); } } } return new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew, OperatorDelete, UsualArrayDeleteWantsSize, PlacementArgs, TypeIdParens, ArraySize, initStyle, Initializer, ResultType, AllocTypeInfo, Range, DirectInitRange); } /// \brief Checks that a type is suitable as the allocated type /// in a new-expression. 
bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R) { // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an // abstract class type or array thereof. if (AllocType->isFunctionType()) return Diag(Loc, diag::err_bad_new_type) << AllocType << 0 << R; else if (AllocType->isReferenceType()) return Diag(Loc, diag::err_bad_new_type) << AllocType << 1 << R; else if (!AllocType->isDependentType() && RequireCompleteType(Loc, AllocType, diag::err_new_incomplete_type,R)) return true; else if (RequireNonAbstractType(Loc, AllocType, diag::err_allocation_of_abstract_type)) return true; else if (AllocType->isVariablyModifiedType()) return Diag(Loc, diag::err_variably_modified_new_type) << AllocType; else if (unsigned AddressSpace = AllocType.getAddressSpace()) return Diag(Loc, diag::err_address_space_qualified_new) << AllocType.getUnqualifiedType() << AddressSpace; else if (getLangOpts().ObjCAutoRefCount) { if (const ArrayType *AT = Context.getAsArrayType(AllocType)) { QualType BaseAllocType = Context.getBaseElementType(AT); if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None && BaseAllocType->isObjCLifetimeType()) return Diag(Loc, diag::err_arc_new_array_without_ownership) << BaseAllocType; } } return false; } /// \brief Determine whether the given function is a non-placement /// deallocation function. static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) { if (FD->isInvalidDecl()) return false; if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD)) return Method->isUsualDeallocationFunction(); if (FD->getOverloadedOperator() != OO_Delete && FD->getOverloadedOperator() != OO_Array_Delete) return false; if (FD->getNumParams() == 1) return true; return S.getLangOpts().SizedDeallocation && FD->getNumParams() == 2 && S.Context.hasSameUnqualifiedType(FD->getParamDecl(1)->getType(), S.Context.getSizeType()); } /// FindAllocationFunctions - Finds the overloads of operator new and delete /// that are appropriate for the allocation. bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete) { // HLSL Change Starts Here // No support for new and delete operators. return false; #if 0 // HLSL Change Ends Here // --- Choosing an allocation function --- // C++ 5.3.4p8 - 14 & 18 // 1) If UseGlobal is true, only look in the global scope. Else, also look // in the scope of the allocated class. // 2) If an array size is given, look for operator new[], else look for // operator new. // 3) The first argument is always size_t. Append the arguments from the // placement form. SmallVector<Expr*, 8> AllocArgs(1 + PlaceArgs.size()); // We don't care about the actual value of this argument. // FIXME: Should the Sema create the expression and embed it in the syntax // tree? Or should the consumer just recalculate the value? IntegerLiteral Size(Context, llvm::APInt::getNullValue( Context.getTargetInfo().getPointerWidth(0)), Context.getSizeType(), SourceLocation()); AllocArgs[0] = &Size; std::copy(PlaceArgs.begin(), PlaceArgs.end(), AllocArgs.begin() + 1); // C++ [expr.new]p8: // If the allocated type is a non-array type, the allocation // function's name is operator new and the deallocation function's // name is operator delete. If the allocated type is an array // type, the allocation function's name is operator new[] and the // deallocation function's name is operator delete[]. 
DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName( IsArray ? OO_Array_New : OO_New); DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName( IsArray ? OO_Array_Delete : OO_Delete); QualType AllocElemType = Context.getBaseElementType(AllocType); if (AllocElemType->isRecordType() && !UseGlobal) { CXXRecordDecl *Record = cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl()); if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, Record, /*AllowMissing=*/true, OperatorNew)) return true; } if (!OperatorNew) { // Didn't find a member overload. Look for a global one. DeclareGlobalNewDelete(); DeclContext *TUDecl = Context.getTranslationUnitDecl(); bool FallbackEnabled = IsArray && Context.getLangOpts().MSVCCompat; if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl, /*AllowMissing=*/FallbackEnabled, OperatorNew, /*Diagnose=*/!FallbackEnabled)) { if (!FallbackEnabled) return true; // MSVC will fall back on trying to find a matching global operator new // if operator new[] cannot be found. Also, MSVC will leak by not // generating a call to operator delete or operator delete[], but we // will not replicate that bug. NewName = Context.DeclarationNames.getCXXOperatorName(OO_New); DeleteName = Context.DeclarationNames.getCXXOperatorName(OO_Delete); if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl, /*AllowMissing=*/false, OperatorNew)) return true; } } // We don't need an operator delete if we're running under // -fno-exceptions. if (!getLangOpts().Exceptions) { OperatorDelete = nullptr; return false; } // C++ [expr.new]p19: // // If the new-expression begins with a unary :: operator, the // deallocation function's name is looked up in the global // scope. Otherwise, if the allocated type is a class type T or an // array thereof, the deallocation function's name is looked up in // the scope of T. If this lookup fails to find the name, or if // the allocated type is not a class type or array thereof, the // deallocation function's name is looked up in the global scope. LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName); if (AllocElemType->isRecordType() && !UseGlobal) { CXXRecordDecl *RD = cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl()); LookupQualifiedName(FoundDelete, RD); } if (FoundDelete.isAmbiguous()) return true; // FIXME: clean up expressions? if (FoundDelete.empty()) { DeclareGlobalNewDelete(); LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl()); } FoundDelete.suppressDiagnostics(); SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches; // Whether we're looking for a placement operator delete is dictated // by whether we selected a placement operator new, not by whether // we had explicit placement arguments. This matters for things like // struct A { void *operator new(size_t, int = 0); ... }; // A *a = new A() bool isPlacementNew = (!PlaceArgs.empty() || OperatorNew->param_size() != 1); if (isPlacementNew) { // C++ [expr.new]p20: // A declaration of a placement deallocation function matches the // declaration of a placement allocation function if it has the // same number of parameters and, after parameter transformations // (8.3.5), all parameter types except the first are // identical. [...] // // To perform this comparison, we compute the function type that // the deallocation function should have, and use that type both // for template argument deduction and for comparison purposes. 
// // FIXME: this comparison should ignore CC and the like. QualType ExpectedFunctionType; { const FunctionProtoType *Proto = OperatorNew->getType()->getAs<FunctionProtoType>(); SmallVector<QualType, 4> ArgTypes; ArgTypes.push_back(Context.VoidPtrTy); for (unsigned I = 1, N = Proto->getNumParams(); I < N; ++I) ArgTypes.push_back(Proto->getParamType(I)); FunctionProtoType::ExtProtoInfo EPI; EPI.Variadic = Proto->isVariadic(); ExpectedFunctionType = Context.getFunctionType(Context.VoidTy, ArgTypes, EPI); } for (LookupResult::iterator D = FoundDelete.begin(), DEnd = FoundDelete.end(); D != DEnd; ++D) { FunctionDecl *Fn = nullptr; if (FunctionTemplateDecl *FnTmpl = dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) { // Perform template argument deduction to try to match the // expected function type. TemplateDeductionInfo Info(StartLoc); if (DeduceTemplateArguments(FnTmpl, nullptr, ExpectedFunctionType, Fn, Info)) continue; } else Fn = cast<FunctionDecl>((*D)->getUnderlyingDecl()); if (Context.hasSameType(Fn->getType(), ExpectedFunctionType)) Matches.push_back(std::make_pair(D.getPair(), Fn)); } } else { // C++ [expr.new]p20: // [...] Any non-placement deallocation function matches a // non-placement allocation function. [...] for (LookupResult::iterator D = FoundDelete.begin(), DEnd = FoundDelete.end(); D != DEnd; ++D) { if (FunctionDecl *Fn = dyn_cast<FunctionDecl>((*D)->getUnderlyingDecl())) if (isNonPlacementDeallocationFunction(*this, Fn)) Matches.push_back(std::make_pair(D.getPair(), Fn)); } // C++1y [expr.new]p22: // For a non-placement allocation function, the normal deallocation // function lookup is used // C++1y [expr.delete]p?: // If [...] deallocation function lookup finds both a usual deallocation // function with only a pointer parameter and a usual deallocation // function with both a pointer parameter and a size parameter, then the // selected deallocation function shall be the one with two parameters. // Otherwise, the selected deallocation function shall be the function // with one parameter. if (getLangOpts().SizedDeallocation && Matches.size() == 2) { if (Matches[0].second->getNumParams() == 1) Matches.erase(Matches.begin()); else Matches.erase(Matches.begin() + 1); assert(Matches[0].second->getNumParams() == 2 && "found an unexpected usual deallocation function"); } } // C++ [expr.new]p20: // [...] If the lookup finds a single matching deallocation // function, that function will be called; otherwise, no // deallocation function will be called. if (Matches.size() == 1) { OperatorDelete = Matches[0].second; // C++0x [expr.new]p20: // If the lookup finds the two-parameter form of a usual // deallocation function (3.7.4.2) and that function, considered // as a placement deallocation function, would have been // selected as a match for the allocation function, the program // is ill-formed. if (!PlaceArgs.empty() && getLangOpts().CPlusPlus11 && isNonPlacementDeallocationFunction(*this, OperatorDelete)) { Diag(StartLoc, diag::err_placement_new_non_placement_delete) << SourceRange(PlaceArgs.front()->getLocStart(), PlaceArgs.back()->getLocEnd()); if (!OperatorDelete->isImplicit()) Diag(OperatorDelete->getLocation(), diag::note_previous_decl) << DeleteName; } else { CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(), Matches[0].first); } } return false; #endif // HLSL Change } /// \brief Find an fitting overload for the allocation function /// in the specified scope. /// /// \param StartLoc The location of the 'new' token. 
/// \param Range The range of the placement arguments. /// \param Name The name of the function ('operator new' or 'operator new[]'). /// \param Args The placement arguments specified. /// \param Ctx The scope in which we should search; either a class scope or the /// translation unit. /// \param AllowMissing If \c true, report an error if we can't find any /// allocation functions. Otherwise, succeed but don't fill in \p /// Operator. /// \param Operator Filled in with the found allocation function. Unchanged if /// no allocation function was found. /// \param Diagnose If \c true, issue errors if the allocation function is not /// usable. bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose) { LookupResult R(*this, Name, StartLoc, LookupOrdinaryName); LookupQualifiedName(R, Ctx); if (R.empty()) { if (AllowMissing || !Diagnose) return false; return Diag(StartLoc, diag::err_ovl_no_viable_function_in_call) << Name << Range; } if (R.isAmbiguous()) return true; R.suppressDiagnostics(); OverloadCandidateSet Candidates(StartLoc, OverloadCandidateSet::CSK_Normal); for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end(); Alloc != AllocEnd; ++Alloc) { // Even member operator new/delete are implicitly treated as // static, so don't use AddMemberCandidate. NamedDecl *D = (*Alloc)->getUnderlyingDecl(); if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) { AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(), /*ExplicitTemplateArgs=*/nullptr, Args, Candidates, /*SuppressUserConversions=*/false); continue; } FunctionDecl *Fn = cast<FunctionDecl>(D); AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates, /*SuppressUserConversions=*/false); } // Do the resolution. OverloadCandidateSet::iterator Best; switch (Candidates.BestViableFunction(*this, StartLoc, Best)) { case OR_Success: { // Got one! FunctionDecl *FnDecl = Best->Function; if (CheckAllocationAccess(StartLoc, Range, R.getNamingClass(), Best->FoundDecl, Diagnose) == AR_inaccessible) return true; Operator = FnDecl; return false; } case OR_No_Viable_Function: if (Diagnose) { Diag(StartLoc, diag::err_ovl_no_viable_function_in_call) << Name << Range; Candidates.NoteCandidates(*this, OCD_AllCandidates, Args); } return true; case OR_Ambiguous: if (Diagnose) { Diag(StartLoc, diag::err_ovl_ambiguous_call) << Name << Range; Candidates.NoteCandidates(*this, OCD_ViableCandidates, Args); } return true; case OR_Deleted: { if (Diagnose) { Diag(StartLoc, diag::err_ovl_deleted_call) << Best->Function->isDeleted() << Name << getDeletedOrUnavailableSuffix(Best->Function) << Range; Candidates.NoteCandidates(*this, OCD_AllCandidates, Args); } return true; } } llvm_unreachable("Unreachable, bad result from BestViableFunction"); } /// DeclareGlobalNewDelete - Declare the global forms of operator new and /// delete. 
These are: /// @code /// // C++03: /// void* operator new(std::size_t) throw(std::bad_alloc); /// void* operator new[](std::size_t) throw(std::bad_alloc); /// void operator delete(void *) throw(); /// void operator delete[](void *) throw(); /// // C++11: /// void* operator new(std::size_t); /// void* operator new[](std::size_t); /// void operator delete(void *) noexcept; /// void operator delete[](void *) noexcept; /// // C++1y: /// void* operator new(std::size_t); /// void* operator new[](std::size_t); /// void operator delete(void *) noexcept; /// void operator delete[](void *) noexcept; /// void operator delete(void *, std::size_t) noexcept; /// void operator delete[](void *, std::size_t) noexcept; /// @endcode /// Note that the placement and nothrow forms of new are *not* implicitly /// declared. Their use requires including \<new\>. void Sema::DeclareGlobalNewDelete() { if (GlobalNewDeleteDeclared) return; // C++ [basic.std.dynamic]p2: // [...] The following allocation and deallocation functions (18.4) are // implicitly declared in global scope in each translation unit of a // program // // C++03: // void* operator new(std::size_t) throw(std::bad_alloc); // void* operator new[](std::size_t) throw(std::bad_alloc); // void operator delete(void*) throw(); // void operator delete[](void*) throw(); // C++11: // void* operator new(std::size_t); // void* operator new[](std::size_t); // void operator delete(void*) noexcept; // void operator delete[](void*) noexcept; // C++1y: // void* operator new(std::size_t); // void* operator new[](std::size_t); // void operator delete(void*) noexcept; // void operator delete[](void*) noexcept; // void operator delete(void*, std::size_t) noexcept; // void operator delete[](void*, std::size_t) noexcept; // // These implicit declarations introduce only the function names operator // new, operator new[], operator delete, operator delete[]. // // Here, we need to refer to std::bad_alloc, so we will implicitly declare // "std" or "bad_alloc" as necessary to form the exception specification. // However, we do not make these implicit declarations visible to name // lookup. if (!StdBadAlloc && !getLangOpts().CPlusPlus11) { // The "std::bad_alloc" class has not yet been declared, so build it // implicitly. 
StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class, getOrCreateStdNamespace(), SourceLocation(), SourceLocation(), &PP.getIdentifierTable().get("bad_alloc"), nullptr); getStdBadAlloc()->setImplicit(true); } GlobalNewDeleteDeclared = true; QualType VoidPtr = Context.getPointerType(Context.VoidTy); QualType SizeT = Context.getSizeType(); bool AssumeSaneOperatorNew = getLangOpts().AssumeSaneOperatorNew; DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_New), VoidPtr, SizeT, QualType(), AssumeSaneOperatorNew); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Array_New), VoidPtr, SizeT, QualType(), AssumeSaneOperatorNew); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Delete), Context.VoidTy, VoidPtr); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete), Context.VoidTy, VoidPtr); if (getLangOpts().SizedDeallocation) { DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Delete), Context.VoidTy, VoidPtr, Context.getSizeType()); DeclareGlobalAllocationFunction( Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete), Context.VoidTy, VoidPtr, Context.getSizeType()); } } /// DeclareGlobalAllocationFunction - Declares a single implicit global /// allocation function if it doesn't already exist. void Sema::DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2, bool AddRestrictAttr) { // HLSL Change Starts Here // No support for new and delete operators. llvm_unreachable("no support for new and delete in HLSL"); #if 0 // HLSL Change Ends Here DeclContext *GlobalCtx = Context.getTranslationUnitDecl(); unsigned NumParams = Param2.isNull() ? 1 : 2; // Check if this function is already declared. DeclContext::lookup_result R = GlobalCtx->lookup(Name); for (DeclContext::lookup_iterator Alloc = R.begin(), AllocEnd = R.end(); Alloc != AllocEnd; ++Alloc) { // Only look at non-template functions, as it is the predefined, // non-templated allocation function we are trying to declare here. if (FunctionDecl *Func = dyn_cast<FunctionDecl>(*Alloc)) { if (Func->getNumParams() == NumParams) { QualType InitialParam1Type = Context.getCanonicalType(Func->getParamDecl(0) ->getType().getUnqualifiedType()); QualType InitialParam2Type = NumParams == 2 ? Context.getCanonicalType(Func->getParamDecl(1) ->getType().getUnqualifiedType()) : QualType(); // FIXME: Do we need to check for default arguments here? if (InitialParam1Type == Param1 && (NumParams == 1 || InitialParam2Type == Param2)) { if (AddRestrictAttr && !Func->hasAttr<RestrictAttr>()) Func->addAttr(RestrictAttr::CreateImplicit( Context, RestrictAttr::GNU_malloc)); // Make the function visible to name lookup, even if we found it in // an unimported module. It either is an implicitly-declared global // allocation function, or is suppressing that function. Func->setHidden(false); return; } } } } FunctionProtoType::ExtProtoInfo EPI; QualType BadAllocType; bool HasBadAllocExceptionSpec = (Name.getCXXOverloadedOperator() == OO_New || Name.getCXXOverloadedOperator() == OO_Array_New); if (HasBadAllocExceptionSpec) { if (!getLangOpts().CPlusPlus11) { BadAllocType = Context.getTypeDeclType(getStdBadAlloc()); assert(StdBadAlloc && "Must have std::bad_alloc declared"); EPI.ExceptionSpec.Type = EST_Dynamic; EPI.ExceptionSpec.Exceptions = llvm::makeArrayRef(BadAllocType); } } else { EPI.ExceptionSpec = getLangOpts().CPlusPlus11 ? 
EST_BasicNoexcept : EST_DynamicNone; } QualType Params[] = { Param1, Param2 }; QualType FnType = Context.getFunctionType( Return, llvm::makeArrayRef(Params, NumParams), EPI); FunctionDecl *Alloc = FunctionDecl::Create(Context, GlobalCtx, SourceLocation(), SourceLocation(), Name, FnType, /*TInfo=*/nullptr, SC_None, false, true); Alloc->setImplicit(); // Implicit sized deallocation functions always have default visibility. Alloc->addAttr(VisibilityAttr::CreateImplicit(Context, VisibilityAttr::Default)); if (AddRestrictAttr) Alloc->addAttr( RestrictAttr::CreateImplicit(Context, RestrictAttr::GNU_malloc)); ParmVarDecl *ParamDecls[2]; for (unsigned I = 0; I != NumParams; ++I) { ParamDecls[I] = ParmVarDecl::Create(Context, Alloc, SourceLocation(), SourceLocation(), nullptr, Params[I], /*TInfo=*/nullptr, SC_None, nullptr); ParamDecls[I]->setImplicit(); } Alloc->setParams(llvm::makeArrayRef(ParamDecls, NumParams)); Context.getTranslationUnitDecl()->addDecl(Alloc); IdResolver.tryAddTopLevelDecl(Alloc, Name); #endif // HLSL Change } FunctionDecl *Sema::FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name) { DeclareGlobalNewDelete(); LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName); LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl()); // C++ [expr.new]p20: // [...] Any non-placement deallocation function matches a // non-placement allocation function. [...] llvm::SmallVector<FunctionDecl*, 2> Matches; for (LookupResult::iterator D = FoundDelete.begin(), DEnd = FoundDelete.end(); D != DEnd; ++D) { if (FunctionDecl *Fn = dyn_cast<FunctionDecl>(*D)) if (isNonPlacementDeallocationFunction(*this, Fn)) Matches.push_back(Fn); } // C++1y [expr.delete]p?: // If the type is complete and deallocation function lookup finds both a // usual deallocation function with only a pointer parameter and a usual // deallocation function with both a pointer parameter and a size // parameter, then the selected deallocation function shall be the one // with two parameters. Otherwise, the selected deallocation function // shall be the function with one parameter. if (getLangOpts().SizedDeallocation && Matches.size() == 2) { unsigned NumArgs = CanProvideSize ? 2 : 1; if (Matches[0]->getNumParams() != NumArgs) Matches.erase(Matches.begin()); else Matches.erase(Matches.begin() + 1); assert(Matches[0]->getNumParams() == NumArgs && "found an unexpected usual deallocation function"); } assert(Matches.size() == 1 && "unexpectedly have multiple usual deallocation functions"); return Matches.front(); } bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose) { LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName); // Try to find operator delete/operator delete[] in class scope. LookupQualifiedName(Found, RD); if (Found.isAmbiguous()) return true; Found.suppressDiagnostics(); SmallVector<DeclAccessPair,4> Matches; for (LookupResult::iterator F = Found.begin(), FEnd = Found.end(); F != FEnd; ++F) { NamedDecl *ND = (*F)->getUnderlyingDecl(); // Ignore template operator delete members from the check for a usual // deallocation function. if (isa<FunctionTemplateDecl>(ND)) continue; if (cast<CXXMethodDecl>(ND)->isUsualDeallocationFunction()) Matches.push_back(F.getPair()); } // There's exactly one suitable operator; pick it. 
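  // For illustration (editor's sketch, not from the original comments): a
  // class that declares both usual forms, e.g.
  //   struct S {
  //     void operator delete(void *);
  //     void operator delete(void *, std::size_t);
  //   };
  // can contribute more than one entry to Matches; the branches below pick
  // the single match or diagnose the lookup as ambiguous.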
if (Matches.size() == 1) { Operator = cast<CXXMethodDecl>(Matches[0]->getUnderlyingDecl()); if (Operator->isDeleted()) { if (Diagnose) { Diag(StartLoc, diag::err_deleted_function_use); NoteDeletedFunction(Operator); } return true; } if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(), Matches[0], Diagnose) == AR_inaccessible) return true; return false; // We found multiple suitable operators; complain about the ambiguity. } else if (!Matches.empty()) { if (Diagnose) { Diag(StartLoc, diag::err_ambiguous_suitable_delete_member_function_found) << Name << RD; for (SmallVectorImpl<DeclAccessPair>::iterator F = Matches.begin(), FEnd = Matches.end(); F != FEnd; ++F) Diag((*F)->getUnderlyingDecl()->getLocation(), diag::note_member_declared_here) << Name; } return true; } // We did find operator delete/operator delete[] declarations, but // none of them were suitable. if (!Found.empty()) { if (Diagnose) { Diag(StartLoc, diag::err_no_suitable_delete_member_function_found) << Name << RD; for (LookupResult::iterator F = Found.begin(), FEnd = Found.end(); F != FEnd; ++F) Diag((*F)->getUnderlyingDecl()->getLocation(), diag::note_member_declared_here) << Name; } return true; } Operator = nullptr; return false; } namespace { /// \brief Checks whether delete-expression, and new-expression used for /// initializing deletee have the same array form. class MismatchingNewDeleteDetector { public: enum MismatchResult { /// Indicates that there is no mismatch or a mismatch cannot be proven. NoMismatch, /// Indicates that variable is initialized with mismatching form of \a new. VarInitMismatches, /// Indicates that member is initialized with mismatching form of \a new. MemberInitMismatches, /// Indicates that 1 or more constructors' definitions could not been /// analyzed, and they will be checked again at the end of translation unit. AnalyzeLater }; /// \param EndOfTU True, if this is the final analysis at the end of /// translation unit. False, if this is the initial analysis at the point /// delete-expression was encountered. explicit MismatchingNewDeleteDetector(bool EndOfTU) : IsArrayForm(false), Field(nullptr), EndOfTU(EndOfTU), HasUndefinedConstructors(false) {} /// \brief Checks whether pointee of a delete-expression is initialized with /// matching form of new-expression. /// /// If return value is \c VarInitMismatches or \c MemberInitMismatches at the /// point where delete-expression is encountered, then a warning will be /// issued immediately. If return value is \c AnalyzeLater at the point where /// delete-expression is seen, then member will be analyzed at the end of /// translation unit. \c AnalyzeLater is returned iff at least one constructor /// couldn't be analyzed. If at least one constructor initializes the member /// with matching type of new, the return value is \c NoMismatch. MismatchResult analyzeDeleteExpr(const CXXDeleteExpr *DE); /// \brief Analyzes a class member. /// \param Field Class member to analyze. /// \param DeleteWasArrayForm Array form-ness of the delete-expression used /// for deleting the \p Field. MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm); /// List of mismatching new-expressions used for initialization of the pointee llvm::SmallVector<const CXXNewExpr *, 4> NewExprs; /// Indicates whether delete-expression was in array form. bool IsArrayForm; FieldDecl *Field; private: const bool EndOfTU; /// \brief Indicates that there is at least one constructor without body. 
  bool HasUndefinedConstructors;
  /// \brief Returns the \c CXXNewExpr from the given initialization
  /// expression.
  /// \param E Expression used for initializing the pointee in a
  /// delete-expression. E can be a single-element \c InitListExpr consisting
  /// of a new-expression.
  const CXXNewExpr *getNewExprFromInitListOrExpr(const Expr *E);
  /// \brief Returns whether the member is initialized with a mismatching form
  /// of \c new, either by a member initializer or by in-class initialization.
  ///
  /// If the bodies of all constructors are not visible at the end of the
  /// translation unit, or at least one constructor initializes the member
  /// with the matching form of \c new, a mismatch cannot be proven, and this
  /// function will return \c NoMismatch.
  MismatchResult analyzeMemberExpr(const MemberExpr *ME);
  /// \brief Returns whether the variable is initialized with a mismatching
  /// form of \c new.
  ///
  /// If the variable is initialized with the matching form of \c new, or is
  /// not initialized with a \c new expression at all, this function returns
  /// true. If the variable is initialized with a mismatching form of \c new,
  /// it returns false.
  /// \param D Variable to analyze.
  bool hasMatchingVarInit(const DeclRefExpr *D);
  /// \brief Checks whether the constructor initializes the pointee with a
  /// mismatching form of \c new.
  ///
  /// Returns true if the member is initialized with the matching form of
  /// \c new in the member initializer list. Returns false if the member is
  /// initialized with a mismatching form of \c new, if the constructor isn't
  /// defined at the point where the delete-expression is seen, or if the
  /// member isn't initialized by the constructor.
  bool hasMatchingNewInCtor(const CXXConstructorDecl *CD);
  /// \brief Checks whether the member is initialized with the matching form
  /// of \c new in the member initializer list.
  bool hasMatchingNewInCtorInit(const CXXCtorInitializer *CI);
  /// Checks whether the member is initialized with a mismatching form of
  /// \c new by the in-class initializer.
MismatchResult analyzeInClassInitializer(); }; } MismatchingNewDeleteDetector::MismatchResult MismatchingNewDeleteDetector::analyzeDeleteExpr(const CXXDeleteExpr *DE) { NewExprs.clear(); assert(DE && "Expected delete-expression"); IsArrayForm = DE->isArrayForm(); const Expr *E = DE->getArgument()->IgnoreParenImpCasts(); if (const MemberExpr *ME = dyn_cast<const MemberExpr>(E)) { return analyzeMemberExpr(ME); } else if (const DeclRefExpr *D = dyn_cast<const DeclRefExpr>(E)) { if (!hasMatchingVarInit(D)) return VarInitMismatches; } return NoMismatch; } const CXXNewExpr * MismatchingNewDeleteDetector::getNewExprFromInitListOrExpr(const Expr *E) { assert(E != nullptr && "Expected a valid initializer expression"); E = E->IgnoreParenImpCasts(); if (const InitListExpr *ILE = dyn_cast<const InitListExpr>(E)) { if (ILE->getNumInits() == 1) E = dyn_cast<const CXXNewExpr>(ILE->getInit(0)->IgnoreParenImpCasts()); } return dyn_cast_or_null<const CXXNewExpr>(E); } bool MismatchingNewDeleteDetector::hasMatchingNewInCtorInit( const CXXCtorInitializer *CI) { const CXXNewExpr *NE = nullptr; if (Field == CI->getMember() && (NE = getNewExprFromInitListOrExpr(CI->getInit()))) { if (NE->isArray() == IsArrayForm) return true; else NewExprs.push_back(NE); } return false; } bool MismatchingNewDeleteDetector::hasMatchingNewInCtor( const CXXConstructorDecl *CD) { if (CD->isImplicit()) return false; const FunctionDecl *Definition = CD; if (!CD->isThisDeclarationADefinition() && !CD->isDefined(Definition)) { HasUndefinedConstructors = true; return EndOfTU; } for (const auto *CI : cast<const CXXConstructorDecl>(Definition)->inits()) { if (hasMatchingNewInCtorInit(CI)) return true; } return false; } MismatchingNewDeleteDetector::MismatchResult MismatchingNewDeleteDetector::analyzeInClassInitializer() { assert(Field != nullptr && "This should be called only for members"); if (const CXXNewExpr *NE = getNewExprFromInitListOrExpr(Field->getInClassInitializer())) { if (NE->isArray() != IsArrayForm) { NewExprs.push_back(NE); return MemberInitMismatches; } } return NoMismatch; } MismatchingNewDeleteDetector::MismatchResult MismatchingNewDeleteDetector::analyzeField(FieldDecl *Field, bool DeleteWasArrayForm) { assert(Field != nullptr && "Analysis requires a valid class member."); this->Field = Field; IsArrayForm = DeleteWasArrayForm; const CXXRecordDecl *RD = cast<const CXXRecordDecl>(Field->getParent()); for (const auto *CD : RD->ctors()) { if (hasMatchingNewInCtor(CD)) return NoMismatch; } if (HasUndefinedConstructors) return EndOfTU ? NoMismatch : AnalyzeLater; if (!NewExprs.empty()) return MemberInitMismatches; return Field->hasInClassInitializer() ? 
analyzeInClassInitializer() : NoMismatch; } MismatchingNewDeleteDetector::MismatchResult MismatchingNewDeleteDetector::analyzeMemberExpr(const MemberExpr *ME) { assert(ME != nullptr && "Expected a member expression"); if (FieldDecl *F = dyn_cast<FieldDecl>(ME->getMemberDecl())) return analyzeField(F, IsArrayForm); return NoMismatch; } bool MismatchingNewDeleteDetector::hasMatchingVarInit(const DeclRefExpr *D) { const CXXNewExpr *NE = nullptr; if (const VarDecl *VD = dyn_cast<const VarDecl>(D->getDecl())) { if (VD->hasInit() && (NE = getNewExprFromInitListOrExpr(VD->getInit())) && NE->isArray() != IsArrayForm) { NewExprs.push_back(NE); } } return NewExprs.empty(); } static void DiagnoseMismatchedNewDelete(Sema &SemaRef, SourceLocation DeleteLoc, const MismatchingNewDeleteDetector &Detector) { SourceLocation EndOfDelete = SemaRef.getLocForEndOfToken(DeleteLoc); FixItHint H; if (!Detector.IsArrayForm) H = FixItHint::CreateInsertion(EndOfDelete, "[]"); else { SourceLocation RSquare = Lexer::findLocationAfterToken( DeleteLoc, tok::l_square, SemaRef.getSourceManager(), SemaRef.getLangOpts(), true); if (RSquare.isValid()) H = FixItHint::CreateRemoval(SourceRange(EndOfDelete, RSquare)); } SemaRef.Diag(DeleteLoc, diag::warn_mismatched_delete_new) << Detector.IsArrayForm << H; for (const auto *NE : Detector.NewExprs) SemaRef.Diag(NE->getExprLoc(), diag::note_allocated_here) << Detector.IsArrayForm; } void Sema::AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE) { if (Diags.isIgnored(diag::warn_mismatched_delete_new, SourceLocation())) return; MismatchingNewDeleteDetector Detector(/*EndOfTU=*/false); switch (Detector.analyzeDeleteExpr(DE)) { case MismatchingNewDeleteDetector::VarInitMismatches: case MismatchingNewDeleteDetector::MemberInitMismatches: { DiagnoseMismatchedNewDelete(*this, DE->getLocStart(), Detector); break; } case MismatchingNewDeleteDetector::AnalyzeLater: { DeleteExprs[Detector.Field].push_back( std::make_pair(DE->getLocStart(), DE->isArrayForm())); break; } case MismatchingNewDeleteDetector::NoMismatch: break; } } void Sema::AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm) { MismatchingNewDeleteDetector Detector(/*EndOfTU=*/true); switch (Detector.analyzeField(Field, DeleteWasArrayForm)) { case MismatchingNewDeleteDetector::VarInitMismatches: llvm_unreachable("This analysis should have been done for class members."); case MismatchingNewDeleteDetector::AnalyzeLater: llvm_unreachable("Analysis cannot be postponed any point beyond end of " "translation unit."); case MismatchingNewDeleteDetector::MemberInitMismatches: DiagnoseMismatchedNewDelete(*this, DeleteLoc, Detector); break; case MismatchingNewDeleteDetector::NoMismatch: break; } } /// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in: /// @code ::delete ptr; @endcode /// or /// @code delete [] ptr; @endcode ExprResult Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *ExE) { // C++ [expr.delete]p1: // The operand shall have a pointer type, or a class type having a single // non-explicit conversion function to a pointer type. The result has type // void. // // DR599 amends "pointer type" to "pointer to object type" in both cases. ExprResult Ex = ExE; FunctionDecl *OperatorDelete = nullptr; bool ArrayFormAsWritten = ArrayForm; bool UsualArrayDeleteWantsSize = false; if (!Ex.get()->isTypeDependent()) { // Perform lvalue-to-rvalue cast, if needed. 
Ex = DefaultLvalueConversion(Ex.get()); if (Ex.isInvalid()) return ExprError(); QualType Type = Ex.get()->getType(); class DeleteConverter : public ContextualImplicitConverter { public: DeleteConverter() : ContextualImplicitConverter(false, true) {} bool match(QualType ConvType) override { // FIXME: If we have an operator T* and an operator void*, we must pick // the operator T*. if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>()) if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType()) return true; return false; } SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_delete_operand) << T; } SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_delete_incomplete_class_type) << T; } SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { return S.Diag(Loc, diag::err_delete_explicit_conversion) << T << ConvTy; } SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_delete_conversion) << ConvTy; } SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) override { return S.Diag(Loc, diag::err_ambiguous_delete_operand) << T; } SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) override { return S.Diag(Conv->getLocation(), diag::note_delete_conversion) << ConvTy; } SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) override { llvm_unreachable("conversion functions are permitted"); } } Converter; Ex = PerformContextualImplicitConversion(StartLoc, Ex.get(), Converter); if (Ex.isInvalid()) return ExprError(); Type = Ex.get()->getType(); if (!Converter.match(Type)) // FIXME: PerformContextualImplicitConversion should return ExprError // itself in this case. return ExprError(); QualType Pointee = Type->getAs<PointerType>()->getPointeeType(); QualType PointeeElem = Context.getBaseElementType(Pointee); if (unsigned AddressSpace = Pointee.getAddressSpace()) return Diag(Ex.get()->getLocStart(), diag::err_address_space_qualified_delete) << Pointee.getUnqualifiedType() << AddressSpace; CXXRecordDecl *PointeeRD = nullptr; if (Pointee->isVoidType() && !isSFINAEContext()) { // The C++ standard bans deleting a pointer to a non-object type, which // effectively bans deletion of "void*". However, most compilers support // this, so we treat it as a warning unless we're in a SFINAE context. Diag(StartLoc, diag::ext_delete_void_ptr_operand) << Type << Ex.get()->getSourceRange(); } else if (Pointee->isFunctionType() || Pointee->isVoidType()) { return ExprError(Diag(StartLoc, diag::err_delete_operand) << Type << Ex.get()->getSourceRange()); } else if (!Pointee->isDependentType()) { if (!RequireCompleteType(StartLoc, Pointee, diag::warn_delete_incomplete, Ex.get())) { if (const RecordType *RT = PointeeElem->getAs<RecordType>()) PointeeRD = cast<CXXRecordDecl>(RT->getDecl()); } } if (Pointee->isArrayType() && !ArrayForm) { Diag(StartLoc, diag::warn_delete_array_type) << Type << Ex.get()->getSourceRange() << FixItHint::CreateInsertion(PP.getLocForEndOfToken(StartLoc), "[]"); ArrayForm = true; } DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName( ArrayForm ? 
OO_Array_Delete : OO_Delete); if (PointeeRD) { if (!UseGlobal && FindDeallocationFunction(StartLoc, PointeeRD, DeleteName, OperatorDelete)) return ExprError(); // If we're allocating an array of records, check whether the // usual operator delete[] has a size_t parameter. if (ArrayForm) { // If the user specifically asked to use the global allocator, // we'll need to do the lookup into the class. if (UseGlobal) UsualArrayDeleteWantsSize = doesUsualArrayDeleteWantSize(*this, StartLoc, PointeeElem); // Otherwise, the usual operator delete[] should be the // function we just found. else if (OperatorDelete && isa<CXXMethodDecl>(OperatorDelete)) UsualArrayDeleteWantsSize = (OperatorDelete->getNumParams() == 2); } if (!PointeeRD->hasIrrelevantDestructor()) if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) { MarkFunctionReferenced(StartLoc, const_cast<CXXDestructorDecl*>(Dtor)); if (DiagnoseUseOfDecl(Dtor, StartLoc)) return ExprError(); } // C++ [expr.delete]p3: // In the first alternative (delete object), if the static type of the // object to be deleted is different from its dynamic type, the static // type shall be a base class of the dynamic type of the object to be // deleted and the static type shall have a virtual destructor or the // behavior is undefined. // // Note: a final class cannot be derived from, no issue there if (PointeeRD->isPolymorphic() && !PointeeRD->hasAttr<FinalAttr>()) { CXXDestructorDecl *dtor = PointeeRD->getDestructor(); if (dtor && !dtor->isVirtual()) { if (PointeeRD->isAbstract()) { // If the class is abstract, we warn by default, because we're // sure the code has undefined behavior. Diag(StartLoc, diag::warn_delete_abstract_non_virtual_dtor) << PointeeElem; } else if (!ArrayForm) { // Otherwise, if this is not an array delete, it's a bit suspect, // but not necessarily wrong. Diag(StartLoc, diag::warn_delete_non_virtual_dtor) << PointeeElem; } } } } if (!OperatorDelete) // Look for a global declaration. OperatorDelete = FindUsualDeallocationFunction( StartLoc, !RequireCompleteType(StartLoc, Pointee, 0) && (!ArrayForm || UsualArrayDeleteWantsSize || Pointee.isDestructedType()), DeleteName); MarkFunctionReferenced(StartLoc, OperatorDelete); // Check access and ambiguity of operator delete and destructor. if (PointeeRD) { if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) { CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor, PDiag(diag::err_access_dtor) << PointeeElem); } } } CXXDeleteExpr *Result = new (Context) CXXDeleteExpr( Context.VoidTy, UseGlobal, ArrayForm, ArrayFormAsWritten, UsualArrayDeleteWantsSize, OperatorDelete, Ex.get(), StartLoc); AnalyzeDeleteExprMismatch(Result); return Result; } /// \brief Check the use of the given variable as a C++ condition in an if, /// while, do-while, or switch statement. ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean) { if (ConditionVar->isInvalidDecl()) return ExprError(); QualType T = ConditionVar->getType(); // C++ [stmt.select]p2: // The declarator shall not specify a function or an array. 
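  // For illustration (editor's addition): this rejects condition
  // declarations of function or array type, e.g.
  //   if (int f()) ...   // function type
  //   if (int a[4]) ...  // array type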
if (T->isFunctionType()) return ExprError(Diag(ConditionVar->getLocation(), diag::err_invalid_use_of_function_type) << ConditionVar->getSourceRange()); else if (T->isArrayType()) return ExprError(Diag(ConditionVar->getLocation(), diag::err_invalid_use_of_array_type) << ConditionVar->getSourceRange()); ExprResult Condition = DeclRefExpr::Create( Context, NestedNameSpecifierLoc(), SourceLocation(), ConditionVar, /*enclosing*/ false, ConditionVar->getLocation(), ConditionVar->getType().getNonReferenceType(), VK_LValue); MarkDeclRefReferenced(cast<DeclRefExpr>(Condition.get())); if (ConvertToBoolean) { Condition = CheckBooleanCondition(Condition.get(), StmtLoc); if (Condition.isInvalid()) return ExprError(); } return Condition; } /// CheckCXXBooleanCondition - Returns true if a conversion to bool is invalid. ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr) { // C++ 6.4p4: // The value of a condition that is an initialized declaration in a statement // other than a switch statement is the value of the declared variable // implicitly converted to type bool. If that conversion is ill-formed, the // program is ill-formed. // The value of a condition that is an expression is the value of the // expression, implicitly converted to bool. // return PerformContextuallyConvertToBool(CondExpr); } /// Helper function to determine whether this is the (deprecated) C++ /// conversion from a string literal to a pointer to non-const char or /// non-const wchar_t (for narrow and wide string literals, /// respectively). bool Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) { // Look inside the implicit cast, if it exists. if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(From)) From = Cast->getSubExpr(); // A string literal (2.13.4) that is not a wide string literal can // be converted to an rvalue of type "pointer to char"; a wide // string literal can be converted to an rvalue of type "pointer // to wchar_t" (C++ 4.2p2). if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From->IgnoreParens())) if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) if (const BuiltinType *ToPointeeType = ToPtrType->getPointeeType()->getAs<BuiltinType>()) { // This conversion is considered only when there is an // explicit appropriate pointer target type (C++ 4.2p2). 
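        // For illustration (editor's addition): this matches the deprecated
        //   char *p = "abc";
        // but not
        //   const char *q = "abc";
        // since only conversion to a pointer to *non-const* character type
        // is the deprecated case.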
if (!ToPtrType->getPointeeType().hasQualifiers()) { switch (StrLit->getKind()) { case StringLiteral::UTF8: case StringLiteral::UTF16: case StringLiteral::UTF32: // We don't allow UTF literals to be implicitly converted break; case StringLiteral::Ascii: return (ToPointeeType->getKind() == BuiltinType::Char_U || ToPointeeType->getKind() == BuiltinType::Char_S); case StringLiteral::Wide: return ToPointeeType->isWideCharType(); } } } return false; } static ExprResult BuildCXXCastArgument(Sema &S, SourceLocation CastLoc, QualType Ty, CastKind Kind, CXXMethodDecl *Method, DeclAccessPair FoundDecl, bool HadMultipleCandidates, Expr *From) { switch (Kind) { default: llvm_unreachable("Unhandled cast kind!"); case CK_ConstructorConversion: { CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Method); SmallVector<Expr*, 8> ConstructorArgs; if (S.RequireNonAbstractType(CastLoc, Ty, diag::err_allocation_of_abstract_type)) return ExprError(); if (S.CompleteConstructorCall(Constructor, From, CastLoc, ConstructorArgs)) return ExprError(); S.CheckConstructorAccess(CastLoc, Constructor, InitializedEntity::InitializeTemporary(Ty), Constructor->getAccess()); if (S.DiagnoseUseOfDecl(Method, CastLoc)) return ExprError(); ExprResult Result = S.BuildCXXConstructExpr( CastLoc, Ty, cast<CXXConstructorDecl>(Method), ConstructorArgs, HadMultipleCandidates, /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); if (Result.isInvalid()) return ExprError(); return S.MaybeBindToTemporary(Result.getAs<Expr>()); } case CK_UserDefinedConversion: { assert(!From->getType()->isPointerType() && "Arg can't have pointer type!"); S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ nullptr, FoundDecl); if (S.DiagnoseUseOfDecl(Method, CastLoc)) return ExprError(); // Create an implicit call expr that calls it. CXXConversionDecl *Conv = cast<CXXConversionDecl>(Method); ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv, HadMultipleCandidates); if (Result.isInvalid()) return ExprError(); // Record usage of conversion in an implicit cast. Result = ImplicitCastExpr::Create(S.Context, Result.get()->getType(), CK_UserDefinedConversion, Result.get(), nullptr, Result.get()->getValueKind()); return S.MaybeBindToTemporary(Result.get()); } } } /// PerformImplicitConversion - Perform an implicit conversion of the /// expression From to the type ToType using the pre-computed implicit /// conversion sequence ICS. Returns the converted /// expression. Action is the kind of conversion we're performing, /// used in the error message. ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence &ICS, AssignmentAction Action, CheckedConversionKind CCK) { switch (ICS.getKind()) { case ImplicitConversionSequence::StandardConversion: { ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard, Action, CCK); if (Res.isInvalid()) return ExprError(); From = Res.get(); break; } case ImplicitConversionSequence::UserDefinedConversion: { FunctionDecl *FD = ICS.UserDefined.ConversionFunction; CastKind CastKind; QualType BeforeToType; assert(FD && "no conversion function for user-defined conversion seq"); if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) { CastKind = CK_UserDefinedConversion; // If the user-defined conversion is specified by a conversion function, // the initial standard conversion sequence converts the source type to // the implicit object parameter of the conversion function. 
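      // For illustration (editor's addition): given
      //   struct S { operator int(); };
      //   long n = S();
      // the "before" sequence converts the S rvalue to the implicit object
      // parameter of S::operator int, and the "after" sequence converts the
      // returned int to long.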
BeforeToType = Context.getTagDeclType(Conv->getParent()); } else { const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD); CastKind = CK_ConstructorConversion; // Do no conversion if dealing with ... for the first conversion. if (!ICS.UserDefined.EllipsisConversion) { // If the user-defined conversion is specified by a constructor, the // initial standard conversion sequence converts the source type to // the type required by the argument of the constructor BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType(); } } // Watch out for ellipsis conversion. if (!ICS.UserDefined.EllipsisConversion) { ExprResult Res = PerformImplicitConversion(From, BeforeToType, ICS.UserDefined.Before, AA_Converting, CCK); if (Res.isInvalid()) return ExprError(); From = Res.get(); } ExprResult CastArg = BuildCXXCastArgument(*this, From->getLocStart(), ToType.getNonReferenceType(), CastKind, cast<CXXMethodDecl>(FD), ICS.UserDefined.FoundConversionFunction, ICS.UserDefined.HadMultipleCandidates, From); if (CastArg.isInvalid()) return ExprError(); From = CastArg.get(); return PerformImplicitConversion(From, ToType, ICS.UserDefined.After, AA_Converting, CCK); } case ImplicitConversionSequence::AmbiguousConversion: ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(), PDiag(diag::err_typecheck_ambiguous_condition) << From->getSourceRange()); return ExprError(); case ImplicitConversionSequence::EllipsisConversion: llvm_unreachable("Cannot perform an ellipsis conversion"); case ImplicitConversionSequence::BadConversion: return ExprError(); } // Everything went well. return From; } /// PerformImplicitConversion - Perform an implicit conversion of the /// expression From to the type ToType by following the standard /// conversion sequence SCS. Returns the converted /// expression. Flavor is the context in which we're performing this /// conversion, for use in error messages. ExprResult Sema::PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK) { bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast); // Overall FIXME: we are recomputing too many types here and doing far too // much extra work. What this means is that we need to keep track of more // information that is computed when we try the implicit conversion initially, // so that we don't need to recompute anything here. QualType FromType = From->getType(); if (SCS.CopyConstructor) { // FIXME: When can ToType be a reference type? assert(!ToType->isReferenceType()); if (SCS.Second == ICK_Derived_To_Base) { SmallVector<Expr*, 8> ConstructorArgs; if (CompleteConstructorCall(cast<CXXConstructorDecl>(SCS.CopyConstructor), From, /*FIXME:ConstructLoc*/SourceLocation(), ConstructorArgs)) return ExprError(); return BuildCXXConstructExpr( /*FIXME:ConstructLoc*/ SourceLocation(), ToType, SCS.CopyConstructor, ConstructorArgs, /*HadMultipleCandidates*/ false, /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); } return BuildCXXConstructExpr( /*FIXME:ConstructLoc*/ SourceLocation(), ToType, SCS.CopyConstructor, From, /*HadMultipleCandidates*/ false, /*ListInit*/ false, /*StdInitListInit*/ false, /*ZeroInit*/ false, CXXConstructExpr::CK_Complete, SourceRange()); } // Resolve overloaded function references. 
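  // For illustration (editor's addition): in
  //   int g(int); double g(double);
  //   int (*fp)(int) = g;
  // the reference to 'g' carries the special OverloadTy until the target
  // type selects an overload below.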
if (Context.hasSameType(FromType, Context.OverloadTy)) { DeclAccessPair Found; FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(From, ToType, true, Found); if (!Fn) return ExprError(); if (DiagnoseUseOfDecl(Fn, From->getLocStart())) return ExprError(); From = FixOverloadedFunctionReference(From, Found, Fn); FromType = From->getType(); } // If we're converting to an atomic type, first convert to the corresponding // non-atomic type. QualType ToAtomicType; if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>()) { ToAtomicType = ToType; ToType = ToAtomic->getValueType(); } // Perform the first implicit conversion. switch (SCS.First) { case ICK_Identity: if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) { FromType = FromAtomic->getValueType().getUnqualifiedType(); From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic, From, /*BasePath=*/nullptr, VK_RValue); } break; case ICK_Lvalue_To_Rvalue: { assert(From->getObjectKind() != OK_ObjCProperty); ExprResult FromRes = DefaultLvalueConversion(From); if (FromRes.isInvalid()) { return ExprError(); } From = FromRes.get(); FromType = From->getType(); break; } case ICK_Array_To_Pointer: FromType = Context.getArrayDecayedType(FromType); From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Function_To_Pointer: FromType = Context.getPointerType(FromType); From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; default: llvm_unreachable("Improper first standard conversion"); } // Perform the second implicit conversion switch (SCS.Second) { case ICK_Identity: // C++ [except.spec]p5: // [For] assignment to and initialization of pointers to functions, // pointers to member functions, and references to functions: the // target entity shall allow at least the exceptions allowed by the // source value in the assignment or initialization. switch (Action) { case AA_Assigning: case AA_Initializing: // Note, function argument passing and returning are initialization. case AA_Passing: case AA_Returning: case AA_Sending: case AA_Passing_CFAudited: if (CheckExceptionSpecCompatibility(From, ToType)) return ExprError(); break; case AA_Casting: case AA_Converting: // Casts and implicit conversions are not initialization, so are not // checked for exception specification mismatches. break; } // Nothing else to do. break; case ICK_NoReturn_Adjustment: // If both sides are functions (or pointers/references to them), there could // be incompatible exception declarations. 
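    // For illustration (editor's addition): this handles dropping
    // 'noreturn', e.g. binding
    //   void fatal() __attribute__((noreturn));
    // to a plain 'void (*)()', while still checking that the exception
    // specifications remain compatible.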
if (CheckExceptionSpecCompatibility(From, ToType)) return ExprError(); From = ImpCastExprToType(From, ToType, CK_NoOp, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Integral_Promotion: case ICK_Integral_Conversion: if (ToType->isBooleanType()) { assert(FromType->castAs<EnumType>()->getDecl()->isFixed() && SCS.Second == ICK_Integral_Promotion && "only enums with fixed underlying type can promote to bool"); From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } else { From = ImpCastExprToType(From, ToType, CK_IntegralCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } break; case ICK_Floating_Promotion: case ICK_Floating_Conversion: From = ImpCastExprToType(From, ToType, CK_FloatingCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Complex_Promotion: case ICK_Complex_Conversion: { QualType FromEl = From->getType()->getAs<ComplexType>()->getElementType(); QualType ToEl = ToType->getAs<ComplexType>()->getElementType(); CastKind CK; if (FromEl->isRealFloatingType()) { if (ToEl->isRealFloatingType()) CK = CK_FloatingComplexCast; else CK = CK_FloatingComplexToIntegralComplex; } else if (ToEl->isRealFloatingType()) { CK = CK_IntegralComplexToFloatingComplex; } else { CK = CK_IntegralComplexCast; } From = ImpCastExprToType(From, ToType, CK, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; } case ICK_Floating_Integral: if (ToType->isRealFloatingType()) From = ImpCastExprToType(From, ToType, CK_IntegralToFloating, VK_RValue, /*BasePath=*/nullptr, CCK).get(); else From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Compatible_Conversion: From = ImpCastExprToType(From, ToType, CK_NoOp, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Writeback_Conversion: case ICK_Pointer_Conversion: { if (SCS.IncompatibleObjC && Action != AA_Casting) { // Diagnose incompatible Objective-C conversions if (Action == AA_Initializing || Action == AA_Assigning) Diag(From->getLocStart(), diag::ext_typecheck_convert_incompatible_pointer) << ToType << From->getType() << Action << From->getSourceRange() << 0; else Diag(From->getLocStart(), diag::ext_typecheck_convert_incompatible_pointer) << From->getType() << ToType << Action << From->getSourceRange() << 0; if (From->getType()->isObjCObjectPointerType() && ToType->isObjCObjectPointerType()) EmitRelatedResultTypeNote(From); } else if (getLangOpts().ObjCAutoRefCount && !CheckObjCARCUnavailableWeakConversion(ToType, From->getType())) { if (Action == AA_Initializing) Diag(From->getLocStart(), diag::err_arc_weak_unavailable_assign); else Diag(From->getLocStart(), diag::err_arc_convesion_of_weak_unavailable) << (Action == AA_Casting) << From->getType() << ToType << From->getSourceRange(); } CastKind Kind = CK_Invalid; CXXCastPath BasePath; if (CheckPointerConversion(From, ToType, Kind, BasePath, CStyle)) return ExprError(); // Make sure we extend blocks if necessary. // FIXME: doing this here is really ugly. 
if (Kind == CK_BlockPointerToObjCPointerCast) { ExprResult E = From; (void) PrepareCastToObjCObjectPointer(E); From = E.get(); } if (getLangOpts().ObjCAutoRefCount) CheckObjCARCConversion(SourceRange(), ToType, From, CCK); From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK) .get(); break; } case ICK_Pointer_Member: { CastKind Kind = CK_Invalid; CXXCastPath BasePath; if (CheckMemberPointerConversion(From, ToType, Kind, BasePath, CStyle)) return ExprError(); if (CheckExceptionSpecCompatibility(From, ToType)) return ExprError(); // We may not have been able to figure out what this member pointer resolved // to up until this exact point. Attempt to lock-in it's inheritance model. if (Context.getTargetInfo().getCXXABI().isMicrosoft()) { RequireCompleteType(From->getExprLoc(), From->getType(), 0); RequireCompleteType(From->getExprLoc(), ToType, 0); } From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK) .get(); break; } case ICK_Boolean_Conversion: // Perform half-to-boolean conversion via float. if (From->getType()->isHalfType()) { From = ImpCastExprToType(From, Context.FloatTy, CK_FloatingCast).get(); FromType = Context.FloatTy; } From = ImpCastExprToType(From, Context.BoolTy, ScalarTypeToBooleanCastKind(FromType), VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Derived_To_Base: { CXXCastPath BasePath; if (CheckDerivedToBaseConversion(From->getType(), ToType.getNonReferenceType(), From->getLocStart(), From->getSourceRange(), &BasePath, CStyle)) return ExprError(); From = ImpCastExprToType(From, ToType.getNonReferenceType(), CK_DerivedToBase, From->getValueKind(), &BasePath, CCK).get(); break; } case ICK_Vector_Conversion: From = ImpCastExprToType(From, ToType, CK_BitCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; case ICK_Vector_Splat: // Vector splat from any arithmetic type to a vector. // Cast to the element type. { QualType elType = ToType->getAs<ExtVectorType>()->getElementType(); if (elType != From->getType()) { ExprResult E = From; From = ImpCastExprToType(From, elType, PrepareScalarCast(E, elType)).get(); } From = ImpCastExprToType(From, ToType, CK_VectorSplat, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } break; case ICK_Complex_Real: // Case 1. x -> _Complex y if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) { QualType ElType = ToComplex->getElementType(); bool isFloatingComplex = ElType->isRealFloatingType(); // x -> y if (Context.hasSameUnqualifiedType(ElType, From->getType())) { // do nothing } else if (From->getType()->isRealFloatingType()) { From = ImpCastExprToType(From, ElType, isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).get(); } else { assert(From->getType()->isIntegerType()); From = ImpCastExprToType(From, ElType, isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).get(); } // y -> _Complex y From = ImpCastExprToType(From, ToType, isFloatingComplex ? CK_FloatingRealToComplex : CK_IntegralRealToComplex).get(); // Case 2. _Complex x -> y } else { const ComplexType *FromComplex = From->getType()->getAs<ComplexType>(); assert(FromComplex); QualType ElType = FromComplex->getElementType(); bool isFloatingComplex = ElType->isRealFloatingType(); // _Complex x -> x From = ImpCastExprToType(From, ElType, isFloatingComplex ? 
CK_FloatingComplexToReal : CK_IntegralComplexToReal, VK_RValue, /*BasePath=*/nullptr, CCK).get(); // x -> y if (Context.hasSameUnqualifiedType(ElType, ToType)) { // do nothing } else if (ToType->isRealFloatingType()) { From = ImpCastExprToType(From, ToType, isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } else { assert(ToType->isIntegerType()); From = ImpCastExprToType(From, ToType, isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); } } break; case ICK_Block_Pointer_Conversion: { From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast, VK_RValue, /*BasePath=*/nullptr, CCK).get(); break; } // HLSL Change Starts case ICK_Flat_Conversion: case ICK_HLSL_Derived_To_Base: case ICK_HLSLVector_Splat: case ICK_HLSLVector_Scalar: case ICK_HLSLVector_Truncation: case ICK_HLSLVector_Conversion: From = hlsl::PerformHLSLConversion(this, From, ToType.getUnqualifiedType(), SCS, CCK).get(); break; // HLSL Change Ends case ICK_TransparentUnionConversion: { ExprResult FromRes = From; Sema::AssignConvertType ConvTy = CheckTransparentUnionArgumentConstraints(ToType, FromRes); if (FromRes.isInvalid()) return ExprError(); From = FromRes.get(); assert ((ConvTy == Sema::Compatible) && "Improper transparent union conversion"); (void)ConvTy; break; } case ICK_Zero_Event_Conversion: From = ImpCastExprToType(From, ToType, CK_ZeroToOCLEvent, From->getValueKind()).get(); break; case ICK_Lvalue_To_Rvalue: case ICK_Array_To_Pointer: case ICK_Function_To_Pointer: case ICK_Qualification: case ICK_Num_Conversion_Kinds: llvm_unreachable("Improper second standard conversion"); } switch (SCS.Third) { case ICK_Identity: // Nothing to do. break; case ICK_Qualification: { // The qualification keeps the category of the inner expression, unless the // target type isn't a reference. ExprValueKind VK = ToType->isReferenceType() ? From->getValueKind() : VK_RValue; From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context), CK_NoOp, VK, /*BasePath=*/nullptr, CCK).get(); if (SCS.DeprecatedStringLiteralToCharPtr && !getLangOpts().WritableStrings) { Diag(From->getLocStart(), getLangOpts().CPlusPlus11 ? diag::ext_deprecated_string_literal_conversion : diag::warn_deprecated_string_literal_conversion) << ToType.getNonReferenceType(); } break; } default: llvm_unreachable("Improper third standard conversion"); } // If this conversion sequence involved a scalar -> atomic conversion, perform // that conversion now. if (!ToAtomicType.isNull()) { assert(Context.hasSameType( ToAtomicType->castAs<AtomicType>()->getValueType(), From->getType())); From = ImpCastExprToType(From, ToAtomicType, CK_NonAtomicToAtomic, VK_RValue, nullptr, CCK).get(); } return From; } /// \brief Check the completeness of a type in a unary type trait. /// /// If the particular type trait requires a complete type, tries to complete /// it. If completing the type fails, a diagnostic is emitted and false /// returned. If completing the type succeeds or no completion was required, /// returns true. 
static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT, SourceLocation Loc, QualType ArgTy) { // C++0x [meta.unary.prop]p3: // For all of the class templates X declared in this Clause, instantiating // that template with a template argument that is a class template // specialization may result in the implicit instantiation of the template // argument if and only if the semantics of X require that the argument // must be a complete type. // We apply this rule to all the type trait expressions used to implement // these class templates. We also try to follow any GCC documented behavior // in these expressions to ensure portability of standard libraries. switch (UTT) { default: llvm_unreachable("not a UTT"); // is_complete_type somewhat obviously cannot require a complete type. case UTT_IsCompleteType: // Fall-through // These traits are modeled on the type predicates in C++0x // [meta.unary.cat] and [meta.unary.comp]. They are not specified as // requiring a complete type, as whether or not they return true cannot be // impacted by the completeness of the type. case UTT_IsVoid: case UTT_IsIntegral: case UTT_IsFloatingPoint: case UTT_IsArray: case UTT_IsPointer: case UTT_IsLvalueReference: case UTT_IsRvalueReference: case UTT_IsMemberFunctionPointer: case UTT_IsMemberObjectPointer: case UTT_IsEnum: case UTT_IsUnion: case UTT_IsClass: case UTT_IsFunction: case UTT_IsReference: case UTT_IsArithmetic: case UTT_IsFundamental: case UTT_IsObject: case UTT_IsScalar: case UTT_IsCompound: case UTT_IsMemberPointer: // Fall-through // These traits are modeled on type predicates in C++0x [meta.unary.prop] // which requires some of its traits to have the complete type. However, // the completeness of the type cannot impact these traits' semantics, and // so they don't require it. This matches the comments on these traits in // Table 49. case UTT_IsConst: case UTT_IsVolatile: case UTT_IsSigned: case UTT_IsUnsigned: return true; // C++0x [meta.unary.prop] Table 49 requires the following traits to be // applied to a complete type. case UTT_IsTrivial: case UTT_IsTriviallyCopyable: case UTT_IsStandardLayout: case UTT_IsPOD: case UTT_IsLiteral: case UTT_IsEmpty: case UTT_IsPolymorphic: case UTT_IsAbstract: case UTT_IsInterfaceClass: case UTT_IsDestructible: case UTT_IsNothrowDestructible: // Fall-through // These traits require a complete type. case UTT_IsFinal: case UTT_IsSealed: // These trait expressions are designed to help implement predicates in // [meta.unary.prop] despite not being named the same. They are specified // by both GCC and the Embarcadero C++ compiler, and require the complete // type due to the overarching C++0x type predicates being implemented // requiring the complete type. case UTT_HasNothrowAssign: case UTT_HasNothrowMoveAssign: case UTT_HasNothrowConstructor: case UTT_HasNothrowCopy: case UTT_HasTrivialAssign: case UTT_HasTrivialMoveAssign: case UTT_HasTrivialDefaultConstructor: case UTT_HasTrivialMoveConstructor: case UTT_HasTrivialCopy: case UTT_HasTrivialDestructor: case UTT_HasVirtualDestructor: // Arrays of unknown bound are expressly allowed. QualType ElTy = ArgTy; if (ArgTy->isIncompleteArrayType()) ElTy = S.Context.getAsArrayType(ArgTy)->getElementType(); // The void type is expressly allowed. 
if (ElTy->isVoidType()) return true; return !S.RequireCompleteType( Loc, ElTy, diag::err_incomplete_type_used_in_type_trait_expr); } } static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op, Sema &Self, SourceLocation KeyLoc, ASTContext &C, bool (CXXRecordDecl::*HasTrivial)() const, bool (CXXRecordDecl::*HasNonTrivial)() const, bool (CXXMethodDecl::*IsDesiredOp)() const) { CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); if ((RD->*HasTrivial)() && !(RD->*HasNonTrivial)()) return true; DeclarationName Name = C.DeclarationNames.getCXXOperatorName(Op); DeclarationNameInfo NameInfo(Name, KeyLoc); LookupResult Res(Self, NameInfo, Sema::LookupOrdinaryName); if (Self.LookupQualifiedName(Res, RD)) { bool FoundOperator = false; Res.suppressDiagnostics(); for (LookupResult::iterator Op = Res.begin(), OpEnd = Res.end(); Op != OpEnd; ++Op) { if (isa<FunctionTemplateDecl>(*Op)) continue; CXXMethodDecl *Operator = cast<CXXMethodDecl>(*Op); if((Operator->*IsDesiredOp)()) { FoundOperator = true; const FunctionProtoType *CPT = Operator->getType()->getAs<FunctionProtoType>(); CPT = Self.ResolveExceptionSpec(KeyLoc, CPT); if (!CPT || !CPT->isNothrow(C)) return false; } } return FoundOperator; } return false; } static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT, SourceLocation KeyLoc, QualType T) { assert(!T->isDependentType() && "Cannot evaluate traits of dependent type"); ASTContext &C = Self.Context; switch(UTT) { default: llvm_unreachable("not a UTT"); // Type trait expressions corresponding to the primary type category // predicates in C++0x [meta.unary.cat]. case UTT_IsVoid: return T->isVoidType(); case UTT_IsIntegral: return T->isIntegralType(C); case UTT_IsFloatingPoint: return T->isFloatingType(); case UTT_IsArray: return T->isArrayType(); case UTT_IsPointer: return T->isPointerType(); case UTT_IsLvalueReference: return T->isLValueReferenceType(); case UTT_IsRvalueReference: return T->isRValueReferenceType(); case UTT_IsMemberFunctionPointer: return T->isMemberFunctionPointerType(); case UTT_IsMemberObjectPointer: return T->isMemberDataPointerType(); case UTT_IsEnum: return T->isEnumeralType(); case UTT_IsUnion: return T->isUnionType(); case UTT_IsClass: return T->isClassType() || T->isStructureType() || T->isInterfaceType(); case UTT_IsFunction: return T->isFunctionType(); // Type trait expressions which correspond to the convenient composition // predicates in C++0x [meta.unary.comp]. case UTT_IsReference: return T->isReferenceType(); case UTT_IsArithmetic: return T->isArithmeticType() && !T->isEnumeralType(); case UTT_IsFundamental: return T->isFundamentalType(); case UTT_IsObject: return T->isObjectType(); case UTT_IsScalar: // Note: semantic analysis depends on Objective-C lifetime types to be // considered scalar types. However, such types do not actually behave // like scalar types at run time (since they may require retain/release // operations), so we report them as non-scalar. if (T->isObjCLifetimeType()) { switch (T.getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: return true; case Qualifiers::OCL_Strong: case Qualifiers::OCL_Weak: case Qualifiers::OCL_Autoreleasing: return false; } } return T->isScalarType(); case UTT_IsCompound: return T->isCompoundType(); case UTT_IsMemberPointer: return T->isMemberPointerType(); // Type trait expressions which correspond to the type property predicates // in C++0x [meta.unary.prop]. 
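// For example, __is_const(const int) is true, but __is_const(const int &)
// is false: the qualifier applies to the referee, not to the reference
// type itself.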
  case UTT_IsConst:
    return T.isConstQualified();
  case UTT_IsVolatile:
    return T.isVolatileQualified();
  case UTT_IsTrivial:
    return T.isTrivialType(Self.Context);
  case UTT_IsTriviallyCopyable:
    return T.isTriviallyCopyableType(Self.Context);
  case UTT_IsStandardLayout:
    return T->isStandardLayoutType();
  case UTT_IsPOD:
    return T.isPODType(Self.Context);
  case UTT_IsLiteral:
    return T->isLiteralType(Self.Context);
  case UTT_IsEmpty:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return !RD->isUnion() && RD->isEmpty();
    return false;
  case UTT_IsPolymorphic:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->isPolymorphic();
    return false;
  case UTT_IsAbstract:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->isAbstract();
    return false;
  case UTT_IsInterfaceClass:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->isInterface();
    return false;
  case UTT_IsFinal:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      return RD->hasAttr<FinalAttr>();
    return false;
  case UTT_IsSealed:
    if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
      if (FinalAttr *FA = RD->getAttr<FinalAttr>())
        return FA->isSpelledAsSealed();
    return false;
  case UTT_IsSigned:
    return T->isSignedIntegerType();
  case UTT_IsUnsigned:
    return T->isUnsignedIntegerType();

  // Type trait expressions which query classes regarding their construction,
  // destruction, and copying. Rather than being based directly on the
  // related type predicates in the standard, they are specified by both
  // GCC[1] and the Embarcadero C++ compiler[2], and Clang implements those
  // specifications.
  //
  //   1: http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
  //   2: http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index
  //
  // Note that these builtins do not behave as documented in g++: if a class
  // has both a trivial and a non-trivial special member of a particular kind,
  // they return false! For now, we emulate this behavior.
  // FIXME: This appears to be a g++ bug: more complex cases reveal that it
  // does not correctly compute triviality in the presence of multiple special
  // members of the same kind. Revisit this once the g++ bug is fixed.
  case UTT_HasTrivialDefaultConstructor:
    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
    //   If __is_pod (type) is true then the trait is true, else if type is
    //   a cv class or union type (or array thereof) with a trivial default
    //   constructor ([class.ctor]) then the trait is true, else it is false.
    if (T.isPODType(Self.Context))
      return true;
    if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
      return RD->hasTrivialDefaultConstructor() &&
             !RD->hasNonTrivialDefaultConstructor();
    return false;
  case UTT_HasTrivialMoveConstructor:
    // This trait is implemented by MSVC 2012 and needed to parse the
    // standard library headers. Specifically this is used as the logic
    // behind std::is_trivially_move_constructible (20.9.4.3).
    if (T.isPODType(Self.Context))
      return true;
    if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
      return RD->hasTrivialMoveConstructor() &&
             !RD->hasNonTrivialMoveConstructor();
    return false;
  case UTT_HasTrivialCopy:
    // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
    //   If __is_pod (type) is true or type is a reference type then
    //   the trait is true, else if type is a cv class or union type
    //   with a trivial copy constructor ([class.copy]) then the trait
    //   is true, else it is false.
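    // Illustrative examples ('S' is a hypothetical class):
    // __has_trivial_copy(int) and __has_trivial_copy(int &) are true via
    // the checks below, as is __has_trivial_copy(S) when S's only copy
    // constructor is the implicit, trivial one.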
if (T.isPODType(Self.Context) || T->isReferenceType()) return true; if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) return RD->hasTrivialCopyConstructor() && !RD->hasNonTrivialCopyConstructor(); return false; case UTT_HasTrivialMoveAssign: // This trait is implemented by MSVC 2012 and needed to parse the // standard library headers. Specifically it is used as the logic // behind std::is_trivially_move_assignable (20.9.4.3) if (T.isPODType(Self.Context)) return true; if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) return RD->hasTrivialMoveAssignment() && !RD->hasNonTrivialMoveAssignment(); return false; case UTT_HasTrivialAssign: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If type is const qualified or is a reference type then the // trait is false. Otherwise if __is_pod (type) is true then the // trait is true, else if type is a cv class or union type with // a trivial copy assignment ([class.copy]) then the trait is // true, else it is false. // Note: the const and reference restrictions are interesting, // given that const and reference members don't prevent a class // from having a trivial copy assignment operator (but do cause // errors if the copy assignment operator is actually used, q.v. // [class.copy]p12). if (T.isConstQualified()) return false; if (T.isPODType(Self.Context)) return true; if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) return RD->hasTrivialCopyAssignment() && !RD->hasNonTrivialCopyAssignment(); return false; case UTT_IsDestructible: case UTT_IsNothrowDestructible: // FIXME: Implement UTT_IsDestructible and UTT_IsNothrowDestructible. // For now, let's fall through. case UTT_HasTrivialDestructor: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html // If __is_pod (type) is true or type is a reference type // then the trait is true, else if type is a cv class or union // type (or array thereof) with a trivial destructor // ([class.dtor]) then the trait is true, else it is // false. if (T.isPODType(Self.Context) || T->isReferenceType()) return true; // Objective-C++ ARC: autorelease types don't require destruction. if (T->isObjCLifetimeType() && T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) return true; if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) return RD->hasTrivialDestructor(); return false; // TODO: Propagate nothrowness for implicitly declared special members. case UTT_HasNothrowAssign: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If type is const qualified or is a reference type then the // trait is false. Otherwise if __has_trivial_assign (type) // is true then the trait is true, else if type is a cv class // or union type with copy assignment operators that are known // not to throw an exception then the trait is true, else it is // false. if (C.getBaseElementType(T).isConstQualified()) return false; if (T->isReferenceType()) return false; if (T.isPODType(Self.Context) || T->isObjCLifetimeType()) return true; if (const RecordType *RT = T->getAs<RecordType>()) return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C, &CXXRecordDecl::hasTrivialCopyAssignment, &CXXRecordDecl::hasNonTrivialCopyAssignment, &CXXMethodDecl::isCopyAssignmentOperator); return false; case UTT_HasNothrowMoveAssign: // This trait is implemented by MSVC 2012 and needed to parse the // standard library headers. Specifically this is used as the logic // behind std::is_nothrow_move_assignable (20.9.4.3). 
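    // For example, __has_nothrow_move_assign(int) is true via the POD check
    // below; for a class type, every move assignment operator found by the
    // lookup in HasNoThrowOperator must be known not to throw.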
if (T.isPODType(Self.Context)) return true; if (const RecordType *RT = C.getBaseElementType(T)->getAs<RecordType>()) return HasNoThrowOperator(RT, OO_Equal, Self, KeyLoc, C, &CXXRecordDecl::hasTrivialMoveAssignment, &CXXRecordDecl::hasNonTrivialMoveAssignment, &CXXMethodDecl::isMoveAssignmentOperator); return false; case UTT_HasNothrowCopy: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If __has_trivial_copy (type) is true then the trait is true, else // if type is a cv class or union type with copy constructors that are // known not to throw an exception then the trait is true, else it is // false. if (T.isPODType(C) || T->isReferenceType() || T->isObjCLifetimeType()) return true; if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) { if (RD->hasTrivialCopyConstructor() && !RD->hasNonTrivialCopyConstructor()) return true; bool FoundConstructor = false; unsigned FoundTQs; DeclContext::lookup_result R = Self.LookupConstructors(RD); for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end(); Con != ConEnd; ++Con) { // A template constructor is never a copy constructor. // FIXME: However, it may actually be selected at the actual overload // resolution point. if (isa<FunctionTemplateDecl>(*Con)) continue; CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con); if (Constructor->isCopyConstructor(FoundTQs)) { FoundConstructor = true; const FunctionProtoType *CPT = Constructor->getType()->getAs<FunctionProtoType>(); CPT = Self.ResolveExceptionSpec(KeyLoc, CPT); if (!CPT) return false; // TODO: check whether evaluating default arguments can throw. // For now, we'll be conservative and assume that they can throw. if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 1) return false; } } return FoundConstructor; } return false; case UTT_HasNothrowConstructor: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html // If __has_trivial_constructor (type) is true then the trait is // true, else if type is a cv class or union type (or array // thereof) with a default constructor that is known not to // throw an exception then the trait is true, else it is false. if (T.isPODType(C) || T->isObjCLifetimeType()) return true; if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl()) { if (RD->hasTrivialDefaultConstructor() && !RD->hasNonTrivialDefaultConstructor()) return true; bool FoundConstructor = false; DeclContext::lookup_result R = Self.LookupConstructors(RD); for (DeclContext::lookup_iterator Con = R.begin(), ConEnd = R.end(); Con != ConEnd; ++Con) { // FIXME: In C++0x, a constructor template can be a default constructor. if (isa<FunctionTemplateDecl>(*Con)) continue; CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con); if (Constructor->isDefaultConstructor()) { FoundConstructor = true; const FunctionProtoType *CPT = Constructor->getType()->getAs<FunctionProtoType>(); CPT = Self.ResolveExceptionSpec(KeyLoc, CPT); if (!CPT) return false; // FIXME: check whether evaluating default arguments can throw. // For now, we'll be conservative and assume that they can throw. if (!CPT->isNothrow(Self.Context) || CPT->getNumParams() > 0) return false; } } return FoundConstructor; } return false; case UTT_HasVirtualDestructor: // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html: // If type is a class type with a virtual destructor ([class.dtor]) // then the trait is true, else it is false. 
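    // Illustrative example: given 'struct S { virtual ~S(); };',
    // __has_virtual_destructor(S) is true; it is false for any non-class
    // type.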
if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) if (CXXDestructorDecl *Destructor = Self.LookupDestructor(RD)) return Destructor->isVirtual(); return false; // These type trait expressions are modeled on the specifications for the // Embarcadero C++0x type trait functions: // http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index case UTT_IsCompleteType: // http://docwiki.embarcadero.com/RADStudio/XE/en/Is_complete_type_(typename_T_): // Returns True if and only if T is a complete type at the point of the // function call. return !T->isIncompleteType(); } } /// \brief Determine whether T has a non-trivial Objective-C lifetime in /// ARC mode. static bool hasNontrivialObjCLifetime(QualType T) { switch (T.getObjCLifetime()) { case Qualifiers::OCL_ExplicitNone: return false; case Qualifiers::OCL_Strong: case Qualifiers::OCL_Weak: case Qualifiers::OCL_Autoreleasing: return true; case Qualifiers::OCL_None: return T->isObjCLifetimeType(); } llvm_unreachable("Unknown ObjC lifetime qualifier"); } static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT, QualType RhsT, SourceLocation KeyLoc); static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc) { if (Kind <= UTT_Last) return EvaluateUnaryTypeTrait(S, Kind, KWLoc, Args[0]->getType()); if (Kind <= BTT_Last) return EvaluateBinaryTypeTrait(S, Kind, Args[0]->getType(), Args[1]->getType(), RParenLoc); switch (Kind) { case clang::TT_IsConstructible: case clang::TT_IsNothrowConstructible: case clang::TT_IsTriviallyConstructible: { // C++11 [meta.unary.prop]: // is_trivially_constructible is defined as: // // is_constructible<T, Args...>::value is true and the variable // definition for is_constructible, as defined below, is known to call // no operation that is not trivial. // // The predicate condition for a template specialization // is_constructible<T, Args...> shall be satisfied if and only if the // following variable definition would be well-formed for some invented // variable t: // // T t(create<Args>()...); assert(!Args.empty()); // Precondition: T and all types in the parameter pack Args shall be // complete types, (possibly cv-qualified) void, or arrays of // unknown bound. for (unsigned I = 0, N = Args.size(); I != N; ++I) { QualType ArgTy = Args[I]->getType(); if (ArgTy->isVoidType() || ArgTy->isIncompleteArrayType()) continue; if (S.RequireCompleteType(KWLoc, ArgTy, diag::err_incomplete_type_used_in_type_trait_expr)) return false; } // Make sure the first argument is a complete type. if (Args[0]->getType()->isIncompleteType()) return false; // Make sure the first argument is not an abstract type. CXXRecordDecl *RD = Args[0]->getType()->getAsCXXRecordDecl(); if (RD && RD->isAbstract()) return false; SmallVector<OpaqueValueExpr, 2> OpaqueArgExprs; SmallVector<Expr *, 2> ArgExprs; ArgExprs.reserve(Args.size() - 1); for (unsigned I = 1, N = Args.size(); I != N; ++I) { QualType T = Args[I]->getType(); if (T->isObjectType() || T->isFunctionType()) T = S.Context.getRValueReferenceType(T); OpaqueArgExprs.push_back( OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(), T.getNonLValueExprType(S.Context), Expr::getValueKindForType(T))); } for (Expr &E : OpaqueArgExprs) ArgExprs.push_back(&E); // Perform the initialization in an unevaluated context within a SFINAE // trap at translation unit scope. 
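    // The SFINAE trap below turns any error encountered while checking the
    // initialization into a plain 'false' result rather than a user-visible
    // diagnostic, mirroring how substitution failure is handled in
    // templates.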
EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated); Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true); Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl()); InitializedEntity To(InitializedEntity::InitializeTemporary(Args[0])); InitializationKind InitKind(InitializationKind::CreateDirect(KWLoc, KWLoc, RParenLoc)); InitializationSequence Init(S, To, InitKind, ArgExprs); if (Init.Failed()) return false; ExprResult Result = Init.Perform(S, To, InitKind, ArgExprs); if (Result.isInvalid() || SFINAE.hasErrorOccurred()) return false; if (Kind == clang::TT_IsConstructible) return true; if (Kind == clang::TT_IsNothrowConstructible) return S.canThrow(Result.get()) == CT_Cannot; if (Kind == clang::TT_IsTriviallyConstructible) { // Under Objective-C ARC, if the destination has non-trivial Objective-C // lifetime, this is a non-trivial construction. if (S.getLangOpts().ObjCAutoRefCount && hasNontrivialObjCLifetime(Args[0]->getType().getNonReferenceType())) return false; // The initialization succeeded; now make sure there are no non-trivial // calls. return !Result.get()->hasNonTrivialCall(S.Context); } llvm_unreachable("unhandled type trait"); return false; } default: llvm_unreachable("not a TT"); } return false; } ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc) { QualType ResultType = Context.getLogicalOperationType(); if (Kind <= UTT_Last && !CheckUnaryTypeTraitTypeCompleteness( *this, Kind, KWLoc, Args[0]->getType())) return ExprError(); bool Dependent = false; for (unsigned I = 0, N = Args.size(); I != N; ++I) { if (Args[I]->getType()->isDependentType()) { Dependent = true; break; } } bool Result = false; if (!Dependent) Result = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc); return TypeTraitExpr::Create(Context, ResultType, KWLoc, Kind, Args, RParenLoc, Result); } ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc) { SmallVector<TypeSourceInfo *, 4> ConvertedArgs; ConvertedArgs.reserve(Args.size()); for (unsigned I = 0, N = Args.size(); I != N; ++I) { TypeSourceInfo *TInfo; QualType T = GetTypeFromParser(Args[I], &TInfo); if (!TInfo) TInfo = Context.getTrivialTypeSourceInfo(T, KWLoc); ConvertedArgs.push_back(TInfo); } return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc); } static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT, QualType RhsT, SourceLocation KeyLoc) { assert(!LhsT->isDependentType() && !RhsT->isDependentType() && "Cannot evaluate traits of dependent types"); switch(BTT) { case BTT_IsBaseOf: { // C++0x [meta.rel]p2 // Base is a base class of Derived without regard to cv-qualifiers or // Base and Derived are not unions and name the same class type without // regard to cv-qualifiers. const RecordType *lhsRecord = LhsT->getAs<RecordType>(); if (!lhsRecord) return false; const RecordType *rhsRecord = RhsT->getAs<RecordType>(); if (!rhsRecord) return false; assert(Self.Context.hasSameUnqualifiedType(LhsT, RhsT) == (lhsRecord == rhsRecord)); if (lhsRecord == rhsRecord) return !lhsRecord->getDecl()->isUnion(); // C++0x [meta.rel]p2: // If Base and Derived are class types and are different types // (ignoring possible cv-qualifiers) then Derived shall be a // complete type. 
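    // Illustrative examples: given 'struct B {}; struct D : B {};',
    // __is_base_of(B, D) is true; __is_base_of(T, T) is also true for any
    // non-union class T, even an incomplete one, via the early return above.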
if (Self.RequireCompleteType(KeyLoc, RhsT, diag::err_incomplete_type_used_in_type_trait_expr)) return false; return cast<CXXRecordDecl>(rhsRecord->getDecl()) ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getDecl())); } case BTT_IsSame: return Self.Context.hasSameType(LhsT, RhsT); case BTT_TypeCompatible: return Self.Context.typesAreCompatible(LhsT.getUnqualifiedType(), RhsT.getUnqualifiedType()); case BTT_IsConvertible: case BTT_IsConvertibleTo: { // C++0x [meta.rel]p4: // Given the following function prototype: // // template <class T> // typename add_rvalue_reference<T>::type create(); // // the predicate condition for a template specialization // is_convertible<From, To> shall be satisfied if and only if // the return expression in the following code would be // well-formed, including any implicit conversions to the return // type of the function: // // To test() { // return create<From>(); // } // // Access checking is performed as if in a context unrelated to To and // From. Only the validity of the immediate context of the expression // of the return-statement (including conversions to the return type) // is considered. // // We model the initialization as a copy-initialization of a temporary // of the appropriate type, which for this expression is identical to the // return statement (since NRVO doesn't apply). // Functions aren't allowed to return function or array types. if (RhsT->isFunctionType() || RhsT->isArrayType()) return false; // A return statement in a void function must have void type. if (RhsT->isVoidType()) return LhsT->isVoidType(); // A function definition requires a complete, non-abstract return type. if (Self.RequireCompleteType(KeyLoc, RhsT, 0) || Self.RequireNonAbstractType(KeyLoc, RhsT, 0)) return false; // Compute the result of add_rvalue_reference. if (LhsT->isObjectType() || LhsT->isFunctionType()) LhsT = Self.Context.getRValueReferenceType(LhsT); // Build a fake source and destination for initialization. InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT)); OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context), Expr::getValueKindForType(LhsT)); Expr *FromPtr = &From; InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc, SourceLocation())); // Perform the initialization in an unevaluated context within a SFINAE // trap at translation unit scope. EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated); Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true); Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl()); InitializationSequence Init(Self, To, Kind, FromPtr); if (Init.Failed()) return false; ExprResult Result = Init.Perform(Self, To, Kind, FromPtr); return !Result.isInvalid() && !SFINAE.hasErrorOccurred(); } case BTT_IsNothrowAssignable: case BTT_IsTriviallyAssignable: { // C++11 [meta.unary.prop]p3: // is_trivially_assignable is defined as: // is_assignable<T, U>::value is true and the assignment, as defined by // is_assignable, is known to call no operation that is not trivial // // is_assignable is defined as: // The expression declval<T>() = declval<U>() is well-formed when // treated as an unevaluated operand (Clause 5). // // For both, T and U shall be complete types, (possibly cv-qualified) // void, or arrays of unknown bound. 
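    // Illustrative examples: __is_trivially_assignable(int &, int) is true,
    // while __is_trivially_assignable(const int &, int) is false, because
    // the emulated assignment 'declval<const int &>() = declval<int>()' is
    // ill-formed.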
if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType() && Self.RequireCompleteType(KeyLoc, LhsT, diag::err_incomplete_type_used_in_type_trait_expr)) return false; if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType() && Self.RequireCompleteType(KeyLoc, RhsT, diag::err_incomplete_type_used_in_type_trait_expr)) return false; // cv void is never assignable. if (LhsT->isVoidType() || RhsT->isVoidType()) return false; // Build expressions that emulate the effect of declval<T>() and // declval<U>(). if (LhsT->isObjectType() || LhsT->isFunctionType()) LhsT = Self.Context.getRValueReferenceType(LhsT); if (RhsT->isObjectType() || RhsT->isFunctionType()) RhsT = Self.Context.getRValueReferenceType(RhsT); OpaqueValueExpr Lhs(KeyLoc, LhsT.getNonLValueExprType(Self.Context), Expr::getValueKindForType(LhsT)); OpaqueValueExpr Rhs(KeyLoc, RhsT.getNonLValueExprType(Self.Context), Expr::getValueKindForType(RhsT)); // Attempt the assignment in an unevaluated context within a SFINAE // trap at translation unit scope. EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated); Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true); Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl()); ExprResult Result = Self.BuildBinOp(/*S=*/nullptr, KeyLoc, BO_Assign, &Lhs, &Rhs); if (Result.isInvalid() || SFINAE.hasErrorOccurred()) return false; if (BTT == BTT_IsNothrowAssignable) return Self.canThrow(Result.get()) == CT_Cannot; if (BTT == BTT_IsTriviallyAssignable) { // Under Objective-C ARC, if the destination has non-trivial Objective-C // lifetime, this is a non-trivial assignment. if (Self.getLangOpts().ObjCAutoRefCount && hasNontrivialObjCLifetime(LhsT.getNonReferenceType())) return false; return !Result.get()->hasNonTrivialCall(Self.Context); } llvm_unreachable("unhandled type trait"); return false; } default: llvm_unreachable("not a BTT"); } llvm_unreachable("Unknown type trait or not implemented"); } ExprResult Sema::ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType Ty, Expr* DimExpr, SourceLocation RParen) { TypeSourceInfo *TSInfo; QualType T = GetTypeFromParser(Ty, &TSInfo); if (!TSInfo) TSInfo = Context.getTrivialTypeSourceInfo(T); return BuildArrayTypeTrait(ATT, KWLoc, TSInfo, DimExpr, RParen); } static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT, QualType T, Expr *DimExpr, SourceLocation KeyLoc) { assert(!T->isDependentType() && "Cannot evaluate traits of dependent type"); switch(ATT) { case ATT_ArrayRank: if (T->isArrayType()) { unsigned Dim = 0; while (const ArrayType *AT = Self.Context.getAsArrayType(T)) { ++Dim; T = AT->getElementType(); } return Dim; } return 0; case ATT_ArrayExtent: { llvm::APSInt Value; uint64_t Dim; if (Self.VerifyIntegerConstantExpression(DimExpr, &Value, diag::err_dimension_expr_not_constant_integer, false).isInvalid()) return 0; if (Value.isSigned() && Value.isNegative()) { Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer) << DimExpr->getSourceRange(); return 0; } Dim = Value.getLimitedValue(); if (T->isArrayType()) { unsigned D = 0; bool Matched = false; while (const ArrayType *AT = Self.Context.getAsArrayType(T)) { if (Dim == D) { Matched = true; break; } ++D; T = AT->getElementType(); } if (Matched && T->isArrayType()) { if (const ConstantArrayType *CAT = Self.Context.getAsConstantArrayType(T)) return CAT->getSize().getLimitedValue(); } } return 0; } } llvm_unreachable("Unknown type trait or not implemented"); } ExprResult Sema::BuildArrayTypeTrait(ArrayTypeTrait ATT, 
SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr* DimExpr, SourceLocation RParen) { QualType T = TSInfo->getType(); // FIXME: This should likely be tracked as an APInt to remove any host // assumptions about the width of size_t on the target. uint64_t Value = 0; if (!T->isDependentType()) Value = EvaluateArrayTypeTrait(*this, ATT, T, DimExpr, KWLoc); // While the specification for these traits from the Embarcadero C++ // compiler's documentation says the return type is 'unsigned int', Clang // returns 'size_t'. On Windows, the primary platform for the Embarcadero // compiler, there is no difference. On several other platforms this is an // important distinction. return new (Context) ArrayTypeTraitExpr(KWLoc, ATT, TSInfo, Value, DimExpr, RParen, Context.getSizeType()); } ExprResult Sema::ActOnExpressionTrait(ExpressionTrait ET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen) { // If error parsing the expression, ignore. if (!Queried) return ExprError(); ExprResult Result = BuildExpressionTrait(ET, KWLoc, Queried, RParen); return Result; } static bool EvaluateExpressionTrait(ExpressionTrait ET, Expr *E) { switch (ET) { case ET_IsLValueExpr: return E->isLValue(); case ET_IsRValueExpr: return E->isRValue(); } llvm_unreachable("Expression trait not covered by switch"); } ExprResult Sema::BuildExpressionTrait(ExpressionTrait ET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen) { if (Queried->isTypeDependent()) { // Delay type-checking for type-dependent expressions. } else if (Queried->getType()->isPlaceholderType()) { ExprResult PE = CheckPlaceholderExpr(Queried); if (PE.isInvalid()) return ExprError(); return BuildExpressionTrait(ET, KWLoc, PE.get(), RParen); } bool Value = EvaluateExpressionTrait(ET, Queried); return new (Context) ExpressionTraitExpr(KWLoc, ET, Queried, Value, RParen, Context.BoolTy); } QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation Loc, bool isIndirect) { assert(!LHS.get()->getType()->isPlaceholderType() && !RHS.get()->getType()->isPlaceholderType() && "placeholders should have been weeded out by now"); // The LHS undergoes lvalue conversions if this is ->*. if (isIndirect) { LHS = DefaultLvalueConversion(LHS.get()); if (LHS.isInvalid()) return QualType(); } // The RHS always undergoes lvalue conversions. RHS = DefaultLvalueConversion(RHS.get()); if (RHS.isInvalid()) return QualType(); const char *OpSpelling = isIndirect ? "->*" : ".*"; // C++ 5.5p2 // The binary operator .* [p3: ->*] binds its second operand, which shall // be of type "pointer to member of T" (where T is a completely-defined // class type) [...] QualType RHSType = RHS.get()->getType(); const MemberPointerType *MemPtr = RHSType->getAs<MemberPointerType>(); if (!MemPtr) { Diag(Loc, diag::err_bad_memptr_rhs) << OpSpelling << RHSType << RHS.get()->getSourceRange(); return QualType(); } QualType Class(MemPtr->getClass(), 0); // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the // member pointer points must be completely-defined. However, there is no // reason for this semantic distinction, and the rule is not enforced by // other compilers. Therefore, we do not check this property, as it is // likely to be considered a defect. // C++ 5.5p2 // [...] to its first operand, which shall be of class T or of a class of // which T is an unambiguous and accessible base class. 
  //   [p3: a pointer to such a class]
  QualType LHSType = LHS.get()->getType();
  if (isIndirect) {
    if (const PointerType *Ptr = LHSType->getAs<PointerType>())
      LHSType = Ptr->getPointeeType();
    else {
      Diag(Loc, diag::err_bad_memptr_lhs)
        << OpSpelling << 1 << LHSType
        << FixItHint::CreateReplacement(SourceRange(Loc), ".*");
      return QualType();
    }
  }

  if (!Context.hasSameUnqualifiedType(Class, LHSType)) {
    // If we want to check the hierarchy, we need a complete type.
    if (RequireCompleteType(Loc, LHSType, diag::err_bad_memptr_lhs,
                            OpSpelling, (int)isIndirect)) {
      return QualType();
    }

    if (!IsDerivedFrom(LHSType, Class)) {
      Diag(Loc, diag::err_bad_memptr_lhs) << OpSpelling
        << (int)isIndirect << LHS.get()->getType();
      return QualType();
    }

    CXXCastPath BasePath;
    if (CheckDerivedToBaseConversion(LHSType, Class, Loc,
                                     SourceRange(LHS.get()->getLocStart(),
                                                 RHS.get()->getLocEnd()),
                                     &BasePath))
      return QualType();

    // Cast LHS to type of use.
    QualType UseType = isIndirect ? Context.getPointerType(Class) : Class;
    ExprValueKind VK = isIndirect ? VK_RValue : LHS.get()->getValueKind();
    LHS = ImpCastExprToType(LHS.get(), UseType, CK_DerivedToBase, VK,
                            &BasePath);
  }

  if (isa<CXXScalarValueInitExpr>(RHS.get()->IgnoreParens())) {
    // Diagnose the use of a pointer-to-member type as the functional cast
    // in a pointer-to-member expression.
    Diag(Loc, diag::err_pointer_to_member_type) << isIndirect;
    return QualType();
  }

  // C++ 5.5p2
  //   The result is an object or a function of the type specified by the
  //   second operand.
  // The cv qualifiers are the union of those in the pointer and the left side,
  // in accordance with 5.5p5 and 5.2.5.
  QualType Result = MemPtr->getPointeeType();
  Result = Context.getCVRQualifiedType(Result, LHSType.getCVRQualifiers());

  // C++0x [expr.mptr.oper]p6:
  //   In a .* expression whose object expression is an rvalue, the program is
  //   ill-formed if the second operand is a pointer to member function with
  //   ref-qualifier &. In a ->* expression or in a .* expression whose object
  //   expression is an lvalue, the program is ill-formed if the second operand
  //   is a pointer to member function with ref-qualifier &&.
  if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
    switch (Proto->getRefQualifier()) {
    case RQ_None:
      // Do nothing
      break;

    case RQ_LValue:
      if (!isIndirect && !LHS.get()->Classify(Context).isLValue())
        Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
          << RHSType << 1 << LHS.get()->getSourceRange();
      break;

    case RQ_RValue:
      if (isIndirect || !LHS.get()->Classify(Context).isRValue())
        Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
          << RHSType << 0 << LHS.get()->getSourceRange();
      break;
    }
  }

  // C++ [expr.mptr.oper]p6:
  //   The result of a .* expression whose second operand is a pointer
  //   to a data member is of the same value category as its
  //   first operand. The result of a .* expression whose second
  //   operand is a pointer to a member function is a prvalue. The
  //   result of an ->* expression is an lvalue if its second operand
  //   is a pointer to data member and a prvalue otherwise.
  if (Result->isFunctionType()) {
    VK = VK_RValue;
    return Context.BoundMemberTy;
  } else if (isIndirect) {
    VK = VK_LValue;
  } else {
    VK = LHS.get()->getValueKind();
  }

  return Result;
}

/// \brief Try to convert a type to another according to C++0x 5.16p3.
///
/// This is part of the parameter validation for the ? operator. If either
/// value operand is a class type, the two operands are attempted to be
/// converted to each other. This function does the conversion in one
/// direction.
/// It returns true if the program is ill-formed and has already been diagnosed /// as such. static bool TryClassUnification(Sema &Self, Expr *From, Expr *To, SourceLocation QuestionLoc, bool &HaveConversion, QualType &ToType) { HaveConversion = false; ToType = To->getType(); InitializationKind Kind = InitializationKind::CreateCopy(To->getLocStart(), SourceLocation()); // C++0x 5.16p3 // The process for determining whether an operand expression E1 of type T1 // can be converted to match an operand expression E2 of type T2 is defined // as follows: // -- If E2 is an lvalue: bool ToIsLvalue = To->isLValue(); if (ToIsLvalue) { // E1 can be converted to match E2 if E1 can be implicitly converted to // type "lvalue reference to T2", subject to the constraint that in the // conversion the reference must bind directly to E1. QualType T = Self.Context.getLValueReferenceType(ToType); InitializedEntity Entity = InitializedEntity::InitializeTemporary(T); InitializationSequence InitSeq(Self, Entity, Kind, From); if (InitSeq.isDirectReferenceBinding()) { ToType = T; HaveConversion = true; return false; } if (InitSeq.isAmbiguous()) return InitSeq.Diagnose(Self, Entity, Kind, From); } // -- If E2 is an rvalue, or if the conversion above cannot be done: // -- if E1 and E2 have class type, and the underlying class types are // the same or one is a base class of the other: QualType FTy = From->getType(); QualType TTy = To->getType(); const RecordType *FRec = FTy->getAs<RecordType>(); const RecordType *TRec = TTy->getAs<RecordType>(); bool FDerivedFromT = FRec && TRec && FRec != TRec && Self.IsDerivedFrom(FTy, TTy); if (FRec && TRec && (FRec == TRec || FDerivedFromT || Self.IsDerivedFrom(TTy, FTy))) { // E1 can be converted to match E2 if the class of T2 is the // same type as, or a base class of, the class of T1, and // [cv2 > cv1]. if (FRec == TRec || FDerivedFromT) { if (TTy.isAtLeastAsQualifiedAs(FTy)) { InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy); InitializationSequence InitSeq(Self, Entity, Kind, From); if (InitSeq) { HaveConversion = true; return false; } if (InitSeq.isAmbiguous()) return InitSeq.Diagnose(Self, Entity, Kind, From); } } return false; } // -- Otherwise: E1 can be converted to match E2 if E1 can be // implicitly converted to the type that expression E2 would have // if E2 were converted to an rvalue (or the type it has, if E2 is // an rvalue). // // This actually refers very narrowly to the lvalue-to-rvalue conversion, not // to the array-to-pointer or function-to-pointer conversions. if (!TTy->getAs<TagType>()) TTy = TTy.getUnqualifiedType(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy); InitializationSequence InitSeq(Self, Entity, Kind, From); HaveConversion = !InitSeq.Failed(); ToType = TTy; if (InitSeq.isAmbiguous()) return InitSeq.Diagnose(Self, Entity, Kind, From); return false; } /// \brief Try to find a common type for two according to C++0x 5.16p5. /// /// This is part of the parameter validation for the ? operator. If either /// value operand is a class type, overload resolution is used to find a /// conversion to a common type. 
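/// For example (illustrative), when one arm has class type A and the other
/// a class type convertible to A, the built-in operator?: candidates added
/// below let overload resolution settle on A and convert both arms to it.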
static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc) { Expr *Args[2] = { LHS.get(), RHS.get() }; OverloadCandidateSet CandidateSet(QuestionLoc, OverloadCandidateSet::CSK_Operator); Self.AddBuiltinOperatorCandidates(OO_Conditional, QuestionLoc, Args, CandidateSet); OverloadCandidateSet::iterator Best; switch (CandidateSet.BestViableFunction(Self, QuestionLoc, Best)) { case OR_Success: { // We found a match. Perform the conversions on the arguments and move on. ExprResult LHSRes = Self.PerformImplicitConversion(LHS.get(), Best->BuiltinTypes.ParamTypes[0], Best->Conversions[0], Sema::AA_Converting); if (LHSRes.isInvalid()) break; LHS = LHSRes; ExprResult RHSRes = Self.PerformImplicitConversion(RHS.get(), Best->BuiltinTypes.ParamTypes[1], Best->Conversions[1], Sema::AA_Converting); if (RHSRes.isInvalid()) break; RHS = RHSRes; if (Best->Function) Self.MarkFunctionReferenced(QuestionLoc, Best->Function); return false; } case OR_No_Viable_Function: // Emit a better diagnostic if one of the expressions is a null pointer // constant and the other is a pointer type. In this case, the user most // likely forgot to take the address of the other expression. if (Self.DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc)) return true; Self.Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return true; case OR_Ambiguous: Self.Diag(QuestionLoc, diag::err_conditional_ambiguous_ovl) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); // FIXME: Print the possible common types by printing the return types of // the viable candidates. break; case OR_Deleted: llvm_unreachable("Conditional operator has only built-in overloads"); } return true; } /// \brief Perform an "extended" implicit conversion as returned by /// TryClassUnification. static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) { InitializedEntity Entity = InitializedEntity::InitializeTemporary(T); InitializationKind Kind = InitializationKind::CreateCopy(E.get()->getLocStart(), SourceLocation()); Expr *Arg = E.get(); InitializationSequence InitSeq(Self, Entity, Kind, Arg); ExprResult Result = InitSeq.Perform(Self, Entity, Kind, Arg); if (Result.isInvalid()) return true; E = Result; return false; } /// \brief Check the operands of ?: under C++ semantics. /// /// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y /// extension. In this case, LHS == Cond. (But they're not aliases.) QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc) { // FIXME: Handle C99's complex types, vector types, block pointers and Obj-C++ // interface pointers. // C++11 [expr.cond]p1 // The first expression is contextually converted to bool. if (!Cond.get()->isTypeDependent()) { ExprResult CondRes = CheckCXXBooleanCondition(Cond.get()); if (CondRes.isInvalid()) return QualType(); Cond = CondRes; } // Assume r-value. VK = VK_RValue; OK = OK_Ordinary; // Either of the arguments dependent? if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent()) return Context.DependentTy; // C++11 [expr.cond]p2 // If either the second or the third operand has type (cv) void, ... 
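  // Illustrative examples: 'cond ? throw 0 : 42' has type int and takes its
  // value category from the non-throwing arm, while a conditional whose
  // second and third operands both call void functions has type void.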
QualType LTy = LHS.get()->getType(); QualType RTy = RHS.get()->getType(); bool LVoid = LTy->isVoidType(); bool RVoid = RTy->isVoidType(); if (LVoid || RVoid) { // ... one of the following shall hold: // -- The second or the third operand (but not both) is a (possibly // parenthesized) throw-expression; the result is of the type // and value category of the other. bool LThrow = isa<CXXThrowExpr>(LHS.get()->IgnoreParenImpCasts()); bool RThrow = isa<CXXThrowExpr>(RHS.get()->IgnoreParenImpCasts()); if (LThrow != RThrow) { Expr *NonThrow = LThrow ? RHS.get() : LHS.get(); VK = NonThrow->getValueKind(); // DR (no number yet): the result is a bit-field if the // non-throw-expression operand is a bit-field. OK = NonThrow->getObjectKind(); return NonThrow->getType(); } // -- Both the second and third operands have type void; the result is of // type void and is a prvalue. if (LVoid && RVoid) return Context.VoidTy; // Neither holds, error. Diag(QuestionLoc, diag::err_conditional_void_nonvoid) << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1) << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // Neither is void. // C++11 [expr.cond]p3 // Otherwise, if the second and third operand have different types, and // either has (cv) class type [...] an attempt is made to convert each of // those operands to the type of the other. if (!Context.hasSameType(LTy, RTy) && (LTy->isRecordType() || RTy->isRecordType())) { // These return true if a single direction is already ambiguous. QualType L2RType, R2LType; bool HaveL2R, HaveR2L; if (TryClassUnification(*this, LHS.get(), RHS.get(), QuestionLoc, HaveL2R, L2RType)) return QualType(); if (TryClassUnification(*this, RHS.get(), LHS.get(), QuestionLoc, HaveR2L, R2LType)) return QualType(); // If both can be converted, [...] the program is ill-formed. if (HaveL2R && HaveR2L) { Diag(QuestionLoc, diag::err_conditional_ambiguous) << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } // If exactly one conversion is possible, that conversion is applied to // the chosen operand and the converted operands are used in place of the // original operands for the remainder of this section. if (HaveL2R) { if (ConvertForConditional(*this, LHS, L2RType) || LHS.isInvalid()) return QualType(); LTy = LHS.get()->getType(); } else if (HaveR2L) { if (ConvertForConditional(*this, RHS, R2LType) || RHS.isInvalid()) return QualType(); RTy = RHS.get()->getType(); } } // C++11 [expr.cond]p3 // if both are glvalues of the same value category and the same type except // for cv-qualification, an attempt is made to convert each of those // operands to the type of the other. ExprValueKind LVK = LHS.get()->getValueKind(); ExprValueKind RVK = RHS.get()->getValueKind(); if (!Context.hasSameType(LTy, RTy) && Context.hasSameUnqualifiedType(LTy, RTy) && LVK == RVK && LVK != VK_RValue) { // Since the unqualified types are reference-related and we require the // result to be as if a reference bound directly, the only conversion // we can perform is to add cv-qualifiers. 
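    // Illustrative example: given 'int &r' and 'const int &cr',
    // 'cond ? r : cr' adds 'const' to the first operand and yields an
    // lvalue of type 'const int'.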
Qualifiers LCVR = Qualifiers::fromCVRMask(LTy.getCVRQualifiers()); Qualifiers RCVR = Qualifiers::fromCVRMask(RTy.getCVRQualifiers()); if (RCVR.isStrictSupersetOf(LCVR)) { LHS = ImpCastExprToType(LHS.get(), RTy, CK_NoOp, LVK); LTy = LHS.get()->getType(); } else if (LCVR.isStrictSupersetOf(RCVR)) { RHS = ImpCastExprToType(RHS.get(), LTy, CK_NoOp, RVK); RTy = RHS.get()->getType(); } } // C++11 [expr.cond]p4 // If the second and third operands are glvalues of the same value // category and have the same type, the result is of that type and // value category and it is a bit-field if the second or the third // operand is a bit-field, or if both are bit-fields. // We only extend this to bitfields, not to the crazy other kinds of // l-values. bool Same = Context.hasSameType(LTy, RTy); if (Same && LVK == RVK && LVK != VK_RValue && LHS.get()->isOrdinaryOrBitFieldObject() && RHS.get()->isOrdinaryOrBitFieldObject()) { VK = LHS.get()->getValueKind(); if (LHS.get()->getObjectKind() == OK_BitField || RHS.get()->getObjectKind() == OK_BitField) OK = OK_BitField; return LTy; } // C++11 [expr.cond]p5 // Otherwise, the result is a prvalue. If the second and third operands // do not have the same type, and either has (cv) class type, ... if (!Same && (LTy->isRecordType() || RTy->isRecordType())) { // ... overload resolution is used to determine the conversions (if any) // to be applied to the operands. If the overload resolution fails, the // program is ill-formed. if (FindConditionalOverload(*this, LHS, RHS, QuestionLoc)) return QualType(); } // C++11 [expr.cond]p6 // Lvalue-to-rvalue, array-to-pointer, and function-to-pointer standard // conversions are performed on the second and third operands. LHS = DefaultFunctionArrayLvalueConversion(LHS.get()); RHS = DefaultFunctionArrayLvalueConversion(RHS.get()); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); LTy = LHS.get()->getType(); RTy = RHS.get()->getType(); // After those conversions, one of the following shall hold: // -- The second and third operands have the same type; the result // is of that type. If the operands have class type, the result // is a prvalue temporary of the result type, which is // copy-initialized from either the second operand or the third // operand depending on the value of the first operand. if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) { if (LTy->isRecordType()) { // The operands have class type. Make a temporary copy. if (RequireNonAbstractType(QuestionLoc, LTy, diag::err_allocation_of_abstract_type)) return QualType(); InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy); ExprResult LHSCopy = PerformCopyInitialization(Entity, SourceLocation(), LHS); if (LHSCopy.isInvalid()) return QualType(); ExprResult RHSCopy = PerformCopyInitialization(Entity, SourceLocation(), RHS); if (RHSCopy.isInvalid()) return QualType(); LHS = LHSCopy; RHS = RHSCopy; } return LTy; } // Extension: conditional operator involving vector types. if (LTy->isVectorType() || RTy->isVectorType()) return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false, /*AllowBothBool*/true, /*AllowBoolConversions*/false); // -- The second and third operands have arithmetic or enumeration type; // the usual arithmetic conversions are performed to bring them to a // common type, and the result is of that type. 
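  // For example, 'cond ? 1 : 2.5' yields a double prvalue after the usual
  // arithmetic conversions.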
if (LTy->isArithmeticType() && RTy->isArithmeticType()) { QualType ResTy = UsualArithmeticConversions(LHS, RHS); if (LHS.isInvalid() || RHS.isInvalid()) return QualType(); LHS = ImpCastExprToType(LHS.get(), ResTy, PrepareScalarCast(LHS, ResTy)); RHS = ImpCastExprToType(RHS.get(), ResTy, PrepareScalarCast(RHS, ResTy)); return ResTy; } // -- The second and third operands have pointer type, or one has pointer // type and the other is a null pointer constant, or both are null // pointer constants, at least one of which is non-integral; pointer // conversions and qualification conversions are performed to bring them // to their composite pointer type. The result is of the composite // pointer type. // -- The second and third operands have pointer to member type, or one has // pointer to member type and the other is a null pointer constant; // pointer to member conversions and qualification conversions are // performed to bring them to a common type, whose cv-qualification // shall match the cv-qualification of either the second or the third // operand. The result is of the common type. bool NonStandardCompositeType = false; QualType Composite = FindCompositePointerType(QuestionLoc, LHS, RHS, isSFINAEContext() ? nullptr : &NonStandardCompositeType); if (!Composite.isNull()) { if (NonStandardCompositeType) Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands_nonstandard) << LTy << RTy << Composite << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return Composite; } // Similarly, attempt to find composite type of two objective-c pointers. Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc); if (!Composite.isNull()) return Composite; // Check if we are using a null with a non-pointer type. if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc)) return QualType(); Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands) << LHS.get()->getType() << RHS.get()->getType() << LHS.get()->getSourceRange() << RHS.get()->getSourceRange(); return QualType(); } /// \brief Find a merged pointer type and convert the two expressions to it. /// /// This finds the composite pointer type (or member pointer type) for @p E1 /// and @p E2 according to C++11 5.9p2. It converts both expressions to this /// type and returns it. /// It does not emit diagnostics. /// /// \param Loc The location of the operator requiring these two expressions to /// be converted to the composite pointer type. /// /// If \p NonStandardCompositeType is non-NULL, then we are permitted to find /// a non-standard (but still sane) composite type to which both expressions /// can be converted. When such a type is chosen, \c *NonStandardCompositeType /// will be set true. QualType Sema::FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType) { if (NonStandardCompositeType) *NonStandardCompositeType = false; assert(getLangOpts().CPlusPlus && "This function assumes C++"); QualType T1 = E1->getType(), T2 = E2->getType(); // C++11 5.9p2 // Pointer conversions and qualification conversions are performed on // pointer operands to bring them to their composite pointer type. If // one operand is a null pointer constant, the composite pointer type is // std::nullptr_t if the other operand is also a null pointer constant or, // if the other operand is a pointer, the type of the other operand. 
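  // Illustrative examples: for 'Derived *d' and 'Base *b' as the two arms of
  // a conditional, the composite pointer type is 'Base *'; for a pointer 'p'
  // and a null pointer constant, it is simply the type of 'p'.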
  if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
      !T2->isAnyPointerType() && !T2->isMemberPointerType()) {
    if (T1->isNullPtrType() &&
        E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
      E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
      return T1;
    }
    if (T2->isNullPtrType() &&
        E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
      E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
      return T2;
    }
    return QualType();
  }

  if (E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
    if (T2->isMemberPointerType())
      E1 = ImpCastExprToType(E1, T2, CK_NullToMemberPointer).get();
    else
      E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
    return T2;
  }
  if (E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
    if (T1->isMemberPointerType())
      E2 = ImpCastExprToType(E2, T1, CK_NullToMemberPointer).get();
    else
      E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
    return T1;
  }

  // Now both have to be pointers or member pointers.
  if ((!T1->isPointerType() && !T1->isMemberPointerType()) ||
      (!T2->isPointerType() && !T2->isMemberPointerType()))
    return QualType();

  // Otherwise, if one of the operands has type "pointer to cv1 void," then
  // the other has type "pointer to cv2 T" and the composite pointer type is
  // "pointer to cv12 void," where cv12 is the union of cv1 and cv2.
  // Otherwise, the composite pointer type is a pointer type similar to the
  // type of one of the operands, with a cv-qualification signature that is
  // the union of the cv-qualification signatures of the operand types.
  // In practice, the first part here is redundant; it's subsumed by the
  // second. What we do here is build the two possible composite types and
  // try the conversions in both directions. If only one works, or if the two
  // composite types are the same, we have succeeded.
  // FIXME: extended qualifiers?
  typedef SmallVector<unsigned, 4> QualifierVector;
  QualifierVector QualifierUnion;
  typedef SmallVector<std::pair<const Type *, const Type *>, 4>
      ContainingClassVector;
  ContainingClassVector MemberOfClass;
  QualType Composite1 = Context.getCanonicalType(T1),
           Composite2 = Context.getCanonicalType(T2);
  unsigned NeedConstBefore = 0;
  do {
    const PointerType *Ptr1, *Ptr2;
    if ((Ptr1 = Composite1->getAs<PointerType>()) &&
        (Ptr2 = Composite2->getAs<PointerType>())) {
      Composite1 = Ptr1->getPointeeType();
      Composite2 = Ptr2->getPointeeType();

      // If we're allowed to create a non-standard composite type, keep track
      // of where we need to fill in additional 'const' qualifiers.
      if (NonStandardCompositeType &&
          Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
        NeedConstBefore = QualifierUnion.size();

      QualifierUnion.push_back(
          Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
      MemberOfClass.push_back(std::make_pair(nullptr, nullptr));
      continue;
    }

    const MemberPointerType *MemPtr1, *MemPtr2;
    if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
        (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
      Composite1 = MemPtr1->getPointeeType();
      Composite2 = MemPtr2->getPointeeType();

      // If we're allowed to create a non-standard composite type, keep track
      // of where we need to fill in additional 'const' qualifiers.
if (NonStandardCompositeType && Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers()) NeedConstBefore = QualifierUnion.size(); QualifierUnion.push_back( Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers()); MemberOfClass.push_back(std::make_pair(MemPtr1->getClass(), MemPtr2->getClass())); continue; } // FIXME: block pointer types? // Cannot unwrap any more types. break; } while (true); if (NeedConstBefore && NonStandardCompositeType) { // Extension: Add 'const' to qualifiers that come before the first qualifier // mismatch, so that our (non-standard!) composite type meets the // requirements of C++ [conv.qual]p4 bullet 3. for (unsigned I = 0; I != NeedConstBefore; ++I) { if ((QualifierUnion[I] & Qualifiers::Const) == 0) { QualifierUnion[I] = QualifierUnion[I] | Qualifiers::Const; *NonStandardCompositeType = true; } } } // Rewrap the composites as pointers or member pointers with the union CVRs. ContainingClassVector::reverse_iterator MOC = MemberOfClass.rbegin(); for (QualifierVector::reverse_iterator I = QualifierUnion.rbegin(), E = QualifierUnion.rend(); I != E; (void)++I, ++MOC) { Qualifiers Quals = Qualifiers::fromCVRMask(*I); if (MOC->first && MOC->second) { // Rebuild member pointer type Composite1 = Context.getMemberPointerType( Context.getQualifiedType(Composite1, Quals), MOC->first); Composite2 = Context.getMemberPointerType( Context.getQualifiedType(Composite2, Quals), MOC->second); } else { // Rebuild pointer type Composite1 = Context.getPointerType(Context.getQualifiedType(Composite1, Quals)); Composite2 = Context.getPointerType(Context.getQualifiedType(Composite2, Quals)); } } // Try to convert to the first composite pointer type. InitializedEntity Entity1 = InitializedEntity::InitializeTemporary(Composite1); InitializationKind Kind = InitializationKind::CreateCopy(Loc, SourceLocation()); InitializationSequence E1ToC1(*this, Entity1, Kind, E1); InitializationSequence E2ToC1(*this, Entity1, Kind, E2); if (E1ToC1 && E2ToC1) { // Conversion to Composite1 is viable. if (!Context.hasSameType(Composite1, Composite2)) { // Composite2 is a different type from Composite1. Check whether // Composite2 is also viable. InitializedEntity Entity2 = InitializedEntity::InitializeTemporary(Composite2); InitializationSequence E1ToC2(*this, Entity2, Kind, E1); InitializationSequence E2ToC2(*this, Entity2, Kind, E2); if (E1ToC2 && E2ToC2) { // Both Composite1 and Composite2 are viable and are different; // this is an ambiguity. return QualType(); } } // Convert E1 to Composite1 ExprResult E1Result = E1ToC1.Perform(*this, Entity1, Kind, E1); if (E1Result.isInvalid()) return QualType(); E1 = E1Result.getAs<Expr>(); // Convert E2 to Composite1 ExprResult E2Result = E2ToC1.Perform(*this, Entity1, Kind, E2); if (E2Result.isInvalid()) return QualType(); E2 = E2Result.getAs<Expr>(); return Composite1; } // Check whether Composite2 is viable. 
InitializedEntity Entity2 = InitializedEntity::InitializeTemporary(Composite2); InitializationSequence E1ToC2(*this, Entity2, Kind, E1); InitializationSequence E2ToC2(*this, Entity2, Kind, E2); if (!E1ToC2 || !E2ToC2) return QualType(); // Convert E1 to Composite2 ExprResult E1Result = E1ToC2.Perform(*this, Entity2, Kind, E1); if (E1Result.isInvalid()) return QualType(); E1 = E1Result.getAs<Expr>(); // Convert E2 to Composite2 ExprResult E2Result = E2ToC2.Perform(*this, Entity2, Kind, E2); if (E2Result.isInvalid()) return QualType(); E2 = E2Result.getAs<Expr>(); return Composite2; } ExprResult Sema::MaybeBindToTemporary(Expr *E) { if (!E) return ExprError(); assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?"); // If the result is a glvalue, we shouldn't bind it. if (!E->isRValue()) return E; // In ARC, calls that return a retainable type can return retained, // in which case we have to insert a consuming cast. if (getLangOpts().ObjCAutoRefCount && E->getType()->isObjCRetainableType()) { bool ReturnsRetained; // For actual calls, we compute this by examining the type of the // called value. if (CallExpr *Call = dyn_cast<CallExpr>(E)) { Expr *Callee = Call->getCallee()->IgnoreParens(); QualType T = Callee->getType(); if (T == Context.BoundMemberTy) { // Handle pointer-to-members. if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Callee)) T = BinOp->getRHS()->getType(); else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Callee)) T = Mem->getMemberDecl()->getType(); } if (const PointerType *Ptr = T->getAs<PointerType>()) T = Ptr->getPointeeType(); else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>()) T = Ptr->getPointeeType(); else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>()) T = MemPtr->getPointeeType(); const FunctionType *FTy = T->getAs<FunctionType>(); assert(FTy && "call to value not of function type?"); ReturnsRetained = FTy->getExtInfo().getProducesResult(); // ActOnStmtExpr arranges things so that StmtExprs of retainable // type always produce a +1 object. } else if (isa<StmtExpr>(E)) { ReturnsRetained = true; // We hit this case with the lambda conversion-to-block optimization; // we don't want any extra casts here. } else if (isa<CastExpr>(E) && isa<BlockExpr>(cast<CastExpr>(E)->getSubExpr())) { return E; // For message sends and property references, we try to find an // actual method. FIXME: we should infer retention by selector in // cases where we don't have an actual method. } else { ObjCMethodDecl *D = nullptr; if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(E)) { D = Send->getMethodDecl(); } else if (ObjCBoxedExpr *BoxedExpr = dyn_cast<ObjCBoxedExpr>(E)) { D = BoxedExpr->getBoxingMethod(); } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(E)) { D = ArrayLit->getArrayWithObjectsMethod(); } else if (ObjCDictionaryLiteral *DictLit = dyn_cast<ObjCDictionaryLiteral>(E)) { D = DictLit->getDictWithObjectsMethod(); } ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>()); // Don't do reclaims on performSelector calls; despite their // return type, the invoked method doesn't necessarily actually // return an object. if (!ReturnsRetained && D && D->getMethodFamily() == OMF_performSelector) return E; } // Don't reclaim an object of Class type. if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType()) return E; ExprNeedsCleanups = true; CastKind ck = (ReturnsRetained ? 
CK_ARCConsumeObject : CK_ARCReclaimReturnedObject); return ImplicitCastExpr::Create(Context, E->getType(), ck, E, nullptr, VK_RValue); } if (!getLangOpts().CPlusPlus) return E; // Search for the base element type (cf. ASTContext::getBaseElementType) with // a fast path for the common case that the type is directly a RecordType. const Type *T = Context.getCanonicalType(E->getType().getTypePtr()); const RecordType *RT = nullptr; while (!RT) { switch (T->getTypeClass()) { case Type::Record: RT = cast<RecordType>(T); break; case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: case Type::DependentSizedArray: T = cast<ArrayType>(T)->getElementType().getTypePtr(); break; default: return E; } } // That should be enough to guarantee that this type is complete, if we're // not processing a decltype expression. CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); if (RD->isInvalidDecl() || RD->isDependentContext()) return E; bool IsDecltype = ExprEvalContexts.back().IsDecltype; CXXDestructorDecl *Destructor = IsDecltype ? nullptr : LookupDestructor(RD); if (Destructor) { MarkFunctionReferenced(E->getExprLoc(), Destructor); CheckDestructorAccess(E->getExprLoc(), Destructor, PDiag(diag::err_access_dtor_temp) << E->getType()); if (DiagnoseUseOfDecl(Destructor, E->getExprLoc())) return ExprError(); // If destructor is trivial, we can avoid the extra copy. if (Destructor->isTrivial()) return E; // We need a cleanup, but we don't need to remember the temporary. ExprNeedsCleanups = true; } CXXTemporary *Temp = CXXTemporary::Create(Context, Destructor); CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(Context, Temp, E); if (IsDecltype) ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Bind); return Bind; } ExprResult Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) { if (SubExpr.isInvalid()) return ExprError(); return MaybeCreateExprWithCleanups(SubExpr.get()); } Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) { assert(SubExpr && "subexpression can't be null!"); CleanupVarDeclMarking(); unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects; assert(ExprCleanupObjects.size() >= FirstCleanup); assert(ExprNeedsCleanups || ExprCleanupObjects.size() == FirstCleanup); if (!ExprNeedsCleanups) return SubExpr; auto Cleanups = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup, ExprCleanupObjects.size() - FirstCleanup); Expr *E = ExprWithCleanups::Create(Context, SubExpr, Cleanups); DiscardCleanupsInEvaluationContext(); return E; } Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) { assert(SubStmt && "sub-statement can't be null!"); CleanupVarDeclMarking(); if (!ExprNeedsCleanups) return SubStmt; // FIXME: In order to attach the temporaries, wrap the statement into // a StmtExpr; currently this is only used for asm statements. // This is hacky, either create a new CXXStmtWithTemporaries statement or // a new AsmStmtWithTemporaries. CompoundStmt *CompStmt = new (Context) CompoundStmt(Context, SubStmt, SourceLocation(), SourceLocation()); Expr *E = new (Context) StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation()); return MaybeCreateExprWithCleanups(E); } /// Process the expression contained within a decltype. For such expressions, /// certain semantic checks on temporaries are delayed until this point, and /// are omitted for the 'topmost' call in the decltype expression. If the /// topmost call bound a temporary, strip that temporary off the expression. 
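// (Illustrative example, not from the original source: these delayed checks are what allow
// 'struct Incomplete; Incomplete make(); typedef decltype(make()) T;' to compile; no
// temporary is bound for the topmost call, so no destructor or complete type is required.)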
ExprResult Sema::ActOnDecltypeExpression(Expr *E) { assert(ExprEvalContexts.back().IsDecltype && "not in a decltype expression"); // C++11 [expr.call]p11: // If a function call is a prvalue of object type, // -- if the function call is either // -- the operand of a decltype-specifier, or // -- the right operand of a comma operator that is the operand of a // decltype-specifier, // a temporary object is not introduced for the prvalue. // Recursively rebuild ParenExprs and comma expressions to strip out the // outermost CXXBindTemporaryExpr, if any. if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) { ExprResult SubExpr = ActOnDecltypeExpression(PE->getSubExpr()); if (SubExpr.isInvalid()) return ExprError(); if (SubExpr.get() == PE->getSubExpr()) return E; return ActOnParenExpr(PE->getLParen(), PE->getRParen(), SubExpr.get()); } if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { if (BO->getOpcode() == BO_Comma) { ExprResult RHS = ActOnDecltypeExpression(BO->getRHS()); if (RHS.isInvalid()) return ExprError(); if (RHS.get() == BO->getRHS()) return E; return new (Context) BinaryOperator( BO->getLHS(), RHS.get(), BO_Comma, BO->getType(), BO->getValueKind(), BO->getObjectKind(), BO->getOperatorLoc(), BO->isFPContractable()); } } CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(E); CallExpr *TopCall = TopBind ? dyn_cast<CallExpr>(TopBind->getSubExpr()) : nullptr; if (TopCall) E = TopCall; else TopBind = nullptr; // Disable the special decltype handling now. ExprEvalContexts.back().IsDecltype = false; // In MS mode, don't perform any extra checking of call return types within a // decltype expression. if (getLangOpts().MSVCCompat) return E; // Perform the semantic checks we delayed until this point. for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeCalls.size(); I != N; ++I) { CallExpr *Call = ExprEvalContexts.back().DelayedDecltypeCalls[I]; if (Call == TopCall) continue; if (CheckCallReturnType(Call->getCallReturnType(Context), Call->getLocStart(), Call, Call->getDirectCallee())) return ExprError(); } // Now all relevant types are complete, check the destructors are accessible // and non-deleted, and annotate them on the temporaries. for (unsigned I = 0, N = ExprEvalContexts.back().DelayedDecltypeBinds.size(); I != N; ++I) { CXXBindTemporaryExpr *Bind = ExprEvalContexts.back().DelayedDecltypeBinds[I]; if (Bind == TopBind) continue; CXXTemporary *Temp = Bind->getTemporary(); CXXRecordDecl *RD = Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl(); CXXDestructorDecl *Destructor = LookupDestructor(RD); Temp->setDestructor(Destructor); MarkFunctionReferenced(Bind->getExprLoc(), Destructor); CheckDestructorAccess(Bind->getExprLoc(), Destructor, PDiag(diag::err_access_dtor_temp) << Bind->getType()); if (DiagnoseUseOfDecl(Destructor, Bind->getExprLoc())) return ExprError(); // We need a cleanup, but we don't need to remember the temporary. ExprNeedsCleanups = true; } // Possibly strip off the top CXXBindTemporaryExpr. return E; } /// Note a set of 'operator->' functions that were used for a member access. static void noteOperatorArrows(Sema &S, ArrayRef<FunctionDecl *> OperatorArrows) { unsigned SkipStart = OperatorArrows.size(), SkipCount = 0; // FIXME: Make this configurable? unsigned Limit = 9; if (OperatorArrows.size() > Limit) { // Produce Limit-1 normal notes and one 'skipping' note. 
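// Worked example (illustrative, not from the original source): with Limit = 9 and 20
// recorded operator->s, SkipStart = 4 and SkipCount = 12, so we note the first four
// arrows, emit a single "skipping 12" note, then note the last four.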
SkipStart = (Limit - 1) / 2 + (Limit - 1) % 2; SkipCount = OperatorArrows.size() - (Limit - 1); } for (unsigned I = 0; I < OperatorArrows.size(); /**/) { if (I == SkipStart) { S.Diag(OperatorArrows[I]->getLocation(), diag::note_operator_arrows_suppressed) << SkipCount; I += SkipCount; } else { S.Diag(OperatorArrows[I]->getLocation(), diag::note_operator_arrow_here) << OperatorArrows[I]->getCallResultType(); ++I; } } } ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor) { // Since this might be a postfix expression, get rid of ParenListExprs. ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); Result = CheckPlaceholderExpr(Base); if (Result.isInvalid()) return ExprError(); Base = Result.get(); // HLSL Change Starts assert(OpKind == tok::arrow || OpKind == tok::period); if (getLangOpts().HLSL && OpKind != tok::period) { // continue processing as if it was a period accessor Diag(OpLoc, diag::err_hlsl_unsupported_operator); OpKind = tok::period; } // HLSL Change Ends QualType BaseType = Base->getType(); MayBePseudoDestructor = false; if (BaseType->isDependentType()) { // If we have a pointer to a dependent type and are using the -> operator, // the object type is the type that the pointer points to. We might still // have enough information about that type to do something useful. if (OpKind == tok::arrow) if (const PointerType *Ptr = BaseType->getAs<PointerType>()) BaseType = Ptr->getPointeeType(); ObjectType = ParsedType::make(BaseType); MayBePseudoDestructor = true; return Base; } // C++ [over.match.oper]p8: // [...] When operator->returns, the operator-> is applied to the value // returned, with the original second operand. if (OpKind == tok::arrow) { QualType StartingType = BaseType; bool NoArrowOperatorFound = false; bool FirstIteration = true; FunctionDecl *CurFD = dyn_cast<FunctionDecl>(CurContext); // The set of types we've considered so far. llvm::SmallPtrSet<CanQualType,8> CTypes; SmallVector<FunctionDecl*, 8> OperatorArrows; CTypes.insert(Context.getCanonicalType(BaseType)); while (BaseType->isRecordType()) { if (OperatorArrows.size() >= getLangOpts().ArrowDepth) { Diag(OpLoc, diag::err_operator_arrow_depth_exceeded) << StartingType << getLangOpts().ArrowDepth << Base->getSourceRange(); noteOperatorArrows(*this, OperatorArrows); Diag(OpLoc, diag::note_operator_arrow_depth) << getLangOpts().ArrowDepth; return ExprError(); } Result = BuildOverloadedArrowExpr( S, Base, OpLoc, // When in a template specialization and on the first loop iteration, // potentially give the default diagnostic (with the fixit in a // separate note) instead of having the error reported back to here // and giving a diagnostic with a fixit attached to the error itself. (FirstIteration && CurFD && CurFD->isFunctionTemplateSpecialization()) ? nullptr : &NoArrowOperatorFound); if (Result.isInvalid()) { if (NoArrowOperatorFound) { if (FirstIteration) { Diag(OpLoc, diag::err_typecheck_member_reference_suggestion) << BaseType << 1 << Base->getSourceRange() << FixItHint::CreateReplacement(OpLoc, "."); OpKind = tok::period; break; } Diag(OpLoc, diag::err_typecheck_member_reference_arrow) << BaseType << Base->getSourceRange(); CallExpr *CE = dyn_cast<CallExpr>(Base); if (Decl *CD = (CE ? 
CE->getCalleeDecl() : nullptr)) { Diag(CD->getLocStart(), diag::note_member_reference_arrow_from_operator_arrow); } } return ExprError(); } Base = Result.get(); if (CXXOperatorCallExpr *OpCall = dyn_cast<CXXOperatorCallExpr>(Base)) OperatorArrows.push_back(OpCall->getDirectCallee()); BaseType = Base->getType(); CanQualType CBaseType = Context.getCanonicalType(BaseType); if (!CTypes.insert(CBaseType).second) { Diag(OpLoc, diag::err_operator_arrow_circular) << StartingType; noteOperatorArrows(*this, OperatorArrows); return ExprError(); } FirstIteration = false; } if (OpKind == tok::arrow && (BaseType->isPointerType() || BaseType->isObjCObjectPointerType())) BaseType = BaseType->getPointeeType(); } // Objective-C properties allow "." access on Objective-C pointer types, // so adjust the base type to the object type itself. if (BaseType->isObjCObjectPointerType()) BaseType = BaseType->getPointeeType(); // C++ [basic.lookup.classref]p2: // [...] If the type of the object expression is of pointer to scalar // type, the unqualified-id is looked up in the context of the complete // postfix-expression. // // This also indicates that we could be parsing a pseudo-destructor-name. // Note that Objective-C class and object types can be pseudo-destructor // expressions or normal member (ivar or property) access expressions. if (BaseType->isObjCObjectOrInterfaceType()) { MayBePseudoDestructor = true; } else if (!BaseType->isRecordType()) { ObjectType = ParsedType(); MayBePseudoDestructor = true; return Base; } // The object type must be complete (or dependent), or // C++11 [expr.prim.general]p3: // Unlike the object expression in other contexts, *this is not required to // be of complete type for purposes of class member access (5.2.5) outside // the member function body. if (!BaseType->isDependentType() && !isThisOutsideMemberFunctionBody(BaseType) && RequireCompleteType(OpLoc, BaseType, diag::err_incomplete_member_access)) return ExprError(); // C++ [basic.lookup.classref]p2: // If the id-expression in a class member access (5.2.5) is an // unqualified-id, and the type of the object expression is of a class // type C (or of pointer to a class type C), the unqualified-id is looked // up in the scope of class C. [...] ObjectType = ParsedType::make(BaseType); return Base; } static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base, tok::TokenKind& OpKind, SourceLocation OpLoc) { if (Base->hasPlaceholderType()) { ExprResult result = S.CheckPlaceholderExpr(Base); if (result.isInvalid()) return true; Base = result.get(); } ObjectType = Base->getType(); // C++ [expr.pseudo]p2: // The left-hand side of the dot operator shall be of scalar type. The // left-hand side of the arrow operator shall be of pointer to scalar type. // This scalar type is the object type. // Note that this is rather different from the normal handling for the // arrow operator. if (OpKind == tok::arrow) { if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) { ObjectType = Ptr->getPointeeType(); } else if (!Base->isTypeDependent()) { // The user wrote "p->" when she probably meant "p."; fix it. 
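// e.g. (illustrative, not from the original source): given 'typedef int T; T n;', writing
// 'n->~T()' lands here; 'n' is a non-pointer scalar, so the fixit below suggests '.' in
// place of '->'.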
S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion) << ObjectType << true << FixItHint::CreateReplacement(OpLoc, "."); if (S.isSFINAEContext()) return true; OpKind = tok::period; } } return false; } ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeTypeInfo, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage Destructed) { TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo(); QualType ObjectType; if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc)) return ExprError(); if (!ObjectType->isDependentType() && !ObjectType->isScalarType() && !ObjectType->isVectorType()) { if (getLangOpts().MSVCCompat && ObjectType->isVoidType()) Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange(); else { Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar) << ObjectType << Base->getSourceRange(); return ExprError(); } } // C++ [expr.pseudo]p2: // [...] The cv-unqualified versions of the object type and of the type // designated by the pseudo-destructor-name shall be the same type. if (DestructedTypeInfo) { QualType DestructedType = DestructedTypeInfo->getType(); SourceLocation DestructedTypeStart = DestructedTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(); if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) { if (!Context.hasSameUnqualifiedType(DestructedType, ObjectType)) { Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch) << ObjectType << DestructedType << Base->getSourceRange() << DestructedTypeInfo->getTypeLoc().getLocalSourceRange(); // Recover by setting the destructed type to the object type. DestructedType = ObjectType; DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType, DestructedTypeStart); Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo); } else if (DestructedType.getObjCLifetime() != ObjectType.getObjCLifetime()) { if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) { // Okay: just pretend that the user provided the correctly-qualified // type. } else { Diag(DestructedTypeStart, diag::err_arc_pseudo_dtor_inconstant_quals) << ObjectType << DestructedType << Base->getSourceRange() << DestructedTypeInfo->getTypeLoc().getLocalSourceRange(); } // Recover by setting the destructed type to the object type. DestructedType = ObjectType; DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType, DestructedTypeStart); Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo); } } } // C++ [expr.pseudo]p2: // [...] Furthermore, the two type-names in a pseudo-destructor-name of the // form // // ::[opt] nested-name-specifier[opt] type-name :: ~ type-name // // shall designate the same scalar type. 
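// e.g. (illustrative, not from the original source): given 'typedef int I; typedef float F;
// int n;', 'n.I::~I()' is well-formed, while 'n.F::~I()' trips the scope-type mismatch
// diagnostic below.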
if (ScopeTypeInfo) { QualType ScopeType = ScopeTypeInfo->getType(); if (!ScopeType->isDependentType() && !ObjectType->isDependentType() && !Context.hasSameUnqualifiedType(ScopeType, ObjectType)) { Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(), diag::err_pseudo_dtor_type_mismatch) << ObjectType << ScopeType << Base->getSourceRange() << ScopeTypeInfo->getTypeLoc().getLocalSourceRange(); ScopeType = QualType(); ScopeTypeInfo = nullptr; } } Expr *Result = new (Context) CXXPseudoDestructorExpr(Context, Base, OpKind == tok::arrow, OpLoc, SS.getWithLocInContext(Context), ScopeTypeInfo, CCLoc, TildeLoc, Destructed); return Result; } ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName) { assert((FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId || FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) && "Invalid first type name in pseudo-destructor"); assert((SecondTypeName.getKind() == UnqualifiedId::IK_TemplateId || SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) && "Invalid second type name in pseudo-destructor"); QualType ObjectType; if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc)) return ExprError(); // Compute the object type that we should use for name lookup purposes. Only // record types and dependent types matter. ParsedType ObjectTypePtrForLookup; if (!SS.isSet()) { if (ObjectType->isRecordType()) ObjectTypePtrForLookup = ParsedType::make(ObjectType); else if (ObjectType->isDependentType()) ObjectTypePtrForLookup = ParsedType::make(Context.DependentTy); } // Convert the name of the type being destructed (following the ~) into a // type (with source-location information). QualType DestructedType; TypeSourceInfo *DestructedTypeInfo = nullptr; PseudoDestructorTypeStorage Destructed; if (SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) { ParsedType T = getTypeName(*SecondTypeName.Identifier, SecondTypeName.StartLocation, S, &SS, true, false, ObjectTypePtrForLookup); if (!T && ((SS.isSet() && !computeDeclContext(SS, false)) || (!SS.isSet() && ObjectType->isDependentType()))) { // The name of the type being destroyed is a dependent name, and we // couldn't find anything useful in scope. Just store the identifier and // its location, and we'll perform (qualified) name lookup again at // template instantiation time. Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier, SecondTypeName.StartLocation); } else if (!T) { Diag(SecondTypeName.StartLocation, diag::err_pseudo_dtor_destructor_non_type) << SecondTypeName.Identifier << ObjectType; if (isSFINAEContext()) return ExprError(); // Recover by assuming we had the right type all along. DestructedType = ObjectType; } else DestructedType = GetTypeFromParser(T, &DestructedTypeInfo); } else { // Resolve the template-id to a type. TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId; ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); TypeResult T = ActOnTemplateIdType(TemplateId->SS, TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc); if (T.isInvalid() || !T.get()) { // Recover by assuming we had the right type all along.
DestructedType = ObjectType; } else DestructedType = GetTypeFromParser(T.get(), &DestructedTypeInfo); } // If we've performed some kind of recovery, (re-)build the type source // information. if (!DestructedType.isNull()) { if (!DestructedTypeInfo) DestructedTypeInfo = Context.getTrivialTypeSourceInfo(DestructedType, SecondTypeName.StartLocation); Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo); } // Convert the name of the scope type (the type prior to '::') into a type. TypeSourceInfo *ScopeTypeInfo = nullptr; QualType ScopeType; if (FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId || FirstTypeName.Identifier) { if (FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) { ParsedType T = getTypeName(*FirstTypeName.Identifier, FirstTypeName.StartLocation, S, &SS, true, false, ObjectTypePtrForLookup); if (!T) { Diag(FirstTypeName.StartLocation, diag::err_pseudo_dtor_destructor_non_type) << FirstTypeName.Identifier << ObjectType; if (isSFINAEContext()) return ExprError(); // Just drop this type. It's unnecessary anyway. ScopeType = QualType(); } else ScopeType = GetTypeFromParser(T, &ScopeTypeInfo); } else { // Resolve the template-id to a type. TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId; ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(), TemplateId->NumArgs); TypeResult T = ActOnTemplateIdType(TemplateId->SS, TemplateId->TemplateKWLoc, TemplateId->Template, TemplateId->TemplateNameLoc, TemplateId->LAngleLoc, TemplateArgsPtr, TemplateId->RAngleLoc); if (T.isInvalid() || !T.get()) { // Recover by dropping this type. ScopeType = QualType(); } else ScopeType = GetTypeFromParser(T.get(), &ScopeTypeInfo); } } if (!ScopeType.isNull() && !ScopeTypeInfo) ScopeTypeInfo = Context.getTrivialTypeSourceInfo(ScopeType, FirstTypeName.StartLocation); return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS, ScopeTypeInfo, CCLoc, TildeLoc, Destructed); } ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS) { QualType ObjectType; if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc)) return ExprError(); QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc(), false); TypeLocBuilder TLB; DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T); DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc()); TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T); PseudoDestructorTypeStorage Destructed(DestructedTypeInfo); return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, CXXScopeSpec(), nullptr, SourceLocation(), TildeLoc, Destructed); } ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates) { if (Method->getParent()->isLambda() && Method->getConversionType()->isBlockPointerType()) { // This is a lambda conversion to block pointer; check if the argument // is a LambdaExpr. Expr *SubE = E; CastExpr *CE = dyn_cast<CastExpr>(SubE); if (CE && CE->getCastKind() == CK_NoOp) SubE = CE->getSubExpr(); SubE = SubE->IgnoreParens(); if (CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(SubE)) SubE = BE->getSubExpr(); if (isa<LambdaExpr>(SubE)) { // For the conversion to block pointer on a lambda expression, we // construct a special BlockLiteral instead; this doesn't really make // a difference in ARC, but outside of ARC the resulting block literal // follows the normal lifetime rules for block literals instead of being // autoreleased.
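// (Illustrative, not from the original source: with blocks enabled,
// 'void (^b)() = []{};' takes this path and is emitted as a block literal
// wrapping the lambda directly rather than as an autoreleased conversion.)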
DiagnosticErrorTrap Trap(Diags); ExprResult Exp = BuildBlockForLambdaConversion(E->getExprLoc(), E->getExprLoc(), Method, E); if (Exp.isInvalid()) Diag(E->getExprLoc(), diag::note_lambda_to_block_conv); return Exp; } } ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/nullptr, FoundDecl, Method); if (Exp.isInvalid()) return true; MemberExpr *ME = new (Context) MemberExpr( Exp.get(), /*IsArrow=*/false, SourceLocation(), Method, SourceLocation(), Context.BoundMemberTy, VK_RValue, OK_Ordinary); if (HadMultipleCandidates) ME->setHadMultipleCandidates(true); MarkMemberReferenced(ME); QualType ResultType = Method->getReturnType(); ExprValueKind VK = Expr::getValueKindForType(ResultType); ResultType = ResultType.getNonLValueExprType(Context); CXXMemberCallExpr *CE = new (Context) CXXMemberCallExpr(Context, ME, None, ResultType, VK, Exp.get()->getLocEnd()); return CE; } ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen) { // If the operand is an unresolved lookup expression, the expression is ill- // formed per [over.over]p1, because overloaded function names cannot be used // without arguments except in explicit contexts. ExprResult R = CheckPlaceholderExpr(Operand); if (R.isInvalid()) return R; // The operand may have been modified when checking the placeholder type. Operand = R.get(); if (ActiveTemplateInstantiations.empty() && Operand->HasSideEffects(Context, false)) { // The expression operand for noexcept is in an unevaluated expression // context, so side effects could result in unintended consequences. Diag(Operand->getExprLoc(), diag::warn_side_effects_unevaluated_context); } CanThrowResult CanThrow = canThrow(Operand); return new (Context) CXXNoexceptExpr(Context.BoolTy, Operand, CanThrow, KeyLoc, RParen); } ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation, Expr *Operand, SourceLocation RParen) { return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen); } static bool IsSpecialDiscardedValue(Expr *E) { // In C++11, discarded-value expressions of a certain form are special, // according to [expr]p10: // The lvalue-to-rvalue conversion (4.1) is applied only if the // expression is an lvalue of volatile-qualified type and it has // one of the following forms: E = E->IgnoreParens(); // - id-expression (5.1.1), if (isa<DeclRefExpr>(E)) return true; // - subscripting (5.2.1), if (isa<ArraySubscriptExpr>(E)) return true; // - class member access (5.2.5), if (isa<MemberExpr>(E)) return true; // - indirection (5.3.1), if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) if (UO->getOpcode() == UO_Deref) return true; if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) { // - pointer-to-member operation (5.5), if (BO->isPtrMemOp()) return true; // - comma expression (5.18) where the right operand is one of the above. if (BO->getOpcode() == BO_Comma) return IsSpecialDiscardedValue(BO->getRHS()); } // - conditional expression (5.16) where both the second and the third // operands are one of the above, or if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) return IsSpecialDiscardedValue(CO->getTrueExpr()) && IsSpecialDiscardedValue(CO->getFalseExpr()); // The related edge case of "*x ?: *x". if (BinaryConditionalOperator *BCO = dyn_cast<BinaryConditionalOperator>(E)) { if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(BCO->getTrueExpr())) return IsSpecialDiscardedValue(OVE->getSourceExpr()) && IsSpecialDiscardedValue(BCO->getFalseExpr()); } // Objective-C++ extensions to the rule. 
if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E)) return true; return false; } /// Perform the conversions required for an expression used in a /// context that ignores the result. ExprResult Sema::IgnoredValueConversions(Expr *E) { if (E->hasPlaceholderType()) { ExprResult result = CheckPlaceholderExpr(E); if (result.isInvalid()) return E; E = result.get(); } // C99 6.3.2.1: // [Except in specific positions,] an lvalue that does not have // array type is converted to the value stored in the // designated object (and is no longer an lvalue). if (E->isRValue()) { // In C, function designators (i.e. expressions of function type) // are r-values, but we still want to do function-to-pointer decay // on them. This is both technically correct and convenient for // some clients. if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType()) return DefaultFunctionArrayConversion(E); return E; } if (getLangOpts().CPlusPlus) { // The C++11 standard defines the notion of a discarded-value expression; // normally, we don't need to do anything to handle it, but if it is a // volatile lvalue with a special form, we perform an lvalue-to-rvalue // conversion. if (getLangOpts().CPlusPlus11 && E->isGLValue() && E->getType().isVolatileQualified() && IsSpecialDiscardedValue(E)) { ExprResult Res = DefaultLvalueConversion(E); if (Res.isInvalid()) return E; E = Res.get(); } return E; } // GCC seems to also exclude expressions of incomplete enum type. if (const EnumType *T = E->getType()->getAs<EnumType>()) { if (!T->getDecl()->isComplete()) { // FIXME: stupid workaround for a codegen bug! E = ImpCastExprToType(E, Context.VoidTy, CK_ToVoid).get(); return E; } } ExprResult Res = DefaultFunctionArrayLvalueConversion(E); if (Res.isInvalid()) return E; E = Res.get(); if (!E->getType()->isVoidType()) RequireCompleteType(E->getExprLoc(), E->getType(), diag::err_incomplete_type); return E; } // If we can unambiguously determine whether Var can never be used // in a constant expression, return true. // - if the variable and its initializer are non-dependent, then // we can unambiguously check if the variable is a constant expression. // - if the initializer is not value dependent - we can determine whether // it can be used to initialize a constant expression. If Init cannot // be used to initialize a constant expression we conclude that Var can // never be a constant expression. // - FIXME: if the initializer is dependent, we can still do some analysis and // identify certain cases unambiguously as non-const by using a Visitor: // - such as those that involve odr-use of a ParmVarDecl, involve a new- or // delete-expression, lambda-expr, dynamic-cast, reinterpret-cast etc... static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var, ASTContext &Context) { if (isa<ParmVarDecl>(Var)) return true; const VarDecl *DefVD = nullptr; // If there is no initializer - this cannot be a constant expression. if (!Var->getAnyInitializer(DefVD)) return true; assert(DefVD); if (DefVD->isWeak()) return false; EvaluatedStmt *Eval = DefVD->ensureEvaluatedStmt(); Expr *Init = cast<Expr>(Eval->Value); if (Var->getType()->isDependentType() || Init->isValueDependent()) { // FIXME: Teach the constant evaluator to deal with the non-dependent parts // of value-dependent expressions, and use it here to determine whether the // initializer is a potential constant expression.
return false; } return !IsVariableAConstantExpression(Var, Context); } /// \brief Check if the current lambda has any potential captures /// that must be captured by any of its enclosing lambdas that are ready to /// capture. If there is a lambda that can capture a nested /// potential-capture, go ahead and do so. Also, check to see if any /// variables are uncapturable or do not involve an odr-use and so do not /// need to be captured. static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures( Expr *const FE, LambdaScopeInfo *const CurrentLSI, Sema &S) { assert(!S.isUnevaluatedContext()); assert(S.CurContext->isDependentContext()); assert(CurrentLSI->CallOperator == S.CurContext && "The current call operator must be synchronized with Sema's CurContext"); const bool IsFullExprInstantiationDependent = FE->isInstantiationDependent(); ArrayRef<const FunctionScopeInfo *> FunctionScopesArrayRef( S.FunctionScopes.data(), S.FunctionScopes.size()); // All the potentially capturable variables in the current nested // lambda (within a generic outer lambda), must be captured by an // outer lambda that is enclosed within a non-dependent context. const unsigned NumPotentialCaptures = CurrentLSI->getNumPotentialVariableCaptures(); for (unsigned I = 0; I != NumPotentialCaptures; ++I) { Expr *VarExpr = nullptr; VarDecl *Var = nullptr; CurrentLSI->getPotentialVariableCapture(I, Var, VarExpr); // If the variable is clearly identified as non-odr-used and the full // expression is not instantiation dependent, only then do we not // need to check enclosing lambdas for speculative captures. // For e.g.: // Even though 'x' is not odr-used, it should be captured. // int test() { // const int x = 10; // auto L = [=](auto a) { // (void) +x + a; // }; // } if (CurrentLSI->isVariableExprMarkedAsNonODRUsed(VarExpr) && !IsFullExprInstantiationDependent) continue; // If we have a capture-capable lambda for the variable, go ahead and // capture the variable in that lambda (and all its enclosing lambdas). if (const Optional<unsigned> Index = getStackIndexOfNearestEnclosingCaptureCapableLambda( FunctionScopesArrayRef, Var, S)) { const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue(); MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), S, &FunctionScopeIndexOfCapturableLambda); } const bool IsVarNeverAConstantExpression = VariableCanNeverBeAConstantExpression(Var, S.Context); if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) { // This full expression is not instantiation dependent or the variable // cannot be used in a constant expression - which means // this variable must be odr-used here, so diagnose a // capture violation early, if the variable is un-capturable. // This is purely for diagnosing errors early. Otherwise, this // error would get diagnosed when the lambda becomes capture ready. QualType CaptureType, DeclRefType; SourceLocation ExprLoc = VarExpr->getExprLoc(); if (S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/false, CaptureType, DeclRefType, nullptr)) { // We will never be able to capture this variable, and we need // to be able to in any and all instantiations, so diagnose it. S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit, /*EllipsisLoc*/ SourceLocation(), /*BuildAndDiagnose*/true, CaptureType, DeclRefType, nullptr); } } } // Check if 'this' needs to be captured.
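// e.g. (illustrative, not from the original source): in 'struct S { int m;
// void f() { auto L = [=](auto a) { return m + a; }; } };', the use of 'm'
// in the generic lambda records a potential 'this' capture, resolved here.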
if (CurrentLSI->hasPotentialThisCapture()) { // If we have a capture-capable lambda for 'this', go ahead and capture // 'this' in that lambda (and all its enclosing lambdas). if (const Optional<unsigned> Index = getStackIndexOfNearestEnclosingCaptureCapableLambda( FunctionScopesArrayRef, /*0 is 'this'*/ nullptr, S)) { const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue(); S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation, /*Explicit*/ false, /*BuildAndDiagnose*/ true, &FunctionScopeIndexOfCapturableLambda); } } // Reset all the potential captures at the end of each full-expression. CurrentLSI->clearPotentialCaptures(); } static ExprResult attemptRecovery(Sema &SemaRef, const TypoCorrectionConsumer &Consumer, TypoCorrection TC) { LookupResult R(SemaRef, Consumer.getLookupResult().getLookupNameInfo(), Consumer.getLookupResult().getLookupKind()); const CXXScopeSpec *SS = Consumer.getSS(); CXXScopeSpec NewSS; // Use an appropriate CXXScopeSpec for building the expr. if (auto *NNS = TC.getCorrectionSpecifier()) NewSS.MakeTrivial(SemaRef.Context, NNS, TC.getCorrectionRange()); else if (SS && !TC.WillReplaceSpecifier()) NewSS = *SS; if (auto *ND = TC.getCorrectionDecl()) { R.setLookupName(ND->getDeclName()); R.addDecl(ND); if (ND->isCXXClassMember()) { // Figure out the correct naming class to add to the LookupResult. CXXRecordDecl *Record = nullptr; if (auto *NNS = TC.getCorrectionSpecifier()) Record = NNS->getAsType()->getAsCXXRecordDecl(); if (!Record) Record = dyn_cast<CXXRecordDecl>(ND->getDeclContext()->getRedeclContext()); if (Record) R.setNamingClass(Record); // Detect and handle the case where the decl might be an implicit // member. bool MightBeImplicitMember; if (!Consumer.isAddressOfOperand()) MightBeImplicitMember = true; else if (!NewSS.isEmpty()) MightBeImplicitMember = false; else if (R.isOverloadedResult()) MightBeImplicitMember = false; else if (R.isUnresolvableResult()) MightBeImplicitMember = true; else MightBeImplicitMember = isa<FieldDecl>(ND) || isa<IndirectFieldDecl>(ND) || isa<MSPropertyDecl>(ND); if (MightBeImplicitMember) return SemaRef.BuildPossibleImplicitMemberExpr( NewSS, /*TemplateKWLoc*/ SourceLocation(), R, /*TemplateArgs*/ nullptr); } else if (auto *Ivar = dyn_cast<ObjCIvarDecl>(ND)) { return SemaRef.LookupInObjCMethod(R, Consumer.getScope(), Ivar->getIdentifier()); } } return SemaRef.BuildDeclarationNameExpr(NewSS, R, /*NeedsADL*/ false, /*AcceptInvalidDecl*/ true); } namespace { class FindTypoExprs : public RecursiveASTVisitor<FindTypoExprs> { llvm::SmallSetVector<TypoExpr *, 2> &TypoExprs; public: explicit FindTypoExprs(llvm::SmallSetVector<TypoExpr *, 2> &TypoExprs) : TypoExprs(TypoExprs) {} bool VisitTypoExpr(TypoExpr *TE) { TypoExprs.insert(TE); return true; } }; class TransformTypos : public TreeTransform<TransformTypos> { typedef TreeTransform<TransformTypos> BaseTransform; VarDecl *InitDecl; // A decl to avoid as a correction because it is in the // process of being initialized. llvm::function_ref<ExprResult(Expr *)> ExprFilter; llvm::SmallSetVector<TypoExpr *, 2> TypoExprs, AmbiguousTypoExprs; llvm::SmallDenseMap<TypoExpr *, ExprResult, 2> TransformCache; llvm::SmallDenseMap<OverloadExpr *, Expr *, 4> OverloadResolution; /// \brief Emit diagnostics for all of the TypoExprs encountered. /// If the TypoExprs were successfully corrected, then the diagnostics should /// suggest the corrections. Otherwise the diagnostics will not suggest /// anything (having been passed an empty TypoCorrection).
void EmitAllDiagnostics() { for (auto E : TypoExprs) { TypoExpr *TE = cast<TypoExpr>(E); auto &State = SemaRef.getTypoExprState(TE); if (State.DiagHandler) { TypoCorrection TC = State.Consumer->getCurrentCorrection(); ExprResult Replacement = TransformCache[TE]; // Extract the NamedDecl from the transformed TypoExpr and add it to the // TypoCorrection, replacing the existing decls. This ensures the right // NamedDecl is used in diagnostics e.g. in the case where overload // resolution was used to select one from several possible decls that // had been stored in the TypoCorrection. if (auto *ND = getDeclFromExpr( Replacement.isInvalid() ? nullptr : Replacement.get())) TC.setCorrectionDecl(ND); State.DiagHandler(TC); } SemaRef.clearDelayedTypo(TE); } } /// \brief If corrections for the first TypoExpr have been exhausted for a /// given combination of the other TypoExprs, retry those corrections against /// the next combination of substitutions for the other TypoExprs by advancing /// to the next potential correction of the second TypoExpr. For the second /// and subsequent TypoExprs, if its stream of corrections has been exhausted, /// the stream is reset and the next TypoExpr's stream is advanced by one (a /// TypoExpr's correction stream is advanced by removing the TypoExpr from the /// TransformCache). Returns true if there are still any untried combinations /// of corrections. bool CheckAndAdvanceTypoExprCorrectionStreams() { for (auto TE : TypoExprs) { auto &State = SemaRef.getTypoExprState(TE); TransformCache.erase(TE); if (!State.Consumer->finished()) return true; State.Consumer->resetCorrectionStream(); } return false; } NamedDecl *getDeclFromExpr(Expr *E) { if (auto *OE = dyn_cast_or_null<OverloadExpr>(E)) E = OverloadResolution[OE]; if (!E) return nullptr; if (auto *DRE = dyn_cast<DeclRefExpr>(E)) return DRE->getDecl(); if (auto *ME = dyn_cast<MemberExpr>(E)) return ME->getMemberDecl(); // FIXME: Add any other expr types that could be seen by the delayed typo // correction TreeTransform for which the corresponding TypoCorrection could // contain multiple decls. return nullptr; } ExprResult TryTransform(Expr *E) { Sema::SFINAETrap Trap(SemaRef); ExprResult Res = TransformExpr(E); if (Trap.hasErrorOccurred() || Res.isInvalid()) return ExprError(); return ExprFilter(Res.get()); } public: TransformTypos(Sema &SemaRef, VarDecl *InitDecl, llvm::function_ref<ExprResult(Expr *)> Filter) : BaseTransform(SemaRef), InitDecl(InitDecl), ExprFilter(Filter) {} ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig = nullptr) { auto Result = BaseTransform::RebuildCallExpr(Callee, LParenLoc, Args, RParenLoc, ExecConfig); if (auto *OE = dyn_cast<OverloadExpr>(Callee)) { if (Result.isUsable()) { Expr *ResultCall = Result.get(); if (auto *BE = dyn_cast<CXXBindTemporaryExpr>(ResultCall)) ResultCall = BE->getSubExpr(); if (auto *CE = dyn_cast<CallExpr>(ResultCall)) OverloadResolution[OE] = CE->getCallee(); } } return Result; } ExprResult TransformLambdaExpr(LambdaExpr *E) { return Owned(E); } ExprResult Transform(Expr *E) { ExprResult Res; while (true) { Res = TryTransform(E); // Exit if either the transform was valid or if there were no TypoExprs // to transform that still have any untried correction candidates. if (!Res.isInvalid() || !CheckAndAdvanceTypoExprCorrectionStreams()) break; } // Ensure none of the TypoExprs have multiple typo correction candidates // with the same edit length that pass all the checks and filters.
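// e.g. (illustrative, not from the original source): if 'strat' corrects to
// both 'start' and 'strut' at the same edit distance and both survive the
// filters, the TypoExpr is kept in AmbiguousTypoExprs and re-vetted below.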
// TODO: Properly handle various permutations of possible corrections when // there is more than one potentially ambiguous typo correction. // Also, disable typo correction while attempting the transform when // handling potentially ambiguous typo corrections as any new TypoExprs will // have been introduced by the application of one of the correction // candidates and add little to no value if corrected. SemaRef.DisableTypoCorrection = true; while (!AmbiguousTypoExprs.empty()) { auto TE = AmbiguousTypoExprs.back(); auto Cached = TransformCache[TE]; auto &State = SemaRef.getTypoExprState(TE); State.Consumer->saveCurrentPosition(); TransformCache.erase(TE); if (!TryTransform(E).isInvalid()) { State.Consumer->resetCorrectionStream(); TransformCache.erase(TE); Res = ExprError(); break; } AmbiguousTypoExprs.remove(TE); State.Consumer->restoreSavedPosition(); TransformCache[TE] = Cached; } SemaRef.DisableTypoCorrection = false; // Ensure that all of the TypoExprs within the current Expr have been found. if (!Res.isUsable()) FindTypoExprs(TypoExprs).TraverseStmt(E); EmitAllDiagnostics(); return Res; } ExprResult TransformTypoExpr(TypoExpr *E) { // If the TypoExpr hasn't been seen before, record it. Otherwise, return the // cached transformation result if there is one and the TypoExpr isn't the // first one that was encountered. auto &CacheEntry = TransformCache[E]; if (!TypoExprs.insert(E) && !CacheEntry.isUnset()) { return CacheEntry; } auto &State = SemaRef.getTypoExprState(E); assert(State.Consumer && "Cannot transform a cleared TypoExpr"); // For the first TypoExpr and an uncached TypoExpr, find the next likely // typo correction and return it. while (TypoCorrection TC = State.Consumer->getNextCorrection()) { if (InitDecl && TC.getCorrectionDecl() == InitDecl) continue; ExprResult NE = State.RecoveryHandler ? State.RecoveryHandler(SemaRef, E, TC) : attemptRecovery(SemaRef, *State.Consumer, TC); if (!NE.isInvalid()) { // Check whether there may be a second viable correction with the same // edit distance; if so, remember this TypoExpr may have an ambiguous // correction so it can be more thoroughly vetted later. TypoCorrection Next; if ((Next = State.Consumer->peekNextCorrection()) && Next.getEditDistance(false) == TC.getEditDistance(false)) { AmbiguousTypoExprs.insert(E); } else { AmbiguousTypoExprs.remove(E); } assert(!NE.isUnset() && "Typo was transformed into a valid-but-null ExprResult"); return CacheEntry = NE; } } return CacheEntry = ExprError(); } }; } ExprResult Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl, llvm::function_ref<ExprResult(Expr *)> Filter) { // If the current evaluation context indicates there are uncorrected typos // and the current expression isn't guaranteed to not have typos, try to // resolve any TypoExpr nodes that might be in the expression. 
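// (Note, illustrative gloss on the code below, not from the original source:
// NumTypos is parked at ~0U as a sentinel so that a re-entrant call trips the
// assert instead of re-walking the same TypoExprs.)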
if (E && !ExprEvalContexts.empty() && ExprEvalContexts.back().NumTypos && (E->isTypeDependent() || E->isValueDependent() || E->isInstantiationDependent())) { auto TyposInContext = ExprEvalContexts.back().NumTypos; assert(TyposInContext < ~0U && "Recursive call of CorrectDelayedTyposInExpr"); ExprEvalContexts.back().NumTypos = ~0U; auto TyposResolved = DelayedTypos.size(); auto Result = TransformTypos(*this, InitDecl, Filter).Transform(E); ExprEvalContexts.back().NumTypos = TyposInContext; TyposResolved -= DelayedTypos.size(); if (Result.isInvalid() || Result.get() != E) { ExprEvalContexts.back().NumTypos -= TyposResolved; return Result; } assert(TyposResolved == 0 && "Corrected typo but got same Expr back?"); } return E; } ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC, bool DiscardedValue, bool IsConstexpr, bool IsLambdaInitCaptureInitializer) { ExprResult FullExpr = FE; if (!FullExpr.get()) return ExprError(); // If we are an init-expression in a lambda's init-capture, we should not // diagnose an unexpanded pack now (will be diagnosed once lambda-expr // containing full-expression is done). // template<class ... Ts> void test(Ts ... t) { // test([&a(t)]() { <-- (t) is an init-expr that shouldn't be diagnosed now. // return a; // }() ...); // } // FIXME: This is a hack. It would be better if we pushed the lambda scope // when we parse the lambda introducer, and teach capturing (but not // unexpanded pack detection) to walk over LambdaScopeInfos which don't have a // corresponding class yet (that is, have LambdaScopeInfo either represent a // lambda where we've entered the introducer but not the body, or represent a // lambda where we've entered the body, depending on where the // parser/instantiation has got to). if (!IsLambdaInitCaptureInitializer && DiagnoseUnexpandedParameterPack(FullExpr.get())) return ExprError(); // Top-level expressions default to 'id' when we're in a debugger. if (DiscardedValue && getLangOpts().DebuggerCastResultToId && FullExpr.get()->getType() == Context.UnknownAnyTy) { FullExpr = forceUnknownAnyToType(FullExpr.get(), Context.getObjCIdType()); if (FullExpr.isInvalid()) return ExprError(); } if (DiscardedValue) { FullExpr = CheckPlaceholderExpr(FullExpr.get()); if (FullExpr.isInvalid()) return ExprError(); FullExpr = IgnoredValueConversions(FullExpr.get()); if (FullExpr.isInvalid()) return ExprError(); } FullExpr = CorrectDelayedTyposInExpr(FullExpr.get()); if (FullExpr.isInvalid()) return ExprError(); CheckCompletedExpr(FullExpr.get(), CC, IsConstexpr); // At the end of this full expression (which could be a deeply nested // lambda), if there is a potential capture within the nested lambda, // have the outer capture-able lambda try and capture it.
// Consider the following code: // void f(int, int); // void f(const int&, double); // void foo() { // const int x = 10, y = 20; // auto L = [=](auto a) { // auto M = [=](auto b) { // f(x, b); <-- requires x to be captured by L and M // f(y, a); <-- requires y to be captured by L, but not all Ms // }; // }; // } // FIXME: Also consider what happens for something like this that involves // the gnu-extension statement-expressions or even lambda-init-captures: // void f() { // const int n = 0; // auto L = [&](auto a) { // +n + ({ 0; a; }); // }; // } // // Here, we see +n, and then the full-expression 0; ends, so we don't // capture n (and instead remove it from our list of potential captures), // and then the full-expression +n + ({ 0; }); ends, but it's too late // for us to see that we need to capture n after all. LambdaScopeInfo *const CurrentLSI = getCurLambda(); // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer // even if CurContext is not a lambda call operator. Refer to that Bug Report // for an example of the code that might cause this asynchrony. // By ensuring we are in the context of a lambda's call operator // we can fix the bug (we only need to check whether we need to capture // if we are within a lambda's body); but per the comments in that // PR, a proper fix would entail : // "Alternative suggestion: // - Add to Sema an integer holding the smallest (outermost) scope // index that we are *lexically* within, and save/restore/set to // FunctionScopes.size() in InstantiatingTemplate's // constructor/destructor. // - Teach the handful of places that iterate over FunctionScopes to // stop at the outermost enclosing lexical scope." const bool IsInLambdaDeclContext = isLambdaCallOperator(CurContext); if (IsInLambdaDeclContext && CurrentLSI && CurrentLSI->hasPotentialCaptures() && !FullExpr.isInvalid()) CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(FE, CurrentLSI, *this); return MaybeCreateExprWithCleanups(FullExpr); } StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) { if (!FullStmt) return StmtError(); return MaybeCreateStmtWithCleanups(FullStmt); } Sema::IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo) { DeclarationName TargetName = TargetNameInfo.getName(); if (!TargetName) return IER_DoesNotExist; // If the name itself is dependent, then the result is dependent. if (TargetName.isDependentName()) return IER_Dependent; // Do the redeclaration lookup in the current scope. LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName, Sema::NotForRedeclaration); LookupParsedName(R, S, &SS); R.suppressDiagnostics(); switch (R.getResultKind()) { case LookupResult::Found: case LookupResult::FoundOverloaded: case LookupResult::FoundUnresolvedValue: case LookupResult::Ambiguous: return IER_Exists; case LookupResult::NotFound: return IER_DoesNotExist; case LookupResult::NotFoundInCurrentInstantiation: return IER_Dependent; } llvm_unreachable("Invalid LookupResult Kind!"); } Sema::IfExistsResult Sema::CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name) { DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name); // Check for unexpanded parameter packs. SmallVector<UnexpandedParameterPack, 4> Unexpanded; collectUnexpandedParameterPacks(SS, Unexpanded); collectUnexpandedParameterPacks(TargetNameInfo, Unexpanded); if (!Unexpanded.empty()) { DiagnoseUnexpandedParameterPacks(KeywordLoc, IsIfExists? 
UPPC_IfExists : UPPC_IfNotExists, Unexpanded); return IER_Error; } return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo); }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaLookup.cpp
//===--------------------- SemaLookup.cpp - Name Lookup ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements name lookup for C, C++, Objective-C, and // Objective-C++. // //===----------------------------------------------------------------------===// #include "clang/Sema/Lookup.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ASTMutationListener.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclLookups.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/Basic/Builtins.h" #include "clang/Basic/LangOptions.h" #include "clang/Lex/HeaderSearch.h" #include "clang/Lex/ModuleLoader.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/Overload.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/Sema.h" #include "clang/Sema/SemaInternal.h" #include "clang/Sema/TemplateDeduction.h" #include "clang/Sema/TypoCorrection.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/ADT/edit_distance.h" #include "llvm/Support/ErrorHandling.h" #include <algorithm> #include <iterator> #include <limits> #include <list> #include <map> #include <set> #include <utility> #include <vector> using namespace clang; using namespace sema; namespace { class UnqualUsingEntry { const DeclContext *Nominated; const DeclContext *CommonAncestor; public: UnqualUsingEntry(const DeclContext *Nominated, const DeclContext *CommonAncestor) : Nominated(Nominated), CommonAncestor(CommonAncestor) { } const DeclContext *getCommonAncestor() const { return CommonAncestor; } const DeclContext *getNominatedNamespace() const { return Nominated; } // Sort by the pointer value of the common ancestor. struct Comparator { bool operator()(const UnqualUsingEntry &L, const UnqualUsingEntry &R) { return L.getCommonAncestor() < R.getCommonAncestor(); } bool operator()(const UnqualUsingEntry &E, const DeclContext *DC) { return E.getCommonAncestor() < DC; } bool operator()(const DeclContext *DC, const UnqualUsingEntry &E) { return DC < E.getCommonAncestor(); } }; }; /// A collection of using directives, as used by C++ unqualified /// lookup. class UnqualUsingDirectiveSet { typedef SmallVector<UnqualUsingEntry, 8> ListTy; ListTy list; llvm::SmallPtrSet<DeclContext*, 8> visited; public: UnqualUsingDirectiveSet() {} void visitScopeChain(Scope *S, Scope *InnermostFileScope) { // C++ [namespace.udir]p1: // During unqualified name lookup, the names appear as if they // were declared in the nearest enclosing namespace which contains // both the using-directive and the nominated namespace. DeclContext *InnermostFileDC = InnermostFileScope->getEntity(); assert(InnermostFileDC && InnermostFileDC->isFileContext()); for (; S; S = S->getParent()) { // C++ [namespace.udir]p1: // A using-directive shall not appear in class scope, but may // appear in namespace scope or in block scope. 
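// e.g. (illustrative, not from the original source): for 'namespace A {
// namespace B { int x; } }' and 'void f() { using namespace A::B; x = 1; }',
// names from B behave as if declared in the global namespace, the nearest
// enclosing scope that contains both f and B.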
DeclContext *Ctx = S->getEntity(); if (Ctx && Ctx->isFileContext()) { visit(Ctx, Ctx); } else if (!Ctx || Ctx->isFunctionOrMethod()) { for (auto *I : S->using_directives()) visit(I, InnermostFileDC); } } } // Visits a context and collects all of its using directives // recursively. Treats all using directives as if they were // declared in the context. // // A given context is only ever visited once, so it is important // that contexts be visited from the inside out in order to get // the effective DCs right. void visit(DeclContext *DC, DeclContext *EffectiveDC) { if (!visited.insert(DC).second) return; addUsingDirectives(DC, EffectiveDC); } // Visits a using directive and collects all of its using // directives recursively. Treats all using directives as if they // were declared in the effective DC. void visit(UsingDirectiveDecl *UD, DeclContext *EffectiveDC) { DeclContext *NS = UD->getNominatedNamespace(); if (!visited.insert(NS).second) return; addUsingDirective(UD, EffectiveDC); addUsingDirectives(NS, EffectiveDC); } // Adds all the using directives in a context (and those nominated // by its using directives, transitively) as if they appeared in // the given effective context. void addUsingDirectives(DeclContext *DC, DeclContext *EffectiveDC) { SmallVector<DeclContext*,4> queue; while (true) { for (auto UD : DC->using_directives()) { DeclContext *NS = UD->getNominatedNamespace(); if (visited.insert(NS).second) { addUsingDirective(UD, EffectiveDC); queue.push_back(NS); } } if (queue.empty()) return; DC = queue.pop_back_val(); } } // Add a using directive as if it had been declared in the given // context. This helps implement C++ [namespace.udir]p3: // The using-directive is transitive: if a scope contains a // using-directive that nominates a second namespace that itself // contains using-directives, the effect is as if the // using-directives from the second namespace also appeared in // the first. void addUsingDirective(UsingDirectiveDecl *UD, DeclContext *EffectiveDC) { // Find the common ancestor between the effective context and // the nominated namespace. DeclContext *Common = UD->getNominatedNamespace(); while (!Common->Encloses(EffectiveDC)) Common = Common->getParent(); Common = Common->getPrimaryContext(); list.push_back(UnqualUsingEntry(UD->getNominatedNamespace(), Common)); } void done() { std::sort(list.begin(), list.end(), UnqualUsingEntry::Comparator()); } typedef ListTy::const_iterator const_iterator; const_iterator begin() const { return list.begin(); } const_iterator end() const { return list.end(); } llvm::iterator_range<const_iterator> getNamespacesFor(DeclContext *DC) const { return llvm::make_range(std::equal_range(begin(), end(), DC->getPrimaryContext(), UnqualUsingEntry::Comparator())); } }; } // Retrieve the set of identifier namespaces that correspond to a // specific kind of name lookup. static inline unsigned getIDNS(Sema::LookupNameKind NameKind, bool CPlusPlus, bool Redeclaration) { unsigned IDNS = 0; switch (NameKind) { case Sema::LookupObjCImplicitSelfParam: case Sema::LookupOrdinaryName: case Sema::LookupRedeclarationWithLinkage: case Sema::LookupLocalFriendName: IDNS = Decl::IDNS_Ordinary; if (CPlusPlus) { IDNS |= Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Namespace; if (Redeclaration) IDNS |= Decl::IDNS_TagFriend | Decl::IDNS_OrdinaryFriend; } if (Redeclaration) IDNS |= Decl::IDNS_LocalExtern; break; case Sema::LookupOperatorName: // Operator lookup is its own crazy thing; it is not the same // as (e.g.)
looking up an operator name for redeclaration. assert(!Redeclaration && "cannot do redeclaration operator lookup"); IDNS = Decl::IDNS_NonMemberOperator; break; case Sema::LookupTagName: if (CPlusPlus) { IDNS = Decl::IDNS_Type; // When looking for a redeclaration of a tag name, we add: // 1) TagFriend to find undeclared friend decls // 2) Namespace because they can't "overload" with tag decls. // 3) Tag because it includes class templates, which can't // "overload" with tag decls. if (Redeclaration) IDNS |= Decl::IDNS_Tag | Decl::IDNS_TagFriend | Decl::IDNS_Namespace; } else { IDNS = Decl::IDNS_Tag; } break; case Sema::LookupLabel: IDNS = Decl::IDNS_Label; break; case Sema::LookupMemberName: IDNS = Decl::IDNS_Member; if (CPlusPlus) IDNS |= Decl::IDNS_Tag | Decl::IDNS_Ordinary; break; case Sema::LookupNestedNameSpecifierName: IDNS = Decl::IDNS_Type | Decl::IDNS_Namespace; break; case Sema::LookupNamespaceName: IDNS = Decl::IDNS_Namespace; break; case Sema::LookupUsingDeclName: assert(Redeclaration && "should only be used for redecl lookup"); IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Using | Decl::IDNS_TagFriend | Decl::IDNS_OrdinaryFriend | Decl::IDNS_LocalExtern; break; case Sema::LookupObjCProtocolName: IDNS = Decl::IDNS_ObjCProtocol; break; case Sema::LookupAnyName: IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Using | Decl::IDNS_Namespace | Decl::IDNS_ObjCProtocol | Decl::IDNS_Type; break; } return IDNS; } void LookupResult::configure() { IDNS = getIDNS(LookupKind, getSema().getLangOpts().CPlusPlus, isForRedeclaration()); // HLSL Change Starts - do not handle new and delete // // Without this if-statement, the following HLSL example will just meet the // llvm_unreachable(..) in Sema::DeclareGlobalAllocationFunction(..) method: // // struct S // { // float foo; // void * operator new(int size) { // return (void *)0; // } // void operator delete(void *ptr) { // (void) ptr; // } // }; // // The llvm_unreachable(..) just prints the following message without // reporting the exact HLSL code line that causes the failure: // // no support for new and delete in HLSL // UNREACHABLE executed at ../../tools/clang/lib/Sema/SemaExprCXX.cpp:2163! // Aborted if (!getSema().getLangOpts().HLSL || getSema().getLangOpts().HLSLVersion < hlsl::LangStd::v2021) { // If we're looking for one of the allocation or deallocation // operators, make sure that the implicitly-declared new and delete // operators can be found. switch (NameInfo.getName().getCXXOverloadedOperator()) { case OO_New: case OO_Delete: case OO_Array_New: case OO_Array_Delete: getSema().DeclareGlobalNewDelete(); break; default: break; } } // HLSL Change Ends // Compiler builtins are always visible, regardless of where they end // up being declared. if (IdentifierInfo *Id = NameInfo.getName().getAsIdentifierInfo()) { if (unsigned BuiltinID = Id->getBuiltinID()) { if (!getSema().Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) AllowHidden = true; } } } bool LookupResult::sanity() const { // This function is never called by NDEBUG builds. 
assert(ResultKind != NotFound || Decls.size() == 0); assert(ResultKind != Found || Decls.size() == 1); assert(ResultKind != FoundOverloaded || Decls.size() > 1 || (Decls.size() == 1 && isa<FunctionTemplateDecl>((*begin())->getUnderlyingDecl()))); assert(ResultKind != FoundUnresolvedValue || sanityCheckUnresolved()); assert(ResultKind != Ambiguous || Decls.size() > 1 || (Decls.size() == 1 && (Ambiguity == AmbiguousBaseSubobjects || Ambiguity == AmbiguousBaseSubobjectTypes))); assert((Paths != nullptr) == (ResultKind == Ambiguous && (Ambiguity == AmbiguousBaseSubobjectTypes || Ambiguity == AmbiguousBaseSubobjects))); return true; } // Necessary because CXXBasePaths is not complete in Sema.h void LookupResult::deletePaths(CXXBasePaths *Paths) { delete Paths; } /// Get a representative context for a declaration such that two declarations /// will have the same context if they were found within the same scope. static DeclContext *getContextForScopeMatching(Decl *D) { // For function-local declarations, use that function as the context. This // doesn't account for scopes within the function; the caller must deal with // those. DeclContext *DC = D->getLexicalDeclContext(); if (DC->isFunctionOrMethod()) return DC; // Otherwise, look at the semantic context of the declaration. The // declaration must have been found there. return D->getDeclContext()->getRedeclContext(); } /// Resolves the result kind of this lookup. void LookupResult::resolveKind() { unsigned N = Decls.size(); // Fast case: no possible ambiguity. if (N == 0) { assert(ResultKind == NotFound || ResultKind == NotFoundInCurrentInstantiation); return; } // If there's a single decl, we need to examine it to decide what // kind of lookup this is. if (N == 1) { NamedDecl *D = (*Decls.begin())->getUnderlyingDecl(); if (isa<FunctionTemplateDecl>(D)) ResultKind = FoundOverloaded; else if (isa<UnresolvedUsingValueDecl>(D)) ResultKind = FoundUnresolvedValue; return; } // Don't do any extra resolution if we've already resolved as ambiguous. if (ResultKind == Ambiguous) return; llvm::SmallPtrSet<NamedDecl*, 16> Unique; llvm::SmallPtrSet<QualType, 16> UniqueTypes; bool Ambiguous = false; bool HasTag = false, HasFunction = false, HasNonFunction = false; bool HasFunctionTemplate = false, HasUnresolved = false; unsigned UniqueTagIndex = 0; unsigned I = 0; while (I < N) { NamedDecl *D = Decls[I]->getUnderlyingDecl(); D = cast<NamedDecl>(D->getCanonicalDecl()); // Ignore an invalid declaration unless it's the only one left. if (D->isInvalidDecl() && I < N-1) { Decls[I] = Decls[--N]; continue; } // Redeclarations of types via typedef can occur both within a scope // and, through using declarations and directives, across scopes. There is // no ambiguity if they all refer to the same type, so unique based on the // canonical type. if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) { if (!TD->getDeclContext()->isRecord()) { QualType T = getSema().Context.getTypeDeclType(TD); if (!UniqueTypes.insert(getSema().Context.getCanonicalType(T)).second) { // The type is not unique; pull something off the back and continue // at this index. Decls[I] = Decls[--N]; continue; } } } if (!Unique.insert(D).second) { // If it's not unique, pull something off the back (and // continue at this index). // FIXME: This is wrong. We need to take the more recent declaration in // order to get the right type, default arguments, etc. We also need to // prefer visible declarations to hidden ones (for redeclaration lookup // in modules builds). 
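  // Illustrative case this de-duplication handles (example only):
  //
  //   namespace A { int n; }
  //   namespace B { using A::n; }
  //   using namespace A;
  //   using namespace B;
  //   int m = n; // 'n' is reached both directly and through B's
  //              // using-declaration, but both results have the same
  //              // canonical underlying decl, so there is no ambiguity.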
Decls[I] = Decls[--N]; continue; } // Otherwise, do some decl type analysis and then continue. if (isa<UnresolvedUsingValueDecl>(D)) { HasUnresolved = true; } else if (isa<TagDecl>(D)) { if (HasTag) Ambiguous = true; UniqueTagIndex = I; HasTag = true; } else if (isa<FunctionTemplateDecl>(D)) { HasFunction = true; HasFunctionTemplate = true; } else if (isa<FunctionDecl>(D)) { HasFunction = true; } else { if (HasNonFunction) Ambiguous = true; HasNonFunction = true; } I++; } // C++ [basic.scope.hiding]p2: // A class name or enumeration name can be hidden by the name of // an object, function, or enumerator declared in the same // scope. If a class or enumeration name and an object, function, // or enumerator are declared in the same scope (in any order) // with the same name, the class or enumeration name is hidden // wherever the object, function, or enumerator name is visible. // But it's still an error if there are distinct tag types found, // even if they're not visible. (ref?) if (HideTags && HasTag && !Ambiguous && (HasFunction || HasNonFunction || HasUnresolved)) { if (getContextForScopeMatching(Decls[UniqueTagIndex])->Equals( getContextForScopeMatching(Decls[UniqueTagIndex ? 0 : N - 1]))) Decls[UniqueTagIndex] = Decls[--N]; else Ambiguous = true; } Decls.set_size(N); if (HasNonFunction && (HasFunction || HasUnresolved)) Ambiguous = true; if (Ambiguous) setAmbiguous(LookupResult::AmbiguousReference); else if (HasUnresolved) ResultKind = LookupResult::FoundUnresolvedValue; else if (N > 1 || HasFunctionTemplate) ResultKind = LookupResult::FoundOverloaded; else ResultKind = LookupResult::Found; } void LookupResult::addDeclsFromBasePaths(const CXXBasePaths &P) { CXXBasePaths::const_paths_iterator I, E; for (I = P.begin(), E = P.end(); I != E; ++I) for (DeclContext::lookup_iterator DI = I->Decls.begin(), DE = I->Decls.end(); DI != DE; ++DI) addDecl(*DI); } void LookupResult::setAmbiguousBaseSubobjects(CXXBasePaths &P) { Paths = new CXXBasePaths; Paths->swap(P); addDeclsFromBasePaths(*Paths); resolveKind(); setAmbiguous(AmbiguousBaseSubobjects); } void LookupResult::setAmbiguousBaseSubobjectTypes(CXXBasePaths &P) { Paths = new CXXBasePaths; Paths->swap(P); addDeclsFromBasePaths(*Paths); resolveKind(); setAmbiguous(AmbiguousBaseSubobjectTypes); } void LookupResult::print(raw_ostream &Out) { Out << Decls.size() << " result(s)"; if (isAmbiguous()) Out << ", ambiguous"; if (Paths) Out << ", base paths present"; for (iterator I = begin(), E = end(); I != E; ++I) { Out << "\n"; (*I)->print(Out, 2); } } /// \brief Lookup a builtin function, when name lookup would otherwise /// fail. static bool LookupBuiltin(Sema &S, LookupResult &R) { Sema::LookupNameKind NameKind = R.getLookupKind(); // If we didn't find a use of this identifier, and if the identifier // corresponds to a compiler builtin, create the decl object for the builtin // now, injecting it into translation unit scope, and return it. if (NameKind == Sema::LookupOrdinaryName || NameKind == Sema::LookupRedeclarationWithLinkage) { IdentifierInfo *II = R.getLookupName().getAsIdentifierInfo(); if (II) { if (S.getLangOpts().CPlusPlus11 && S.getLangOpts().GNUMode && II == S.getFloat128Identifier()) { // libstdc++4.7's type_traits expects type __float128 to exist, so // insert a dummy type to make that header build in gnu++11 mode. R.addDecl(S.getASTContext().getFloat128StubType()); return true; } // If this is a builtin on this (or all) targets, create the decl. 
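  // For instance (illustrative): in C, a call to 'printf' with no
  // declaration in scope can reach this point and lazily create the
  // builtin declaration, whereas in C++ the check below refuses to do
  // so for predefined library functions and the lookup simply fails.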
if (unsigned BuiltinID = II->getBuiltinID()) { // In C++, we don't have any predefined library functions like // 'malloc'. Instead, we'll just error. if (S.getLangOpts().CPlusPlus && S.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) return false; if (NamedDecl *D = S.LazilyCreateBuiltin((IdentifierInfo *)II, BuiltinID, S.TUScope, R.isForRedeclaration(), R.getNameLoc())) { R.addDecl(D); return true; } } } } return false; } /// \brief Determine whether we can declare a special member function within /// the class at this point. static bool CanDeclareSpecialMemberFunction(const CXXRecordDecl *Class) { // We need to have a definition for the class. if (!Class->getDefinition() || Class->isDependentContext()) return false; // We can't be in the middle of defining the class. return !Class->isBeingDefined(); } void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) { if (!CanDeclareSpecialMemberFunction(Class)) return; // If the default constructor has not yet been declared, do so now. if (Class->needsImplicitDefaultConstructor()) DeclareImplicitDefaultConstructor(Class); // If the copy constructor has not yet been declared, do so now. if (Class->needsImplicitCopyConstructor()) DeclareImplicitCopyConstructor(Class); // If the copy assignment operator has not yet been declared, do so now. if (Class->needsImplicitCopyAssignment()) DeclareImplicitCopyAssignment(Class); if (getLangOpts().CPlusPlus11) { // If the move constructor has not yet been declared, do so now. if (Class->needsImplicitMoveConstructor()) DeclareImplicitMoveConstructor(Class); // might not actually do it // If the move assignment operator has not yet been declared, do so now. if (Class->needsImplicitMoveAssignment()) DeclareImplicitMoveAssignment(Class); // might not actually do it } // If the destructor has not yet been declared, do so now. if (Class->needsImplicitDestructor()) DeclareImplicitDestructor(Class); } /// \brief Determine whether this is the name of an implicitly-declared /// special member function. static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) { switch (Name.getNameKind()) { case DeclarationName::CXXConstructorName: case DeclarationName::CXXDestructorName: return true; case DeclarationName::CXXOperatorName: return Name.getCXXOverloadedOperator() == OO_Equal; default: break; } return false; } /// \brief If there are any implicit member functions with the given name /// that need to be declared in the given declaration context, do so. 
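///
/// For example (illustrative): naming a constructor 'S::S', a destructor
/// '~S', or 'operator=' must first trigger declaration of the implicit
/// special members of S, since name lookup can only find members that
/// have actually been declared.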
static void DeclareImplicitMemberFunctionsWithName(Sema &S, DeclarationName Name, const DeclContext *DC) { if (!DC) return; switch (Name.getNameKind()) { case DeclarationName::CXXConstructorName: if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) if (Record->getDefinition() && CanDeclareSpecialMemberFunction(Record)) { CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(Record); if (Record->needsImplicitDefaultConstructor()) S.DeclareImplicitDefaultConstructor(Class); if (Record->needsImplicitCopyConstructor()) S.DeclareImplicitCopyConstructor(Class); if (S.getLangOpts().CPlusPlus11 && Record->needsImplicitMoveConstructor()) S.DeclareImplicitMoveConstructor(Class); } break; case DeclarationName::CXXDestructorName: if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) if (Record->getDefinition() && Record->needsImplicitDestructor() && CanDeclareSpecialMemberFunction(Record)) S.DeclareImplicitDestructor(const_cast<CXXRecordDecl *>(Record)); break; case DeclarationName::CXXOperatorName: if (Name.getCXXOverloadedOperator() != OO_Equal) break; if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) { if (Record->getDefinition() && CanDeclareSpecialMemberFunction(Record)) { CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(Record); if (Record->needsImplicitCopyAssignment()) S.DeclareImplicitCopyAssignment(Class); if (S.getLangOpts().CPlusPlus11 && Record->needsImplicitMoveAssignment()) S.DeclareImplicitMoveAssignment(Class); } } break; default: break; } } // Adds all qualifying matches for a name within a decl context to the // given lookup result. Returns true if any matches were found. static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) { bool Found = false; // Lazily declare C++ special member functions. if (S.getLangOpts().CPlusPlus) DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC); // Perform lookup into this declaration context. DeclContext::lookup_result DR = DC->lookup(R.getLookupName()); for (DeclContext::lookup_iterator I = DR.begin(), E = DR.end(); I != E; ++I) { NamedDecl *D = *I; if ((D = R.getAcceptableDecl(D))) { R.addDecl(D); Found = true; } } if (!Found && DC->isTranslationUnit() && LookupBuiltin(S, R)) return true; if (R.getLookupName().getNameKind() != DeclarationName::CXXConversionFunctionName || R.getLookupName().getCXXNameType()->isDependentType() || !isa<CXXRecordDecl>(DC)) return Found; // C++ [temp.mem]p6: // A specialization of a conversion function template is not found by // name lookup. Instead, any conversion function templates visible in the // context of the use are considered. [...] const CXXRecordDecl *Record = cast<CXXRecordDecl>(DC); if (!Record->isCompleteDefinition()) return Found; for (CXXRecordDecl::conversion_iterator U = Record->conversion_begin(), UEnd = Record->conversion_end(); U != UEnd; ++U) { FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(*U); if (!ConvTemplate) continue; // When we're performing lookup for the purposes of redeclaration, just // add the conversion function template. When we deduce template // arguments for specializations, we'll end up unifying the return // type of the new declaration with the type of the function template. if (R.isForRedeclaration()) { R.addDecl(ConvTemplate); Found = true; continue; } // C++ [temp.mem]p6: // [...] For each such operator, if argument deduction succeeds // (14.9.2.3), the resulting specialization is used as if found by // name lookup. 
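  // Illustrative example of the rule above (example only):
  //
  //   struct X { template<typename T> operator T*(); };
  //   int *p = X().operator int*(); // deduces T = int and uses the
  //                                 // resulting specialization
  //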
// // When referencing a conversion function for any purpose other than // a redeclaration (such that we'll be building an expression with the // result), perform template argument deduction and place the // specialization into the result set. We do this to avoid forcing all // callers to perform special deduction for conversion functions. TemplateDeductionInfo Info(R.getNameLoc()); FunctionDecl *Specialization = nullptr; const FunctionProtoType *ConvProto = ConvTemplate->getTemplatedDecl()->getType()->getAs<FunctionProtoType>(); assert(ConvProto && "Nonsensical conversion function template type"); // Compute the type of the function that we would expect the conversion // function to have, if it were to match the name given. // FIXME: Calling convention! FunctionProtoType::ExtProtoInfo EPI = ConvProto->getExtProtoInfo(); EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC_C); EPI.ExceptionSpec = EST_None; QualType ExpectedType = R.getSema().Context.getFunctionType(R.getLookupName().getCXXNameType(), None, EPI, None); // HLSL Change - conversion params are all in // Perform template argument deduction against the type that we would // expect the function to have. if (R.getSema().DeduceTemplateArguments(ConvTemplate, nullptr, ExpectedType, Specialization, Info) == Sema::TDK_Success) { R.addDecl(Specialization); Found = true; } } return Found; } // Performs C++ unqualified lookup into the given file context. static bool CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context, DeclContext *NS, UnqualUsingDirectiveSet &UDirs) { assert(NS && NS->isFileContext() && "CppNamespaceLookup() requires namespace!"); // Perform direct name lookup into the LookupCtx. bool Found = LookupDirect(S, R, NS); // Perform direct name lookup into the namespaces nominated by the // using directives whose common ancestor is this namespace. for (const UnqualUsingEntry &UUE : UDirs.getNamespacesFor(NS)) if (LookupDirect(S, R, UUE.getNominatedNamespace())) Found = true; R.resolveKind(); return Found; } static bool isNamespaceOrTranslationUnitScope(Scope *S) { if (DeclContext *Ctx = S->getEntity()) return Ctx->isFileContext(); return false; } // Find the next outer declaration context from this scope. This // routine actually returns the semantic outer context, which may // differ from the lexical context (encoded directly in the Scope // stack) when we are parsing a member of a class template. In this // case, the second element of the pair will be true, to indicate that // name lookup should continue searching in this semantic context when // it leaves the current template parameter scope. static std::pair<DeclContext *, bool> findOuterContext(Scope *S) { DeclContext *DC = S->getEntity(); DeclContext *Lexical = nullptr; for (Scope *OuterS = S->getParent(); OuterS; OuterS = OuterS->getParent()) { if (OuterS->getEntity()) { Lexical = OuterS->getEntity(); break; } } // C++ [temp.local]p8: // In the definition of a member of a class template that appears // outside of the namespace containing the class template // definition, the name of a template-parameter hides the name of // a member of this namespace. // // Example: // // namespace N { // class C { }; // // template<class T> class B { // void f(T); // }; // } // // template<class C> void N::B<C>::f(C) { // C b; // C is the template parameter, not N::C // } // // In this example, the lexical context we return is the // TranslationUnit, while the semantic context is the namespace N. 
if (!Lexical || !DC || !S->getParent() || !S->getParent()->isTemplateParamScope()) return std::make_pair(Lexical, false); // Find the outermost template parameter scope. // For the example, this is the scope for the template parameters of // template<class C>. Scope *OutermostTemplateScope = S->getParent(); while (OutermostTemplateScope->getParent() && OutermostTemplateScope->getParent()->isTemplateParamScope()) OutermostTemplateScope = OutermostTemplateScope->getParent(); // Find the namespace context in which the original scope occurs. In // the example, this is namespace N. DeclContext *Semantic = DC; while (!Semantic->isFileContext()) Semantic = Semantic->getParent(); // Find the declaration context just outside of the template // parameter scope. This is the context in which the template is // being lexically declared (a namespace context). In the // example, this is the global scope. if (Lexical->isFileContext() && !Lexical->Equals(Semantic) && Lexical->Encloses(Semantic)) return std::make_pair(Semantic, true); return std::make_pair(Lexical, false); } namespace { /// An RAII object to specify that we want to find block scope extern /// declarations. struct FindLocalExternScope { FindLocalExternScope(LookupResult &R) : R(R), OldFindLocalExtern(R.getIdentifierNamespace() & Decl::IDNS_LocalExtern) { R.setFindLocalExtern(R.getIdentifierNamespace() & Decl::IDNS_Ordinary); } void restore() { R.setFindLocalExtern(OldFindLocalExtern); } ~FindLocalExternScope() { restore(); } LookupResult &R; bool OldFindLocalExtern; }; } bool Sema::CppLookupName(LookupResult &R, Scope *S) { assert(getLangOpts().CPlusPlus && "Can perform only C++ lookup"); DeclarationName Name = R.getLookupName(); Sema::LookupNameKind NameKind = R.getLookupKind(); // If this is the name of an implicitly-declared special member function, // go through the scope stack to implicitly declare it. if (isImplicitlyDeclaredMemberFunctionName(Name)) { for (Scope *PreS = S; PreS; PreS = PreS->getParent()) if (DeclContext *DC = PreS->getEntity()) DeclareImplicitMemberFunctionsWithName(*this, Name, DC); } // Implicitly declare member functions with the name we're looking for, if in // fact we are in a scope where it matters. Scope *Initial = S; IdentifierResolver::iterator I = IdResolver.begin(Name), IEnd = IdResolver.end(); // First we look up the local scope. // We don't consider using-directives, as per 7.3.4.p1 [namespace.udir] // ...During unqualified name lookup (3.4.1), the names appear as if // they were declared in the nearest enclosing namespace which contains // both the using-directive and the nominated namespace. // [Note: in this context, "contains" means "contains directly or // indirectly". // // For example: // namespace A { int i; } // void foo() { // int i; // { // using namespace A; // ++i; // finds local 'i', A::i appears at global scope // } // } // UnqualUsingDirectiveSet UDirs; bool VisitedUsingDirectives = false; bool LeftStartingScope = false; DeclContext *OutsideOfTemplateParamDC = nullptr; // When performing a scope lookup, we want to find local extern decls. FindLocalExternScope FindLocals(R); for (; S && !isNamespaceOrTranslationUnitScope(S); S = S->getParent()) { DeclContext *Ctx = S->getEntity(); // Check whether the IdResolver has anything in this scope. bool Found = false; for (; I != IEnd && S->isDeclScope(*I); ++I) { if (NamedDecl *ND = R.getAcceptableDecl(*I)) { if (NameKind == LookupRedeclarationWithLinkage) { // Determine whether this (or a previous) declaration is // out-of-scope.
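  // For instance (illustrative): for a block-scope 'extern int n;', the
  // lookup may walk out of the starting function; an out-of-scope 'n'
  // without linkage (say, a local variable of an enclosing function)
  // must not be returned as the prior declaration, so it is skipped
  // below and recorded via setShadowed() for diagnostics.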
if (!LeftStartingScope && !Initial->isDeclScope(*I)) LeftStartingScope = true; // If we found something outside of our starting scope that // does not have linkage, skip it. If it's a template parameter, // we still find it, so we can diagnose the invalid redeclaration. if (LeftStartingScope && !((*I)->hasLinkage()) && !(*I)->isTemplateParameter()) { R.setShadowed(); continue; } } Found = true; R.addDecl(ND); } } if (Found) { R.resolveKind(); if (S->isClassScope()) if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(Ctx)) R.setNamingClass(Record); return true; } if (NameKind == LookupLocalFriendName && !S->isClassScope()) { // C++11 [class.friend]p11: // If a friend declaration appears in a local class and the name // specified is an unqualified name, a prior declaration is // looked up without considering scopes that are outside the // innermost enclosing non-class scope. return false; } if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC && S->getParent() && !S->getParent()->isTemplateParamScope()) { // We've just searched the last template parameter scope and // found nothing, so look into the contexts between the // lexical and semantic declaration contexts returned by // findOuterContext(). This implements the name lookup behavior // of C++ [temp.local]p8. Ctx = OutsideOfTemplateParamDC; OutsideOfTemplateParamDC = nullptr; } if (Ctx) { DeclContext *OuterCtx; bool SearchAfterTemplateScope; std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S); if (SearchAfterTemplateScope) OutsideOfTemplateParamDC = OuterCtx; for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) { // We do not directly look into transparent contexts, since // those entities will be found in the nearest enclosing // non-transparent context. if (Ctx->isTransparentContext()) continue; // We do not look directly into function or method contexts, // since all of the local variables and parameters of the // function/method are present within the Scope. if (Ctx->isFunctionOrMethod()) { // If we have an Objective-C instance method, look for ivars // in the corresponding interface. if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(Ctx)) { if (Method->isInstanceMethod() && Name.getAsIdentifierInfo()) if (ObjCInterfaceDecl *Class = Method->getClassInterface()) { ObjCInterfaceDecl *ClassDeclared; if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable( Name.getAsIdentifierInfo(), ClassDeclared)) { if (NamedDecl *ND = R.getAcceptableDecl(Ivar)) { R.addDecl(ND); R.resolveKind(); return true; } } } } continue; } // If this is a file context, we need to perform unqualified name // lookup considering using directives. if (Ctx->isFileContext()) { // If we haven't handled using directives yet, do so now. if (!VisitedUsingDirectives) { // Add using directives from this context up to the top level. for (DeclContext *UCtx = Ctx; UCtx; UCtx = UCtx->getParent()) { if (UCtx->isTransparentContext()) continue; UDirs.visit(UCtx, UCtx); } // Find the innermost file scope, so we can add using directives // from local scopes. Scope *InnermostFileScope = S; while (InnermostFileScope && !isNamespaceOrTranslationUnitScope(InnermostFileScope)) InnermostFileScope = InnermostFileScope->getParent(); UDirs.visitScopeChain(Initial, InnermostFileScope); UDirs.done(); VisitedUsingDirectives = true; } if (CppNamespaceLookup(*this, R, Context, Ctx, UDirs)) { R.resolveKind(); return true; } continue; } // Perform qualified name lookup into this context. 
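  // e.g. (illustrative): inside D::get() below, 'm' is not on any Scope,
  // so it is only found by qualified lookup into the semantic context D
  // (which searches the base class B):
  //
  //   struct B { int m; };
  //   struct D : B { int get() { return m; } };
  //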
// FIXME: In some cases, we know that every name that could be found by // this qualified name lookup will also be on the identifier chain. For // example, inside a class without any base classes, we never need to // perform qualified lookup because all of the members are on top of the // identifier chain. if (LookupQualifiedName(R, Ctx, /*InUnqualifiedLookup=*/true)) return true; } } } // Stop if we ran out of scopes. // FIXME: This really, really shouldn't be happening. if (!S) return false; // If we are looking for members, no need to look into global/namespace scope. if (NameKind == LookupMemberName) return false; // Collect UsingDirectiveDecls in all scopes, and recursively all // nominated namespaces by those using-directives. // // FIXME: Cache this sorted list in Scope structure, and DeclContext, so we // don't build it for each lookup! if (!VisitedUsingDirectives) { UDirs.visitScopeChain(Initial, S); UDirs.done(); } // If we're not performing redeclaration lookup, do not look for local // extern declarations outside of a function scope. if (!R.isForRedeclaration()) FindLocals.restore(); // Lookup namespace scope, and global scope. // Unqualified name lookup in C++ requires looking into scopes // that aren't strictly lexical, and therefore we walk through the // context as well as walking through the scopes. for (; S; S = S->getParent()) { // Check whether the IdResolver has anything in this scope. bool Found = false; for (; I != IEnd && S->isDeclScope(*I); ++I) { if (NamedDecl *ND = R.getAcceptableDecl(*I)) { // We found something. Look for anything else in our scope // with this same name and in an acceptable identifier // namespace, so that we can construct an overload set if we // need to. Found = true; R.addDecl(ND); } } if (Found && S->isTemplateParamScope()) { R.resolveKind(); return true; } DeclContext *Ctx = S->getEntity(); if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC && S->getParent() && !S->getParent()->isTemplateParamScope()) { // We've just searched the last template parameter scope and // found nothing, so look into the contexts between the // lexical and semantic declaration contexts returned by // findOuterContext(). This implements the name lookup behavior // of C++ [temp.local]p8. Ctx = OutsideOfTemplateParamDC; OutsideOfTemplateParamDC = nullptr; } if (Ctx) { DeclContext *OuterCtx; bool SearchAfterTemplateScope; std::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S); if (SearchAfterTemplateScope) OutsideOfTemplateParamDC = OuterCtx; for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) { // We do not directly look into transparent contexts, since // those entities will be found in the nearest enclosing // non-transparent context. if (Ctx->isTransparentContext()) continue; // If we have a context, and it's not a context stashed in the // template parameter scope for an out-of-line definition, also // look into that context. if (!(Found && S && S->isTemplateParamScope())) { assert(Ctx->isFileContext() && "We should have been looking only at file context here already."); // Look into context considering using-directives. 
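          // e.g. (illustrative):
          //
          //   namespace A { int i; }
          //   namespace B {
          //     using namespace A;
          //     int f() { return i; } // finds A::i via the directive
          //   }
          //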
if (CppNamespaceLookup(*this, R, Context, Ctx, UDirs)) Found = true; } if (Found) { R.resolveKind(); return true; } if (R.isForRedeclaration() && !Ctx->isTransparentContext()) return false; } } if (R.isForRedeclaration() && Ctx && !Ctx->isTransparentContext()) return false; } return !R.empty(); } /// \brief Find the declaration that a class temploid member specialization was /// instantiated from, or the member itself if it is an explicit specialization. static Decl *getInstantiatedFrom(Decl *D, MemberSpecializationInfo *MSInfo) { return MSInfo->isExplicitSpecialization() ? D : MSInfo->getInstantiatedFrom(); } Module *Sema::getOwningModule(Decl *Entity) { // If it's imported, grab its owning module. Module *M = Entity->getImportedOwningModule(); if (M || !isa<NamedDecl>(Entity) || !cast<NamedDecl>(Entity)->isHidden()) return M; assert(!Entity->isFromASTFile() && "hidden entity from AST file has no owning module"); if (!getLangOpts().ModulesLocalVisibility) { // If we're not tracking visibility locally, the only way a declaration // can be hidden and local is if it's hidden because it's parent is (for // instance, maybe this is a lazily-declared special member of an imported // class). auto *Parent = cast<NamedDecl>(Entity->getDeclContext()); assert(Parent->isHidden() && "unexpectedly hidden decl"); return getOwningModule(Parent); } // It's local and hidden; grab or compute its owning module. M = Entity->getLocalOwningModule(); if (M) return M; if (auto *Containing = PP.getModuleContainingLocation(Entity->getLocation())) { M = Containing; } else if (Entity->isInvalidDecl() || Entity->getLocation().isInvalid()) { // Don't bother tracking visibility for invalid declarations with broken // locations. cast<NamedDecl>(Entity)->setHidden(false); } else { // We need to assign a module to an entity that exists outside of any // module, so that we can hide it from modules that we textually enter. // Invent a fake module for all such entities. if (!CachedFakeTopLevelModule) { CachedFakeTopLevelModule = PP.getHeaderSearchInfo().getModuleMap().findOrCreateModule( "<top-level>", nullptr, false, false).first; auto &SrcMgr = PP.getSourceManager(); SourceLocation StartLoc = SrcMgr.getLocForStartOfFile(SrcMgr.getMainFileID()); auto &TopLevel = VisibleModulesStack.empty() ? VisibleModules : VisibleModulesStack[0]; TopLevel.setVisible(CachedFakeTopLevelModule, StartLoc); } M = CachedFakeTopLevelModule; } if (M) Entity->setLocalOwningModule(M); return M; } void Sema::makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc) { if (auto *M = PP.getModuleContainingLocation(Loc)) Context.mergeDefinitionIntoModule(ND, M); else // We're not building a module; just make the definition visible. ND->setHidden(false); // If ND is a template declaration, make the template parameters // visible too. They're not (necessarily) within a mergeable DeclContext. if (auto *TD = dyn_cast<TemplateDecl>(ND)) for (auto *Param : *TD->getTemplateParameters()) makeMergedDefinitionVisible(Param, Loc); } /// \brief Find the module in which the given declaration was defined. static Module *getDefiningModule(Sema &S, Decl *Entity) { if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Entity)) { // If this function was instantiated from a template, the defining module is // the module containing the pattern. 
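  // For example (illustrative): if 'template<class T> void f(T) {}' is
  // defined in module M and 'f<int>' is implicitly instantiated in some
  // other module, the defining module of 'f<int>' is still M, reached
  // here through its instantiation pattern.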
if (FunctionDecl *Pattern = FD->getTemplateInstantiationPattern()) Entity = Pattern; } else if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Entity)) { if (CXXRecordDecl *Pattern = RD->getTemplateInstantiationPattern()) Entity = Pattern; } else if (EnumDecl *ED = dyn_cast<EnumDecl>(Entity)) { if (MemberSpecializationInfo *MSInfo = ED->getMemberSpecializationInfo()) Entity = getInstantiatedFrom(ED, MSInfo); } else if (VarDecl *VD = dyn_cast<VarDecl>(Entity)) { // FIXME: Map from variable template specializations back to the template. if (MemberSpecializationInfo *MSInfo = VD->getMemberSpecializationInfo()) Entity = getInstantiatedFrom(VD, MSInfo); } // Walk up to the containing context. That might also have been instantiated // from a template. DeclContext *Context = Entity->getDeclContext(); if (Context->isFileContext()) return S.getOwningModule(Entity); return getDefiningModule(S, cast<Decl>(Context)); } llvm::DenseSet<Module*> &Sema::getLookupModules() { unsigned N = ActiveTemplateInstantiations.size(); for (unsigned I = ActiveTemplateInstantiationLookupModules.size(); I != N; ++I) { Module *M = getDefiningModule(*this, ActiveTemplateInstantiations[I].Entity); if (M && !LookupModulesCache.insert(M).second) M = nullptr; ActiveTemplateInstantiationLookupModules.push_back(M); } return LookupModulesCache; } bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) { for (Module *Merged : Context.getModulesWithMergedDefinition(Def)) if (isModuleVisible(Merged)) return true; return false; } template<typename ParmDecl> static bool hasVisibleDefaultArgument(Sema &S, const ParmDecl *D, llvm::SmallVectorImpl<Module *> *Modules) { if (!D->hasDefaultArgument()) return false; while (D) { auto &DefaultArg = D->getDefaultArgStorage(); if (!DefaultArg.isInherited() && S.isVisible(D)) return true; if (!DefaultArg.isInherited() && Modules) { auto *NonConstD = const_cast<ParmDecl*>(D); Modules->push_back(S.getOwningModule(NonConstD)); const auto &Merged = S.Context.getModulesWithMergedDefinition(NonConstD); Modules->insert(Modules->end(), Merged.begin(), Merged.end()); } // If there was a previous default argument, maybe its parameter is visible. D = DefaultArg.getInheritedFrom(); } return false; } bool Sema::hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) { if (auto *P = dyn_cast<TemplateTypeParmDecl>(D)) return ::hasVisibleDefaultArgument(*this, P, Modules); if (auto *P = dyn_cast<NonTypeTemplateParmDecl>(D)) return ::hasVisibleDefaultArgument(*this, P, Modules); return ::hasVisibleDefaultArgument(*this, cast<TemplateTemplateParmDecl>(D), Modules); } /// \brief Determine whether a declaration is visible to name lookup. /// /// This routine determines whether the declaration D is visible in the current /// lookup context, taking into account the current template instantiation /// stack. During template instantiation, a declaration is visible if it is /// visible from a module containing any entity on the template instantiation /// path (by instantiating a template, you allow it to see the declarations that /// your module can see, including those later on in your module). bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) { assert(D->isHidden() && "should not call this: not in slow case"); Module *DeclModule = SemaRef.getOwningModule(D); if (!DeclModule) { // getOwningModule() may have decided the declaration should not be hidden. 
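    // (This can happen, for instance, for declarations with invalid
    // locations: getOwningModule() clears the hidden bit for those
    // rather than assigning them an owning module.)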
assert(!D->isHidden() && "hidden decl not from a module"); return true; } // If the owning module is visible, and the decl is not module private, // then the decl is visible too. (Module private is ignored within the same // top-level module.) if (!D->isFromASTFile() || !D->isModulePrivate()) { if (SemaRef.isModuleVisible(DeclModule)) return true; // Also check merged definitions. if (SemaRef.getLangOpts().ModulesLocalVisibility && SemaRef.hasVisibleMergedDefinition(D)) return true; } // If this declaration is not at namespace scope nor module-private, // then it is visible if its lexical parent has a visible definition. DeclContext *DC = D->getLexicalDeclContext(); if (!D->isModulePrivate() && DC && !DC->isFileContext() && !isa<LinkageSpecDecl>(DC)) { // For a parameter, check whether our current template declaration's // lexical context is visible, not whether there's some other visible // definition of it, because parameters aren't "within" the definition. if ((D->isTemplateParameter() || isa<ParmVarDecl>(D)) ? isVisible(SemaRef, cast<NamedDecl>(DC)) : SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC))) { if (SemaRef.ActiveTemplateInstantiations.empty() && // FIXME: Do something better in this case. !SemaRef.getLangOpts().ModulesLocalVisibility) { // Cache the fact that this declaration is implicitly visible because // its parent has a visible definition. D->setHidden(false); } return true; } return false; } // Find the extra places where we need to look. llvm::DenseSet<Module*> &LookupModules = SemaRef.getLookupModules(); if (LookupModules.empty()) return false; // If our lookup set contains the decl's module, it's visible. if (LookupModules.count(DeclModule)) return true; // If the declaration isn't exported, it's not visible in any other module. if (D->isModulePrivate()) return false; // Check whether DeclModule is transitively exported to an import of // the lookup set. for (llvm::DenseSet<Module *>::iterator I = LookupModules.begin(), E = LookupModules.end(); I != E; ++I) if ((*I)->isModuleVisible(DeclModule)) return true; return false; } bool Sema::isVisibleSlow(const NamedDecl *D) { return LookupResult::isVisible(*this, const_cast<NamedDecl*>(D)); } /// \brief Retrieve the visible declaration corresponding to D, if any. /// /// This routine determines whether the declaration D is visible in the current /// module, with the current imports. If not, it checks whether any /// redeclaration of D is visible, and if so, returns that declaration. /// /// \returns D, or a visible previous declaration of D, whichever is more recent /// and visible. If no declaration of D is visible, returns null. static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D) { assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case"); for (auto RD : D->redecls()) { if (auto ND = dyn_cast<NamedDecl>(RD)) { // FIXME: This is wrong in the case where the previous declaration is not // visible in the same scope as D. This needs to be done much more // carefully. if (LookupResult::isVisible(SemaRef, ND)) return ND; } } return nullptr; } NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const { return findAcceptableDecl(getSema(), D); } /// @brief Perform unqualified name lookup starting from a given /// scope. /// /// Unqualified name lookup (C++ [basic.lookup.unqual], C99 6.2.1) is /// used to find names within the current scope. 
For example, 'x' in /// @code /// int x; /// int f() { /// return x; // unqualified name lookup finds 'x' in the global scope /// } /// @endcode /// /// Different lookup criteria can find different names. For example, a /// particular scope can have both a struct and a function of the same /// name, and each can be found by certain lookup criteria. For more /// information about lookup criteria, see the documentation for the /// class LookupCriteria. /// /// @param S The scope from which unqualified name lookup will /// begin. If the lookup criteria permits, name lookup may also search /// in the parent scopes. /// /// @param [in,out] R Specifies the lookup to perform (e.g., the name to /// look up and the lookup kind), and is updated with the results of lookup /// including zero or more declarations and possibly additional information /// used to diagnose ambiguities. /// /// @returns \c true if lookup succeeded and false otherwise. bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) { DeclarationName Name = R.getLookupName(); if (!Name) return false; LookupNameKind NameKind = R.getLookupKind(); if (!getLangOpts().CPlusPlus) { // Unqualified name lookup in C/Objective-C is purely lexical, so // search in the declarations attached to the name. if (NameKind == Sema::LookupRedeclarationWithLinkage) { // Find the nearest non-transparent declaration scope. while (!(S->getFlags() & Scope::DeclScope) || (S->getEntity() && S->getEntity()->isTransparentContext())) S = S->getParent(); } // When performing a scope lookup, we want to find local extern decls. FindLocalExternScope FindLocals(R); // Scan up the scope chain looking for a decl that matches this // identifier that is in the appropriate namespace. This search // should not take long, as shadowing of names is uncommon, and // deep shadowing is extremely uncommon. bool LeftStartingScope = false; for (IdentifierResolver::iterator I = IdResolver.begin(Name), IEnd = IdResolver.end(); I != IEnd; ++I) if (NamedDecl *D = R.getAcceptableDecl(*I)) { if (NameKind == LookupRedeclarationWithLinkage) { // Determine whether this (or a previous) declaration is // out-of-scope. if (!LeftStartingScope && !S->isDeclScope(*I)) LeftStartingScope = true; // If we found something outside of our starting scope that // does not have linkage, skip it. if (LeftStartingScope && !((*I)->hasLinkage())) { R.setShadowed(); continue; } } else if (NameKind == LookupObjCImplicitSelfParam && !isa<ImplicitParamDecl>(*I)) continue; R.addDecl(D); // Check whether there are any other declarations with the same name // and in the same scope. if (I != IEnd) { // Find the scope in which this declaration was declared (if it // actually exists in a Scope). while (S && !S->isDeclScope(D)) S = S->getParent(); // If the scope containing the declaration is the translation unit, // then we'll need to perform our checks based on the matching // DeclContexts rather than matching scopes. if (S && isNamespaceOrTranslationUnitScope(S)) S = nullptr; // Compute the DeclContext, if we need it. DeclContext *DC = nullptr; if (!S) DC = (*I)->getDeclContext()->getRedeclContext(); IdentifierResolver::iterator LastI = I; for (++LastI; LastI != IEnd; ++LastI) { if (S) { // Match based on scope. if (!S->isDeclScope(*LastI)) break; } else { // Match based on DeclContext. DeclContext *LastDC = (*LastI)->getDeclContext()->getRedeclContext(); if (!LastDC->Equals(DC)) break; } // If the declaration is in the right namespace and visible, add it.
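            // e.g. (illustrative, C): 'struct stat' and the function
            // 'stat' may share a scope while living in different
            // identifier namespaces; only the declarations acceptable
            // to this lookup's criteria are added.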
if (NamedDecl *LastD = R.getAcceptableDecl(*LastI)) R.addDecl(LastD); } R.resolveKind(); } return true; } } else { // Perform C++ unqualified name lookup. if (CppLookupName(R, S)) return true; } // If we didn't find a use of this identifier, and if the identifier // corresponds to a compiler builtin, create the decl object for the builtin // now, injecting it into translation unit scope, and return it. if (AllowBuiltinCreation && LookupBuiltin(*this, R)) return true; // If we didn't find a use of this identifier, the ExternalSource // may be able to handle the situation. // Note: some lookup failures are expected! // See e.g. R.isForRedeclaration(). return (ExternalSource && ExternalSource->LookupUnqualified(R, S)); } /// @brief Perform qualified name lookup in the namespaces nominated by /// using directives by the given context. /// /// C++98 [namespace.qual]p2: /// Given X::m (where X is a user-declared namespace), or given \::m /// (where X is the global namespace), let S be the set of all /// declarations of m in X and in the transitive closure of all /// namespaces nominated by using-directives in X and its used /// namespaces, except that using-directives are ignored in any /// namespace, including X, directly containing one or more /// declarations of m. No namespace is searched more than once in /// the lookup of a name. If S is the empty set, the program is /// ill-formed. Otherwise, if S has exactly one member, or if the /// context of the reference is a using-declaration /// (namespace.udecl), S is the required set of declarations of /// m. Otherwise if the use of m is not one that allows a unique /// declaration to be chosen from S, the program is ill-formed. /// /// C++98 [namespace.qual]p5: /// During the lookup of a qualified namespace member name, if the /// lookup finds more than one declaration of the member, and if one /// declaration introduces a class name or enumeration name and the /// other declarations either introduce the same object, the same /// enumerator or a set of functions, the non-type name hides the /// class or enumeration name if and only if the declarations are /// from the same namespace; otherwise (the declarations are from /// different namespaces), the program is ill-formed. static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R, DeclContext *StartDC) { assert(StartDC->isFileContext() && "start context is not a file context"); DeclContext::udir_range UsingDirectives = StartDC->using_directives(); if (UsingDirectives.begin() == UsingDirectives.end()) return false; // We have at least added all these contexts to the queue. llvm::SmallPtrSet<DeclContext*, 8> Visited; Visited.insert(StartDC); // We have not yet looked into these namespaces, much less added // their "using-children" to the queue. SmallVector<NamespaceDecl*, 8> Queue; // We have already looked into the initial namespace; seed the queue // with its using-children. for (auto *I : UsingDirectives) { NamespaceDecl *ND = I->getNominatedNamespace()->getOriginalNamespace(); if (Visited.insert(ND).second) Queue.push_back(ND); } // The easiest way to implement the restriction in [namespace.qual]p5 // is to check whether any of the individual results found a tag // and, if so, to declare an ambiguity if the final result is not // a tag. 
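  // e.g. (illustrative):
  //
  //   namespace M { struct C {}; }
  //   namespace N { int C; }
  //   namespace P { using namespace M; using namespace N; }
  //   P::C x; // ill-formed: the tag M::C and the object N::C come from
  //           // different namespaces, so neither hides the other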
bool FoundTag = false; bool FoundNonTag = false; LookupResult LocalR(LookupResult::Temporary, R); bool Found = false; while (!Queue.empty()) { NamespaceDecl *ND = Queue.pop_back_val(); // We go through some convolutions here to avoid copying results // between LookupResults. bool UseLocal = !R.empty(); LookupResult &DirectR = UseLocal ? LocalR : R; bool FoundDirect = LookupDirect(S, DirectR, ND); if (FoundDirect) { // First do any local hiding. DirectR.resolveKind(); // If the local result is a tag, remember that. if (DirectR.isSingleTagDecl()) FoundTag = true; else FoundNonTag = true; // Append the local results to the total results if necessary. if (UseLocal) { R.addAllDecls(LocalR); LocalR.clear(); } } // If we find names in this namespace, ignore its using directives. if (FoundDirect) { Found = true; continue; } for (auto I : ND->using_directives()) { NamespaceDecl *Nom = I->getNominatedNamespace(); if (Visited.insert(Nom).second) Queue.push_back(Nom); } } if (Found) { if (FoundTag && FoundNonTag) R.setAmbiguousQualifiedTagHiding(); else R.resolveKind(); } return Found; } /// \brief Callback that looks for any member of a class with the given name. static bool LookupAnyMember(const CXXBaseSpecifier *Specifier, CXXBasePath &Path, void *Name) { RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl(); DeclarationName N = DeclarationName::getFromOpaquePtr(Name); Path.Decls = BaseRecord->lookup(N); return !Path.Decls.empty(); } /// \brief Determine whether the given set of member declarations contains only /// static members, nested types, and enumerators. template<typename InputIterator> static bool HasOnlyStaticMembers(InputIterator First, InputIterator Last) { Decl *D = (*First)->getUnderlyingDecl(); if (isa<VarDecl>(D) || isa<TypeDecl>(D) || isa<EnumConstantDecl>(D)) return true; if (isa<CXXMethodDecl>(D)) { // Determine whether all of the methods are static. bool AllMethodsAreStatic = true; for(; First != Last; ++First) { D = (*First)->getUnderlyingDecl(); if (!isa<CXXMethodDecl>(D)) { assert(isa<TagDecl>(D) && "Non-function must be a tag decl"); break; } if (!cast<CXXMethodDecl>(D)->isStatic()) { AllMethodsAreStatic = false; break; } } if (AllMethodsAreStatic) return true; } return false; } /// \brief Perform qualified name lookup into a given context. /// /// Qualified name lookup (C++ [basic.lookup.qual]) is used to find /// names when the context of those names is explicit specified, e.g., /// "std::vector" or "x->member", or as part of unqualified name lookup. /// /// Different lookup criteria can find different names. For example, a /// particular scope can have both a struct and a function of the same /// name, and each can be found by certain lookup criteria. For more /// information about lookup criteria, see the documentation for the /// class LookupCriteria. /// /// \param R captures both the lookup criteria and any lookup results found. /// /// \param LookupCtx The context in which qualified name lookup will /// search. If the lookup criteria permits, name lookup may also search /// in the parent contexts or (for C++ classes) base classes. /// /// \param InUnqualifiedLookup true if this is qualified name lookup that /// occurs as part of unqualified name lookup. /// /// \returns true if lookup succeeded, false if it failed. 
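///
/// For example (illustrative): resolving 'x' in 'N::x' performs qualified
/// lookup with the namespace N as LookupCtx, while resolving 'm' in
/// 'obj.m' uses the class of 'obj', searching its base classes if the
/// name is not found directly.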
bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup) { assert(LookupCtx && "Sema::LookupQualifiedName requires a lookup context"); if (!R.getLookupName()) return false; // Make sure that the declaration context is complete. assert((!isa<TagDecl>(LookupCtx) || LookupCtx->isDependentContext() || cast<TagDecl>(LookupCtx)->isCompleteDefinition() || cast<TagDecl>(LookupCtx)->isBeingDefined()) && "Declaration context must already be complete!"); // Perform qualified name lookup into the LookupCtx. if (LookupDirect(*this, R, LookupCtx)) { R.resolveKind(); if (isa<CXXRecordDecl>(LookupCtx)) R.setNamingClass(cast<CXXRecordDecl>(LookupCtx)); return true; } // Don't descend into implied contexts for redeclarations. // C++98 [namespace.qual]p6: // In a declaration for a namespace member in which the // declarator-id is a qualified-id, given that the qualified-id // for the namespace member has the form // nested-name-specifier unqualified-id // the unqualified-id shall name a member of the namespace // designated by the nested-name-specifier. // See also [class.mfct]p5 and [class.static.data]p2. if (R.isForRedeclaration()) return false; // If this is a namespace, look it up in the implied namespaces. if (LookupCtx->isFileContext()) return LookupQualifiedNameInUsingDirectives(*this, R, LookupCtx); // If this isn't a C++ class, we aren't allowed to look into base // classes, we're done. CXXRecordDecl *LookupRec = dyn_cast<CXXRecordDecl>(LookupCtx); if (!LookupRec || !LookupRec->getDefinition()) return false; // If we're performing qualified name lookup into a dependent class, // then we are actually looking into a current instantiation. If we have any // dependent base classes, then we either have to delay lookup until // template instantiation time (at which point all bases will be available) // or we have to fail. if (!InUnqualifiedLookup && LookupRec->isDependentContext() && LookupRec->hasAnyDependentBases()) { R.setNotFoundInCurrentInstantiation(); return false; } // Perform lookup into our base classes. CXXBasePaths Paths; Paths.setOrigin(LookupRec); // Look for this member in our base classes CXXRecordDecl::BaseMatchesCallback *BaseCallback = nullptr; switch (R.getLookupKind()) { case LookupObjCImplicitSelfParam: case LookupOrdinaryName: case LookupMemberName: case LookupRedeclarationWithLinkage: case LookupLocalFriendName: BaseCallback = &CXXRecordDecl::FindOrdinaryMember; break; case LookupTagName: BaseCallback = &CXXRecordDecl::FindTagMember; break; case LookupAnyName: BaseCallback = &LookupAnyMember; break; case LookupUsingDeclName: // This lookup is for redeclarations only. case LookupOperatorName: case LookupNamespaceName: case LookupObjCProtocolName: case LookupLabel: // These lookups will never find a member in a C++ class (or base class). return false; case LookupNestedNameSpecifierName: BaseCallback = &CXXRecordDecl::FindNestedNameSpecifierMember; break; } if (!LookupRec->lookupInBases(BaseCallback, R.getLookupName().getAsOpaquePtr(), Paths)) return false; R.setNamingClass(LookupRec); // C++ [class.member.lookup]p2: // [...] If the resulting set of declarations are not all from // sub-objects of the same type, or the set has a nonstatic member // and includes members from distinct sub-objects, there is an // ambiguity and the program is ill-formed. Otherwise that set is // the result of the lookup. 
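  // e.g. (illustrative):
  //
  //   struct A { int x; };
  //   struct B : A {}; struct C : A {};
  //   struct D : B, C {};
  //   int f(D d) { return d.x; } // ambiguous: 'x' is found in two
  //                              // distinct A subobjects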
QualType SubobjectType; int SubobjectNumber = 0; AccessSpecifier SubobjectAccess = AS_none; for (CXXBasePaths::paths_iterator Path = Paths.begin(), PathEnd = Paths.end(); Path != PathEnd; ++Path) { const CXXBasePathElement &PathElement = Path->back(); // Pick the best (i.e. most permissive i.e. numerically lowest) access // across all paths. SubobjectAccess = std::min(SubobjectAccess, Path->Access); // Determine whether we're looking at a distinct sub-object or not. if (SubobjectType.isNull()) { // This is the first subobject we've looked at. Record its type. SubobjectType = Context.getCanonicalType(PathElement.Base->getType()); SubobjectNumber = PathElement.SubobjectNumber; continue; } if (SubobjectType != Context.getCanonicalType(PathElement.Base->getType())) { // We found members of the given name in two subobjects of // different types. If the declaration sets aren't the same, this // lookup is ambiguous. if (HasOnlyStaticMembers(Path->Decls.begin(), Path->Decls.end())) { CXXBasePaths::paths_iterator FirstPath = Paths.begin(); DeclContext::lookup_iterator FirstD = FirstPath->Decls.begin(); DeclContext::lookup_iterator CurrentD = Path->Decls.begin(); while (FirstD != FirstPath->Decls.end() && CurrentD != Path->Decls.end()) { if ((*FirstD)->getUnderlyingDecl()->getCanonicalDecl() != (*CurrentD)->getUnderlyingDecl()->getCanonicalDecl()) break; ++FirstD; ++CurrentD; } if (FirstD == FirstPath->Decls.end() && CurrentD == Path->Decls.end()) continue; } R.setAmbiguousBaseSubobjectTypes(Paths); return true; } if (SubobjectNumber != PathElement.SubobjectNumber) { // We have a different subobject of the same type. // C++ [class.member.lookup]p5: // A static member, a nested type or an enumerator defined in // a base class T can unambiguously be found even if an object // has more than one base class subobject of type T. if (HasOnlyStaticMembers(Path->Decls.begin(), Path->Decls.end())) continue; // We have found a nonstatic member name in multiple, distinct // subobjects. Name lookup is ambiguous. R.setAmbiguousBaseSubobjects(Paths); return true; } } // Lookup in a base class succeeded; return these results. for (auto *D : Paths.front().Decls) { AccessSpecifier AS = CXXRecordDecl::MergeAccess(SubobjectAccess, D->getAccess()); R.addDecl(D, AS); } R.resolveKind(); return true; } /// \brief Performs qualified name lookup or special type of lookup for /// "__super::" scope specifier. /// /// This routine is a convenience overload meant to be called from contexts /// that need to perform a qualified name lookup with an optional C++ scope /// specifier that might require special kind of lookup. /// /// \param R captures both the lookup criteria and any lookup results found. /// /// \param LookupCtx The context in which qualified name lookup will /// search. /// /// \param SS An optional C++ scope-specifier. /// /// \returns true if lookup succeeded, false if it failed. bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS) { auto *NNS = SS.getScopeRep(); if (NNS && NNS->getKind() == NestedNameSpecifier::Super) return LookupInSuper(R, NNS->getAsRecordDecl()); else return LookupQualifiedName(R, LookupCtx); } /// @brief Performs name lookup for a name that was parsed in the /// source code, and may contain a C++ scope specifier. /// /// This routine is a convenience routine meant to be called from /// contexts that receive a name and an optional C++ scope specifier /// (e.g., "N::M::x"). 
It will then perform either qualified or /// unqualified name lookup (with LookupQualifiedName or LookupName, /// respectively) on the given name and return those results. It will /// perform a special type of lookup for the "__super::" scope specifier. /// /// @param S The scope from which unqualified name lookup will /// begin. /// /// @param SS An optional C++ scope-specifier, e.g., "::N::M". /// /// @param EnteringContext Indicates whether we are going to enter the /// context of the scope-specifier SS (if present). /// /// @returns True if any decls were found (but possibly ambiguous) bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation, bool EnteringContext) { if (SS && SS->isInvalid()) { // When the scope specifier is invalid, don't even look for // anything. return false; } if (SS && SS->isSet()) { NestedNameSpecifier *NNS = SS->getScopeRep(); if (NNS->getKind() == NestedNameSpecifier::Super) return LookupInSuper(R, NNS->getAsRecordDecl()); if (DeclContext *DC = computeDeclContext(*SS, EnteringContext)) { // We have resolved the scope specifier to a particular declaration // context, and will perform name lookup in that context. if (!DC->isDependentContext() && RequireCompleteDeclContext(*SS, DC)) return false; R.setContextRange(SS->getRange()); return LookupQualifiedName(R, DC); } // We could not resolve the scope specifier to a specific declaration // context, which means that SS refers to an unknown specialization. // Name lookup can't find anything in this case. R.setNotFoundInCurrentInstantiation(); R.setContextRange(SS->getRange()); return false; } // Perform unqualified name lookup starting in the given scope. return LookupName(R, S, AllowBuiltinCreation); } /// \brief Perform qualified name lookup into all base classes of the given /// class. /// /// \param R captures both the lookup criteria and any lookup results found. /// /// \param Class The context in which qualified name lookup will /// search. Name lookup will search in all base classes merging the results. /// /// @returns True if any decls were found (but possibly ambiguous) bool Sema::LookupInSuper(LookupResult &R, CXXRecordDecl *Class) { for (const auto &BaseSpec : Class->bases()) { CXXRecordDecl *RD = cast<CXXRecordDecl>( BaseSpec.getType()->castAs<RecordType>()->getDecl()); LookupResult Result(*this, R.getLookupNameInfo(), R.getLookupKind()); Result.setBaseObjectType(Context.getRecordType(Class)); LookupQualifiedName(Result, RD); for (auto *Decl : Result) R.addDecl(Decl); } R.resolveKind(); return !R.empty(); } /// \brief Produce a diagnostic describing the ambiguity that resulted /// from name lookup. /// /// \param Result The result of the ambiguous lookup to be diagnosed.
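///
/// Emits a primary diagnostic keyed to the ambiguity kind (ambiguous base
/// subobjects, ambiguous base subobject types, tag hiding, or a plain
/// ambiguous reference), followed by notes pointing at each candidate
/// declaration.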
void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) { assert(Result.isAmbiguous() && "Lookup result must be ambiguous"); DeclarationName Name = Result.getLookupName(); SourceLocation NameLoc = Result.getNameLoc(); SourceRange LookupRange = Result.getContextRange(); switch (Result.getAmbiguityKind()) { case LookupResult::AmbiguousBaseSubobjects: { CXXBasePaths *Paths = Result.getBasePaths(); QualType SubobjectType = Paths->front().back().Base->getType(); Diag(NameLoc, diag::err_ambiguous_member_multiple_subobjects) << Name << SubobjectType << getAmbiguousPathsDisplayString(*Paths) << LookupRange; DeclContext::lookup_iterator Found = Paths->front().Decls.begin(); while (isa<CXXMethodDecl>(*Found) && cast<CXXMethodDecl>(*Found)->isStatic()) ++Found; Diag((*Found)->getLocation(), diag::note_ambiguous_member_found); break; } case LookupResult::AmbiguousBaseSubobjectTypes: { Diag(NameLoc, diag::err_ambiguous_member_multiple_subobject_types) << Name << LookupRange; CXXBasePaths *Paths = Result.getBasePaths(); std::set<Decl *> DeclsPrinted; for (CXXBasePaths::paths_iterator Path = Paths->begin(), PathEnd = Paths->end(); Path != PathEnd; ++Path) { Decl *D = Path->Decls.front(); if (DeclsPrinted.insert(D).second) Diag(D->getLocation(), diag::note_ambiguous_member_found); } break; } case LookupResult::AmbiguousTagHiding: { Diag(NameLoc, diag::err_ambiguous_tag_hiding) << Name << LookupRange; llvm::SmallPtrSet<NamedDecl*,8> TagDecls; for (auto *D : Result) if (TagDecl *TD = dyn_cast<TagDecl>(D)) { TagDecls.insert(TD); Diag(TD->getLocation(), diag::note_hidden_tag); } for (auto *D : Result) if (!isa<TagDecl>(D)) Diag(D->getLocation(), diag::note_hiding_object); // For recovery purposes, go ahead and implement the hiding. LookupResult::Filter F = Result.makeFilter(); while (F.hasNext()) { if (TagDecls.count(F.next())) F.erase(); } F.done(); break; } case LookupResult::AmbiguousReference: { Diag(NameLoc, diag::err_ambiguous_reference) << Name << LookupRange; for (auto *D : Result) Diag(D->getLocation(), diag::note_ambiguous_candidate) << D; break; } } } namespace { struct AssociatedLookup { AssociatedLookup(Sema &S, SourceLocation InstantiationLoc, Sema::AssociatedNamespaceSet &Namespaces, Sema::AssociatedClassSet &Classes) : S(S), Namespaces(Namespaces), Classes(Classes), InstantiationLoc(InstantiationLoc) { } Sema &S; Sema::AssociatedNamespaceSet &Namespaces; Sema::AssociatedClassSet &Classes; SourceLocation InstantiationLoc; }; } static void addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType T); static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces, DeclContext *Ctx) { // Add the associated namespace for this class. // We don't use DeclContext::getEnclosingNamespaceContext() as this may // be a locally scoped record. // We skip out of inline namespaces. The innermost non-inline namespace // contains all names of all its nested inline namespaces anyway, so we can // replace the entire inline namespace tree with its root. while (Ctx->isRecord() || Ctx->isTransparentContext() || Ctx->isInlineNamespace()) Ctx = Ctx->getParent(); if (Ctx->isFileContext()) Namespaces.insert(Ctx->getPrimaryContext()); } // \brief Add the associated classes and namespaces for argument-dependent // lookup that involves a template argument (C++ [basic.lookup.koenig]p2). static void addAssociatedClassesAndNamespaces(AssociatedLookup &Result, const TemplateArgument &Arg) { // C++ [basic.lookup.koenig]p2, last bullet: // -- [...] 
  switch (Arg.getKind()) {
  case TemplateArgument::Null:
    break;

  case TemplateArgument::Type:
    // [...] the namespaces and classes associated with the types of the
    // template arguments provided for template type parameters (excluding
    // template template parameters)
    addAssociatedClassesAndNamespaces(Result, Arg.getAsType());
    break;

  case TemplateArgument::Template:
  case TemplateArgument::TemplateExpansion: {
    // [...] the namespaces in which any template template arguments are
    // defined; and the classes in which any member templates used as
    // template template arguments are defined.
    TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
    if (ClassTemplateDecl *ClassTemplate
          = dyn_cast<ClassTemplateDecl>(Template.getAsTemplateDecl())) {
      DeclContext *Ctx = ClassTemplate->getDeclContext();
      if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
        Result.Classes.insert(EnclosingClass);
      // Add the associated namespace for this class.
      CollectEnclosingNamespace(Result.Namespaces, Ctx);
    }
    break;
  }

  case TemplateArgument::Declaration:
  case TemplateArgument::Integral:
  case TemplateArgument::Expression:
  case TemplateArgument::NullPtr:
    // [Note: non-type template arguments do not contribute to the set of
    //  associated namespaces. ]
    break;

  case TemplateArgument::Pack:
    for (const auto &P : Arg.pack_elements())
      addAssociatedClassesAndNamespaces(Result, P);
    break;
  }
}

// \brief Add the associated classes and namespaces for
// argument-dependent lookup with an argument of class type
// (C++ [basic.lookup.koenig]p2).
static void
addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
                                  CXXRecordDecl *Class) {
  // Just silently ignore anything whose name is __va_list_tag.
  if (Class->getDeclName() == Result.S.VAListTagName)
    return;

  // C++ [basic.lookup.koenig]p2:
  //   [...]
  //     -- If T is a class type (including unions), its associated
  //        classes are: the class itself; the class of which it is a
  //        member, if any; and its direct and indirect base
  //        classes. Its associated namespaces are the namespaces in
  //        which its associated classes are defined.

  // Add the class of which it is a member, if any.
  DeclContext *Ctx = Class->getDeclContext();
  if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
    Result.Classes.insert(EnclosingClass);
  // Add the associated namespace for this class.
  CollectEnclosingNamespace(Result.Namespaces, Ctx);

  // Add the class itself. If we've already seen this class, we don't
  // need to visit base classes.
  //
  // FIXME: That's not correct, we may have added this class only because it
  // was the enclosing class of another class, and in that case we won't have
  // added its base classes yet.
  if (!Result.Classes.insert(Class).second)
    return;

  // -- If T is a template-id, its associated namespaces and classes are
  //    the namespace in which the template is defined; for member
  //    templates, the member template's class; the namespaces and classes
  //    associated with the types of the template arguments provided for
  //    template type parameters (excluding template template parameters); the
  //    namespaces in which any template template arguments are defined; and
  //    the classes in which any member templates used as template template
  //    arguments are defined. [Note: non-type template arguments do not
  //    contribute to the set of associated namespaces. ]
  if (ClassTemplateSpecializationDecl *Spec
        = dyn_cast<ClassTemplateSpecializationDecl>(Class)) {
    DeclContext *Ctx = Spec->getSpecializedTemplate()->getDeclContext();
    if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
      Result.Classes.insert(EnclosingClass);
    // Add the associated namespace for this class.
    CollectEnclosingNamespace(Result.Namespaces, Ctx);

    const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
    for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
      addAssociatedClassesAndNamespaces(Result, TemplateArgs[I]);
  }

  // Only recurse into base classes for complete types.
  if (!Class->hasDefinition())
    return;

  // Add direct and indirect base classes along with their associated
  // namespaces.
  SmallVector<CXXRecordDecl *, 32> Bases;
  Bases.push_back(Class);
  while (!Bases.empty()) {
    // Pop this class off the stack.
    Class = Bases.pop_back_val();

    // Visit the base classes.
    for (const auto &Base : Class->bases()) {
      const RecordType *BaseType = Base.getType()->getAs<RecordType>();
      // In dependent contexts, we do ADL twice, and the first time around,
      // the base type might be a dependent TemplateSpecializationType, or a
      // TemplateTypeParmType. If that happens, simply ignore it.
      // FIXME: If we want to support export, we probably need to add the
      // namespace of the template in a TemplateSpecializationType, or even
      // the classes and namespaces of known non-dependent arguments.
      if (!BaseType)
        continue;
      CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
      if (Result.Classes.insert(BaseDecl).second) {
        // Find the associated namespace for this base class.
        DeclContext *BaseCtx = BaseDecl->getDeclContext();
        CollectEnclosingNamespace(Result.Namespaces, BaseCtx);

        // Make sure we visit the bases of this base class.
        if (BaseDecl->bases_begin() != BaseDecl->bases_end())
          Bases.push_back(BaseDecl);
      }
    }
  }
}

// \brief Add the associated classes and namespaces for
// argument-dependent lookup with an argument of type T
// (C++ [basic.lookup.koenig]p2).
static void
addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
  // C++ [basic.lookup.koenig]p2:
  //
  //   For each argument type T in the function call, there is a set
  //   of zero or more associated namespaces and a set of zero or more
  //   associated classes to be considered. The sets of namespaces and
  //   classes is determined entirely by the types of the function
  //   arguments (and the namespace of any template template
  //   argument). Typedef names and using-declarations used to specify
  //   the types do not contribute to this set. The sets of namespaces
  //   and classes are determined in the following way:

  SmallVector<const Type *, 16> Queue;
  const Type *T = Ty->getCanonicalTypeInternal().getTypePtr();

  while (true) {
    switch (T->getTypeClass()) {

#define TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define ABSTRACT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      // T is canonical. We can also ignore dependent types because
      // we don't need to do ADL at the definition point, but if we
      // wanted to implement template export (or if we find some other
      // use for associated classes and namespaces...) this would be
      // wrong.
      break;

    //    -- If T is a pointer to U or an array of U, its associated
    //       namespaces and classes are those associated with U.
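    //
    // For instance (an illustrative sketch; 'N' and 'X' are hypothetical
    // names): given 'namespace N { struct X {}; }', an argument of type
    // 'N::X*' or 'N::X[4]' contributes the same associated namespace N as
    // 'N::X' itself, because the cases below strip the pointer/array type
    // down to its element type and continue from there.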
    case Type::Pointer:
      T = cast<PointerType>(T)->getPointeeType().getTypePtr();
      continue;
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
      T = cast<ArrayType>(T)->getElementType().getTypePtr();
      continue;

    //     -- If T is a fundamental type, its associated sets of
    //        namespaces and classes are both empty.
    case Type::Builtin:
      break;

    //     -- If T is a class type (including unions), its associated
    //        classes are: the class itself; the class of which it is a
    //        member, if any; and its direct and indirect base
    //        classes. Its associated namespaces are the namespaces in
    //        which its associated classes are defined.
    case Type::Record: {
      Result.S.RequireCompleteType(Result.InstantiationLoc, QualType(T, 0),
                                   /*no diagnostic*/ 0);
      CXXRecordDecl *Class =
          cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
      addAssociatedClassesAndNamespaces(Result, Class);
      break;
    }

    //     -- If T is an enumeration type, its associated namespace is
    //        the namespace in which it is defined. If it is class
    //        member, its associated class is the member's class; else
    //        it has no associated class.
    case Type::Enum: {
      EnumDecl *Enum = cast<EnumType>(T)->getDecl();

      DeclContext *Ctx = Enum->getDeclContext();
      if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
        Result.Classes.insert(EnclosingClass);

      // Add the associated namespace for this class.
      CollectEnclosingNamespace(Result.Namespaces, Ctx);

      break;
    }

    //     -- If T is a function type, its associated namespaces and
    //        classes are those associated with the function parameter
    //        types and those associated with the return type.
    case Type::FunctionProto: {
      const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
      for (const auto &Arg : Proto->param_types())
        Queue.push_back(Arg.getTypePtr());
      // fallthrough
      LLVM_FALLTHROUGH; // HLSL Change
    }
    case Type::FunctionNoProto: {
      const FunctionType *FnType = cast<FunctionType>(T);
      T = FnType->getReturnType().getTypePtr();
      continue;
    }

    //     -- If T is a pointer to a member function of a class X, its
    //        associated namespaces and classes are those associated
    //        with the function parameter types and return type,
    //        together with those associated with X.
    //
    //     -- If T is a pointer to a data member of class X, its
    //        associated namespaces and classes are those associated
    //        with the member type together with those associated with
    //        X.
    case Type::MemberPointer: {
      const MemberPointerType *MemberPtr = cast<MemberPointerType>(T);

      // Queue up the class type into which this points.
      Queue.push_back(MemberPtr->getClass());

      // And directly continue with the pointee type.
      T = MemberPtr->getPointeeType().getTypePtr();
      continue;
    }

    // As an extension, treat this like a normal pointer.
    case Type::BlockPointer:
      T = cast<BlockPointerType>(T)->getPointeeType().getTypePtr();
      continue;

    // References aren't covered by the standard, but that's such an
    // obvious defect that we cover them anyway.
    case Type::LValueReference:
    case Type::RValueReference:
      T = cast<ReferenceType>(T)->getPointeeType().getTypePtr();
      continue;

    // These are fundamental types.
    case Type::Vector:
    case Type::ExtVector:
    case Type::Complex:
      break;

    // Non-deduced auto types only get here for error cases.
    case Type::Auto:
      break;

    // If T is an Objective-C object or interface type, or a pointer to an
    // object or interface type, the associated namespace is the global
    // namespace.
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      Result.Namespaces.insert(Result.S.Context.getTranslationUnitDecl());
      break;

    // Atomic types are just wrappers; use the associations of the
    // contained type.
    case Type::Atomic:
      T = cast<AtomicType>(T)->getValueType().getTypePtr();
      continue;
    }

    if (Queue.empty())
      break;
    T = Queue.pop_back_val();
  }
}

/// \brief Find the associated classes and namespaces for
/// argument-dependent lookup for a call with the given set of
/// arguments.
///
/// This routine computes the sets of associated classes and associated
/// namespaces searched by argument-dependent lookup
/// (C++ [basic.lookup.argdep]) for a given set of arguments.
void Sema::FindAssociatedClassesAndNamespaces(
    SourceLocation InstantiationLoc, ArrayRef<Expr *> Args,
    AssociatedNamespaceSet &AssociatedNamespaces,
    AssociatedClassSet &AssociatedClasses) {
  AssociatedNamespaces.clear();
  AssociatedClasses.clear();

  AssociatedLookup Result(*this, InstantiationLoc,
                          AssociatedNamespaces, AssociatedClasses);

  // C++ [basic.lookup.koenig]p2:
  //   For each argument type T in the function call, there is a set
  //   of zero or more associated namespaces and a set of zero or more
  //   associated classes to be considered. The sets of namespaces and
  //   classes is determined entirely by the types of the function
  //   arguments (and the namespace of any template template
  //   argument).
  for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
    Expr *Arg = Args[ArgIdx];

    if (Arg->getType() != Context.OverloadTy) {
      addAssociatedClassesAndNamespaces(Result, Arg->getType());
      continue;
    }

    // [...] In addition, if the argument is the name or address of a
    // set of overloaded functions and/or function templates, its
    // associated classes and namespaces are the union of those
    // associated with each of the members of the set: the namespace
    // in which the function or function template is defined and the
    // classes and namespaces associated with its (non-dependent)
    // parameter types and return type.
    Arg = Arg->IgnoreParens();
    if (UnaryOperator *unaryOp = dyn_cast<UnaryOperator>(Arg))
      if (unaryOp->getOpcode() == UO_AddrOf)
        Arg = unaryOp->getSubExpr();

    UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg);
    if (!ULE)
      continue;

    for (const auto *D : ULE->decls()) {
      // Look through any using declarations to find the underlying function.
      const FunctionDecl *FDecl = D->getUnderlyingDecl()->getAsFunction();

      // Add the classes and namespaces associated with the parameter
      // types and return type of this function.
      addAssociatedClassesAndNamespaces(Result, FDecl->getType());
    }
  }
}

NamedDecl *Sema::LookupSingleName(Scope *S, DeclarationName Name,
                                  SourceLocation Loc,
                                  LookupNameKind NameKind,
                                  RedeclarationKind Redecl) {
  LookupResult R(*this, Name, Loc, NameKind, Redecl);
  LookupName(R, S);
  return R.getAsSingle<NamedDecl>();
}

/// \brief Find the protocol with the given name, if any.
ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
                                       SourceLocation IdLoc,
                                       RedeclarationKind Redecl) {
  Decl *D = LookupSingleName(TUScope, II, IdLoc,
                             LookupObjCProtocolName, Redecl);
  return cast_or_null<ObjCProtocolDecl>(D);
}

void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                        QualType T1, QualType T2,
                                        UnresolvedSetImpl &Functions) {
  // C++ [over.match.oper]p3:
  //     -- The set of non-member candidates is the result of the
  //        unqualified lookup of operator@ in the context of the
  //        expression according to the usual rules for name lookup in
  //        unqualified function calls (3.4.2) except that all member
  //        functions are ignored.
  DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
  LookupResult Operators(*this, OpName, SourceLocation(), LookupOperatorName);
  LookupName(Operators, S);

  assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
  Functions.append(Operators.begin(), Operators.end());
}

Sema::SpecialMemberOverloadResult *
Sema::LookupSpecialMember(CXXRecordDecl *RD, CXXSpecialMember SM,
                          bool ConstArg, bool VolatileArg, bool RValueThis,
                          bool ConstThis, bool VolatileThis) {
  assert(CanDeclareSpecialMemberFunction(RD) &&
         "doing special member lookup into record that isn't fully complete");
  RD = RD->getDefinition();
  if (RValueThis || ConstThis || VolatileThis)
    assert((SM == CXXCopyAssignment || SM == CXXMoveAssignment) &&
           "constructors and destructors always have unqualified lvalue this");
  if (ConstArg || VolatileArg)
    assert((SM != CXXDefaultConstructor && SM != CXXDestructor) &&
           "parameter-less special members can't have qualified arguments");

  llvm::FoldingSetNodeID ID;
  ID.AddPointer(RD);
  ID.AddInteger(SM);
  ID.AddInteger(ConstArg);
  ID.AddInteger(VolatileArg);
  ID.AddInteger(RValueThis);
  ID.AddInteger(ConstThis);
  ID.AddInteger(VolatileThis);

  void *InsertPoint;
  SpecialMemberOverloadResult *Result =
    SpecialMemberCache.FindNodeOrInsertPos(ID, InsertPoint);

  // This was already cached
  if (Result)
    return Result;

  Result = BumpAlloc.Allocate<SpecialMemberOverloadResult>();
  Result = new (Result) SpecialMemberOverloadResult(ID);
  SpecialMemberCache.InsertNode(Result, InsertPoint);

  if (SM == CXXDestructor) {
    if (RD->needsImplicitDestructor())
      DeclareImplicitDestructor(RD);
    CXXDestructorDecl *DD = RD->getDestructor();
    assert(DD && "record without a destructor");
    Result->setMethod(DD);
    Result->setKind(DD->isDeleted() ?
                    SpecialMemberOverloadResult::NoMemberOrDeleted :
                    SpecialMemberOverloadResult::Success);
    return Result;
  }

  // Prepare for overload resolution. Here we construct a synthetic argument
  // if necessary and make sure that implicit functions are declared.
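  //
  // For example (an illustrative note, not from the original comments):
  // looking up the copy constructor of a class X with ConstArg set amounts
  // to resolving a call X(x) where 'x' is a synthetic lvalue of type
  // 'const X', so overload resolution selects X(const X&) when it is viable.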
  CanQualType CanTy = Context.getCanonicalType(Context.getTagDeclType(RD));
  DeclarationName Name;
  Expr *Arg = nullptr;
  unsigned NumArgs;

  QualType ArgType = CanTy;
  ExprValueKind VK = VK_LValue;

  if (SM == CXXDefaultConstructor) {
    Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
    NumArgs = 0;
    if (RD->needsImplicitDefaultConstructor())
      DeclareImplicitDefaultConstructor(RD);
  } else {
    if (SM == CXXCopyConstructor || SM == CXXMoveConstructor) {
      Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
      if (RD->needsImplicitCopyConstructor())
        DeclareImplicitCopyConstructor(RD);
      if (getLangOpts().CPlusPlus11 && RD->needsImplicitMoveConstructor())
        DeclareImplicitMoveConstructor(RD);
    } else {
      Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
      if (RD->needsImplicitCopyAssignment())
        DeclareImplicitCopyAssignment(RD);
      if (getLangOpts().CPlusPlus11 && RD->needsImplicitMoveAssignment())
        DeclareImplicitMoveAssignment(RD);
    }

    if (ConstArg)
      ArgType.addConst();
    if (VolatileArg)
      ArgType.addVolatile();

    // This isn't /really/ specified by the standard, but it's implied
    // we should be working from an RValue in the case of move to ensure
    // that we prefer to bind to rvalue references, and an LValue in the
    // case of copy to ensure we don't bind to rvalue references.
    // Possibly an XValue is actually correct in the case of move, but
    // there is no semantic difference for class types in this restricted
    // case.
    if (SM == CXXCopyConstructor || SM == CXXCopyAssignment)
      VK = VK_LValue;
    else
      VK = VK_RValue;
  }

  OpaqueValueExpr FakeArg(SourceLocation(), ArgType, VK);

  if (SM != CXXDefaultConstructor) {
    NumArgs = 1;
    Arg = &FakeArg;
  }

  // Create the object argument
  QualType ThisTy = CanTy;
  if (ConstThis)
    ThisTy.addConst();
  if (VolatileThis)
    ThisTy.addVolatile();
  Expr::Classification Classification =
    OpaqueValueExpr(SourceLocation(), ThisTy,
                    RValueThis ? VK_RValue : VK_LValue).Classify(Context);

  // Now we perform lookup on the name we computed earlier and do overload
  // resolution. Lookup is only performed directly into the class since there
  // will always be a (possibly implicit) declaration to shadow any others.
  OverloadCandidateSet OCS(RD->getLocation(), OverloadCandidateSet::CSK_Normal);
  DeclContext::lookup_result R = RD->lookup(Name);

  if (R.empty()) {
    // We might have no default constructor because we have a lambda's closure
    // type, rather than because there's some other declared constructor.
    // Every class has a copy/move constructor, copy/move assignment, and
    // destructor.
    assert(SM == CXXDefaultConstructor &&
           "lookup for a constructor or assignment operator was empty");
    Result->setMethod(nullptr);
    Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
    return Result;
  }

  // Copy the candidates as our processing of them may load new declarations
  // from an external source and invalidate lookup_result.
  SmallVector<NamedDecl *, 8> Candidates(R.begin(), R.end());

  for (auto *Cand : Candidates) {
    if (Cand->isInvalidDecl())
      continue;

    if (UsingShadowDecl *U = dyn_cast<UsingShadowDecl>(Cand)) {
      // FIXME: [namespace.udecl]p15 says that we should only consider a
      // using declaration here if it does not match a declaration in the
      // derived class. We do not implement this correctly in other cases
      // either.
      Cand = U->getTargetDecl();
      if (Cand->isInvalidDecl())
        continue;
    }

    if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Cand)) {
      if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
        AddMethodCandidate(M, DeclAccessPair::make(M, AS_public), RD, ThisTy,
                           Classification, llvm::makeArrayRef(&Arg, NumArgs),
                           OCS, true);
      else
        AddOverloadCandidate(M, DeclAccessPair::make(M, AS_public),
                             llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
    } else if (FunctionTemplateDecl *Tmpl =
                 dyn_cast<FunctionTemplateDecl>(Cand)) {
      if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
        AddMethodTemplateCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
                                   RD, nullptr, ThisTy, Classification,
                                   llvm::makeArrayRef(&Arg, NumArgs),
                                   OCS, true);
      else
        AddTemplateOverloadCandidate(Tmpl,
                                     DeclAccessPair::make(Tmpl, AS_public),
                                     nullptr,
                                     llvm::makeArrayRef(&Arg, NumArgs),
                                     OCS, true);
    } else {
      assert(isa<UsingDecl>(Cand) && "illegal Kind of operator = Decl");
    }
  }

  OverloadCandidateSet::iterator Best;
  switch (OCS.BestViableFunction(*this, SourceLocation(), Best)) {
    case OR_Success:
      Result->setMethod(cast<CXXMethodDecl>(Best->Function));
      Result->setKind(SpecialMemberOverloadResult::Success);
      break;

    case OR_Deleted:
      Result->setMethod(cast<CXXMethodDecl>(Best->Function));
      Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
      break;

    case OR_Ambiguous:
      Result->setMethod(nullptr);
      Result->setKind(SpecialMemberOverloadResult::Ambiguous);
      break;

    case OR_No_Viable_Function:
      Result->setMethod(nullptr);
      Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
      break;
  }

  return Result;
}

/// \brief Look up the default constructor for the given class.
CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) {
  SpecialMemberOverloadResult *Result =
    LookupSpecialMember(Class, CXXDefaultConstructor, false, false, false,
                        false, false);

  return cast_or_null<CXXConstructorDecl>(Result->getMethod());
}

/// \brief Look up the copying constructor for the given class.
CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
                                                   unsigned Quals) {
  assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
         "non-const, non-volatile qualifiers for copy ctor arg");
  SpecialMemberOverloadResult *Result =
    LookupSpecialMember(Class, CXXCopyConstructor, Quals & Qualifiers::Const,
                        Quals & Qualifiers::Volatile, false, false, false);

  return cast_or_null<CXXConstructorDecl>(Result->getMethod());
}

/// \brief Look up the moving constructor for the given class.
CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class,
                                                  unsigned Quals) {
  SpecialMemberOverloadResult *Result =
    LookupSpecialMember(Class, CXXMoveConstructor, Quals & Qualifiers::Const,
                        Quals & Qualifiers::Volatile, false, false, false);

  return cast_or_null<CXXConstructorDecl>(Result->getMethod());
}

/// \brief Look up the constructors for the given class.
DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
  // If the implicit constructors have not yet been declared, do so now.
  if (CanDeclareSpecialMemberFunction(Class)) {
    if (Class->needsImplicitDefaultConstructor())
      DeclareImplicitDefaultConstructor(Class);
    if (Class->needsImplicitCopyConstructor())
      DeclareImplicitCopyConstructor(Class);
    if (getLangOpts().CPlusPlus11 && Class->needsImplicitMoveConstructor())
      DeclareImplicitMoveConstructor(Class);
  }

  CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class));
  DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T);
  return Class->lookup(Name);
}

/// \brief Look up the copying assignment operator for the given class.
CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
                                             unsigned Quals, bool RValueThis,
                                             unsigned ThisQuals) {
  assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
         "non-const, non-volatile qualifiers for copy assignment arg");
  assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
         "non-const, non-volatile qualifiers for copy assignment this");
  SpecialMemberOverloadResult *Result =
    LookupSpecialMember(Class, CXXCopyAssignment, Quals & Qualifiers::Const,
                        Quals & Qualifiers::Volatile, RValueThis,
                        ThisQuals & Qualifiers::Const,
                        ThisQuals & Qualifiers::Volatile);

  return Result->getMethod();
}

/// \brief Look up the moving assignment operator for the given class.
CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
                                            unsigned Quals,
                                            bool RValueThis,
                                            unsigned ThisQuals) {
  assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
         "non-const, non-volatile qualifiers for copy assignment this");
  SpecialMemberOverloadResult *Result =
    LookupSpecialMember(Class, CXXMoveAssignment, Quals & Qualifiers::Const,
                        Quals & Qualifiers::Volatile, RValueThis,
                        ThisQuals & Qualifiers::Const,
                        ThisQuals & Qualifiers::Volatile);

  return Result->getMethod();
}

/// \brief Look for the destructor of the given class.
///
/// During semantic analysis, this routine should be used in lieu of
/// CXXRecordDecl::getDestructor().
///
/// \returns The destructor for this class.
CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
  return cast<CXXDestructorDecl>(LookupSpecialMember(Class, CXXDestructor,
                                                     false, false, false,
                                                     false, false)
                                     ->getMethod());
}

/// LookupLiteralOperator - Determine which literal operator should be used for
/// a user-defined literal, per C++11 [lex.ext].
///
/// Normal overload resolution is not used to select which literal operator to
/// call for a user-defined literal. Look up the provided literal operator
/// name, and filter the results to the appropriate set for the given argument
/// types.
Sema::LiteralOperatorLookupResult
Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
                            ArrayRef<QualType> ArgTys,
                            bool AllowRaw, bool AllowTemplate,
                            bool AllowStringTemplate) {
  LookupName(R, S);
  assert(R.getResultKind() != LookupResult::Ambiguous &&
         "literal operator lookup can't be ambiguous");

  // Filter the lookup results appropriately.
  LookupResult::Filter F = R.makeFilter();

  bool FoundRaw = false;
  bool FoundTemplate = false;
  bool FoundStringTemplate = false;
  bool FoundExactMatch = false;

  while (F.hasNext()) {
    Decl *D = F.next();
    if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
      D = USD->getTargetDecl();

    // If the declaration we found is invalid, skip it.
    if (D->isInvalidDecl()) {
      F.erase();
      continue;
    }

    bool IsRaw = false;
    bool IsTemplate = false;
    bool IsStringTemplate = false;
    bool IsExactMatch = false;

    if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      if (FD->getNumParams() == 1 &&
          FD->getParamDecl(0)->getType()->getAs<PointerType>())
        IsRaw = true;
      else if (FD->getNumParams() == ArgTys.size()) {
        IsExactMatch = true;
        for (unsigned ArgIdx = 0; ArgIdx != ArgTys.size(); ++ArgIdx) {
          QualType ParamTy = FD->getParamDecl(ArgIdx)->getType();
          if (!Context.hasSameUnqualifiedType(ArgTys[ArgIdx], ParamTy)) {
            IsExactMatch = false;
            break;
          }
        }
      }
    }
    if (FunctionTemplateDecl *FD = dyn_cast<FunctionTemplateDecl>(D)) {
      TemplateParameterList *Params = FD->getTemplateParameters();
      if (Params->size() == 1)
        IsTemplate = true;
      else
        IsStringTemplate = true;
    }

    if (IsExactMatch) {
      FoundExactMatch = true;
      AllowRaw = false;
      AllowTemplate = false;
      AllowStringTemplate = false;
      if (FoundRaw || FoundTemplate || FoundStringTemplate) {
        // Go through again and remove the raw and template decls we've
        // already found.
        F.restart();
        FoundRaw = FoundTemplate = FoundStringTemplate = false;
      }
    } else if (AllowRaw && IsRaw) {
      FoundRaw = true;
    } else if (AllowTemplate && IsTemplate) {
      FoundTemplate = true;
    } else if (AllowStringTemplate && IsStringTemplate) {
      FoundStringTemplate = true;
    } else {
      F.erase();
    }
  }

  F.done();

  // C++11 [lex.ext]p3, p4: If S contains a literal operator with a matching
  // parameter type, that is used in preference to a raw literal operator
  // or literal operator template.
  if (FoundExactMatch)
    return LOLR_Cooked;

  // C++11 [lex.ext]p3, p4: S shall contain a raw literal operator or a
  // literal operator template, but not both.
  if (FoundRaw && FoundTemplate) {
    Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
    for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
      NoteOverloadCandidate((*I)->getUnderlyingDecl()->getAsFunction());
    return LOLR_Error;
  }

  if (FoundRaw)
    return LOLR_Raw;

  if (FoundTemplate)
    return LOLR_Template;

  if (FoundStringTemplate)
    return LOLR_StringTemplate;

  // Didn't find anything we could use.
  Diag(R.getNameLoc(), diag::err_ovl_no_viable_literal_operator)
    << R.getLookupName() << (int)ArgTys.size() << ArgTys[0]
    << (ArgTys.size() == 2 ? ArgTys[1] : QualType()) << AllowRaw
    << (AllowTemplate || AllowStringTemplate);
  return LOLR_Error;
}

void ADLResult::insert(NamedDecl *New) {
  NamedDecl *&Old = Decls[cast<NamedDecl>(New->getCanonicalDecl())];

  // If we haven't yet seen a decl for this key, or the last decl
  // was exactly this one, we're done.
  if (Old == nullptr || Old == New) {
    Old = New;
    return;
  }

  // Otherwise, decide which is a more recent redeclaration.
  FunctionDecl *OldFD = Old->getAsFunction();
  FunctionDecl *NewFD = New->getAsFunction();

  FunctionDecl *Cursor = NewFD;
  while (true) {
    Cursor = Cursor->getPreviousDecl();

    // If we got to the end without finding OldFD, OldFD is the newer
    // declaration; leave things as they are.
    if (!Cursor)
      return;

    // If we do find OldFD, then NewFD is newer.
    if (Cursor == OldFD)
      break;

    // Otherwise, keep looking.
  }

  Old = New;
}

void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                                   ArrayRef<Expr *> Args, ADLResult &Result) {
  // Find all of the associated namespaces and classes based on the
  // arguments we have.
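  //
  // For example (an illustrative sketch; 'N', 'X', 'f', and 'g' are
  // hypothetical names):
  //
  //   namespace N { struct X {}; void f(X); }
  //   void g() { N::X x; f(x); } // unqualified 'f' is found via ADL in N
  //
  // The argument type N::X makes N an associated namespace, so the lookup
  // below considers N::f even though 'f' is not visible at the call site by
  // ordinary unqualified lookup.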
  AssociatedNamespaceSet AssociatedNamespaces;
  AssociatedClassSet AssociatedClasses;
  FindAssociatedClassesAndNamespaces(Loc, Args,
                                     AssociatedNamespaces,
                                     AssociatedClasses);

  // C++ [basic.lookup.argdep]p3:
  //   Let X be the lookup set produced by unqualified lookup (3.4.1)
  //   and let Y be the lookup set produced by argument dependent
  //   lookup (defined as follows). If X contains [...] then Y is
  //   empty. Otherwise Y is the set of declarations found in the
  //   namespaces associated with the argument types as described
  //   below. The set of declarations found by the lookup of the name
  //   is the union of X and Y.
  //
  // Here, we compute Y and add its members to the overloaded
  // candidate set.
  for (auto *NS : AssociatedNamespaces) {
    //   When considering an associated namespace, the lookup is the
    //   same as the lookup performed when the associated namespace is
    //   used as a qualifier (3.4.3.2) except that:
    //
    //     -- Any using-directives in the associated namespace are
    //        ignored.
    //
    //     -- Any namespace-scope friend functions declared in
    //        associated classes are visible within their respective
    //        namespaces even if they are not visible during an ordinary
    //        lookup (11.4).
    DeclContext::lookup_result R = NS->lookup(Name);
    for (auto *D : R) {
      // If the only declaration here is an ordinary friend, consider
      // it only if it was declared in an associated classes.
      if ((D->getIdentifierNamespace() & Decl::IDNS_Ordinary) == 0) {
        // If it's neither ordinarily visible nor a friend, we can't find it.
        if ((D->getIdentifierNamespace() & Decl::IDNS_OrdinaryFriend) == 0)
          continue;

        bool DeclaredInAssociatedClass = false;
        for (Decl *DI = D; DI; DI = DI->getPreviousDecl()) {
          DeclContext *LexDC = DI->getLexicalDeclContext();
          if (isa<CXXRecordDecl>(LexDC) &&
              AssociatedClasses.count(cast<CXXRecordDecl>(LexDC))) {
            DeclaredInAssociatedClass = true;
            break;
          }
        }
        if (!DeclaredInAssociatedClass)
          continue;
      }

      if (isa<UsingShadowDecl>(D))
        D = cast<UsingShadowDecl>(D)->getTargetDecl();

      if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D))
        continue;

      if (!isVisible(D) && !(D = findAcceptableDecl(*this, D)))
        continue;

      Result.insert(D);
    }
  }
}

//----------------------------------------------------------------------------
// Search for all visible declarations.
//----------------------------------------------------------------------------
VisibleDeclConsumer::~VisibleDeclConsumer() { }

bool VisibleDeclConsumer::includeHiddenDecls() const { return false; }

namespace {

class ShadowContextRAII;

class VisibleDeclsRecord {
public:
  /// \brief An entry in the shadow map, which is optimized to store a
  /// single declaration (the common case) but can also store a list
  /// of declarations.
  typedef llvm::TinyPtrVector<NamedDecl*> ShadowMapEntry;

private:
  /// \brief A mapping from declaration names to the declarations that have
  /// this name within a particular scope.
  typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;

  /// \brief A list of shadow maps, which is used to model name hiding.
  std::list<ShadowMap> ShadowMaps;

  /// \brief The declaration contexts we have already visited.
  llvm::SmallPtrSet<DeclContext *, 8> VisitedContexts;

  friend class ShadowContextRAII;

public:
  /// \brief Determine whether we have already visited this context
  /// (and, if not, note that we are going to visit that context now).
  bool visitedContext(DeclContext *Ctx) {
    return !VisitedContexts.insert(Ctx).second;
  }

  bool alreadyVisitedContext(DeclContext *Ctx) {
    return VisitedContexts.count(Ctx);
  }

  /// \brief Determine whether the given declaration is hidden in the
  /// current scope.
  ///
  /// \returns the declaration that hides the given declaration, or
  /// NULL if no such declaration exists.
  NamedDecl *checkHidden(NamedDecl *ND);

  /// \brief Add a declaration to the current shadow map.
  void add(NamedDecl *ND) {
    ShadowMaps.back()[ND->getDeclName()].push_back(ND);
  }
};

/// \brief RAII object that records when we've entered a shadow context.
class ShadowContextRAII {
  VisibleDeclsRecord &Visible;

  typedef VisibleDeclsRecord::ShadowMap ShadowMap;

public:
  ShadowContextRAII(VisibleDeclsRecord &Visible) : Visible(Visible) {
    Visible.ShadowMaps.emplace_back();
  }

  ~ShadowContextRAII() {
    Visible.ShadowMaps.pop_back();
  }
};

} // end anonymous namespace

NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
  // Look through using declarations.
  ND = ND->getUnderlyingDecl();

  unsigned IDNS = ND->getIdentifierNamespace();
  std::list<ShadowMap>::reverse_iterator SM = ShadowMaps.rbegin();
  for (std::list<ShadowMap>::reverse_iterator SMEnd = ShadowMaps.rend();
       SM != SMEnd; ++SM) {
    ShadowMap::iterator Pos = SM->find(ND->getDeclName());
    if (Pos == SM->end())
      continue;

    for (auto *D : Pos->second) {
      // A tag declaration does not hide a non-tag declaration.
      if (D->hasTagIdentifierNamespace() &&
          (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
                   Decl::IDNS_ObjCProtocol)))
        continue;

      // Protocols are in distinct namespaces from everything else.
      if (((D->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
           || (IDNS & Decl::IDNS_ObjCProtocol)) &&
          D->getIdentifierNamespace() != IDNS)
        continue;

      // Functions and function templates in the same scope overload
      // rather than hide. FIXME: Look for hiding based on function
      // signatures!
      if (D->getUnderlyingDecl()->isFunctionOrFunctionTemplate() &&
          ND->getUnderlyingDecl()->isFunctionOrFunctionTemplate() &&
          SM == ShadowMaps.rbegin())
        continue;

      // We've found a declaration that hides this one.
      return D;
    }
  }

  return nullptr;
}

static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
                               bool QualifiedNameLookup,
                               bool InBaseClass,
                               VisibleDeclConsumer &Consumer,
                               VisibleDeclsRecord &Visited) {
  if (!Ctx)
    return;

  // Make sure we don't visit the same context twice.
  if (Visited.visitedContext(Ctx->getPrimaryContext()))
    return;

  // Outside C++, lookup results for the TU live on identifiers.
  if (isa<TranslationUnitDecl>(Ctx) &&
      !Result.getSema().getLangOpts().CPlusPlus) {
    auto &S = Result.getSema();
    auto &Idents = S.Context.Idents;

    // Ensure all external identifiers are in the identifier table.
    if (IdentifierInfoLookup *External =
            Idents.getExternalIdentifierLookup()) {
      std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers());
      for (StringRef Name = Iter->Next(); !Name.empty(); Name = Iter->Next())
        Idents.get(Name);
    }

    // Walk all lookup results in the TU for each identifier.
    for (const auto &Ident : Idents) {
      for (auto I = S.IdResolver.begin(Ident.getValue()),
                E = S.IdResolver.end();
           I != E; ++I) {
        if (S.IdResolver.isDeclInScope(*I, Ctx)) {
          if (NamedDecl *ND = Result.getAcceptableDecl(*I)) {
            Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
            Visited.add(ND);
          }
        }
      }
    }

    return;
  }

  if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
    Result.getSema().ForceDeclarationOfImplicitMembers(Class);

  // Enumerate all of the results in this context.
  for (DeclContextLookupResult R : Ctx->lookups()) {
    for (auto *D : R) {
      if (auto *ND = Result.getAcceptableDecl(D)) {
        Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
        Visited.add(ND);
      }
    }
  }

  // Traverse using directives for qualified name lookup.
  if (QualifiedNameLookup) {
    ShadowContextRAII Shadow(Visited);
    for (auto I : Ctx->using_directives()) {
      LookupVisibleDecls(I->getNominatedNamespace(), Result,
                         QualifiedNameLookup, InBaseClass, Consumer, Visited);
    }
  }

  // Traverse the contexts of inherited C++ classes.
  if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx)) {
    if (!Record->hasDefinition())
      return;

    for (const auto &B : Record->bases()) {
      QualType BaseType = B.getType();

      // Don't look into dependent bases, because name lookup can't look
      // there anyway.
      if (BaseType->isDependentType())
        continue;

      const RecordType *Record = BaseType->getAs<RecordType>();
      if (!Record)
        continue;

      // FIXME: It would be nice to be able to determine whether referencing
      // a particular member would be ambiguous. For example, given
      //
      //   struct A { int member; };
      //   struct B { int member; };
      //   struct C : A, B { };
      //
      //   void f(C *c) { c->### }
      //
      // accessing 'member' would result in an ambiguity. However, we
      // could be smart enough to qualify the member with the base
      // class, e.g.,
      //
      //   c->B::member
      //
      // or
      //
      //   c->A::member

      // Find results in this base class (and its bases).
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(Record->getDecl(), Result, QualifiedNameLookup,
                         true, Consumer, Visited);
    }
  }

  // Traverse the contexts of Objective-C classes.
  if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Ctx)) {
    // Traverse categories.
    for (auto *Cat : IFace->visible_categories()) {
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(Cat, Result, QualifiedNameLookup, false,
                         Consumer, Visited);
    }

    // Traverse protocols.
    for (auto *I : IFace->all_referenced_protocols()) {
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
                         Visited);
    }

    // Traverse the superclass.
    if (IFace->getSuperClass()) {
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(IFace->getSuperClass(), Result, QualifiedNameLookup,
                         true, Consumer, Visited);
    }

    // If there is an implementation, traverse it. We do this to find
    // synthesized ivars.
    if (IFace->getImplementation()) {
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(IFace->getImplementation(), Result,
                         QualifiedNameLookup, InBaseClass, Consumer, Visited);
    }
  } else if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Ctx)) {
    for (auto *I : Protocol->protocols()) {
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
                         Visited);
    }
  } else if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx)) {
    for (auto *I : Category->protocols()) {
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(I, Result, QualifiedNameLookup, false, Consumer,
                         Visited);
    }

    // If there is an implementation, traverse it.
    if (Category->getImplementation()) {
      ShadowContextRAII Shadow(Visited);
      LookupVisibleDecls(Category->getImplementation(), Result,
                         QualifiedNameLookup, true, Consumer, Visited);
    }
  }
}

static void LookupVisibleDecls(Scope *S, LookupResult &Result,
                               UnqualUsingDirectiveSet &UDirs,
                               VisibleDeclConsumer &Consumer,
                               VisibleDeclsRecord &Visited) {
  if (!S)
    return;

  if (!S->getEntity() ||
      (!S->getParent() &&
       !Visited.alreadyVisitedContext(S->getEntity())) ||
      (S->getEntity())->isFunctionOrMethod()) {
    FindLocalExternScope FindLocals(Result);
    // Walk through the declarations in this Scope.
    for (auto *D : S->decls()) {
      if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
        if ((ND = Result.getAcceptableDecl(ND))) {
          Consumer.FoundDecl(ND, Visited.checkHidden(ND), nullptr, false);
          Visited.add(ND);
        }
    }
  }

  // FIXME: C++ [temp.local]p8
  DeclContext *Entity = nullptr;
  if (S->getEntity()) {
    // Look into this scope's declaration context, along with any of its
    // parent lookup contexts (e.g., enclosing classes), up to the point
    // where we hit the context stored in the next outer scope.
    Entity = S->getEntity();
    DeclContext *OuterCtx = findOuterContext(S).first; // FIXME

    for (DeclContext *Ctx = Entity; Ctx && !Ctx->Equals(OuterCtx);
         Ctx = Ctx->getLookupParent()) {
      if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(Ctx)) {
        if (Method->isInstanceMethod()) {
          // For instance methods, look for ivars in the method's interface.
          LookupResult IvarResult(Result.getSema(), Result.getLookupName(),
                                  Result.getNameLoc(),
                                  Sema::LookupMemberName);
          if (ObjCInterfaceDecl *IFace = Method->getClassInterface()) {
            LookupVisibleDecls(IFace, IvarResult,
                               /*QualifiedNameLookup=*/false,
                               /*InBaseClass=*/false, Consumer, Visited);
          }
        }

        // We've already performed all of the name lookup that we need
        // to for Objective-C methods; the next context will be the
        // outer scope.
        break;
      }

      if (Ctx->isFunctionOrMethod())
        continue;

      LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/false,
                         /*InBaseClass=*/false, Consumer, Visited);
    }
  } else if (!S->getParent()) {
    // Look into the translation unit scope. We walk through the translation
    // unit's declaration context, because the Scope itself won't have all of
    // the declarations if we loaded a precompiled header.
    // FIXME: We would like the translation unit's Scope object to point to
    // the translation unit, so we don't need this special "if" branch.
    // However, doing so would force the normal C++ name-lookup code to look
    // into the translation unit decl when the IdentifierInfo chains would
    // suffice. Once we fix that problem (which is part of a more general
    // "don't look in DeclContexts unless we have to" optimization), we can
    // eliminate this.
    Entity = Result.getSema().Context.getTranslationUnitDecl();
    LookupVisibleDecls(Entity, Result, /*QualifiedNameLookup=*/false,
                       /*InBaseClass=*/false, Consumer, Visited);
  }

  if (Entity) {
    // Lookup visible declarations in any namespaces found by using
    // directives.
    for (const UnqualUsingEntry &UUE : UDirs.getNamespacesFor(Entity))
      LookupVisibleDecls(
          const_cast<DeclContext *>(UUE.getNominatedNamespace()),
          Result, /*QualifiedNameLookup=*/false,
          /*InBaseClass=*/false, Consumer, Visited);
  }

  // Lookup names in the parent scope.
  ShadowContextRAII Shadow(Visited);
  LookupVisibleDecls(S->getParent(), Result, UDirs, Consumer, Visited);
}

void Sema::LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                              VisibleDeclConsumer &Consumer,
                              bool IncludeGlobalScope) {
  // Determine the set of using directives available during
  // unqualified name lookup.
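  //
  // For example (a short illustration; the names are hypothetical):
  //
  //   namespace N { int n; }
  //   using namespace N;
  //   int g() { return n; } // unqualified lookup of 'n' sees N::n
  //
  // The UnqualUsingDirectiveSet built below records which namespaces each
  // scope can see through such using-directives.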
  Scope *Initial = S;
  UnqualUsingDirectiveSet UDirs;

  if (getLangOpts().CPlusPlus) {
    // Find the first namespace or translation-unit scope.
    while (S && !isNamespaceOrTranslationUnitScope(S))
      S = S->getParent();

    UDirs.visitScopeChain(Initial, S);
  }

  UDirs.done();

  // Look for visible declarations.
  LookupResult Result(*this, DeclarationName(), SourceLocation(), Kind);
  Result.setAllowHidden(Consumer.includeHiddenDecls());
  VisibleDeclsRecord Visited;
  if (!IncludeGlobalScope)
    Visited.visitedContext(Context.getTranslationUnitDecl());
  ShadowContextRAII Shadow(Visited);
  ::LookupVisibleDecls(Initial, Result, UDirs, Consumer, Visited);
}

void Sema::LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                              VisibleDeclConsumer &Consumer,
                              bool IncludeGlobalScope) {
  LookupResult Result(*this, DeclarationName(), SourceLocation(), Kind);
  Result.setAllowHidden(Consumer.includeHiddenDecls());
  VisibleDeclsRecord Visited;
  if (!IncludeGlobalScope)
    Visited.visitedContext(Context.getTranslationUnitDecl());
  ShadowContextRAII Shadow(Visited);
  ::LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/true,
                       /*InBaseClass=*/false, Consumer, Visited);
}

/// LookupOrCreateLabel - Do a name lookup of a label with the specified name.
/// If GnuLabelLoc is a valid source location, then this is a definition
/// of an __label__ label name, otherwise it is a normal label definition
/// or use.
LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
                                     SourceLocation GnuLabelLoc) {
  // Do a lookup to see if we have a label with this name already.
  NamedDecl *Res = nullptr;

  if (GnuLabelLoc.isValid()) {
    // Local label definitions always shadow existing labels.
    Res = LabelDecl::Create(Context, CurContext, Loc, II, GnuLabelLoc);
    Scope *S = CurScope;
    PushOnScopeChains(Res, S, true);
    return cast<LabelDecl>(Res);
  }

  // Not a GNU local label.
  Res = LookupSingleName(CurScope, II, Loc, LookupLabel, NotForRedeclaration);
  // If we found a label, check to see if it is in the same context as us.
  // When in a Block, we don't want to reuse a label in an enclosing function.
  if (Res && Res->getDeclContext() != CurContext)
    Res = nullptr;
  if (!Res) {
    // If not forward referenced or defined already, create the backing decl.
    Res = LabelDecl::Create(Context, CurContext, Loc, II);
    Scope *S = CurScope->getFnParent();
    assert(S && "Not in a function?");
    PushOnScopeChains(Res, S, true);
  }
  return cast<LabelDecl>(Res);
}

//===----------------------------------------------------------------------===//
// Typo correction
//===----------------------------------------------------------------------===//

static bool isCandidateViable(CorrectionCandidateCallback &CCC,
                              TypoCorrection &Candidate) {
  Candidate.setCallbackDistance(CCC.RankCandidate(Candidate));
  return Candidate.getEditDistance(false) != TypoCorrection::InvalidDistance;
}

static void LookupPotentialTypoResult(Sema &SemaRef,
                                      LookupResult &Res,
                                      IdentifierInfo *Name,
                                      Scope *S, CXXScopeSpec *SS,
                                      DeclContext *MemberContext,
                                      bool EnteringContext,
                                      bool isObjCIvarLookup,
                                      bool FindHidden);

/// \brief Check whether the declarations found for a typo correction are
/// visible, and if none of them are, convert the correction to an 'import
/// a module' correction.
static void checkCorrectionVisibility(Sema &SemaRef, TypoCorrection &TC) {
  if (TC.begin() == TC.end())
    return;

  TypoCorrection::decl_iterator DI = TC.begin(), DE = TC.end();

  for (/**/; DI != DE; ++DI)
    if (!LookupResult::isVisible(SemaRef, *DI))
      break;
  // Nothing to do if all decls are visible.
  if (DI == DE)
    return;

  llvm::SmallVector<NamedDecl*, 4> NewDecls(TC.begin(), DI);
  bool AnyVisibleDecls = !NewDecls.empty();

  for (/**/; DI != DE; ++DI) {
    NamedDecl *VisibleDecl = *DI;
    if (!LookupResult::isVisible(SemaRef, *DI))
      VisibleDecl = findAcceptableDecl(SemaRef, *DI);

    if (VisibleDecl) {
      if (!AnyVisibleDecls) {
        // Found a visible decl, discard all hidden ones.
        AnyVisibleDecls = true;
        NewDecls.clear();
      }
      NewDecls.push_back(VisibleDecl);
    } else if (!AnyVisibleDecls && !(*DI)->isModulePrivate())
      NewDecls.push_back(*DI);
  }

  if (NewDecls.empty())
    TC = TypoCorrection();
  else {
    TC.setCorrectionDecls(NewDecls);
    TC.setRequiresImport(!AnyVisibleDecls);
  }
}

// Fill the supplied vector with the IdentifierInfo pointers for each piece of
// the given NestedNameSpecifier (i.e. given a NestedNameSpecifier
// "foo::bar::", fill the vector with the IdentifierInfo pointers for "foo"
// and "bar").
static void getNestedNameSpecifierIdentifiers(
    NestedNameSpecifier *NNS,
    SmallVectorImpl<const IdentifierInfo*> &Identifiers) {
  if (NestedNameSpecifier *Prefix = NNS->getPrefix())
    getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
  else
    Identifiers.clear();

  const IdentifierInfo *II = nullptr;

  switch (NNS->getKind()) {
  case NestedNameSpecifier::Identifier:
    II = NNS->getAsIdentifier();
    break;

  case NestedNameSpecifier::Namespace:
    if (NNS->getAsNamespace()->isAnonymousNamespace())
      return;
    II = NNS->getAsNamespace()->getIdentifier();
    break;

  case NestedNameSpecifier::NamespaceAlias:
    II = NNS->getAsNamespaceAlias()->getIdentifier();
    break;

  case NestedNameSpecifier::TypeSpecWithTemplate:
  case NestedNameSpecifier::TypeSpec:
    II = QualType(NNS->getAsType(), 0).getBaseTypeIdentifier();
    break;

  case NestedNameSpecifier::Global:
  case NestedNameSpecifier::Super:
    return;
  }

  if (II)
    Identifiers.push_back(II);
}

void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
                                       DeclContext *Ctx, bool InBaseClass) {
  // Don't consider hidden names for typo correction.
  if (Hiding)
    return;

  // Only consider entities with identifiers for names, ignoring
  // special names (constructors, overloaded operators, selectors,
  // etc.).
  IdentifierInfo *Name = ND->getIdentifier();
  if (!Name)
    return;

  // Only consider visible declarations and declarations from modules with
  // names that exactly match.
  if (!LookupResult::isVisible(SemaRef, ND) && Name != Typo &&
      !findAcceptableDecl(SemaRef, ND))
    return;

  FoundName(Name->getName());
}

void TypoCorrectionConsumer::FoundName(StringRef Name) {
  // Compute the edit distance between the typo and the name of this
  // entity, and add the identifier to the list of results.
  addName(Name, nullptr);
}

void TypoCorrectionConsumer::addKeywordResult(StringRef Keyword) {
  // Compute the edit distance between the typo and this keyword,
  // and add the keyword to the list of results.
  addName(Keyword, nullptr, nullptr, true);
}

void TypoCorrectionConsumer::addName(StringRef Name, NamedDecl *ND,
                                     NestedNameSpecifier *NNS,
                                     bool isKeyword) {
  // Use a simple length-based heuristic to determine the minimum possible
  // edit distance. If the minimum isn't good enough, bail out early.
  StringRef TypoStr = Typo->getName();
  unsigned MinED = abs((int)Name.size() - (int)TypoStr.size());
  if (MinED && TypoStr.size() / MinED < 3)
    return;

  // Compute an upper bound on the allowable edit distance, so that the
  // edit-distance algorithm can short-circuit.
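  //
  // Worked example (an illustrative note): for a 7-character typo the bound
  // computed below is (7 + 2) / 3 + 1 = 4, so any candidate that would need
  // four or more edits is rejected without running the full edit-distance
  // computation. Likewise, the length heuristic above rejects candidates
  // whose length differs from the typo's by roughly more than a third.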
  unsigned UpperBound = (TypoStr.size() + 2) / 3 + 1;
  unsigned ED = TypoStr.edit_distance(Name, true, UpperBound);
  if (ED >= UpperBound)
    return;

  TypoCorrection TC(&SemaRef.Context.Idents.get(Name), ND, NNS, ED);
  if (isKeyword)
    TC.makeKeyword();
  TC.setCorrectionRange(nullptr, Result.getLookupNameInfo());
  addCorrection(TC);
}

static const unsigned MaxTypoDistanceResultSets = 5;

void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
  StringRef TypoStr = Typo->getName();
  StringRef Name = Correction.getCorrectionAsIdentifierInfo()->getName();

  // For very short typos, ignore potential corrections that have a different
  // base identifier from the typo or which have a normalized edit distance
  // longer than the typo itself.
  if (TypoStr.size() < 3 &&
      (Name != TypoStr || Correction.getEditDistance(true) > TypoStr.size()))
    return;

  // If the correction is resolved but is not viable, ignore it.
  if (Correction.isResolved()) {
    checkCorrectionVisibility(SemaRef, Correction);
    if (!Correction || !isCandidateViable(*CorrectionValidator, Correction))
      return;
  }

  TypoResultList &CList =
      CorrectionResults[Correction.getEditDistance(false)][Name];

  if (!CList.empty() && !CList.back().isResolved())
    CList.pop_back();
  if (NamedDecl *NewND = Correction.getCorrectionDecl()) {
    std::string CorrectionStr = Correction.getAsString(SemaRef.getLangOpts());
    for (TypoResultList::iterator RI = CList.begin(), RIEnd = CList.end();
         RI != RIEnd; ++RI) {
      // If the Correction refers to a decl already in the result list,
      // replace the existing result if the string representation of
      // Correction comes before the current result alphabetically, then stop
      // as there is nothing more to be done to add Correction to the
      // candidate set.
      if (RI->getCorrectionDecl() == NewND) {
        if (CorrectionStr < RI->getAsString(SemaRef.getLangOpts()))
          *RI = Correction;
        return;
      }
    }
  }
  if (CList.empty() || Correction.isResolved())
    CList.push_back(Correction);

  while (CorrectionResults.size() > MaxTypoDistanceResultSets)
    CorrectionResults.erase(std::prev(CorrectionResults.end()));
}

void TypoCorrectionConsumer::addNamespaces(
    const llvm::MapVector<NamespaceDecl *, bool> &KnownNamespaces) {
  SearchNamespaces = true;

  for (auto KNPair : KnownNamespaces)
    Namespaces.addNameSpecifier(KNPair.first);

  bool SSIsTemplate = false;
  if (NestedNameSpecifier *NNS = (SS && SS->isValid()) ?
          SS->getScopeRep() : nullptr) {
    if (const Type *T = NNS->getAsType())
      SSIsTemplate = T->getTypeClass() == Type::TemplateSpecialization;
  }
  for (const auto *TI : SemaRef.getASTContext().types()) {
    if (CXXRecordDecl *CD = TI->getAsCXXRecordDecl()) {
      CD = CD->getCanonicalDecl();
      if (!CD->isDependentType() && !CD->isAnonymousStructOrUnion() &&
          !CD->isUnion() && CD->getIdentifier() &&
          (SSIsTemplate || !isa<ClassTemplateSpecializationDecl>(CD)) &&
          (CD->isBeingDefined() || CD->isCompleteDefinition()))
        Namespaces.addNameSpecifier(CD);
    }
  }
}

const TypoCorrection &TypoCorrectionConsumer::getNextCorrection() {
  if (++CurrentTCIndex < ValidatedCorrections.size())
    return ValidatedCorrections[CurrentTCIndex];

  CurrentTCIndex = ValidatedCorrections.size();
  while (!CorrectionResults.empty()) {
    auto DI = CorrectionResults.begin();
    if (DI->second.empty()) {
      CorrectionResults.erase(DI);
      continue;
    }

    auto RI = DI->second.begin();
    if (RI->second.empty()) {
      DI->second.erase(RI);
      performQualifiedLookups();
      continue;
    }

    TypoCorrection TC = RI->second.pop_back_val();
    if (TC.isResolved() || TC.requiresImport() || resolveCorrection(TC)) {
      ValidatedCorrections.push_back(TC);
      return ValidatedCorrections[CurrentTCIndex];
    }
  }
  return ValidatedCorrections[0]; // The empty correction.
}

bool TypoCorrectionConsumer::resolveCorrection(TypoCorrection &Candidate) {
  IdentifierInfo *Name = Candidate.getCorrectionAsIdentifierInfo();
  DeclContext *TempMemberContext = MemberContext;
  CXXScopeSpec *TempSS = SS.get();
retry_lookup:
  LookupPotentialTypoResult(SemaRef, Result, Name, S, TempSS,
                            TempMemberContext, EnteringContext,
                            CorrectionValidator->IsObjCIvarLookup,
                            Name == Typo && !Candidate.WillReplaceSpecifier());
  switch (Result.getResultKind()) {
  case LookupResult::NotFound:
  case LookupResult::NotFoundInCurrentInstantiation:
  case LookupResult::FoundUnresolvedValue:
    if (TempSS) {
      // Immediately retry the lookup without the given CXXScopeSpec
      TempSS = nullptr;
      Candidate.WillReplaceSpecifier(true);
      goto retry_lookup;
    }
    if (TempMemberContext) {
      if (SS && !TempSS)
        TempSS = SS.get();
      TempMemberContext = nullptr;
      goto retry_lookup;
    }
    if (SearchNamespaces)
      QualifiedResults.push_back(Candidate);
    break;

  case LookupResult::Ambiguous:
    // We don't deal with ambiguities.
    break;

  case LookupResult::Found:
  case LookupResult::FoundOverloaded:
    // Store all of the Decls for overloaded symbols
    for (auto *TRD : Result)
      Candidate.addCorrectionDecl(TRD);
    checkCorrectionVisibility(SemaRef, Candidate);
    if (!isCandidateViable(*CorrectionValidator, Candidate)) {
      if (SearchNamespaces)
        QualifiedResults.push_back(Candidate);
      break;
    }
    Candidate.setCorrectionRange(SS.get(), Result.getLookupNameInfo());
    return true;
  }
  return false;
}

void TypoCorrectionConsumer::performQualifiedLookups() {
  unsigned TypoLen = Typo->getName().size();
  for (auto QR : QualifiedResults) {
    for (auto NSI : Namespaces) {
      DeclContext *Ctx = NSI.DeclCtx;
      const Type *NSType = NSI.NameSpecifier->getAsType();

      // If the current NestedNameSpecifier refers to a class and the
      // current correction candidate is the name of that class, then skip
      // it as it is unlikely a qualified version of the class' constructor
      // is an appropriate correction.
      if (CXXRecordDecl *NSDecl = NSType ?
              NSType->getAsCXXRecordDecl() : nullptr) {
        if (NSDecl->getIdentifier() == QR.getCorrectionAsIdentifierInfo())
          continue;
      }

      TypoCorrection TC(QR);
      TC.ClearCorrectionDecls();
      TC.setCorrectionSpecifier(NSI.NameSpecifier);
      TC.setQualifierDistance(NSI.EditDistance);
      TC.setCallbackDistance(0); // Reset the callback distance

      // If the current correction candidate and namespace combination are
      // too far away from the original typo based on the normalized edit
      // distance, then skip performing a qualified name lookup.
      unsigned TmpED = TC.getEditDistance(true);
      if (QR.getCorrectionAsIdentifierInfo() != Typo && TmpED &&
          TypoLen / TmpED < 3)
        continue;

      Result.clear();
      Result.setLookupName(QR.getCorrectionAsIdentifierInfo());
      if (!SemaRef.LookupQualifiedName(Result, Ctx))
        continue;

      // Any corrections added below will be validated in subsequent
      // iterations of the main while() loop over the Consumer's contents.
      switch (Result.getResultKind()) {
      case LookupResult::Found:
      case LookupResult::FoundOverloaded: {
        if (SS && SS->isValid()) {
          std::string NewQualified = TC.getAsString(SemaRef.getLangOpts());
          std::string OldQualified;
          llvm::raw_string_ostream OldOStream(OldQualified);
          SS->getScopeRep()->print(OldOStream, SemaRef.getPrintingPolicy());
          OldOStream << Typo->getName();
          // If the correction candidate would be an identically written
          // qualified identifier, then the existing CXXScopeSpec probably
          // included a typedef that didn't get accounted for properly.
          if (OldOStream.str() == NewQualified)
            break;
        }
        for (LookupResult::iterator TRD = Result.begin(),
                                    TRDEnd = Result.end();
             TRD != TRDEnd; ++TRD) {
          if (SemaRef.CheckMemberAccess(TC.getCorrectionRange().getBegin(),
                                        NSType ? NSType->getAsCXXRecordDecl()
                                               : nullptr,
                                        TRD.getPair()) ==
              Sema::AR_accessible)
            TC.addCorrectionDecl(*TRD);
        }
        if (TC.isResolved()) {
          TC.setCorrectionRange(SS.get(), Result.getLookupNameInfo());
          addCorrection(TC);
        }
        break;
      }
      case LookupResult::NotFound:
      case LookupResult::NotFoundInCurrentInstantiation:
      case LookupResult::Ambiguous:
      case LookupResult::FoundUnresolvedValue:
        break;
      }
    }
  }
  QualifiedResults.clear();
}

TypoCorrectionConsumer::NamespaceSpecifierSet::NamespaceSpecifierSet(
    ASTContext &Context, DeclContext *CurContext, CXXScopeSpec *CurScopeSpec)
    : Context(Context), CurContextChain(buildContextChain(CurContext)) {
  if (NestedNameSpecifier *NNS =
          CurScopeSpec ? CurScopeSpec->getScopeRep() : nullptr) {
    llvm::raw_string_ostream SpecifierOStream(CurNameSpecifier);
    NNS->print(SpecifierOStream, Context.getPrintingPolicy());

    getNestedNameSpecifierIdentifiers(NNS, CurNameSpecifierIdentifiers);
  }
  // Build the list of identifiers that would be used for an absolute
  // (from the global context) NestedNameSpecifier referring to the current
  // context.
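  //
  // For example (an illustrative note): if the current context is the
  // namespace 'bar' nested in namespace 'foo', the loop below records the
  // identifiers "foo" and "bar", corresponding to the absolute specifier
  // '::foo::bar::'.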
for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(), CEnd = CurContextChain.rend(); C != CEnd; ++C) { if (NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C)) CurContextIdentifiers.push_back(ND->getIdentifier()); } // Add the global context as a NestedNameSpecifier SpecifierInfo SI = {cast<DeclContext>(Context.getTranslationUnitDecl()), NestedNameSpecifier::GlobalSpecifier(Context), 1}; DistanceMap[1].push_back(SI); } auto TypoCorrectionConsumer::NamespaceSpecifierSet::buildContextChain( DeclContext *Start) -> DeclContextList { assert(Start && "Building a context chain from a null context"); DeclContextList Chain; for (DeclContext *DC = Start->getPrimaryContext(); DC != nullptr; DC = DC->getLookupParent()) { NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(DC); if (!DC->isInlineNamespace() && !DC->isTransparentContext() && !(ND && ND->isAnonymousNamespace())) Chain.push_back(DC->getPrimaryContext()); } return Chain; } unsigned TypoCorrectionConsumer::NamespaceSpecifierSet::buildNestedNameSpecifier( DeclContextList &DeclChain, NestedNameSpecifier *&NNS) { unsigned NumSpecifiers = 0; for (DeclContextList::reverse_iterator C = DeclChain.rbegin(), CEnd = DeclChain.rend(); C != CEnd; ++C) { if (NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C)) { NNS = NestedNameSpecifier::Create(Context, NNS, ND); ++NumSpecifiers; } else if (RecordDecl *RD = dyn_cast_or_null<RecordDecl>(*C)) { NNS = NestedNameSpecifier::Create(Context, NNS, RD->isTemplateDecl(), RD->getTypeForDecl()); ++NumSpecifiers; } } return NumSpecifiers; } void TypoCorrectionConsumer::NamespaceSpecifierSet::addNameSpecifier( DeclContext *Ctx) { NestedNameSpecifier *NNS = nullptr; unsigned NumSpecifiers = 0; DeclContextList NamespaceDeclChain(buildContextChain(Ctx)); DeclContextList FullNamespaceDeclChain(NamespaceDeclChain); // Eliminate common elements from the two DeclContext chains. for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(), CEnd = CurContextChain.rend(); C != CEnd && !NamespaceDeclChain.empty() && NamespaceDeclChain.back() == *C; ++C) { NamespaceDeclChain.pop_back(); } // Build the NestedNameSpecifier from what is left of the NamespaceDeclChain NumSpecifiers = buildNestedNameSpecifier(NamespaceDeclChain, NNS); // Add an explicit leading '::' specifier if needed. if (NamespaceDeclChain.empty()) { // Rebuild the NestedNameSpecifier as a globally-qualified specifier. NNS = NestedNameSpecifier::GlobalSpecifier(Context); NumSpecifiers = buildNestedNameSpecifier(FullNamespaceDeclChain, NNS); } else if (NamedDecl *ND = dyn_cast_or_null<NamedDecl>(NamespaceDeclChain.back())) { IdentifierInfo *Name = ND->getIdentifier(); bool SameNameSpecifier = false; if (std::find(CurNameSpecifierIdentifiers.begin(), CurNameSpecifierIdentifiers.end(), Name) != CurNameSpecifierIdentifiers.end()) { std::string NewNameSpecifier; llvm::raw_string_ostream SpecifierOStream(NewNameSpecifier); SmallVector<const IdentifierInfo *, 4> NewNameSpecifierIdentifiers; getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers); NNS->print(SpecifierOStream, Context.getPrintingPolicy()); SpecifierOStream.flush(); SameNameSpecifier = NewNameSpecifier == CurNameSpecifier; } if (SameNameSpecifier || std::find(CurContextIdentifiers.begin(), CurContextIdentifiers.end(), Name) != CurContextIdentifiers.end()) { // Rebuild the NestedNameSpecifier as a globally-qualified specifier. 
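      // (A leading component that collides with the current context or with
      // the existing name specifier would make the shorter spelling
      // ambiguous; qualifying from the global scope, e.g. '::N::foo',
      // sidesteps that.)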
NNS = NestedNameSpecifier::GlobalSpecifier(Context); NumSpecifiers = buildNestedNameSpecifier(FullNamespaceDeclChain, NNS); } } // If the built NestedNameSpecifier would be replacing an existing // NestedNameSpecifier, use the number of component identifiers that // would need to be changed as the edit distance instead of the number // of components in the built NestedNameSpecifier. if (NNS && !CurNameSpecifierIdentifiers.empty()) { SmallVector<const IdentifierInfo*, 4> NewNameSpecifierIdentifiers; getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers); NumSpecifiers = llvm::ComputeEditDistance( llvm::makeArrayRef(CurNameSpecifierIdentifiers), llvm::makeArrayRef(NewNameSpecifierIdentifiers)); } SpecifierInfo SI = {Ctx, NNS, NumSpecifiers}; DistanceMap[NumSpecifiers].push_back(SI); } /// \brief Perform name lookup for a possible result for typo correction. static void LookupPotentialTypoResult(Sema &SemaRef, LookupResult &Res, IdentifierInfo *Name, Scope *S, CXXScopeSpec *SS, DeclContext *MemberContext, bool EnteringContext, bool isObjCIvarLookup, bool FindHidden) { Res.suppressDiagnostics(); Res.clear(); Res.setLookupName(Name); Res.setAllowHidden(FindHidden); if (MemberContext) { if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(MemberContext)) { if (isObjCIvarLookup) { if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(Name)) { Res.addDecl(Ivar); Res.resolveKind(); return; } } if (ObjCPropertyDecl *Prop = Class->FindPropertyDeclaration(Name)) { Res.addDecl(Prop); Res.resolveKind(); return; } } SemaRef.LookupQualifiedName(Res, MemberContext); return; } SemaRef.LookupParsedName(Res, S, SS, /*AllowBuiltinCreation=*/false, EnteringContext); // Fake ivar lookup; this should really be part of // LookupParsedName. if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) { if (Method->isInstanceMethod() && Method->getClassInterface() && (Res.empty() || (Res.isSingleResult() && Res.getFoundDecl()->isDefinedOutsideFunctionOrMethod()))) { if (ObjCIvarDecl *IV = Method->getClassInterface()->lookupInstanceVariable(Name)) { Res.addDecl(IV); Res.resolveKind(); } } } } /// \brief Add keywords to the consumer as possible typo corrections. static void AddKeywordsToConsumer(Sema &SemaRef, TypoCorrectionConsumer &Consumer, Scope *S, CorrectionCandidateCallback &CCC, bool AfterNestedNameSpecifier) { if (AfterNestedNameSpecifier) { // For 'X::', we know exactly which keywords can appear next. Consumer.addKeywordResult("template"); if (CCC.WantExpressionKeywords) Consumer.addKeywordResult("operator"); return; } if (CCC.WantObjCSuper) Consumer.addKeywordResult("super"); if (CCC.WantTypeSpecifiers) { // Add type-specifier keywords to the set of results. 
static const char *const CTypeSpecs[] = { "char", "const", "double", "enum", "float", "int", "long", "short", "signed", "struct", "union", "unsigned", "void", "volatile", "_Complex", "_Imaginary", // storage-specifiers as well "extern", "inline", "static", "typedef" }; const unsigned NumCTypeSpecs = llvm::array_lengthof(CTypeSpecs); for (unsigned I = 0; I != NumCTypeSpecs; ++I) Consumer.addKeywordResult(CTypeSpecs[I]); if (SemaRef.getLangOpts().C99) Consumer.addKeywordResult("restrict"); if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus) Consumer.addKeywordResult("bool"); else if (SemaRef.getLangOpts().C99) Consumer.addKeywordResult("_Bool"); if (SemaRef.getLangOpts().CPlusPlus) { Consumer.addKeywordResult("class"); Consumer.addKeywordResult("typename"); Consumer.addKeywordResult("wchar_t"); if (SemaRef.getLangOpts().CPlusPlus11) { Consumer.addKeywordResult("char16_t"); Consumer.addKeywordResult("char32_t"); Consumer.addKeywordResult("constexpr"); Consumer.addKeywordResult("decltype"); Consumer.addKeywordResult("thread_local"); } } if (SemaRef.getLangOpts().GNUMode) Consumer.addKeywordResult("typeof"); } else if (CCC.WantFunctionLikeCasts) { static const char *const CastableTypeSpecs[] = { "char", "double", "float", "int", "long", "short", "signed", "unsigned", "void" }; for (auto *kw : CastableTypeSpecs) Consumer.addKeywordResult(kw); } if (CCC.WantCXXNamedCasts && SemaRef.getLangOpts().CPlusPlus) { Consumer.addKeywordResult("const_cast"); Consumer.addKeywordResult("dynamic_cast"); Consumer.addKeywordResult("reinterpret_cast"); Consumer.addKeywordResult("static_cast"); } if (CCC.WantExpressionKeywords) { Consumer.addKeywordResult("sizeof"); if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus) { Consumer.addKeywordResult("false"); Consumer.addKeywordResult("true"); } if (SemaRef.getLangOpts().CPlusPlus) { static const char *const CXXExprs[] = { "delete", "new", "operator", "throw", "typeid" }; const unsigned NumCXXExprs = llvm::array_lengthof(CXXExprs); for (unsigned I = 0; I != NumCXXExprs; ++I) Consumer.addKeywordResult(CXXExprs[I]); if (isa<CXXMethodDecl>(SemaRef.CurContext) && cast<CXXMethodDecl>(SemaRef.CurContext)->isInstance()) Consumer.addKeywordResult("this"); if (SemaRef.getLangOpts().CPlusPlus11) { Consumer.addKeywordResult("alignof"); Consumer.addKeywordResult("nullptr"); } } if (SemaRef.getLangOpts().C11) { // FIXME: We should not suggest _Alignof if the alignof macro // is present. Consumer.addKeywordResult("_Alignof"); } } if (CCC.WantRemainingKeywords) { if (SemaRef.getCurFunctionOrMethodDecl() || SemaRef.getCurBlock()) { // Statements. 
static const char *const CStmts[] = { "do", "else", "for", "goto", "if", "return", "switch", "while" }; const unsigned NumCStmts = llvm::array_lengthof(CStmts); for (unsigned I = 0; I != NumCStmts; ++I) Consumer.addKeywordResult(CStmts[I]); if (SemaRef.getLangOpts().CPlusPlus) { Consumer.addKeywordResult("catch"); Consumer.addKeywordResult("try"); } if (S && S->getBreakParent()) Consumer.addKeywordResult("break"); if (S && S->getContinueParent()) Consumer.addKeywordResult("continue"); if (!SemaRef.getCurFunction()->SwitchStack.empty()) { Consumer.addKeywordResult("case"); Consumer.addKeywordResult("default"); } } else { if (SemaRef.getLangOpts().CPlusPlus) { Consumer.addKeywordResult("namespace"); Consumer.addKeywordResult("template"); } if (S && S->isClassScope()) { Consumer.addKeywordResult("explicit"); Consumer.addKeywordResult("friend"); Consumer.addKeywordResult("mutable"); Consumer.addKeywordResult("private"); Consumer.addKeywordResult("protected"); Consumer.addKeywordResult("public"); Consumer.addKeywordResult("virtual"); } } if (SemaRef.getLangOpts().CPlusPlus) { Consumer.addKeywordResult("using"); if (SemaRef.getLangOpts().CPlusPlus11) Consumer.addKeywordResult("static_assert"); } } } std::unique_ptr<TypoCorrectionConsumer> Sema::makeTypoCorrectionConsumer( const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery) { if (Diags.hasFatalErrorOccurred() || !getLangOpts().SpellChecking || DisableTypoCorrection) return nullptr; // In Microsoft mode, don't perform typo correction in a template member // function dependent context because it interferes with the "lookup into // dependent bases of class templates" feature. if (getLangOpts().MSVCCompat && CurContext->isDependentContext() && isa<CXXMethodDecl>(CurContext)) return nullptr; // We only attempt to correct typos for identifiers. IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo(); if (!Typo) return nullptr; // If the scope specifier itself was invalid, don't try to correct // typos. if (SS && SS->isInvalid()) return nullptr; // Never try to correct typos during template deduction or // instantiation. if (!ActiveTemplateInstantiations.empty()) return nullptr; // Don't try to correct 'super'. if (S && S->isInObjcMethodScope() && Typo == getSuperIdentifier()) return nullptr; // Abort if typo correction already failed for this specific typo. IdentifierSourceLocations::iterator locs = TypoCorrectionFailures.find(Typo); if (locs != TypoCorrectionFailures.end() && locs->second.count(TypoName.getLoc())) return nullptr; // Don't try to correct the identifier "vector" when in AltiVec mode. // TODO: Figure out why typo correction misbehaves in this case, fix it, and // remove this workaround. if ((getLangOpts().AltiVec || getLangOpts().ZVector) && Typo->isStr("vector")) return nullptr; // Provide a stop gap for files that are just seriously broken. Trying // to correct all typos can turn into a HUGE performance penalty, causing // some files to take minutes to get rejected by the parser. unsigned Limit = getDiagnostics().getDiagnosticOptions().SpellCheckingLimit; if (Limit && TyposCorrected >= Limit) return nullptr; ++TyposCorrected; // If we're handling a missing symbol error, using modules, and the // special search all modules option is used, look for a missing import. 
if (ErrorRecovery && getLangOpts().Modules && getLangOpts().ModulesSearchAll) { // The following has the side effect of loading the missing module. getModuleLoader().lookupMissingImports(Typo->getName(), TypoName.getLocStart()); } CorrectionCandidateCallback &CCCRef = *CCC; auto Consumer = llvm::make_unique<TypoCorrectionConsumer>( *this, TypoName, LookupKind, S, SS, std::move(CCC), MemberContext, EnteringContext); // Perform name lookup to find visible, similarly-named entities. bool IsUnqualifiedLookup = false; DeclContext *QualifiedDC = MemberContext; if (MemberContext) { LookupVisibleDecls(MemberContext, LookupKind, *Consumer); // Look in qualified interfaces. if (OPT) { for (auto *I : OPT->quals()) LookupVisibleDecls(I, LookupKind, *Consumer); } } else if (SS && SS->isSet()) { QualifiedDC = computeDeclContext(*SS, EnteringContext); if (!QualifiedDC) return nullptr; LookupVisibleDecls(QualifiedDC, LookupKind, *Consumer); } else { IsUnqualifiedLookup = true; } // Determine whether we are going to search in the various namespaces for // corrections. bool SearchNamespaces = getLangOpts().CPlusPlus && (IsUnqualifiedLookup || (SS && SS->isSet())); if (IsUnqualifiedLookup || SearchNamespaces) { // For unqualified lookup, look through all of the names that we have // seen in this translation unit. // FIXME: Re-add the ability to skip very unlikely potential corrections. for (const auto &I : Context.Idents) Consumer->FoundName(I.getKey()); // Walk through identifiers in external identifier sources. // FIXME: Re-add the ability to skip very unlikely potential corrections. if (IdentifierInfoLookup *External = Context.Idents.getExternalIdentifierLookup()) { std::unique_ptr<IdentifierIterator> Iter(External->getIdentifiers()); do { StringRef Name = Iter->Next(); if (Name.empty()) break; Consumer->FoundName(Name); } while (true); } } AddKeywordsToConsumer(*this, *Consumer, S, CCCRef, SS && SS->isNotEmpty()); // Build the NestedNameSpecifiers for the KnownNamespaces, if we're going // to search those namespaces. if (SearchNamespaces) { // Load any externally-known namespaces. if (ExternalSource && !LoadedExternalKnownNamespaces) { SmallVector<NamespaceDecl *, 4> ExternalKnownNamespaces; LoadedExternalKnownNamespaces = true; ExternalSource->ReadKnownNamespaces(ExternalKnownNamespaces); for (auto *N : ExternalKnownNamespaces) KnownNamespaces[N] = true; } Consumer->addNamespaces(KnownNamespaces); } return Consumer; } /// \brief Try to "correct" a typo in the source code by finding /// visible declarations whose names are similar to the name that was /// present in the source code. /// /// \param TypoName the \c DeclarationNameInfo structure that contains /// the name that was present in the source code along with its location. /// /// \param LookupKind the name-lookup criteria used to search for the name. /// /// \param S the scope in which name lookup occurs. /// /// \param SS the nested-name-specifier that precedes the name we're /// looking for, if present. /// /// \param CCC A CorrectionCandidateCallback object that provides further /// validation of typo correction candidates. It also provides flags for /// determining the set of keywords permitted. /// /// \param MemberContext if non-NULL, the context in which to look for /// a member access expression. /// /// \param EnteringContext whether we're entering the context described by /// the nested-name-specifier SS. 
///
/// \param OPT when non-NULL, the search for visible declarations will
/// also walk the protocols in the qualified interfaces of \p OPT.
///
/// \returns a \c TypoCorrection containing the corrected name, along with
/// information such as the \c NamedDecl where the corrected name was
/// declared, and any additional \c NestedNameSpecifier needed to access
/// it (C++ only). The \c TypoCorrection is empty if there is no correction.
TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
                                 Sema::LookupNameKind LookupKind,
                                 Scope *S, CXXScopeSpec *SS,
                                 std::unique_ptr<CorrectionCandidateCallback> CCC,
                                 CorrectTypoKind Mode,
                                 DeclContext *MemberContext,
                                 bool EnteringContext,
                                 const ObjCObjectPointerType *OPT,
                                 bool RecordFailure) {
  assert(CCC && "CorrectTypo requires a CorrectionCandidateCallback");

  // Always let the ExternalSource have the first chance at correction, even
  // if we would otherwise have given up.
  if (ExternalSource) {
    if (TypoCorrection Correction = ExternalSource->CorrectTypo(
            TypoName, LookupKind, S, SS, *CCC, MemberContext, EnteringContext,
            OPT))
      return Correction;
  }

  // Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver;
  // WantObjCSuper is only true for CTC_ObjCMessageReceiver and for
  // some instances of CTC_Unknown, while WantRemainingKeywords is true
  // for CTC_Unknown but not for CTC_ObjCMessageReceiver.
  bool ObjCMessageReceiver = CCC->WantObjCSuper && !CCC->WantRemainingKeywords;

  IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
  auto Consumer = makeTypoCorrectionConsumer(
      TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
      EnteringContext, OPT, Mode == CTK_ErrorRecovery);

  if (!Consumer)
    return TypoCorrection();

  // If we haven't found anything, we're done.
  if (Consumer->empty())
    return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);

  // Make sure the best edit distance (prior to adding any namespace qualifiers)
  // is not more than about a third of the length of the typo's identifier.
  unsigned ED = Consumer->getBestEditDistance(true);
  unsigned TypoLen = Typo->getName().size();
  if (ED > 0 && TypoLen / ED < 3)
    return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);

  TypoCorrection BestTC = Consumer->getNextCorrection();
  TypoCorrection SecondBestTC = Consumer->getNextCorrection();
  if (!BestTC)
    return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);

  ED = BestTC.getEditDistance();

  if (TypoLen >= 3 && ED > 0 && TypoLen / ED < 3) {
    // If this was an unqualified lookup and we believe the callback
    // object wouldn't have filtered out possible corrections, note
    // that no correction was found.
    return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);
  }

  // If only a single name remains, return that result.
  if (!SecondBestTC ||
      SecondBestTC.getEditDistance(false) > BestTC.getEditDistance(false)) {
    const TypoCorrection &Result = BestTC;

    // Don't correct to a keyword that's the same as the typo; the keyword
    // wasn't actually in scope.
    if (ED == 0 && Result.isKeyword())
      return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);

    TypoCorrection TC = Result;
    TC.setCorrectionRange(SS, TypoName);
    checkCorrectionVisibility(*this, TC);
    return TC;
  } else if (SecondBestTC && ObjCMessageReceiver) {
    // Prefer 'super' when we're completing in a message-receiver
    // context.

    if (BestTC.getCorrection().getAsString() != "super") {
      if (SecondBestTC.getCorrection().getAsString() == "super")
        BestTC = SecondBestTC;
      else if ((*Consumer)["super"].front().isKeyword())
        BestTC = (*Consumer)["super"].front();
    }
    // Don't correct to a keyword that's the same as the typo; the keyword
    // wasn't actually in scope.
    if (BestTC.getEditDistance() == 0 ||
        BestTC.getCorrection().getAsString() != "super")
      return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure);

    BestTC.setCorrectionRange(SS, TypoName);
    return BestTC;
  }

  // Record the failure's location if needed and return an empty correction. If
  // this was an unqualified lookup and we believe the callback object did not
  // filter out possible corrections, also cache the failure for the typo.
  return FailedCorrection(Typo, TypoName.getLoc(), RecordFailure && !SecondBestTC);
}

/// \brief Try to "correct" a typo in the source code by finding
/// visible declarations whose names are similar to the name that was
/// present in the source code.
///
/// \param TypoName the \c DeclarationNameInfo structure that contains
/// the name that was present in the source code along with its location.
///
/// \param LookupKind the name-lookup criteria used to search for the name.
///
/// \param S the scope in which name lookup occurs.
///
/// \param SS the nested-name-specifier that precedes the name we're
/// looking for, if present.
///
/// \param CCC A CorrectionCandidateCallback object that provides further
/// validation of typo correction candidates. It also provides flags for
/// determining the set of keywords permitted.
///
/// \param TDG A TypoDiagnosticGenerator functor that will be used to print
/// diagnostics when the actual typo correction is attempted.
///
/// \param TRC A TypoRecoveryCallback functor that will be used to build an
/// Expr from a typo correction candidate.
///
/// \param MemberContext if non-NULL, the context in which to look for
/// a member access expression.
///
/// \param EnteringContext whether we're entering the context described by
/// the nested-name-specifier SS.
///
/// \param OPT when non-NULL, the search for visible declarations will
/// also walk the protocols in the qualified interfaces of \p OPT.
///
/// \returns a new \c TypoExpr that will later be replaced in the AST with an
/// Expr representing the result of performing typo correction, or nullptr if
/// typo correction is not possible. If nullptr is returned, no diagnostics will
/// be emitted and it is the responsibility of the caller to emit any that are
/// needed.
TypoExpr *Sema::CorrectTypoDelayed(
    const DeclarationNameInfo &TypoName, Sema::LookupNameKind LookupKind,
    Scope *S, CXXScopeSpec *SS,
    std::unique_ptr<CorrectionCandidateCallback> CCC,
    TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode,
    DeclContext *MemberContext, bool EnteringContext,
    const ObjCObjectPointerType *OPT) {
  assert(CCC && "CorrectTypoDelayed requires a CorrectionCandidateCallback");

  TypoCorrection Empty;
  auto Consumer = makeTypoCorrectionConsumer(
      TypoName, LookupKind, S, SS, std::move(CCC), MemberContext,
      EnteringContext, OPT, Mode == CTK_ErrorRecovery);
  if (!Consumer || Consumer->empty())
    return nullptr;

  // Make sure the best edit distance (prior to adding any namespace qualifiers)
  // is not more than about a third of the length of the typo's identifier.
unsigned ED = Consumer->getBestEditDistance(true); IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo(); if (ED > 0 && Typo->getName().size() / ED < 3) return nullptr; ExprEvalContexts.back().NumTypos++; return createDelayedTypo(std::move(Consumer), std::move(TDG), std::move(TRC)); } void TypoCorrection::addCorrectionDecl(NamedDecl *CDecl) { if (!CDecl) return; if (isKeyword()) CorrectionDecls.clear(); CorrectionDecls.push_back(CDecl->getUnderlyingDecl()); if (!CorrectionName) CorrectionName = CDecl->getDeclName(); } std::string TypoCorrection::getAsString(const LangOptions &LO) const { if (CorrectionNameSpec) { std::string tmpBuffer; llvm::raw_string_ostream PrefixOStream(tmpBuffer); CorrectionNameSpec->print(PrefixOStream, PrintingPolicy(LO)); PrefixOStream << CorrectionName; return PrefixOStream.str(); } return CorrectionName.getAsString(); } bool CorrectionCandidateCallback::ValidateCandidate( const TypoCorrection &candidate) { if (!candidate.isResolved()) return true; if (candidate.isKeyword()) return WantTypeSpecifiers || WantExpressionKeywords || WantCXXNamedCasts || WantRemainingKeywords || WantObjCSuper; bool HasNonType = false; bool HasStaticMethod = false; bool HasNonStaticMethod = false; for (Decl *D : candidate) { if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D)) D = FTD->getTemplatedDecl(); if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) { if (Method->isStatic()) HasStaticMethod = true; else HasNonStaticMethod = true; } if (!isa<TypeDecl>(D)) HasNonType = true; } if (IsAddressOfOperand && HasNonStaticMethod && !HasStaticMethod && !candidate.getCorrectionSpecifier()) return false; return WantTypeSpecifiers || HasNonType; } FunctionCallFilterCCC::FunctionCallFilterCCC(Sema &SemaRef, unsigned NumArgs, bool HasExplicitTemplateArgs, MemberExpr *ME) : NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs), CurContext(SemaRef.CurContext), MemberFn(ME) { WantTypeSpecifiers = false; WantFunctionLikeCasts = SemaRef.getLangOpts().CPlusPlus && NumArgs == 1; WantRemainingKeywords = false; } bool FunctionCallFilterCCC::ValidateCandidate(const TypoCorrection &candidate) { if (!candidate.getCorrectionDecl()) return candidate.isKeyword(); for (auto *C : candidate) { FunctionDecl *FD = nullptr; NamedDecl *ND = C->getUnderlyingDecl(); if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND)) FD = FTD->getTemplatedDecl(); if (!HasExplicitTemplateArgs && !FD) { if (!(FD = dyn_cast<FunctionDecl>(ND)) && isa<ValueDecl>(ND)) { // If the Decl is neither a function nor a template function, // determine if it is a pointer or reference to a function. If so, // check against the number of arguments expected for the pointee. QualType ValType = cast<ValueDecl>(ND)->getType(); if (ValType->isAnyPointerType() || ValType->isReferenceType()) ValType = ValType->getPointeeType(); if (const FunctionProtoType *FPT = ValType->getAs<FunctionProtoType>()) if (FPT->getNumParams() == NumArgs) return true; } } // Skip the current candidate if it is not a FunctionDecl or does not accept // the current number of arguments. if (!FD || !(FD->getNumParams() >= NumArgs && FD->getMinRequiredArguments() <= NumArgs)) continue; // If the current candidate is a non-static C++ method, skip the candidate // unless the method being corrected--or the current DeclContext, if the // function being corrected is not a method--is a method in the same class // or a descendent class of the candidate's parent class. 
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) { if (MemberFn || !MD->isStatic()) { CXXMethodDecl *CurMD = MemberFn ? dyn_cast_or_null<CXXMethodDecl>(MemberFn->getMemberDecl()) : dyn_cast_or_null<CXXMethodDecl>(CurContext); CXXRecordDecl *CurRD = CurMD ? CurMD->getParent()->getCanonicalDecl() : nullptr; CXXRecordDecl *RD = MD->getParent()->getCanonicalDecl(); if (!CurRD || (CurRD != RD && !CurRD->isDerivedFrom(RD))) continue; } } return true; } return false; } void Sema::diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery) { diagnoseTypo(Correction, TypoDiag, PDiag(diag::note_previous_decl), ErrorRecovery); } /// Find which declaration we should import to provide the definition of /// the given declaration. static NamedDecl *getDefinitionToImport(NamedDecl *D) { if (VarDecl *VD = dyn_cast<VarDecl>(D)) return VD->getDefinition(); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) return FD->isDefined(FD) ? const_cast<FunctionDecl*>(FD) : nullptr; if (TagDecl *TD = dyn_cast<TagDecl>(D)) return TD->getDefinition(); if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) return ID->getDefinition(); if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) return PD->getDefinition(); if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D)) return getDefinitionToImport(TD->getTemplatedDecl()); return nullptr; } void Sema::diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, bool NeedDefinition, bool Recover) { assert(!isVisible(Decl) && "missing import for non-hidden decl?"); // Suggest importing a module providing the definition of this entity, if // possible. NamedDecl *Def = getDefinitionToImport(Decl); if (!Def) Def = Decl; // FIXME: Add a Fix-It that imports the corresponding module or includes // the header. Module *Owner = getOwningModule(Decl); assert(Owner && "definition of hidden declaration is not in a module"); llvm::SmallVector<Module*, 8> OwningModules; OwningModules.push_back(Owner); auto Merged = Context.getModulesWithMergedDefinition(Decl); OwningModules.insert(OwningModules.end(), Merged.begin(), Merged.end()); diagnoseMissingImport(Loc, Decl, Decl->getLocation(), OwningModules, NeedDefinition ? MissingImportKind::Definition : MissingImportKind::Declaration, Recover); } void Sema::diagnoseMissingImport(SourceLocation UseLoc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover) { assert(!Modules.empty()); if (Modules.size() > 1) { std::string ModuleList; unsigned N = 0; for (Module *M : Modules) { ModuleList += "\n "; if (++N == 5 && N != Modules.size()) { ModuleList += "[...]"; break; } ModuleList += M->getFullModuleName(); } Diag(UseLoc, diag::err_module_unimported_use_multiple) << (int)MIK << Decl << ModuleList; } else { Diag(UseLoc, diag::err_module_unimported_use) << (int)MIK << Decl << Modules[0]->getFullModuleName(); } unsigned DiagID; switch (MIK) { case MissingImportKind::Declaration: DiagID = diag::note_previous_declaration; break; case MissingImportKind::Definition: DiagID = diag::note_previous_definition; break; case MissingImportKind::DefaultArgument: DiagID = diag::note_default_argument_declared_here; break; } Diag(DeclLoc, DiagID); // Try to recover by implicitly importing this module. if (Recover) createImplicitModuleImportForErrorRecovery(UseLoc, Modules[0]); } /// \brief Diagnose a successfully-corrected typo. Separated from the correction /// itself to allow external validation of the result, etc. 
/// /// \param Correction The result of performing typo correction. /// \param TypoDiag The diagnostic to produce. This will have the corrected /// string added to it (and usually also a fixit). /// \param PrevNote A note to use when indicating the location of the entity to /// which we are correcting. Will have the correction string added to it. /// \param ErrorRecovery If \c true (the default), the caller is going to /// recover from the typo as if the corrected string had been typed. /// In this case, \c PDiag must be an error, and we will attach a fixit /// to it. void Sema::diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery) { std::string CorrectedStr = Correction.getAsString(getLangOpts()); std::string CorrectedQuotedStr = Correction.getQuoted(getLangOpts()); FixItHint FixTypo = FixItHint::CreateReplacement( Correction.getCorrectionRange(), CorrectedStr); // Maybe we're just missing a module import. if (Correction.requiresImport()) { NamedDecl *Decl = Correction.getCorrectionDecl(); assert(Decl && "import required but no declaration to import"); diagnoseMissingImport(Correction.getCorrectionRange().getBegin(), Decl, /*NeedDefinition*/ false, ErrorRecovery); return; } Diag(Correction.getCorrectionRange().getBegin(), TypoDiag) << CorrectedQuotedStr << (ErrorRecovery ? FixTypo : FixItHint()); NamedDecl *ChosenDecl = Correction.isKeyword() ? nullptr : Correction.getCorrectionDecl(); if (PrevNote.getDiagID() && ChosenDecl) Diag(ChosenDecl->getLocation(), PrevNote) << CorrectedQuotedStr << (ErrorRecovery ? FixItHint() : FixTypo); } TypoExpr *Sema::createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC) { assert(TCC && "createDelayedTypo requires a valid TypoCorrectionConsumer"); auto TE = new (Context) TypoExpr(Context.DependentTy); auto &State = DelayedTypos[TE]; State.Consumer = std::move(TCC); State.DiagHandler = std::move(TDG); State.RecoveryHandler = std::move(TRC); return TE; } const Sema::TypoExprState &Sema::getTypoExprState(TypoExpr *TE) const { auto Entry = DelayedTypos.find(TE); assert(Entry != DelayedTypos.end() && "Failed to get the state for a TypoExpr!"); return Entry->second; } void Sema::clearDelayedTypo(TypoExpr *TE) { DelayedTypos.erase(TE); }
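
// A minimal, self-contained sketch (hypothetical; not part of the clang
// sources) of the length-based plausibility gate CorrectTypo applies above:
// a candidate at edit distance ED from a typo of length TypoLen is rejected
// when ED > 0 && TypoLen / ED < 3 (integer division), i.e. a correction may
// change at most roughly a third of the identifier's characters.
#include <string>

// Hypothetical helper name; mirrors the reject test used in CorrectTypo.
static bool plausibleCorrectionSketch(const std::string &Typo, unsigned ED) {
  if (ED == 0)
    return true; // identical spelling, e.g. only the qualifier would change
  return Typo.size() / ED >= 3;
}
// For example, "vektor" -> "vector" has ED == 1 and is kept (6 / 1 >= 3),
// while "foo" -> "bar" has ED == 3 and is rejected (3 / 3 == 1 < 3).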
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/Sema/SemaAttr.cpp
//===--- SemaAttr.cpp - Semantic Analysis for Attributes ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements semantic analysis for non-trivial attributes and // pragmas. // //===----------------------------------------------------------------------===// #include "clang/Sema/SemaInternal.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/Attr.h" #include "clang/AST/Expr.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Lookup.h" using namespace clang; //===----------------------------------------------------------------------===// // Pragma 'pack' and 'options align' //===----------------------------------------------------------------------===// namespace { struct PackStackEntry { // We just use a sentinel to represent when the stack is set to mac68k // alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; unsigned Alignment; IdentifierInfo *Name; }; /// PragmaPackStack - Simple class to wrap the stack used by #pragma /// pack. class PragmaPackStack { typedef std::vector<PackStackEntry> stack_ty; /// Alignment - The current user specified alignment. unsigned Alignment; /// Stack - Entries in the #pragma pack stack, consisting of saved /// alignments and optional names. stack_ty Stack; public: PragmaPackStack() : Alignment(0) {} void setAlignment(unsigned A) { Alignment = A; } unsigned getAlignment() { return Alignment; } /// push - Push the current alignment onto the stack, optionally /// using the given \arg Name for the record, if non-zero. void push(IdentifierInfo *Name) { PackStackEntry PSE = { Alignment, Name }; Stack.push_back(PSE); } /// pop - Pop a record from the stack and restore the current /// alignment to the previous value. If \arg Name is non-zero then /// the first such named record is popped, otherwise the top record /// is popped. Returns true if the pop succeeded. bool pop(IdentifierInfo *Name, bool IsReset); }; } // end anonymous namespace. bool PragmaPackStack::pop(IdentifierInfo *Name, bool IsReset) { // If name is empty just pop top. if (!Name) { // An empty stack is a special case... if (Stack.empty()) { // If this isn't a reset, it is always an error. if (!IsReset) return false; // Otherwise, it is an error only if some alignment has been set. if (!Alignment) return false; // Otherwise, reset to the default alignment. Alignment = 0; } else { Alignment = Stack.back().Alignment; Stack.pop_back(); } return true; } // Otherwise, find the named record. for (unsigned i = Stack.size(); i != 0; ) { --i; if (Stack[i].Name == Name) { // Found it, pop up to and including this record. Alignment = Stack[i].Alignment; Stack.erase(Stack.begin() + i, Stack.end()); return true; } } return false; } /// FreePackedContext - Deallocate and null out PackContext. void Sema::FreePackedContext() { delete static_cast<PragmaPackStack*>(PackContext); PackContext = nullptr; } void Sema::AddAlignmentAttributesForRecord(RecordDecl *RD) { // If there is no pack context, we don't need any attributes. if (!PackContext) return; PragmaPackStack *Stack = static_cast<PragmaPackStack*>(PackContext); // Otherwise, check to see if we need a max field alignment attribute. 
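// (The pragma stack tracks the maximum field alignment in bytes;
// MaxFieldAlignmentAttr stores it in bits, hence the 'Alignment * 8' below.)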
if (unsigned Alignment = Stack->getAlignment()) {
    if (Alignment == PackStackEntry::kMac68kAlignmentSentinel)
      RD->addAttr(AlignMac68kAttr::CreateImplicit(Context));
    else
      RD->addAttr(MaxFieldAlignmentAttr::CreateImplicit(Context,
                                                        Alignment * 8));
  }
}

void Sema::AddMsStructLayoutForRecord(RecordDecl *RD) {
  if (MSStructPragmaOn)
    RD->addAttr(MSStructAttr::CreateImplicit(Context));

  // FIXME: We should merge AddAlignmentAttributesForRecord with
  // AddMsStructLayoutForRecord into AddPragmaAttributesForRecord, which takes
  // all active pragmas and applies them as attributes to class definitions.
  if (VtorDispModeStack.back() != getLangOpts().VtorDispMode)
    RD->addAttr(
        MSVtorDispAttr::CreateImplicit(Context, VtorDispModeStack.back()));
}

void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                                   SourceLocation PragmaLoc) {
  if (!PackContext)
    PackContext = new PragmaPackStack();

  PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);

  switch (Kind) {
    // For all targets we support, native and natural are the same.
    //
    // FIXME: This is not true on Darwin/PPC.
  case POAK_Native:
  case POAK_Power:
  case POAK_Natural:
    Context->push(nullptr);
    Context->setAlignment(0);
    break;

    // Note that '#pragma options align=packed' is not equivalent to attribute
    // packed; it has a different precedence relative to attribute aligned.
  case POAK_Packed:
    Context->push(nullptr);
    Context->setAlignment(1);
    break;

  case POAK_Mac68k:
    // Check if the target supports this.
    if (!this->Context.getTargetInfo().hasAlignMac68kSupport()) {
      Diag(PragmaLoc, diag::err_pragma_options_align_mac68k_target_unsupported);
      return;
    }
    Context->push(nullptr);
    Context->setAlignment(PackStackEntry::kMac68kAlignmentSentinel);
    break;

  case POAK_Reset:
    // Reset just pops the top of the stack, or resets the current alignment to
    // default.
    if (!Context->pop(nullptr, /*IsReset=*/true)) {
      Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed)
        << "stack empty";
    }
    break;
  }
}

void Sema::ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
                           Expr *alignment, SourceLocation PragmaLoc,
                           SourceLocation LParenLoc, SourceLocation RParenLoc) {
  Expr *Alignment = static_cast<Expr *>(alignment);

  // If specified then alignment must be a "small" power of two.
  unsigned AlignmentVal = 0;
  if (Alignment) {
    llvm::APSInt Val;

    // pack(0) is like pack(), which just works out since that is what
    // we use 0 for in PackAttr.
    if (Alignment->isTypeDependent() ||
        Alignment->isValueDependent() ||
        !Alignment->isIntegerConstantExpr(Val, Context) ||
        !(Val == 0 || Val.isPowerOf2()) ||
        Val.getZExtValue() > 16) {
      Diag(PragmaLoc, diag::warn_pragma_pack_invalid_alignment);
      return; // Ignore
    }

    AlignmentVal = (unsigned) Val.getZExtValue();
  }

  if (!PackContext)
    PackContext = new PragmaPackStack();

  PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);

  switch (Kind) {
  case Sema::PPK_Default: // pack([n])
    Context->setAlignment(AlignmentVal);
    break;

  case Sema::PPK_Show: // pack(show)
    // Show the current alignment, making sure to show the right value
    // for the default.
    AlignmentVal = Context->getAlignment();
    // FIXME: This should come from the target.
    if (AlignmentVal == 0)
      AlignmentVal = 8;
    if (AlignmentVal == PackStackEntry::kMac68kAlignmentSentinel)
      Diag(PragmaLoc, diag::warn_pragma_pack_show) << "mac68k";
    else
      Diag(PragmaLoc, diag::warn_pragma_pack_show) << AlignmentVal;
    break;

  case Sema::PPK_Push: // pack(push [, id] [, n])
    Context->push(Name);
    // Set the new alignment if specified.
    if (Alignment)
      Context->setAlignment(AlignmentVal);
    break;

  case Sema::PPK_Pop: // pack(pop [, id] [, n])
    // MSDN, C/C++ Preprocessor Reference > Pragma Directives > pack:
    // "#pragma pack(pop, identifier, n) is undefined"
    if (Alignment && Name)
      Diag(PragmaLoc, diag::warn_pragma_pack_pop_identifer_and_alignment);

    // Do the pop.
    if (!Context->pop(Name, /*IsReset=*/false)) {
      // If a name was specified, then failure indicates the name
      // wasn't found. Otherwise failure indicates the stack was
      // empty.
      Diag(PragmaLoc, diag::warn_pragma_pop_failed)
        << "pack" << (Name ? "no record matching name" : "stack empty");

      // FIXME: Warn about popping named records as MSVC does.
    } else {
      // Pop succeeded, set the new alignment if specified.
      if (Alignment)
        Context->setAlignment(AlignmentVal);
    }
    break;
  }
}

void Sema::ActOnPragmaPackMatrix(bool bRowMajor, SourceLocation PragmaLoc) {
  // Once we've encountered one #pragma pack_matrix, we have a well-defined
  // default orientation for the rest of the program, even if we're rewriting.
  HasDefaultMatrixPack = true;
  DefaultMatrixPackRowMajor = bRowMajor;
}

void Sema::ActOnPragmaMSStruct(PragmaMSStructKind Kind) {
  MSStructPragmaOn = (Kind == PMSST_ON);
}

void Sema::ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg) {
  // FIXME: Serialize this.
  switch (Kind) {
  case PCK_Unknown:
    llvm_unreachable("unexpected pragma comment kind");
  case PCK_Linker:
    Consumer.HandleLinkerOptionPragma(Arg);
    return;
  case PCK_Lib:
    Consumer.HandleDependentLibrary(Arg);
    return;
  case PCK_Compiler:
  case PCK_ExeStr:
  case PCK_User:
    return; // We ignore all of these.
  }
  llvm_unreachable("invalid pragma comment kind");
}

void Sema::ActOnPragmaDetectMismatch(StringRef Name, StringRef Value) {
  // FIXME: Serialize this.
  Consumer.HandleDetectMismatch(Name, Value);
}

void Sema::ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind RepresentationMethod,
    SourceLocation PragmaLoc) {
  MSPointerToMemberRepresentationMethod = RepresentationMethod;
  ImplicitMSInheritanceAttrLoc = PragmaLoc;
}

void Sema::ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind,
                                 SourceLocation PragmaLoc,
                                 MSVtorDispAttr::Mode Mode) {
  switch (Kind) {
  case PVDK_Set:
    VtorDispModeStack.back() = Mode;
    break;
  case PVDK_Push:
    VtorDispModeStack.push_back(Mode);
    break;
  case PVDK_Reset:
    VtorDispModeStack.clear();
    VtorDispModeStack.push_back(MSVtorDispAttr::Mode(LangOpts.VtorDispMode));
    break;
  case PVDK_Pop:
    VtorDispModeStack.pop_back();
    if (VtorDispModeStack.empty()) {
      Diag(PragmaLoc, diag::warn_pragma_pop_failed) << "vtordisp"
                                                    << "stack empty";
      VtorDispModeStack.push_back(MSVtorDispAttr::Mode(LangOpts.VtorDispMode));
    }
    break;
  }
}

template<typename ValueType>
void Sema::PragmaStack<ValueType>::Act(SourceLocation PragmaLocation,
                                       PragmaMsStackAction Action,
                                       llvm::StringRef StackSlotLabel,
                                       ValueType Value) {
  if (Action == PSK_Reset) {
    CurrentValue = nullptr;
    return;
  }

  if (Action & PSK_Push)
    Stack.push_back(Slot(StackSlotLabel, CurrentValue, CurrentPragmaLocation));
  else if (Action & PSK_Pop) {
    if (!StackSlotLabel.empty()) {
      // If we've got a label, try to find it and jump there.
      auto I = std::find_if(Stack.rbegin(), Stack.rend(), [&](const Slot &x) {
        return x.StackSlotLabel == StackSlotLabel;
      });
      // If we found the label, pop from there.
      if (I != Stack.rend()) {
        CurrentValue = I->Value;
        CurrentPragmaLocation = I->PragmaLocation;
        Stack.erase(std::prev(I.base()), Stack.end());
      }
    } else if (!Stack.empty()) {
      // We don't have a label; just pop the last entry.
      CurrentValue = Stack.back().Value;
      CurrentPragmaLocation = Stack.back().PragmaLocation;
      Stack.pop_back();
    }
  }
  if (Action & PSK_Set) {
    CurrentValue = Value;
    CurrentPragmaLocation = PragmaLocation;
  }
}

bool Sema::UnifySection(StringRef SectionName,
                        int SectionFlags,
                        DeclaratorDecl *Decl) {
  auto Section = Context.SectionInfos.find(SectionName);
  if (Section == Context.SectionInfos.end()) {
    Context.SectionInfos[SectionName] =
        ASTContext::SectionInfo(Decl, SourceLocation(), SectionFlags);
    return false;
  }
  // A pre-declared section takes precedence w/o diagnostic.
  if (Section->second.SectionFlags == SectionFlags ||
      !(Section->second.SectionFlags & ASTContext::PSF_Implicit))
    return false;
  auto OtherDecl = Section->second.Decl;
  Diag(Decl->getLocation(), diag::err_section_conflict)
      << Decl << OtherDecl;
  Diag(OtherDecl->getLocation(), diag::note_declared_at)
      << OtherDecl->getName();
  if (auto A = Decl->getAttr<SectionAttr>())
    if (A->isImplicit())
      Diag(A->getLocation(), diag::note_pragma_entered_here);
  if (auto A = OtherDecl->getAttr<SectionAttr>())
    if (A->isImplicit())
      Diag(A->getLocation(), diag::note_pragma_entered_here);
  return true;
}

bool Sema::UnifySection(StringRef SectionName,
                        int SectionFlags,
                        SourceLocation PragmaSectionLocation) {
  auto Section = Context.SectionInfos.find(SectionName);
  if (Section != Context.SectionInfos.end()) {
    if (Section->second.SectionFlags == SectionFlags)
      return false;
    if (!(Section->second.SectionFlags & ASTContext::PSF_Implicit)) {
      Diag(PragmaSectionLocation, diag::err_section_conflict)
          << "this" << "a prior #pragma section";
      Diag(Section->second.PragmaSectionLocation,
           diag::note_pragma_entered_here);
      return true;
    }
  }
  Context.SectionInfos[SectionName] =
      ASTContext::SectionInfo(nullptr, PragmaSectionLocation, SectionFlags);
  return false;
}

/// \brief Called on a well-formed \#pragma data_seg(), \#pragma bss_seg(),
/// \#pragma const_seg() or \#pragma code_seg().
void Sema::ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                            PragmaMsStackAction Action,
                            llvm::StringRef StackSlotLabel,
                            StringLiteral *SegmentName,
                            llvm::StringRef PragmaName) {
  PragmaStack<StringLiteral *> *Stack =
    llvm::StringSwitch<PragmaStack<StringLiteral *> *>(PragmaName)
        .Case("data_seg", &DataSegStack)
        .Case("bss_seg", &BSSSegStack)
        .Case("const_seg", &ConstSegStack)
        .Case("code_seg", &CodeSegStack);
  if (Action & PSK_Pop && Stack->Stack.empty())
    Diag(PragmaLocation, diag::warn_pragma_pop_failed) << PragmaName
        << "stack empty";
  if (SegmentName &&
      !checkSectionName(SegmentName->getLocStart(), SegmentName->getString()))
    return;
  Stack->Act(PragmaLocation, Action, StackSlotLabel, SegmentName);
}

/// \brief Called on a well-formed \#pragma section().
void Sema::ActOnPragmaMSSection(SourceLocation PragmaLocation,
                                int SectionFlags, StringLiteral *SegmentName) {
  UnifySection(SegmentName->getString(), SectionFlags, PragmaLocation);
}

void Sema::ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                                StringLiteral *SegmentName) {
  // There's no stack to maintain, so we just have a current section. When we
  // see the default section, reset our current section back to null so we stop
  // tacking on unnecessary attributes.
  CurInitSeg = SegmentName->getString() == ".CRT$XCU" ?
nullptr : SegmentName; CurInitSegLoc = PragmaLocation; } void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope, SourceLocation PragmaLoc) { IdentifierInfo *Name = IdTok.getIdentifierInfo(); LookupResult Lookup(*this, Name, IdTok.getLocation(), LookupOrdinaryName); LookupParsedName(Lookup, curScope, nullptr, true); if (Lookup.empty()) { Diag(PragmaLoc, diag::warn_pragma_unused_undeclared_var) << Name << SourceRange(IdTok.getLocation()); return; } VarDecl *VD = Lookup.getAsSingle<VarDecl>(); if (!VD) { Diag(PragmaLoc, diag::warn_pragma_unused_expected_var_arg) << Name << SourceRange(IdTok.getLocation()); return; } // Warn if this was used before being marked unused. if (VD->isUsed()) Diag(PragmaLoc, diag::warn_used_but_marked_unused) << Name; VD->addAttr(UnusedAttr::CreateImplicit(Context, IdTok.getLocation())); } void Sema::AddCFAuditedAttribute(Decl *D) { SourceLocation Loc = PP.getPragmaARCCFCodeAuditedLoc(); if (!Loc.isValid()) return; // Don't add a redundant or conflicting attribute. if (D->hasAttr<CFAuditedTransferAttr>() || D->hasAttr<CFUnknownTransferAttr>()) return; D->addAttr(CFAuditedTransferAttr::CreateImplicit(Context, Loc)); } void Sema::ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc) { if(On) OptimizeOffPragmaLocation = SourceLocation(); else OptimizeOffPragmaLocation = PragmaLoc; } void Sema::AddRangeBasedOptnone(FunctionDecl *FD) { // In the future, check other pragmas if they're implemented (e.g. pragma // optimize 0 will probably map to this functionality too). if(OptimizeOffPragmaLocation.isValid()) AddOptnoneAttributeIfNoConflicts(FD, OptimizeOffPragmaLocation); } void Sema::AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc) { // Don't add a conflicting attribute. No diagnostic is needed. if (FD->hasAttr<MinSizeAttr>() || FD->hasAttr<AlwaysInlineAttr>()) return; // Add attributes only if required. Optnone requires noinline as well, but if // either is already present then don't bother adding them. if (!FD->hasAttr<OptimizeNoneAttr>()) FD->addAttr(OptimizeNoneAttr::CreateImplicit(Context, Loc)); if (!FD->hasAttr<NoInlineAttr>()) FD->addAttr(NoInlineAttr::CreateImplicit(Context, Loc)); } typedef std::vector<std::pair<unsigned, SourceLocation> > VisStack; enum : unsigned { NoVisibility = ~0U }; void Sema::AddPushedVisibilityAttribute(Decl *D) { if (!VisContext) return; NamedDecl *ND = dyn_cast<NamedDecl>(D); if (ND && ND->getExplicitVisibility(NamedDecl::VisibilityForValue)) return; VisStack *Stack = static_cast<VisStack*>(VisContext); unsigned rawType = Stack->back().first; if (rawType == NoVisibility) return; VisibilityAttr::VisibilityType type = (VisibilityAttr::VisibilityType) rawType; SourceLocation loc = Stack->back().second; D->addAttr(VisibilityAttr::CreateImplicit(Context, type, loc)); } /// FreeVisContext - Deallocate and null out VisContext. void Sema::FreeVisContext() { delete static_cast<VisStack*>(VisContext); VisContext = nullptr; } static void PushPragmaVisibility(Sema &S, unsigned type, SourceLocation loc) { // Put visibility on stack. if (!S.VisContext) S.VisContext = new VisStack; VisStack *Stack = static_cast<VisStack*>(S.VisContext); Stack->push_back(std::make_pair(type, loc)); } void Sema::ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc) { if (VisType) { // Compute visibility to use. 
VisibilityAttr::VisibilityType T; if (!VisibilityAttr::ConvertStrToVisibilityType(VisType->getName(), T)) { Diag(PragmaLoc, diag::warn_attribute_unknown_visibility) << VisType; return; } PushPragmaVisibility(*this, T, PragmaLoc); } else { PopPragmaVisibility(false, PragmaLoc); } } void Sema::ActOnPragmaFPContract(tok::OnOffSwitch OOS) { switch (OOS) { case tok::OOS_ON: FPFeatures.fp_contract = 1; break; case tok::OOS_OFF: FPFeatures.fp_contract = 0; break; case tok::OOS_DEFAULT: FPFeatures.fp_contract = getLangOpts().DefaultFPContract; break; } } void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc) { // Visibility calculations will consider the namespace's visibility. // Here we just want to note that we're in a visibility context // which overrides any enclosing #pragma context, but doesn't itself // contribute visibility. PushPragmaVisibility(*this, NoVisibility, Loc); } void Sema::PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc) { if (!VisContext) { Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch); return; } // Pop visibility from stack VisStack *Stack = static_cast<VisStack*>(VisContext); const std::pair<unsigned, SourceLocation> *Back = &Stack->back(); bool StartsWithPragma = Back->first != NoVisibility; if (StartsWithPragma && IsNamespaceEnd) { Diag(Back->second, diag::err_pragma_push_visibility_mismatch); Diag(EndLoc, diag::note_surrounding_namespace_ends_here); // For better error recovery, eat all pushes inside the namespace. do { Stack->pop_back(); Back = &Stack->back(); StartsWithPragma = Back->first != NoVisibility; } while (StartsWithPragma); } else if (!StartsWithPragma && !IsNamespaceEnd) { Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch); Diag(Back->second, diag::note_surrounding_namespace_starts_here); return; } Stack->pop_back(); // To simplify the implementation, never keep around an empty stack. if (Stack->empty()) FreeVisContext(); }
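
// A minimal usage sketch (hypothetical; not part of the original file) of the
// '#pragma pack' stack discipline implemented by PragmaPackStack above:
// push saves the current maximum field alignment (optionally under a name),
// and pop restores either the top entry or the most recent entry whose name
// matches.
#pragma pack(push, sketch_label, 1) // save current alignment, then pack(1)
struct PackedSketch {
  char c;
  int i; // placed at offset 1; sizeof(PackedSketch) == 5
};
#pragma pack(pop, sketch_label) // restore the alignment saved by the push
struct NaturalSketch {
  char c;
  int i; // padded to int's natural alignment; sizeof is typically 8
};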
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/Transforms.cpp
//===--- Transforms.cpp - Transformations to ARC mode ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/AST/StmtVisitor.h" #include "clang/Analysis/DomainSpecific/CocoaConventions.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/Lex/Lexer.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/Sema.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/StringSwitch.h" #include <map> using namespace clang; using namespace arcmt; using namespace trans; ASTTraverser::~ASTTraverser() { } bool MigrationPass::CFBridgingFunctionsDefined() { if (!EnableCFBridgeFns.hasValue()) EnableCFBridgeFns = SemaRef.isKnownName("CFBridgingRetain") && SemaRef.isKnownName("CFBridgingRelease"); return *EnableCFBridgeFns; } //===----------------------------------------------------------------------===// // Helpers. //===----------------------------------------------------------------------===// bool trans::canApplyWeak(ASTContext &Ctx, QualType type, bool AllowOnUnknownClass) { if (!Ctx.getLangOpts().ObjCARCWeak) return false; QualType T = type; if (T.isNull()) return false; // iOS is always safe to use 'weak'. if (Ctx.getTargetInfo().getTriple().isiOS()) AllowOnUnknownClass = true; while (const PointerType *ptr = T->getAs<PointerType>()) T = ptr->getPointeeType(); if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) { ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl(); if (!AllowOnUnknownClass && (!Class || Class->getName() == "NSObject")) return false; // id/NSObject is not safe for weak. if (!AllowOnUnknownClass && !Class->hasDefinition()) return false; // forward classes are not verifiable, therefore not safe. if (Class && Class->isArcWeakrefUnavailable()) return false; } return true; } bool trans::isPlusOneAssign(const BinaryOperator *E) { if (E->getOpcode() != BO_Assign) return false; return isPlusOne(E->getRHS()); } bool trans::isPlusOne(const Expr *E) { if (!E) return false; if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) E = EWC->getSubExpr(); if (const ObjCMessageExpr * ME = dyn_cast<ObjCMessageExpr>(E->IgnoreParenCasts())) if (ME->getMethodFamily() == OMF_retain) return true; if (const CallExpr * callE = dyn_cast<CallExpr>(E->IgnoreParenCasts())) { if (const FunctionDecl *FD = callE->getDirectCallee()) { if (FD->hasAttr<CFReturnsRetainedAttr>()) return true; if (FD->isGlobal() && FD->getIdentifier() && FD->getParent()->isTranslationUnit() && FD->isExternallyVisible() && ento::cocoa::isRefType(callE->getType(), "CF", FD->getIdentifier()->getName())) { StringRef fname = FD->getIdentifier()->getName(); if (fname.endswith("Retain") || fname.find("Create") != StringRef::npos || fname.find("Copy") != StringRef::npos) { return true; } } } } const ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(E); while (implCE && implCE->getCastKind() == CK_BitCast) implCE = dyn_cast<ImplicitCastExpr>(implCE->getSubExpr()); if (implCE && implCE->getCastKind() == CK_ARCConsumeObject) return true; return false; } /// \brief 'Loc' is the end of a statement range. This returns the location /// immediately after the semicolon following the statement. 
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
SourceLocation trans::findLocationAfterSemi(SourceLocation loc,
                                            ASTContext &Ctx, bool IsDecl) {
  SourceLocation SemiLoc = findSemiAfterLocation(loc, Ctx, IsDecl);
  if (SemiLoc.isInvalid())
    return SourceLocation();
  return SemiLoc.getLocWithOffset(1);
}

/// \brief \arg Loc is the end of a statement range. This returns the location
/// of the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
SourceLocation trans::findSemiAfterLocation(SourceLocation loc,
                                            ASTContext &Ctx, bool IsDecl) {
  SourceManager &SM = Ctx.getSourceManager();
  if (loc.isMacroID()) {
    if (!Lexer::isAtEndOfMacroExpansion(loc, SM, Ctx.getLangOpts(), &loc))
      return SourceLocation();
  }
  loc = Lexer::getLocForEndOfToken(loc, /*Offset=*/0, SM, Ctx.getLangOpts());

  // Break down the source location.
  std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);

  // Try to load the file buffer.
  bool invalidTemp = false;
  StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
  if (invalidTemp)
    return SourceLocation();

  const char *tokenBegin = file.data() + locInfo.second;

  // Lex from the start of the given location.
  Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
              Ctx.getLangOpts(),
              file.begin(), tokenBegin, file.end());
  Token tok;
  lexer.LexFromRawLexer(tok);
  if (tok.isNot(tok::semi)) {
    if (!IsDecl)
      return SourceLocation();
    // A declaration may be followed by other tokens, such as an __attribute__,
    // before ending with a semicolon.
    return findSemiAfterLocation(tok.getLocation(), Ctx, /*IsDecl*/true);
  }

  return tok.getLocation();
}

bool trans::hasSideEffects(Expr *E, ASTContext &Ctx) {
  if (!E || !E->HasSideEffects(Ctx))
    return false;
  E = E->IgnoreParenCasts();

  ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
  if (!ME)
    return true;

  switch (ME->getMethodFamily()) {
  case OMF_autorelease:
  case OMF_dealloc:
  case OMF_release:
  case OMF_retain:
    switch (ME->getReceiverKind()) {
    case ObjCMessageExpr::SuperInstance:
      return false;
    case ObjCMessageExpr::Instance:
      return hasSideEffects(ME->getInstanceReceiver(), Ctx);
    default:
      break;
    }
    break;
  default:
    break;
  }

  return true;
}

bool trans::isGlobalVar(Expr *E) {
  E = E->IgnoreParenCasts();
  if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
    return DRE->getDecl()->getDeclContext()->isFileContext() &&
           DRE->getDecl()->isExternallyVisible();
  if (ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(E))
    return isGlobalVar(condOp->getTrueExpr()) &&
           isGlobalVar(condOp->getFalseExpr());

  return false;
}

StringRef trans::getNilString(MigrationPass &Pass) {
  return Pass.SemaRef.PP.isMacroDefined("nil") ?
"nil" : "0"; } namespace { class ReferenceClear : public RecursiveASTVisitor<ReferenceClear> { ExprSet &Refs; public: ReferenceClear(ExprSet &refs) : Refs(refs) { } bool VisitDeclRefExpr(DeclRefExpr *E) { Refs.erase(E); return true; } }; class ReferenceCollector : public RecursiveASTVisitor<ReferenceCollector> { ValueDecl *Dcl; ExprSet &Refs; public: ReferenceCollector(ValueDecl *D, ExprSet &refs) : Dcl(D), Refs(refs) { } bool VisitDeclRefExpr(DeclRefExpr *E) { if (E->getDecl() == Dcl) Refs.insert(E); return true; } }; class RemovablesCollector : public RecursiveASTVisitor<RemovablesCollector> { ExprSet &Removables; public: RemovablesCollector(ExprSet &removables) : Removables(removables) { } bool shouldWalkTypesOfTypeLocs() const { return false; } bool TraverseStmtExpr(StmtExpr *E) { CompoundStmt *S = E->getSubStmt(); for (CompoundStmt::body_iterator I = S->body_begin(), E = S->body_end(); I != E; ++I) { if (I != E - 1) mark(*I); TraverseStmt(*I); } return true; } bool VisitCompoundStmt(CompoundStmt *S) { for (auto *I : S->body()) mark(I); return true; } bool VisitIfStmt(IfStmt *S) { mark(S->getThen()); mark(S->getElse()); return true; } bool VisitWhileStmt(WhileStmt *S) { mark(S->getBody()); return true; } bool VisitDoStmt(DoStmt *S) { mark(S->getBody()); return true; } bool VisitForStmt(ForStmt *S) { mark(S->getInit()); mark(S->getInc()); mark(S->getBody()); return true; } private: void mark(Stmt *S) { if (!S) return; while (LabelStmt *Label = dyn_cast<LabelStmt>(S)) S = Label->getSubStmt(); S = S->IgnoreImplicit(); if (Expr *E = dyn_cast<Expr>(S)) Removables.insert(E); } }; } // end anonymous namespace void trans::clearRefsIn(Stmt *S, ExprSet &refs) { ReferenceClear(refs).TraverseStmt(S); } void trans::collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs) { ReferenceCollector(D, refs).TraverseStmt(S); } void trans::collectRemovables(Stmt *S, ExprSet &exprs) { RemovablesCollector(exprs).TraverseStmt(S); } //===----------------------------------------------------------------------===// // MigrationContext //===----------------------------------------------------------------------===// namespace { class ASTTransform : public RecursiveASTVisitor<ASTTransform> { MigrationContext &MigrateCtx; typedef RecursiveASTVisitor<ASTTransform> base; public: ASTTransform(MigrationContext &MigrateCtx) : MigrateCtx(MigrateCtx) { } bool shouldWalkTypesOfTypeLocs() const { return false; } bool TraverseObjCImplementationDecl(ObjCImplementationDecl *D) { ObjCImplementationContext ImplCtx(MigrateCtx, D); for (MigrationContext::traverser_iterator I = MigrateCtx.traversers_begin(), E = MigrateCtx.traversers_end(); I != E; ++I) (*I)->traverseObjCImplementation(ImplCtx); return base::TraverseObjCImplementationDecl(D); } bool TraverseStmt(Stmt *rootS) { if (!rootS) return true; BodyContext BodyCtx(MigrateCtx, rootS); for (MigrationContext::traverser_iterator I = MigrateCtx.traversers_begin(), E = MigrateCtx.traversers_end(); I != E; ++I) (*I)->traverseBody(BodyCtx); return true; } }; } MigrationContext::~MigrationContext() { for (traverser_iterator I = traversers_begin(), E = traversers_end(); I != E; ++I) delete *I; } bool MigrationContext::isGCOwnedNonObjC(QualType T) { while (!T.isNull()) { if (const AttributedType *AttrT = T->getAs<AttributedType>()) { if (AttrT->getAttrKind() == AttributedType::attr_objc_ownership) return !AttrT->getModifiedType()->isObjCRetainableType(); } if (T->isArrayType()) T = Pass.Ctx.getBaseElementType(T); else if (const PointerType *PT = T->getAs<PointerType>()) T = PT->getPointeeType(); 
else if (const ReferenceType *RT = T->getAs<ReferenceType>()) T = RT->getPointeeType(); else break; } return false; } bool MigrationContext::rewritePropertyAttribute(StringRef fromAttr, StringRef toAttr, SourceLocation atLoc) { if (atLoc.isMacroID()) return false; SourceManager &SM = Pass.Ctx.getSourceManager(); // Break down the source location. std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc); // Try to load the file buffer. bool invalidTemp = false; StringRef file = SM.getBufferData(locInfo.first, &invalidTemp); if (invalidTemp) return false; const char *tokenBegin = file.data() + locInfo.second; // Lex from the start of the given location. Lexer lexer(SM.getLocForStartOfFile(locInfo.first), Pass.Ctx.getLangOpts(), file.begin(), tokenBegin, file.end()); Token tok; lexer.LexFromRawLexer(tok); if (tok.isNot(tok::at)) return false; lexer.LexFromRawLexer(tok); if (tok.isNot(tok::raw_identifier)) return false; if (tok.getRawIdentifier() != "property") return false; lexer.LexFromRawLexer(tok); if (tok.isNot(tok::l_paren)) return false; Token BeforeTok = tok; Token AfterTok; AfterTok.startToken(); SourceLocation AttrLoc; lexer.LexFromRawLexer(tok); if (tok.is(tok::r_paren)) return false; while (1) { if (tok.isNot(tok::raw_identifier)) return false; if (tok.getRawIdentifier() == fromAttr) { if (!toAttr.empty()) { Pass.TA.replaceText(tok.getLocation(), fromAttr, toAttr); return true; } // We want to remove the attribute. AttrLoc = tok.getLocation(); } do { lexer.LexFromRawLexer(tok); if (AttrLoc.isValid() && AfterTok.is(tok::unknown)) AfterTok = tok; } while (tok.isNot(tok::comma) && tok.isNot(tok::r_paren)); if (tok.is(tok::r_paren)) break; if (AttrLoc.isInvalid()) BeforeTok = tok; lexer.LexFromRawLexer(tok); } if (toAttr.empty() && AttrLoc.isValid() && AfterTok.isNot(tok::unknown)) { // We want to remove the attribute. if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::r_paren)) { Pass.TA.remove(SourceRange(BeforeTok.getLocation(), AfterTok.getLocation())); } else if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::comma)) { Pass.TA.remove(SourceRange(AttrLoc, AfterTok.getLocation())); } else { Pass.TA.remove(SourceRange(BeforeTok.getLocation(), AttrLoc)); } return true; } return false; } bool MigrationContext::addPropertyAttribute(StringRef attr, SourceLocation atLoc) { if (atLoc.isMacroID()) return false; SourceManager &SM = Pass.Ctx.getSourceManager(); // Break down the source location. std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc); // Try to load the file buffer. bool invalidTemp = false; StringRef file = SM.getBufferData(locInfo.first, &invalidTemp); if (invalidTemp) return false; const char *tokenBegin = file.data() + locInfo.second; // Lex from the start of the given location. 
Lexer lexer(SM.getLocForStartOfFile(locInfo.first), Pass.Ctx.getLangOpts(), file.begin(), tokenBegin, file.end()); Token tok; lexer.LexFromRawLexer(tok); if (tok.isNot(tok::at)) return false; lexer.LexFromRawLexer(tok); if (tok.isNot(tok::raw_identifier)) return false; if (tok.getRawIdentifier() != "property") return false; lexer.LexFromRawLexer(tok); if (tok.isNot(tok::l_paren)) { Pass.TA.insert(tok.getLocation(), std::string("(") + attr.str() + ") "); return true; } lexer.LexFromRawLexer(tok); if (tok.is(tok::r_paren)) { Pass.TA.insert(tok.getLocation(), attr); return true; } if (tok.isNot(tok::raw_identifier)) return false; Pass.TA.insert(tok.getLocation(), std::string(attr) + ", "); return true; } void MigrationContext::traverse(TranslationUnitDecl *TU) { for (traverser_iterator I = traversers_begin(), E = traversers_end(); I != E; ++I) (*I)->traverseTU(*this); ASTTransform(*this).TraverseDecl(TU); } static void GCRewriteFinalize(MigrationPass &pass) { ASTContext &Ctx = pass.Ctx; TransformActions &TA = pass.TA; DeclContext *DC = Ctx.getTranslationUnitDecl(); Selector FinalizeSel = Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize")); typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl> impl_iterator; for (impl_iterator I = impl_iterator(DC->decls_begin()), E = impl_iterator(DC->decls_end()); I != E; ++I) { for (const auto *MD : I->instance_methods()) { if (!MD->hasBody()) continue; if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) { const ObjCMethodDecl *FinalizeM = MD; Transaction Trans(TA); TA.insert(FinalizeM->getSourceRange().getBegin(), "#if !__has_feature(objc_arc)\n"); CharSourceRange::getTokenRange(FinalizeM->getSourceRange()); const SourceManager &SM = pass.Ctx.getSourceManager(); const LangOptions &LangOpts = pass.Ctx.getLangOpts(); bool Invalid; std::string str = "\n#endif\n"; str += Lexer::getSourceText( CharSourceRange::getTokenRange(FinalizeM->getSourceRange()), SM, LangOpts, &Invalid); TA.insertAfterToken(FinalizeM->getSourceRange().getEnd(), str); break; } } } } //===----------------------------------------------------------------------===// // getAllTransformations. //===----------------------------------------------------------------------===// static void traverseAST(MigrationPass &pass) { MigrationContext MigrateCtx(pass); if (pass.isGCMigration()) { MigrateCtx.addTraverser(new GCCollectableCallsTraverser); MigrateCtx.addTraverser(new GCAttrsTraverser()); } MigrateCtx.addTraverser(new PropertyRewriteTraverser()); MigrateCtx.addTraverser(new BlockObjCVariableTraverser()); MigrateCtx.addTraverser(new ProtectedScopeTraverser()); MigrateCtx.traverse(pass.Ctx.getTranslationUnitDecl()); } static void independentTransforms(MigrationPass &pass) { rewriteAutoreleasePool(pass); removeRetainReleaseDeallocFinalize(pass); rewriteUnusedInitDelegate(pass); removeZeroOutPropsInDeallocFinalize(pass); makeAssignARCSafe(pass); rewriteUnbridgedCasts(pass); checkAPIUses(pass); traverseAST(pass); } std::vector<TransformFn> arcmt::getAllTransformations( LangOptions::GCMode OrigGCMode, bool NoFinalizeRemoval) { std::vector<TransformFn> transforms; if (OrigGCMode == LangOptions::GCOnly && NoFinalizeRemoval) transforms.push_back(GCRewriteFinalize); transforms.push_back(independentTransforms); // This depends on previous transformations removing various expressions. transforms.push_back(removeEmptyStatementsAndDeallocFinalize); return transforms; }
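// --- Illustrative sketch (not part of the original file) --------------------
// How a driver might run the pipeline assembled by getAllTransformations().
// TransformFn is assumed to be the plain function-pointer type declared in
// Transforms.h (not shown in this dump); `runMigration` itself is a
// hypothetical helper for illustration, not an arcmt API.
static void runMigration(MigrationPass &pass) {
  std::vector<TransformFn> transforms =
      arcmt::getAllTransformations(pass.OrigGCMode, pass.noFinalizeRemoval());
  // Order matters: independentTransforms must run before
  // removeEmptyStatementsAndDeallocFinalize, which assumes the earlier
  // passes have already removed various expressions.
  for (TransformFn fn : transforms)
    fn(pass); // each transform records its rewrites through pass.TA
}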
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/Internals.h
//===-- Internals.h - Implementation Details---------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H #define LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H #include "clang/ARCMigrate/ARCMT.h" #include "clang/Basic/Diagnostic.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include <list> namespace clang { class Sema; class Stmt; namespace arcmt { class CapturedDiagList { typedef std::list<StoredDiagnostic> ListTy; ListTy List; public: void push_back(const StoredDiagnostic &diag) { List.push_back(diag); } bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range); bool hasDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) const; void reportDiagnostics(DiagnosticsEngine &diags) const; bool hasErrors() const; typedef ListTy::const_iterator iterator; iterator begin() const { return List.begin(); } iterator end() const { return List.end(); } }; void writeARCDiagsToPlist(const std::string &outPath, ArrayRef<StoredDiagnostic> diags, SourceManager &SM, const LangOptions &LangOpts); class TransformActions { DiagnosticsEngine &Diags; CapturedDiagList &CapturedDiags; void *Impl; // TransformActionsImpl. public: TransformActions(DiagnosticsEngine &diag, CapturedDiagList &capturedDiags, ASTContext &ctx, Preprocessor &PP); ~TransformActions(); void startTransaction(); bool commitTransaction(); void abortTransaction(); void insert(SourceLocation loc, StringRef text); void insertAfterToken(SourceLocation loc, StringRef text); void remove(SourceRange range); void removeStmt(Stmt *S); void replace(SourceRange range, StringRef text); void replace(SourceRange range, SourceRange replacementRange); void replaceStmt(Stmt *S, StringRef text); void replaceText(SourceLocation loc, StringRef text, StringRef replacementText); void increaseIndentation(SourceRange range, SourceLocation parentIndent); bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range); bool clearAllDiagnostics(SourceRange range) { return clearDiagnostic(None, range); } bool clearDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) { unsigned IDs[] = { ID1, ID2 }; return clearDiagnostic(IDs, range); } bool clearDiagnostic(unsigned ID1, unsigned ID2, unsigned ID3, SourceRange range) { unsigned IDs[] = { ID1, ID2, ID3 }; return clearDiagnostic(IDs, range); } bool hasDiagnostic(unsigned ID, SourceRange range) { return CapturedDiags.hasDiagnostic(ID, range); } bool hasDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) { unsigned IDs[] = { ID1, ID2 }; return CapturedDiags.hasDiagnostic(IDs, range); } DiagnosticBuilder report(SourceLocation loc, unsigned diagId, SourceRange range = SourceRange()); void reportError(StringRef error, SourceLocation loc, SourceRange range = SourceRange()); void reportWarning(StringRef warning, SourceLocation loc, SourceRange range = SourceRange()); void reportNote(StringRef note, SourceLocation loc, SourceRange range = SourceRange()); bool hasReportedErrors() const { return Diags.hasUnrecoverableErrorOccurred(); } class RewriteReceiver { public: virtual ~RewriteReceiver(); virtual void insert(SourceLocation loc, StringRef text) = 0; virtual void remove(CharSourceRange range) = 0; virtual void increaseIndentation(CharSourceRange range, SourceLocation parentIndent) = 0; }; void applyRewrites(RewriteReceiver &receiver); }; class 
Transaction { TransformActions &TA; bool Aborted; public: Transaction(TransformActions &TA) : TA(TA), Aborted(false) { TA.startTransaction(); } ~Transaction() { if (!isAborted()) TA.commitTransaction(); } void abort() { TA.abortTransaction(); Aborted = true; } bool isAborted() const { return Aborted; } }; class MigrationPass { public: ASTContext &Ctx; LangOptions::GCMode OrigGCMode; MigratorOptions MigOptions; Sema &SemaRef; TransformActions &TA; const CapturedDiagList &CapturedDiags; std::vector<SourceLocation> &ARCMTMacroLocs; Optional<bool> EnableCFBridgeFns; MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode, Sema &sema, TransformActions &TA, const CapturedDiagList &capturedDiags, std::vector<SourceLocation> &ARCMTMacroLocs) : Ctx(Ctx), OrigGCMode(OrigGCMode), MigOptions(), SemaRef(sema), TA(TA), CapturedDiags(capturedDiags), ARCMTMacroLocs(ARCMTMacroLocs) { } const CapturedDiagList &getDiags() const { return CapturedDiags; } bool isGCMigration() const { return OrigGCMode != LangOptions::NonGC; } bool noFinalizeRemoval() const { return MigOptions.NoFinalizeRemoval; } void setNoFinalizeRemoval(bool val) {MigOptions.NoFinalizeRemoval = val; } bool CFBridgingFunctionsDefined(); }; static inline StringRef getARCMTMacroName() { return "__IMPL_ARCMT_REMOVED_EXPR__"; } } // end namespace arcmt } // end namespace clang #endif
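// --- Illustrative sketch (not part of the original header) ------------------
// Typical use of the Transaction RAII wrapper declared above: edits queued on
// a TransformActions object between startTransaction() and the destructor are
// committed together, or rolled back when abort() is called. `exampleRewrite`
// and its arguments are hypothetical; only Transaction and TransformActions
// come from this header.
static void exampleRewrite(clang::arcmt::TransformActions &TA,
                           clang::SourceLocation loc,
                           clang::SourceRange range) {
  clang::arcmt::Transaction Trans(TA); // calls TA.startTransaction()
  TA.insert(loc, "/*rewritten*/ ");    // queue an insertion
  TA.remove(range);                    // queue a removal
  if (TA.hasReportedErrors())
    Trans.abort();                     // discard the queued edits
}                                      // else ~Transaction() commits them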
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/ObjCMT.cpp
//===--- ObjCMT.cpp - ObjC Migrate Tool -----------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "clang/ARCMigrate/ARCMT.h" #include "clang/ARCMigrate/ARCMTActions.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/NSAPI.h" #include "clang/AST/ParentMap.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/Analysis/DomainSpecific/CocoaConventions.h" #include "clang/Basic/FileManager.h" #include "clang/Edit/Commit.h" #include "clang/Edit/EditedSource.h" #include "clang/Edit/EditsReceiver.h" #include "clang/Edit/Rewriters.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/MultiplexConsumer.h" #include "clang/Lex/PPConditionalDirectiveRecord.h" #include "clang/Lex/Preprocessor.h" #include "clang/Rewrite/Core/Rewriter.h" #include "clang/StaticAnalyzer/Checkers/ObjCRetainCount.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/StringSet.h" #include "llvm/Support/Path.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/YAMLParser.h" using namespace clang; using namespace arcmt; using namespace ento::objc_retain; namespace { class ObjCMigrateASTConsumer : public ASTConsumer { enum CF_BRIDGING_KIND { CF_BRIDGING_NONE, CF_BRIDGING_ENABLE, CF_BRIDGING_MAY_INCLUDE }; void migrateDecl(Decl *D); void migrateObjCContainerDecl(ASTContext &Ctx, ObjCContainerDecl *D); void migrateProtocolConformance(ASTContext &Ctx, const ObjCImplementationDecl *ImpDecl); void CacheObjCNSIntegerTypedefed(const TypedefDecl *TypedefDcl); bool migrateNSEnumDecl(ASTContext &Ctx, const EnumDecl *EnumDcl, const TypedefDecl *TypedefDcl); void migrateAllMethodInstaceType(ASTContext &Ctx, ObjCContainerDecl *CDecl); void migrateMethodInstanceType(ASTContext &Ctx, ObjCContainerDecl *CDecl, ObjCMethodDecl *OM); bool migrateProperty(ASTContext &Ctx, ObjCContainerDecl *D, ObjCMethodDecl *OM); void migrateNsReturnsInnerPointer(ASTContext &Ctx, ObjCMethodDecl *OM); void migratePropertyNsReturnsInnerPointer(ASTContext &Ctx, ObjCPropertyDecl *P); void migrateFactoryMethod(ASTContext &Ctx, ObjCContainerDecl *CDecl, ObjCMethodDecl *OM, ObjCInstanceTypeFamily OIT_Family = OIT_None); void migrateCFAnnotation(ASTContext &Ctx, const Decl *Decl); void AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE, const FunctionDecl *FuncDecl, bool ResultAnnotated); void AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE, const ObjCMethodDecl *MethodDecl, bool ResultAnnotated); void AnnotateImplicitBridging(ASTContext &Ctx); CF_BRIDGING_KIND migrateAddFunctionAnnotation(ASTContext &Ctx, const FunctionDecl *FuncDecl); void migrateARCSafeAnnotation(ASTContext &Ctx, ObjCContainerDecl *CDecl); void migrateAddMethodAnnotation(ASTContext &Ctx, const ObjCMethodDecl *MethodDecl); void inferDesignatedInitializers(ASTContext &Ctx, const ObjCImplementationDecl *ImplD); bool InsertFoundation(ASTContext &Ctx, SourceLocation Loc); public: std::string MigrateDir; unsigned ASTMigrateActions; FileID FileId; const TypedefDecl *NSIntegerTypedefed; const TypedefDecl *NSUIntegerTypedefed; std::unique_ptr<NSAPI> NSAPIObj; std::unique_ptr<edit::EditedSource> Editor; FileRemapper &Remapper; FileManager &FileMgr; const PPConditionalDirectiveRecord *PPRec; Preprocessor &PP; bool IsOutputFile; bool 
FoundationIncluded; llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ObjCProtocolDecls; llvm::SmallVector<const Decl *, 8> CFFunctionIBCandidates; llvm::StringSet<> WhiteListFilenames; ObjCMigrateASTConsumer(StringRef migrateDir, unsigned astMigrateActions, FileRemapper &remapper, FileManager &fileMgr, const PPConditionalDirectiveRecord *PPRec, Preprocessor &PP, bool isOutputFile, ArrayRef<std::string> WhiteList) : MigrateDir(migrateDir), ASTMigrateActions(astMigrateActions), NSIntegerTypedefed(nullptr), NSUIntegerTypedefed(nullptr), Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec), PP(PP), IsOutputFile(isOutputFile), FoundationIncluded(false){ // FIXME: StringSet should have insert(iter, iter) to use here. for (const std::string &Val : WhiteList) WhiteListFilenames.insert(Val); } protected: void Initialize(ASTContext &Context) override { NSAPIObj.reset(new NSAPI(Context)); Editor.reset(new edit::EditedSource(Context.getSourceManager(), Context.getLangOpts(), PPRec)); } bool HandleTopLevelDecl(DeclGroupRef DG) override { for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) migrateDecl(*I); return true; } void HandleInterestingDecl(DeclGroupRef DG) override { // Ignore decls from the PCH. } void HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) override { ObjCMigrateASTConsumer::HandleTopLevelDecl(DG); } void HandleTranslationUnit(ASTContext &Ctx) override; bool canModifyFile(StringRef Path) { if (WhiteListFilenames.empty()) return true; return WhiteListFilenames.find(llvm::sys::path::filename(Path)) != WhiteListFilenames.end(); } bool canModifyFile(const FileEntry *FE) { if (!FE) return false; return canModifyFile(FE->getName()); } bool canModifyFile(FileID FID) { if (FID.isInvalid()) return false; return canModifyFile(PP.getSourceManager().getFileEntryForID(FID)); } bool canModify(const Decl *D) { if (!D) return false; if (const ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(D)) return canModify(CatImpl->getCategoryDecl()); if (const ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) return canModify(Impl->getClassInterface()); if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) return canModify(cast<Decl>(MD->getDeclContext())); FileID FID = PP.getSourceManager().getFileID(D->getLocation()); return canModifyFile(FID); } }; } ObjCMigrateAction::ObjCMigrateAction(FrontendAction *WrappedAction, StringRef migrateDir, unsigned migrateAction) : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir), ObjCMigAction(migrateAction), CompInst(nullptr) { if (MigrateDir.empty()) MigrateDir = "."; // user current directory if none is given. } std::unique_ptr<ASTConsumer> ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { PPConditionalDirectiveRecord * PPRec = new PPConditionalDirectiveRecord(CompInst->getSourceManager()); CI.getPreprocessor().addPPCallbacks(std::unique_ptr<PPCallbacks>(PPRec)); std::vector<std::unique_ptr<ASTConsumer>> Consumers; Consumers.push_back(WrapperFrontendAction::CreateASTConsumer(CI, InFile)); Consumers.push_back(llvm::make_unique<ObjCMigrateASTConsumer>( MigrateDir, ObjCMigAction, Remapper, CompInst->getFileManager(), PPRec, CompInst->getPreprocessor(), false, None)); return llvm::make_unique<MultiplexConsumer>(std::move(Consumers)); } bool ObjCMigrateAction::BeginInvocation(CompilerInstance &CI) { Remapper.initFromDisk(MigrateDir, CI.getDiagnostics(), /*ignoreIfFilesChanges=*/true); CompInst = &CI; CI.getDiagnostics().setIgnoreAllWarnings(true); return true; } namespace { // FIXME. 
This duplicates one in RewriteObjCFoundationAPI.cpp bool subscriptOperatorNeedsParens(const Expr *FullExpr) { const Expr* Expr = FullExpr->IgnoreImpCasts(); if (isa<ArraySubscriptExpr>(Expr) || isa<CallExpr>(Expr) || isa<DeclRefExpr>(Expr) || isa<CXXNamedCastExpr>(Expr) || isa<CXXConstructExpr>(Expr) || isa<CXXThisExpr>(Expr) || isa<CXXTypeidExpr>(Expr) || isa<CXXUnresolvedConstructExpr>(Expr) || isa<ObjCMessageExpr>(Expr) || isa<ObjCPropertyRefExpr>(Expr) || isa<ObjCProtocolExpr>(Expr) || isa<MemberExpr>(Expr) || isa<ObjCIvarRefExpr>(Expr) || isa<ParenExpr>(FullExpr) || isa<ParenListExpr>(Expr) || isa<SizeOfPackExpr>(Expr)) return false; return true; } /// \brief - Rewrite message expression for Objective-C setter and getters into /// property-dot syntax. bool rewriteToPropertyDotSyntax(const ObjCMessageExpr *Msg, Preprocessor &PP, const NSAPI &NS, edit::Commit &commit, const ParentMap *PMap) { if (!Msg || Msg->isImplicit() || (Msg->getReceiverKind() != ObjCMessageExpr::Instance && Msg->getReceiverKind() != ObjCMessageExpr::SuperInstance)) return false; if (const Expr *Receiver = Msg->getInstanceReceiver()) if (Receiver->getType()->isObjCBuiltinType()) return false; const ObjCMethodDecl *Method = Msg->getMethodDecl(); if (!Method) return false; if (!Method->isPropertyAccessor()) return false; const ObjCPropertyDecl *Prop = Method->findPropertyDecl(); if (!Prop) return false; SourceRange MsgRange = Msg->getSourceRange(); bool ReceiverIsSuper = (Msg->getReceiverKind() == ObjCMessageExpr::SuperInstance); // for 'super' receiver is nullptr. const Expr *receiver = Msg->getInstanceReceiver(); bool NeedsParen = ReceiverIsSuper ? false : subscriptOperatorNeedsParens(receiver); bool IsGetter = (Msg->getNumArgs() == 0); if (IsGetter) { // Find space location range between receiver expression and getter method. SourceLocation BegLoc = ReceiverIsSuper ? Msg->getSuperLoc() : receiver->getLocEnd(); BegLoc = PP.getLocForEndOfToken(BegLoc); SourceLocation EndLoc = Msg->getSelectorLoc(0); SourceRange SpaceRange(BegLoc, EndLoc); std::string PropertyDotString; // rewrite getter method expression into: receiver.property or // (receiver).property if (NeedsParen) { commit.insertBefore(receiver->getLocStart(), "("); PropertyDotString = ")."; } else PropertyDotString = "."; PropertyDotString += Prop->getName(); commit.replace(SpaceRange, PropertyDotString); // remove '[' ']' commit.replace(SourceRange(MsgRange.getBegin(), MsgRange.getBegin()), ""); commit.replace(SourceRange(MsgRange.getEnd(), MsgRange.getEnd()), ""); } else { if (NeedsParen) commit.insertWrap("(", receiver->getSourceRange(), ")"); std::string PropertyDotString = "."; PropertyDotString += Prop->getName(); PropertyDotString += " ="; const Expr*const* Args = Msg->getArgs(); const Expr *RHS = Args[0]; if (!RHS) return false; SourceLocation BegLoc = ReceiverIsSuper ? 
Msg->getSuperLoc() : receiver->getLocEnd(); BegLoc = PP.getLocForEndOfToken(BegLoc); SourceLocation EndLoc = RHS->getLocStart(); EndLoc = EndLoc.getLocWithOffset(-1); const char *colon = PP.getSourceManager().getCharacterData(EndLoc); // Add a space after '=' if there is no space between RHS and '=' if (colon && colon[0] == ':') PropertyDotString += " "; SourceRange Range(BegLoc, EndLoc); commit.replace(Range, PropertyDotString); // remove '[' ']' commit.replace(SourceRange(MsgRange.getBegin(), MsgRange.getBegin()), ""); commit.replace(SourceRange(MsgRange.getEnd(), MsgRange.getEnd()), ""); } return true; } class ObjCMigrator : public RecursiveASTVisitor<ObjCMigrator> { ObjCMigrateASTConsumer &Consumer; ParentMap &PMap; public: ObjCMigrator(ObjCMigrateASTConsumer &consumer, ParentMap &PMap) : Consumer(consumer), PMap(PMap) { } bool shouldVisitTemplateInstantiations() const { return false; } bool shouldWalkTypesOfTypeLocs() const { return false; } bool VisitObjCMessageExpr(ObjCMessageExpr *E) { if (Consumer.ASTMigrateActions & FrontendOptions::ObjCMT_Literals) { edit::Commit commit(*Consumer.Editor); edit::rewriteToObjCLiteralSyntax(E, *Consumer.NSAPIObj, commit, &PMap); Consumer.Editor->commit(commit); } if (Consumer.ASTMigrateActions & FrontendOptions::ObjCMT_Subscripting) { edit::Commit commit(*Consumer.Editor); edit::rewriteToObjCSubscriptSyntax(E, *Consumer.NSAPIObj, commit); Consumer.Editor->commit(commit); } if (Consumer.ASTMigrateActions & FrontendOptions::ObjCMT_PropertyDotSyntax) { edit::Commit commit(*Consumer.Editor); rewriteToPropertyDotSyntax(E, Consumer.PP, *Consumer.NSAPIObj, commit, &PMap); Consumer.Editor->commit(commit); } return true; } bool TraverseObjCMessageExpr(ObjCMessageExpr *E) { // Do depth first; we want to rewrite the subexpressions first so that if // we have to move expressions we will move them already rewritten. for (Stmt *SubStmt : E->children()) if (!TraverseStmt(SubStmt)) return false; return WalkUpFromObjCMessageExpr(E); } }; class BodyMigrator : public RecursiveASTVisitor<BodyMigrator> { ObjCMigrateASTConsumer &Consumer; std::unique_ptr<ParentMap> PMap; public: BodyMigrator(ObjCMigrateASTConsumer &consumer) : Consumer(consumer) { } bool shouldVisitTemplateInstantiations() const { return false; } bool shouldWalkTypesOfTypeLocs() const { return false; } bool TraverseStmt(Stmt *S) { PMap.reset(new ParentMap(S)); ObjCMigrator(Consumer, *PMap).TraverseStmt(S); return true; } }; } void ObjCMigrateASTConsumer::migrateDecl(Decl *D) { if (!D) return; if (isa<ObjCMethodDecl>(D)) return; // Wait for the ObjC container declaration. 
BodyMigrator(*this).TraverseDecl(D); } static void append_attr(std::string &PropertyString, const char *attr, bool &LParenAdded) { if (!LParenAdded) { PropertyString += "("; LParenAdded = true; } else PropertyString += ", "; PropertyString += attr; } static void MigrateBlockOrFunctionPointerTypeVariable(std::string & PropertyString, const std::string& TypeString, const char *name) { const char *argPtr = TypeString.c_str(); int paren = 0; while (*argPtr) { switch (*argPtr) { case '(': PropertyString += *argPtr; paren++; break; case ')': PropertyString += *argPtr; paren--; break; case '^': case '*': PropertyString += (*argPtr); if (paren == 1) { PropertyString += name; name = ""; } break; default: PropertyString += *argPtr; break; } argPtr++; } } static const char *PropertyMemoryAttribute(ASTContext &Context, QualType ArgType) { Qualifiers::ObjCLifetime propertyLifetime = ArgType.getObjCLifetime(); bool RetainableObject = ArgType->isObjCRetainableType(); if (RetainableObject && (propertyLifetime == Qualifiers::OCL_Strong || propertyLifetime == Qualifiers::OCL_None)) { if (const ObjCObjectPointerType *ObjPtrTy = ArgType->getAs<ObjCObjectPointerType>()) { ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface(); if (IDecl && IDecl->lookupNestedProtocol(&Context.Idents.get("NSCopying"))) return "copy"; else return "strong"; } else if (ArgType->isBlockPointerType()) return "copy"; } else if (propertyLifetime == Qualifiers::OCL_Weak) // TODO. More precise determination of 'weak' attribute requires // looking into setter's implementation for backing weak ivar. return "weak"; else if (RetainableObject) return ArgType->isBlockPointerType() ? "copy" : "strong"; return nullptr; } static void rewriteToObjCProperty(const ObjCMethodDecl *Getter, const ObjCMethodDecl *Setter, const NSAPI &NS, edit::Commit &commit, unsigned LengthOfPrefix, bool Atomic, bool UseNsIosOnlyMacro, bool AvailabilityArgsMatch) { ASTContext &Context = NS.getASTContext(); bool LParenAdded = false; std::string PropertyString = "@property "; if (UseNsIosOnlyMacro && NS.isMacroDefined("NS_NONATOMIC_IOSONLY")) { PropertyString += "(NS_NONATOMIC_IOSONLY"; LParenAdded = true; } else if (!Atomic) { PropertyString += "(nonatomic"; LParenAdded = true; } std::string PropertyNameString = Getter->getNameAsString(); StringRef PropertyName(PropertyNameString); if (LengthOfPrefix > 0) { if (!LParenAdded) { PropertyString += "(getter="; LParenAdded = true; } else PropertyString += ", getter="; PropertyString += PropertyNameString; } // Property with no setter may be suggested as a 'readonly' property. if (!Setter) append_attr(PropertyString, "readonly", LParenAdded); // Short circuit 'delegate' properties that contain the name "delegate" or // "dataSource", or have exact name "target" to have 'assign' attribute. 
if (PropertyName.equals("target") || (PropertyName.find("delegate") != StringRef::npos) || (PropertyName.find("dataSource") != StringRef::npos)) { QualType QT = Getter->getReturnType(); if (!QT->isRealType()) append_attr(PropertyString, "assign", LParenAdded); } else if (!Setter) { QualType ResType = Context.getCanonicalType(Getter->getReturnType()); if (const char *MemoryManagementAttr = PropertyMemoryAttribute(Context, ResType)) append_attr(PropertyString, MemoryManagementAttr, LParenAdded); } else { const ParmVarDecl *argDecl = *Setter->param_begin(); QualType ArgType = Context.getCanonicalType(argDecl->getType()); if (const char *MemoryManagementAttr = PropertyMemoryAttribute(Context, ArgType)) append_attr(PropertyString, MemoryManagementAttr, LParenAdded); } if (LParenAdded) PropertyString += ')'; QualType RT = Getter->getReturnType(); if (!isa<TypedefType>(RT)) { // strip off any ARC lifetime qualifier. QualType CanResultTy = Context.getCanonicalType(RT); if (CanResultTy.getQualifiers().hasObjCLifetime()) { Qualifiers Qs = CanResultTy.getQualifiers(); Qs.removeObjCLifetime(); RT = Context.getQualifiedType(CanResultTy.getUnqualifiedType(), Qs); } } PropertyString += " "; PrintingPolicy SubPolicy(Context.getPrintingPolicy()); SubPolicy.SuppressStrongLifetime = true; SubPolicy.SuppressLifetimeQualifiers = true; std::string TypeString = RT.getAsString(SubPolicy); if (LengthOfPrefix > 0) { // property name must strip off "is" and lower case the first character // after that; e.g. isContinuous will become continuous. StringRef PropertyNameStringRef(PropertyNameString); PropertyNameStringRef = PropertyNameStringRef.drop_front(LengthOfPrefix); PropertyNameString = PropertyNameStringRef; bool NoLowering = (isUppercase(PropertyNameString[0]) && PropertyNameString.size() > 1 && isUppercase(PropertyNameString[1])); if (!NoLowering) PropertyNameString[0] = toLowercase(PropertyNameString[0]); } if (RT->isBlockPointerType() || RT->isFunctionPointerType()) MigrateBlockOrFunctionPointerTypeVariable(PropertyString, TypeString, PropertyNameString.c_str()); else { char LastChar = TypeString[TypeString.size()-1]; PropertyString += TypeString; if (LastChar != '*') PropertyString += ' '; PropertyString += PropertyNameString; } SourceLocation StartGetterSelectorLoc = Getter->getSelectorStartLoc(); Selector GetterSelector = Getter->getSelector(); SourceLocation EndGetterSelectorLoc = StartGetterSelectorLoc.getLocWithOffset(GetterSelector.getNameForSlot(0).size()); commit.replace(CharSourceRange::getCharRange(Getter->getLocStart(), EndGetterSelectorLoc), PropertyString); if (Setter && AvailabilityArgsMatch) { SourceLocation EndLoc = Setter->getDeclaratorEndLoc(); // Get location past ';' EndLoc = EndLoc.getLocWithOffset(1); SourceLocation BeginOfSetterDclLoc = Setter->getLocStart(); // FIXME. This assumes that setter decl; is immediately preceded by eoln. // It is trying to remove the setter method decl. line entirely. 
BeginOfSetterDclLoc = BeginOfSetterDclLoc.getLocWithOffset(-1); commit.remove(SourceRange(BeginOfSetterDclLoc, EndLoc)); } } static bool IsCategoryNameWithDeprecatedSuffix(ObjCContainerDecl *D) { if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(D)) { StringRef Name = CatDecl->getName(); return Name.endswith("Deprecated"); } return false; } void ObjCMigrateASTConsumer::migrateObjCContainerDecl(ASTContext &Ctx, ObjCContainerDecl *D) { if (D->isDeprecated() || IsCategoryNameWithDeprecatedSuffix(D)) return; for (auto *Method : D->methods()) { if (Method->isDeprecated()) continue; bool PropertyInferred = migrateProperty(Ctx, D, Method); // If a property is inferred, do not attempt to attach NS_RETURNS_INNER_POINTER to // the getter method as it ends up on the property itself which we don't want // to do unless -objcmt-returns-innerpointer-property option is on. if (!PropertyInferred || (ASTMigrateActions & FrontendOptions::ObjCMT_ReturnsInnerPointerProperty)) if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) migrateNsReturnsInnerPointer(Ctx, Method); } if (!(ASTMigrateActions & FrontendOptions::ObjCMT_ReturnsInnerPointerProperty)) return; for (auto *Prop : D->properties()) { if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) && !Prop->isDeprecated()) migratePropertyNsReturnsInnerPointer(Ctx, Prop); } } static bool ClassImplementsAllMethodsAndProperties(ASTContext &Ctx, const ObjCImplementationDecl *ImpDecl, const ObjCInterfaceDecl *IDecl, ObjCProtocolDecl *Protocol) { // In auto-synthesis, protocol properties are not synthesized. So, // a conforming protocol must have its required properties declared // in class interface. bool HasAtleastOneRequiredProperty = false; if (const ObjCProtocolDecl *PDecl = Protocol->getDefinition()) for (const auto *Property : PDecl->properties()) { if (Property->getPropertyImplementation() == ObjCPropertyDecl::Optional) continue; HasAtleastOneRequiredProperty = true; DeclContext::lookup_result R = IDecl->lookup(Property->getDeclName()); if (R.size() == 0) { // Relax the rule and look into class's implementation for a synthesize // or dynamic declaration. Class is implementing a property coming from // another protocol. This still makes the target protocol as conforming. if (!ImpDecl->FindPropertyImplDecl( Property->getDeclName().getAsIdentifierInfo())) return false; } else if (ObjCPropertyDecl *ClassProperty = dyn_cast<ObjCPropertyDecl>(R[0])) { if ((ClassProperty->getPropertyAttributes() != Property->getPropertyAttributes()) || !Ctx.hasSameType(ClassProperty->getType(), Property->getType())) return false; } else return false; } // At this point, all required properties in this protocol conform to those // declared in the class. // Check that class implements the required methods of the protocol too. 
bool HasAtleastOneRequiredMethod = false; if (const ObjCProtocolDecl *PDecl = Protocol->getDefinition()) { if (PDecl->meth_begin() == PDecl->meth_end()) return HasAtleastOneRequiredProperty; for (const auto *MD : PDecl->methods()) { if (MD->isImplicit()) continue; if (MD->getImplementationControl() == ObjCMethodDecl::Optional) continue; DeclContext::lookup_result R = ImpDecl->lookup(MD->getDeclName()); if (R.size() == 0) return false; bool match = false; HasAtleastOneRequiredMethod = true; for (unsigned I = 0, N = R.size(); I != N; ++I) if (ObjCMethodDecl *ImpMD = dyn_cast<ObjCMethodDecl>(R[0])) if (Ctx.ObjCMethodsAreEqual(MD, ImpMD)) { match = true; break; } if (!match) return false; } } if (HasAtleastOneRequiredProperty || HasAtleastOneRequiredMethod) return true; return false; } static bool rewriteToObjCInterfaceDecl(const ObjCInterfaceDecl *IDecl, llvm::SmallVectorImpl<ObjCProtocolDecl*> &ConformingProtocols, const NSAPI &NS, edit::Commit &commit) { const ObjCList<ObjCProtocolDecl> &Protocols = IDecl->getReferencedProtocols(); std::string ClassString; SourceLocation EndLoc = IDecl->getSuperClass() ? IDecl->getSuperClassLoc() : IDecl->getLocation(); if (Protocols.empty()) { ClassString = '<'; for (unsigned i = 0, e = ConformingProtocols.size(); i != e; i++) { ClassString += ConformingProtocols[i]->getNameAsString(); if (i != (e-1)) ClassString += ", "; } ClassString += "> "; } else { ClassString = ", "; for (unsigned i = 0, e = ConformingProtocols.size(); i != e; i++) { ClassString += ConformingProtocols[i]->getNameAsString(); if (i != (e-1)) ClassString += ", "; } ObjCInterfaceDecl::protocol_loc_iterator PL = IDecl->protocol_loc_end() - 1; EndLoc = *PL; } commit.insertAfterToken(EndLoc, ClassString); return true; } static StringRef GetUnsignedName(StringRef NSIntegerName) { StringRef UnsignedName = llvm::StringSwitch<StringRef>(NSIntegerName) .Case("int8_t", "uint8_t") .Case("int16_t", "uint16_t") .Case("int32_t", "uint32_t") .Case("NSInteger", "NSUInteger") .Case("int64_t", "uint64_t") .Default(NSIntegerName); return UnsignedName; } static bool rewriteToNSEnumDecl(const EnumDecl *EnumDcl, const TypedefDecl *TypedefDcl, const NSAPI &NS, edit::Commit &commit, StringRef NSIntegerName, bool NSOptions) { std::string ClassString; if (NSOptions) { ClassString = "typedef NS_OPTIONS("; ClassString += GetUnsignedName(NSIntegerName); } else { ClassString = "typedef NS_ENUM("; ClassString += NSIntegerName; } ClassString += ", "; ClassString += TypedefDcl->getIdentifier()->getName(); ClassString += ')'; SourceRange R(EnumDcl->getLocStart(), EnumDcl->getLocStart()); commit.replace(R, ClassString); SourceLocation EndOfEnumDclLoc = EnumDcl->getLocEnd(); EndOfEnumDclLoc = trans::findSemiAfterLocation(EndOfEnumDclLoc, NS.getASTContext(), /*IsDecl*/true); if (!EndOfEnumDclLoc.isInvalid()) { SourceRange EnumDclRange(EnumDcl->getLocStart(), EndOfEnumDclLoc); commit.insertFromRange(TypedefDcl->getLocStart(), EnumDclRange); } else return false; SourceLocation EndTypedefDclLoc = TypedefDcl->getLocEnd(); EndTypedefDclLoc = trans::findSemiAfterLocation(EndTypedefDclLoc, NS.getASTContext(), /*IsDecl*/true); if (!EndTypedefDclLoc.isInvalid()) { SourceRange TDRange(TypedefDcl->getLocStart(), EndTypedefDclLoc); commit.remove(TDRange); } else return false; EndOfEnumDclLoc = trans::findLocationAfterSemi(EnumDcl->getLocEnd(), NS.getASTContext(), /*IsDecl*/true); if (!EndOfEnumDclLoc.isInvalid()) { SourceLocation BeginOfEnumDclLoc = EnumDcl->getLocStart(); // FIXME. 
This assumes that enum decl; is immediately preceded by eoln. // It is trying to remove the enum decl. lines entirely. BeginOfEnumDclLoc = BeginOfEnumDclLoc.getLocWithOffset(-1); commit.remove(SourceRange(BeginOfEnumDclLoc, EndOfEnumDclLoc)); return true; } return false; } static void rewriteToNSMacroDecl(ASTContext &Ctx, const EnumDecl *EnumDcl, const TypedefDecl *TypedefDcl, const NSAPI &NS, edit::Commit &commit, bool IsNSIntegerType) { QualType DesignatedEnumType = EnumDcl->getIntegerType(); assert(!DesignatedEnumType.isNull() && "rewriteToNSMacroDecl - underlying enum type is null"); PrintingPolicy Policy(Ctx.getPrintingPolicy()); std::string TypeString = DesignatedEnumType.getAsString(Policy); std::string ClassString = IsNSIntegerType ? "NS_ENUM(" : "NS_OPTIONS("; ClassString += TypeString; ClassString += ", "; ClassString += TypedefDcl->getIdentifier()->getName(); ClassString += ')'; SourceLocation EndLoc; if (EnumDcl->getIntegerTypeSourceInfo()) { TypeSourceInfo *TSourceInfo = EnumDcl->getIntegerTypeSourceInfo(); TypeLoc TLoc = TSourceInfo->getTypeLoc(); EndLoc = TLoc.getLocEnd(); const char *lbrace = Ctx.getSourceManager().getCharacterData(EndLoc); unsigned count = 0; if (lbrace) while (lbrace[count] != '{') ++count; if (count > 0) EndLoc = EndLoc.getLocWithOffset(count-1); } else EndLoc = EnumDcl->getLocStart(); SourceRange R(EnumDcl->getLocStart(), EndLoc); commit.replace(R, ClassString); // This is to remove spaces between '}' and typedef name. SourceLocation StartTypedefLoc = EnumDcl->getLocEnd(); StartTypedefLoc = StartTypedefLoc.getLocWithOffset(+1); SourceLocation EndTypedefLoc = TypedefDcl->getLocEnd(); commit.remove(SourceRange(StartTypedefLoc, EndTypedefLoc)); } static bool UseNSOptionsMacro(Preprocessor &PP, ASTContext &Ctx, const EnumDecl *EnumDcl) { bool PowerOfTwo = true; bool AllHexdecimalEnumerator = true; uint64_t MaxPowerOfTwoVal = 0; for (auto Enumerator : EnumDcl->enumerators()) { const Expr *InitExpr = Enumerator->getInitExpr(); if (!InitExpr) { PowerOfTwo = false; AllHexdecimalEnumerator = false; continue; } InitExpr = InitExpr->IgnoreParenCasts(); if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(InitExpr)) if (BO->isShiftOp() || BO->isBitwiseOp()) return true; uint64_t EnumVal = Enumerator->getInitVal().getZExtValue(); if (PowerOfTwo && EnumVal) { if (!llvm::isPowerOf2_64(EnumVal)) PowerOfTwo = false; else if (EnumVal > MaxPowerOfTwoVal) MaxPowerOfTwoVal = EnumVal; } if (AllHexdecimalEnumerator && EnumVal) { bool FoundHexdecimalEnumerator = false; SourceLocation EndLoc = Enumerator->getLocEnd(); Token Tok; if (!PP.getRawToken(EndLoc, Tok, /*IgnoreWhiteSpace=*/true)) if (Tok.isLiteral() && Tok.getLength() > 2) { if (const char *StringLit = Tok.getLiteralData()) FoundHexdecimalEnumerator = (StringLit[0] == '0' && (toLowercase(StringLit[1]) == 'x')); } if (!FoundHexdecimalEnumerator) AllHexdecimalEnumerator = false; } } return AllHexdecimalEnumerator || (PowerOfTwo && (MaxPowerOfTwoVal > 2)); } void ObjCMigrateASTConsumer::migrateProtocolConformance(ASTContext &Ctx, const ObjCImplementationDecl *ImpDecl) { const ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface(); if (!IDecl || ObjCProtocolDecls.empty() || IDecl->isDeprecated()) return; // Find all implicit conforming protocols for this class // and make them explicit. 
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> ExplicitProtocols; Ctx.CollectInheritedProtocols(IDecl, ExplicitProtocols); llvm::SmallVector<ObjCProtocolDecl *, 8> PotentialImplicitProtocols; for (ObjCProtocolDecl *ProtDecl : ObjCProtocolDecls) if (!ExplicitProtocols.count(ProtDecl)) PotentialImplicitProtocols.push_back(ProtDecl); if (PotentialImplicitProtocols.empty()) return; // Go through the list of non-optional methods and properties in each protocol // in the PotentialImplicitProtocols list. If the class implements every one of the // methods and properties, then this class conforms to this protocol. llvm::SmallVector<ObjCProtocolDecl*, 8> ConformingProtocols; for (unsigned i = 0, e = PotentialImplicitProtocols.size(); i != e; i++) if (ClassImplementsAllMethodsAndProperties(Ctx, ImpDecl, IDecl, PotentialImplicitProtocols[i])) ConformingProtocols.push_back(PotentialImplicitProtocols[i]); if (ConformingProtocols.empty()) return; // Further reduce the number of conforming protocols. If protocol P1 is inherited // by another protocol in the list, P2 (P2<P1>), there is no need to include P1. llvm::SmallVector<ObjCProtocolDecl*, 8> MinimalConformingProtocols; for (unsigned i = 0, e = ConformingProtocols.size(); i != e; i++) { bool DropIt = false; ObjCProtocolDecl *TargetPDecl = ConformingProtocols[i]; for (unsigned i1 = 0, e1 = ConformingProtocols.size(); i1 != e1; i1++) { ObjCProtocolDecl *PDecl = ConformingProtocols[i1]; if (PDecl == TargetPDecl) continue; if (PDecl->lookupProtocolNamed( TargetPDecl->getDeclName().getAsIdentifierInfo())) { DropIt = true; break; } } if (!DropIt) MinimalConformingProtocols.push_back(TargetPDecl); } if (MinimalConformingProtocols.empty()) return; edit::Commit commit(*Editor); rewriteToObjCInterfaceDecl(IDecl, MinimalConformingProtocols, *NSAPIObj, commit); Editor->commit(commit); } void ObjCMigrateASTConsumer::CacheObjCNSIntegerTypedefed( const TypedefDecl *TypedefDcl) { QualType qt = TypedefDcl->getTypeSourceInfo()->getType(); if (NSAPIObj->isObjCNSIntegerType(qt)) NSIntegerTypedefed = TypedefDcl; else if (NSAPIObj->isObjCNSUIntegerType(qt)) NSUIntegerTypedefed = TypedefDcl; } bool ObjCMigrateASTConsumer::migrateNSEnumDecl(ASTContext &Ctx, const EnumDecl *EnumDcl, const TypedefDecl *TypedefDcl) { if (!EnumDcl->isCompleteDefinition() || EnumDcl->getIdentifier() || EnumDcl->isDeprecated()) return false; if (!TypedefDcl) { if (NSIntegerTypedefed) { TypedefDcl = NSIntegerTypedefed; NSIntegerTypedefed = nullptr; } else if (NSUIntegerTypedefed) { TypedefDcl = NSUIntegerTypedefed; NSUIntegerTypedefed = nullptr; } else return false; FileID FileIdOfTypedefDcl = PP.getSourceManager().getFileID(TypedefDcl->getLocation()); FileID FileIdOfEnumDcl = PP.getSourceManager().getFileID(EnumDcl->getLocation()); if (FileIdOfTypedefDcl != FileIdOfEnumDcl) return false; } if (TypedefDcl->isDeprecated()) return false; QualType qt = TypedefDcl->getTypeSourceInfo()->getType(); StringRef NSIntegerName = NSAPIObj->GetNSIntegralKind(qt); if (NSIntegerName.empty()) { // Also check for typedef enum {...} TD; if (const EnumType *EnumTy = qt->getAs<EnumType>()) { if (EnumTy->getDecl() == EnumDcl) { bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl); if (!InsertFoundation(Ctx, TypedefDcl->getLocStart())) return false; edit::Commit commit(*Editor); rewriteToNSMacroDecl(Ctx, EnumDcl, TypedefDcl, *NSAPIObj, commit, !NSOptions); Editor->commit(commit); return true; } } return false; } // We may still use NS_OPTIONS based on what we find in the enumerator list.
bool NSOptions = UseNSOptionsMacro(PP, Ctx, EnumDcl); if (!InsertFoundation(Ctx, TypedefDcl->getLocStart())) return false; edit::Commit commit(*Editor); bool Res = rewriteToNSEnumDecl(EnumDcl, TypedefDcl, *NSAPIObj, commit, NSIntegerName, NSOptions); Editor->commit(commit); return Res; } static void ReplaceWithInstancetype(ASTContext &Ctx, const ObjCMigrateASTConsumer &ASTC, ObjCMethodDecl *OM) { if (OM->getReturnType() == Ctx.getObjCInstanceType()) return; // already has instancetype. SourceRange R; std::string ClassString; if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) { TypeLoc TL = TSInfo->getTypeLoc(); R = SourceRange(TL.getBeginLoc(), TL.getEndLoc()); ClassString = "instancetype"; } else { R = SourceRange(OM->getLocStart(), OM->getLocStart()); ClassString = OM->isInstanceMethod() ? '-' : '+'; ClassString += " (instancetype)"; } edit::Commit commit(*ASTC.Editor); commit.replace(R, ClassString); ASTC.Editor->commit(commit); } static void ReplaceWithClasstype(const ObjCMigrateASTConsumer &ASTC, ObjCMethodDecl *OM) { ObjCInterfaceDecl *IDecl = OM->getClassInterface(); SourceRange R; std::string ClassString; if (TypeSourceInfo *TSInfo = OM->getReturnTypeSourceInfo()) { TypeLoc TL = TSInfo->getTypeLoc(); R = SourceRange(TL.getBeginLoc(), TL.getEndLoc()); { ClassString = IDecl->getName(); ClassString += "*"; } } else { R = SourceRange(OM->getLocStart(), OM->getLocStart()); ClassString = "+ ("; ClassString += IDecl->getName(); ClassString += "*)"; } edit::Commit commit(*ASTC.Editor); commit.replace(R, ClassString); ASTC.Editor->commit(commit); } void ObjCMigrateASTConsumer::migrateMethodInstanceType(ASTContext &Ctx, ObjCContainerDecl *CDecl, ObjCMethodDecl *OM) { ObjCInstanceTypeFamily OIT_Family = Selector::getInstTypeMethodFamily(OM->getSelector()); std::string ClassName; switch (OIT_Family) { case OIT_None: migrateFactoryMethod(Ctx, CDecl, OM); return; case OIT_Array: ClassName = "NSArray"; break; case OIT_Dictionary: ClassName = "NSDictionary"; break; case OIT_Singleton: migrateFactoryMethod(Ctx, CDecl, OM, OIT_Singleton); return; case OIT_Init: if (OM->getReturnType()->isObjCIdType()) ReplaceWithInstancetype(Ctx, *this, OM); return; case OIT_ReturnsSelf: migrateFactoryMethod(Ctx, CDecl, OM, OIT_ReturnsSelf); return; } if (!OM->getReturnType()->isObjCIdType()) return; ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl); if (!IDecl) { if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) IDecl = CatDecl->getClassInterface(); else if (ObjCImplDecl *ImpDecl = dyn_cast<ObjCImplDecl>(CDecl)) IDecl = ImpDecl->getClassInterface(); } if (!IDecl || !IDecl->lookupInheritedClass(&Ctx.Idents.get(ClassName))) { migrateFactoryMethod(Ctx, CDecl, OM); return; } ReplaceWithInstancetype(Ctx, *this, OM); } static bool TypeIsInnerPointer(QualType T) { if (!T->isAnyPointerType()) return false; if (T->isObjCObjectPointerType() || T->isObjCBuiltinType() || T->isBlockPointerType() || T->isFunctionPointerType() || ento::coreFoundation::isCFObjectRef(T)) return false; // Also, typedef-of-pointer-to-incomplete-struct is something that we assume // is not an inner pointer type.
QualType OrigT = T; while (const TypedefType *TD = dyn_cast<TypedefType>(T.getTypePtr())) T = TD->getDecl()->getUnderlyingType(); if (OrigT == T || !T->isPointerType()) return true; const PointerType* PT = T->getAs<PointerType>(); QualType UPointeeT = PT->getPointeeType().getUnqualifiedType(); if (UPointeeT->isRecordType()) { const RecordType *RecordTy = UPointeeT->getAs<RecordType>(); if (!RecordTy->getDecl()->isCompleteDefinition()) return false; } return true; } /// \brief Check whether the two versions match. static bool versionsMatch(const VersionTuple &X, const VersionTuple &Y) { return (X == Y); } /// AvailabilityAttrsMatch - This routine checks that, when comparing two /// availability attributes, all their components match. It returns /// true if not dealing with availability attributes, or when all components of /// the availability attributes match. This routine is only called when /// the attributes are of the same kind. static bool AvailabilityAttrsMatch(Attr *At1, Attr *At2) { const AvailabilityAttr *AA1 = dyn_cast<AvailabilityAttr>(At1); if (!AA1) return true; const AvailabilityAttr *AA2 = dyn_cast<AvailabilityAttr>(At2); VersionTuple Introduced1 = AA1->getIntroduced(); VersionTuple Deprecated1 = AA1->getDeprecated(); VersionTuple Obsoleted1 = AA1->getObsoleted(); bool IsUnavailable1 = AA1->getUnavailable(); VersionTuple Introduced2 = AA2->getIntroduced(); VersionTuple Deprecated2 = AA2->getDeprecated(); VersionTuple Obsoleted2 = AA2->getObsoleted(); bool IsUnavailable2 = AA2->getUnavailable(); return (versionsMatch(Introduced1, Introduced2) && versionsMatch(Deprecated1, Deprecated2) && versionsMatch(Obsoleted1, Obsoleted2) && IsUnavailable1 == IsUnavailable2); } static bool MatchTwoAttributeLists(const AttrVec &Attrs1, const AttrVec &Attrs2, bool &AvailabilityArgsMatch) { // This list is very small, so this need not be optimized. for (unsigned i = 0, e = Attrs1.size(); i != e; i++) { bool match = false; for (unsigned j = 0, f = Attrs2.size(); j != f; j++) { // Match attribute kind only. Except for Availability attributes, // we do not get into the details of the attributes. For all practical purposes // this is sufficient. if (Attrs1[i]->getKind() == Attrs2[j]->getKind()) { if (AvailabilityArgsMatch) AvailabilityArgsMatch = AvailabilityAttrsMatch(Attrs1[i], Attrs2[j]); match = true; break; } } if (!match) return false; } return true; } /// AttributesMatch - This routine checks the attribute lists of two /// decls. It returns false if there is a mismatch in the kinds of /// attributes seen in the decls. It returns true if the two decls /// have lists of the same kinds of attributes. Furthermore, when there /// are availability attributes in the two decls, it sets the /// AvailabilityArgsMatch to false if availability attributes have /// different versions, etc.
static bool AttributesMatch(const Decl *Decl1, const Decl *Decl2, bool &AvailabilityArgsMatch) { if (!Decl1->hasAttrs() || !Decl2->hasAttrs()) { AvailabilityArgsMatch = (Decl1->hasAttrs() == Decl2->hasAttrs()); return true; } AvailabilityArgsMatch = true; const AttrVec &Attrs1 = Decl1->getAttrs(); const AttrVec &Attrs2 = Decl2->getAttrs(); bool match = MatchTwoAttributeLists(Attrs1, Attrs2, AvailabilityArgsMatch); if (match && (Attrs2.size() > Attrs1.size())) return MatchTwoAttributeLists(Attrs2, Attrs1, AvailabilityArgsMatch); return match; } static bool IsValidIdentifier(ASTContext &Ctx, const char *Name) { if (!isIdentifierHead(Name[0])) return false; std::string NameString = Name; NameString[0] = toLowercase(NameString[0]); IdentifierInfo *II = &Ctx.Idents.get(NameString); return II->getTokenID() == tok::identifier; } bool ObjCMigrateASTConsumer::migrateProperty(ASTContext &Ctx, ObjCContainerDecl *D, ObjCMethodDecl *Method) { if (Method->isPropertyAccessor() || !Method->isInstanceMethod() || Method->param_size() != 0) return false; // Is this method candidate to be a getter? QualType GRT = Method->getReturnType(); if (GRT->isVoidType()) return false; Selector GetterSelector = Method->getSelector(); ObjCInstanceTypeFamily OIT_Family = Selector::getInstTypeMethodFamily(GetterSelector); if (OIT_Family != OIT_None) return false; IdentifierInfo *getterName = GetterSelector.getIdentifierInfoForSlot(0); Selector SetterSelector = SelectorTable::constructSetterSelector(PP.getIdentifierTable(), PP.getSelectorTable(), getterName); ObjCMethodDecl *SetterMethod = D->getInstanceMethod(SetterSelector); unsigned LengthOfPrefix = 0; if (!SetterMethod) { // try a different naming convention for getter: isXxxxx StringRef getterNameString = getterName->getName(); bool IsPrefix = getterNameString.startswith("is"); // Note that we don't want to change an isXXX method of retainable object // type to property (readonly or otherwise). if (IsPrefix && GRT->isObjCRetainableType()) return false; if (IsPrefix || getterNameString.startswith("get")) { LengthOfPrefix = (IsPrefix ? 2 : 3); const char *CGetterName = getterNameString.data() + LengthOfPrefix; // Make sure that first character after "is" or "get" prefix can // start an identifier. if (!IsValidIdentifier(Ctx, CGetterName)) return false; if (CGetterName[0] && isUppercase(CGetterName[0])) { getterName = &Ctx.Idents.get(CGetterName); SetterSelector = SelectorTable::constructSetterSelector(PP.getIdentifierTable(), PP.getSelectorTable(), getterName); SetterMethod = D->getInstanceMethod(SetterSelector); } } } if (SetterMethod) { if ((ASTMigrateActions & FrontendOptions::ObjCMT_ReadwriteProperty) == 0) return false; bool AvailabilityArgsMatch; if (SetterMethod->isDeprecated() || !AttributesMatch(Method, SetterMethod, AvailabilityArgsMatch)) return false; // Is this a valid setter, matching the target getter? 
QualType SRT = SetterMethod->getReturnType(); if (!SRT->isVoidType()) return false; const ParmVarDecl *argDecl = *SetterMethod->param_begin(); QualType ArgType = argDecl->getType(); if (!Ctx.hasSameUnqualifiedType(ArgType, GRT)) return false; edit::Commit commit(*Editor); rewriteToObjCProperty(Method, SetterMethod, *NSAPIObj, commit, LengthOfPrefix, (ASTMigrateActions & FrontendOptions::ObjCMT_AtomicProperty) != 0, (ASTMigrateActions & FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty) != 0, AvailabilityArgsMatch); Editor->commit(commit); return true; } else if (ASTMigrateActions & FrontendOptions::ObjCMT_ReadonlyProperty) { // Try a non-void method with no argument (and no setter or property of the // same name) as a 'readonly' property. edit::Commit commit(*Editor); rewriteToObjCProperty(Method, nullptr /*SetterMethod*/, *NSAPIObj, commit, LengthOfPrefix, (ASTMigrateActions & FrontendOptions::ObjCMT_AtomicProperty) != 0, (ASTMigrateActions & FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty) != 0, /*AvailabilityArgsMatch*/false); Editor->commit(commit); return true; } return false; } void ObjCMigrateASTConsumer::migrateNsReturnsInnerPointer(ASTContext &Ctx, ObjCMethodDecl *OM) { if (OM->isImplicit() || !OM->isInstanceMethod() || OM->hasAttr<ObjCReturnsInnerPointerAttr>()) return; QualType RT = OM->getReturnType(); if (!TypeIsInnerPointer(RT) || !NSAPIObj->isMacroDefined("NS_RETURNS_INNER_POINTER")) return; edit::Commit commit(*Editor); commit.insertBefore(OM->getLocEnd(), " NS_RETURNS_INNER_POINTER"); Editor->commit(commit); } void ObjCMigrateASTConsumer::migratePropertyNsReturnsInnerPointer(ASTContext &Ctx, ObjCPropertyDecl *P) { QualType T = P->getType(); if (!TypeIsInnerPointer(T) || !NSAPIObj->isMacroDefined("NS_RETURNS_INNER_POINTER")) return; edit::Commit commit(*Editor); commit.insertBefore(P->getLocEnd(), " NS_RETURNS_INNER_POINTER "); Editor->commit(commit); } void ObjCMigrateASTConsumer::migrateAllMethodInstaceType(ASTContext &Ctx, ObjCContainerDecl *CDecl) { if (CDecl->isDeprecated() || IsCategoryNameWithDeprecatedSuffix(CDecl)) return; // Migrate methods which can have instancetype as their result type. for (auto *Method : CDecl->methods()) { if (Method->isDeprecated()) continue; migrateMethodInstanceType(Ctx, CDecl, Method); } } void ObjCMigrateASTConsumer::migrateFactoryMethod(ASTContext &Ctx, ObjCContainerDecl *CDecl, ObjCMethodDecl *OM, ObjCInstanceTypeFamily OIT_Family) { if (OM->isInstanceMethod() || OM->getReturnType() == Ctx.getObjCInstanceType() || !OM->getReturnType()->isObjCIdType()) return; // Candidate factory methods are + (id) NaMeXXX : ... which belong to a class // NSYYYNamE, with the matching name prefix being at least 3 characters long. ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl); if (!IDecl) { if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) IDecl = CatDecl->getClassInterface(); else if (ObjCImplDecl *ImpDecl = dyn_cast<ObjCImplDecl>(CDecl)) IDecl = ImpDecl->getClassInterface(); } if (!IDecl) return; std::string StringClassName = IDecl->getName(); StringRef LoweredClassName(StringClassName); std::string StringLoweredClassName = LoweredClassName.lower(); LoweredClassName = StringLoweredClassName; IdentifierInfo *MethodIdName = OM->getSelector().getIdentifierInfoForSlot(0); // Handle a method with no name at its first selector slot; e.g. + (id):(int)x.
if (!MethodIdName) return; std::string MethodName = MethodIdName->getName(); if (OIT_Family == OIT_Singleton || OIT_Family == OIT_ReturnsSelf) { StringRef STRefMethodName(MethodName); size_t len = 0; if (STRefMethodName.startswith("standard")) len = strlen("standard"); else if (STRefMethodName.startswith("shared")) len = strlen("shared"); else if (STRefMethodName.startswith("default")) len = strlen("default"); else return; MethodName = STRefMethodName.substr(len); } std::string MethodNameSubStr = MethodName.substr(0, 3); StringRef MethodNamePrefix(MethodNameSubStr); std::string StringLoweredMethodNamePrefix = MethodNamePrefix.lower(); MethodNamePrefix = StringLoweredMethodNamePrefix; size_t Ix = LoweredClassName.rfind(MethodNamePrefix); if (Ix == StringRef::npos) return; std::string ClassNamePostfix = LoweredClassName.substr(Ix); StringRef LoweredMethodName(MethodName); std::string StringLoweredMethodName = LoweredMethodName.lower(); LoweredMethodName = StringLoweredMethodName; if (!LoweredMethodName.startswith(ClassNamePostfix)) return; if (OIT_Family == OIT_ReturnsSelf) ReplaceWithClasstype(*this, OM); else ReplaceWithInstancetype(Ctx, *this, OM); } static bool IsVoidStarType(QualType Ty) { if (!Ty->isPointerType()) return false; while (const TypedefType *TD = dyn_cast<TypedefType>(Ty.getTypePtr())) Ty = TD->getDecl()->getUnderlyingType(); // Is the type void*? const PointerType* PT = Ty->getAs<PointerType>(); if (PT->getPointeeType().getUnqualifiedType()->isVoidType()) return true; return IsVoidStarType(PT->getPointeeType()); } /// AuditedType - This routine audits the type AT and returns false if it is one of the known /// CF object types or of the "void *" variety. It returns true if we don't care about the type, /// such as a non-pointer or pointers which have no ownership issues (such as "int *"). static bool AuditedType (QualType AT) { if (!AT->isAnyPointerType() && !AT->isBlockPointerType()) return true; // FIXME. There isn't much we can say about CF pointer type; or is there? if (ento::coreFoundation::isCFObjectRef(AT) || IsVoidStarType(AT) || // If the type is an ObjC object pointer, assume that it is not a CF type // and that it is un-audited. AT->isObjCObjectPointerType() || AT->isObjCBuiltinType()) return false; // All other pointers are assumed audited as harmless. return true; } void ObjCMigrateASTConsumer::AnnotateImplicitBridging(ASTContext &Ctx) { if (CFFunctionIBCandidates.empty()) return; if (!NSAPIObj->isMacroDefined("CF_IMPLICIT_BRIDGING_ENABLED")) { CFFunctionIBCandidates.clear(); FileId = FileID(); return; } // Insert CF_IMPLICIT_BRIDGING_ENABLED/CF_IMPLICIT_BRIDGING_DISABLED const Decl *FirstFD = CFFunctionIBCandidates[0]; const Decl *LastFD = CFFunctionIBCandidates[CFFunctionIBCandidates.size()-1]; const char *PragmaString = "\nCF_IMPLICIT_BRIDGING_ENABLED\n\n"; edit::Commit commit(*Editor); commit.insertBefore(FirstFD->getLocStart(), PragmaString); PragmaString = "\n\nCF_IMPLICIT_BRIDGING_DISABLED\n"; SourceLocation EndLoc = LastFD->getLocEnd(); // Get the location just past the end of the function. EndLoc = PP.getLocForEndOfToken(EndLoc); if (isa<FunctionDecl>(LastFD)) { // For methods, EndLoc points to the ending semicolon, so // none of this extra work is needed. Token Tok; // Get the location of the token that comes after the end of the function.
bool Failed = PP.getRawToken(EndLoc, Tok, /*IgnoreWhiteSpace=*/true); if (!Failed) EndLoc = Tok.getLocation(); } commit.insertAfterToken(EndLoc, PragmaString); Editor->commit(commit); FileId = FileID(); CFFunctionIBCandidates.clear(); } void ObjCMigrateASTConsumer::migrateCFAnnotation(ASTContext &Ctx, const Decl *Decl) { if (Decl->isDeprecated()) return; if (Decl->hasAttr<CFAuditedTransferAttr>()) { assert(CFFunctionIBCandidates.empty() && "Cannot have audited functions/methods inside user " "provided CF_IMPLICIT_BRIDGING_ENABLED"); return; } // Functions must be annotated first. if (const FunctionDecl *FuncDecl = dyn_cast<FunctionDecl>(Decl)) { CF_BRIDGING_KIND AuditKind = migrateAddFunctionAnnotation(Ctx, FuncDecl); if (AuditKind == CF_BRIDGING_ENABLE) { CFFunctionIBCandidates.push_back(Decl); if (FileId.isInvalid()) FileId = PP.getSourceManager().getFileID(Decl->getLocation()); } else if (AuditKind == CF_BRIDGING_MAY_INCLUDE) { if (!CFFunctionIBCandidates.empty()) { CFFunctionIBCandidates.push_back(Decl); if (FileId.isInvalid()) FileId = PP.getSourceManager().getFileID(Decl->getLocation()); } } else AnnotateImplicitBridging(Ctx); } else { migrateAddMethodAnnotation(Ctx, cast<ObjCMethodDecl>(Decl)); AnnotateImplicitBridging(Ctx); } } void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE, const FunctionDecl *FuncDecl, bool ResultAnnotated) { // Annotate function. if (!ResultAnnotated) { RetEffect Ret = CE.getReturnValue(); const char *AnnotationString = nullptr; if (Ret.getObjKind() == RetEffect::CF) { if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED")) AnnotationString = " CF_RETURNS_RETAINED"; else if (Ret.notOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED")) AnnotationString = " CF_RETURNS_NOT_RETAINED"; } else if (Ret.getObjKind() == RetEffect::ObjC) { if (Ret.isOwned() && NSAPIObj->isMacroDefined("NS_RETURNS_RETAINED")) AnnotationString = " NS_RETURNS_RETAINED"; } if (AnnotationString) { edit::Commit commit(*Editor); commit.insertAfterToken(FuncDecl->getLocEnd(), AnnotationString); Editor->commit(commit); } } ArrayRef<ArgEffect> AEArgs = CE.getArgs(); unsigned i = 0; for (FunctionDecl::param_const_iterator pi = FuncDecl->param_begin(), pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) { const ParmVarDecl *pd = *pi; ArgEffect AE = AEArgs[i]; if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() && NSAPIObj->isMacroDefined("CF_CONSUMED")) { edit::Commit commit(*Editor); commit.insertBefore(pd->getLocation(), "CF_CONSUMED "); Editor->commit(commit); } else if (AE == DecRefMsg && !pd->hasAttr<NSConsumedAttr>() && NSAPIObj->isMacroDefined("NS_CONSUMED")) { edit::Commit commit(*Editor); commit.insertBefore(pd->getLocation(), "NS_CONSUMED "); Editor->commit(commit); } } } ObjCMigrateASTConsumer::CF_BRIDGING_KIND ObjCMigrateASTConsumer::migrateAddFunctionAnnotation( ASTContext &Ctx, const FunctionDecl *FuncDecl) { if (FuncDecl->hasBody()) return CF_BRIDGING_NONE; CallEffects CE = CallEffects::getEffect(FuncDecl); bool FuncIsReturnAnnotated = (FuncDecl->hasAttr<CFReturnsRetainedAttr>() || FuncDecl->hasAttr<CFReturnsNotRetainedAttr>() || FuncDecl->hasAttr<NSReturnsRetainedAttr>() || FuncDecl->hasAttr<NSReturnsNotRetainedAttr>() || FuncDecl->hasAttr<NSReturnsAutoreleasedAttr>()); // Trivial case of when the function is annotated and has no arguments.
if (FuncIsReturnAnnotated && FuncDecl->getNumParams() == 0) return CF_BRIDGING_NONE; bool ReturnCFAudited = false; if (!FuncIsReturnAnnotated) { RetEffect Ret = CE.getReturnValue(); if (Ret.getObjKind() == RetEffect::CF && (Ret.isOwned() || Ret.notOwned())) ReturnCFAudited = true; else if (!AuditedType(FuncDecl->getReturnType())) return CF_BRIDGING_NONE; } // At this point result type is audited for potential inclusion. // Now, how about argument types. ArrayRef<ArgEffect> AEArgs = CE.getArgs(); unsigned i = 0; bool ArgCFAudited = false; for (FunctionDecl::param_const_iterator pi = FuncDecl->param_begin(), pe = FuncDecl->param_end(); pi != pe; ++pi, ++i) { const ParmVarDecl *pd = *pi; ArgEffect AE = AEArgs[i]; if (AE == DecRef /*CFConsumed annotated*/ || AE == IncRef) { if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>()) ArgCFAudited = true; else if (AE == IncRef) ArgCFAudited = true; } else { QualType AT = pd->getType(); if (!AuditedType(AT)) { AddCFAnnotations(Ctx, CE, FuncDecl, FuncIsReturnAnnotated); return CF_BRIDGING_NONE; } } } if (ReturnCFAudited || ArgCFAudited) return CF_BRIDGING_ENABLE; return CF_BRIDGING_MAY_INCLUDE; } void ObjCMigrateASTConsumer::migrateARCSafeAnnotation(ASTContext &Ctx, ObjCContainerDecl *CDecl) { if (!isa<ObjCInterfaceDecl>(CDecl) || CDecl->isDeprecated()) return; // migrate methods which can have instancetype as their result type. for (const auto *Method : CDecl->methods()) migrateCFAnnotation(Ctx, Method); } void ObjCMigrateASTConsumer::AddCFAnnotations(ASTContext &Ctx, const CallEffects &CE, const ObjCMethodDecl *MethodDecl, bool ResultAnnotated) { // Annotate function. if (!ResultAnnotated) { RetEffect Ret = CE.getReturnValue(); const char *AnnotationString = nullptr; if (Ret.getObjKind() == RetEffect::CF) { if (Ret.isOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_RETAINED")) AnnotationString = " CF_RETURNS_RETAINED"; else if (Ret.notOwned() && NSAPIObj->isMacroDefined("CF_RETURNS_NOT_RETAINED")) AnnotationString = " CF_RETURNS_NOT_RETAINED"; } else if (Ret.getObjKind() == RetEffect::ObjC) { ObjCMethodFamily OMF = MethodDecl->getMethodFamily(); switch (OMF) { case clang::OMF_alloc: case clang::OMF_new: case clang::OMF_copy: case clang::OMF_init: case clang::OMF_mutableCopy: break; default: if (Ret.isOwned() && NSAPIObj->isMacroDefined("NS_RETURNS_RETAINED")) AnnotationString = " NS_RETURNS_RETAINED"; break; } } if (AnnotationString) { edit::Commit commit(*Editor); commit.insertBefore(MethodDecl->getLocEnd(), AnnotationString); Editor->commit(commit); } } ArrayRef<ArgEffect> AEArgs = CE.getArgs(); unsigned i = 0; for (ObjCMethodDecl::param_const_iterator pi = MethodDecl->param_begin(), pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) { const ParmVarDecl *pd = *pi; ArgEffect AE = AEArgs[i]; if (AE == DecRef && !pd->hasAttr<CFConsumedAttr>() && NSAPIObj->isMacroDefined("CF_CONSUMED")) { edit::Commit commit(*Editor); commit.insertBefore(pd->getLocation(), "CF_CONSUMED "); Editor->commit(commit); } } } void ObjCMigrateASTConsumer::migrateAddMethodAnnotation( ASTContext &Ctx, const ObjCMethodDecl *MethodDecl) { if (MethodDecl->hasBody() || MethodDecl->isImplicit()) return; CallEffects CE = CallEffects::getEffect(MethodDecl); bool MethodIsReturnAnnotated = (MethodDecl->hasAttr<CFReturnsRetainedAttr>() || MethodDecl->hasAttr<CFReturnsNotRetainedAttr>() || MethodDecl->hasAttr<NSReturnsRetainedAttr>() || MethodDecl->hasAttr<NSReturnsNotRetainedAttr>() || MethodDecl->hasAttr<NSReturnsAutoreleasedAttr>()); if (CE.getReceiver() == DecRefMsg && 
!MethodDecl->hasAttr<NSConsumesSelfAttr>() && MethodDecl->getMethodFamily() != OMF_init && MethodDecl->getMethodFamily() != OMF_release && NSAPIObj->isMacroDefined("NS_CONSUMES_SELF")) { edit::Commit commit(*Editor); commit.insertBefore(MethodDecl->getLocEnd(), " NS_CONSUMES_SELF"); Editor->commit(commit); } // Trivial case of when funciton is annotated and has no argument. if (MethodIsReturnAnnotated && (MethodDecl->param_begin() == MethodDecl->param_end())) return; if (!MethodIsReturnAnnotated) { RetEffect Ret = CE.getReturnValue(); if ((Ret.getObjKind() == RetEffect::CF || Ret.getObjKind() == RetEffect::ObjC) && (Ret.isOwned() || Ret.notOwned())) { AddCFAnnotations(Ctx, CE, MethodDecl, false); return; } else if (!AuditedType(MethodDecl->getReturnType())) return; } // At this point result type is either annotated or audited. // Now, how about argument types. ArrayRef<ArgEffect> AEArgs = CE.getArgs(); unsigned i = 0; for (ObjCMethodDecl::param_const_iterator pi = MethodDecl->param_begin(), pe = MethodDecl->param_end(); pi != pe; ++pi, ++i) { const ParmVarDecl *pd = *pi; ArgEffect AE = AEArgs[i]; if ((AE == DecRef && !pd->hasAttr<CFConsumedAttr>()) || AE == IncRef || !AuditedType(pd->getType())) { AddCFAnnotations(Ctx, CE, MethodDecl, MethodIsReturnAnnotated); return; } } return; } namespace { class SuperInitChecker : public RecursiveASTVisitor<SuperInitChecker> { public: bool shouldVisitTemplateInstantiations() const { return false; } bool shouldWalkTypesOfTypeLocs() const { return false; } bool VisitObjCMessageExpr(ObjCMessageExpr *E) { if (E->getReceiverKind() == ObjCMessageExpr::SuperInstance) { if (E->getMethodFamily() == OMF_init) return false; } return true; } }; } // anonymous namespace static bool hasSuperInitCall(const ObjCMethodDecl *MD) { return !SuperInitChecker().TraverseStmt(MD->getBody()); } void ObjCMigrateASTConsumer::inferDesignatedInitializers( ASTContext &Ctx, const ObjCImplementationDecl *ImplD) { const ObjCInterfaceDecl *IFace = ImplD->getClassInterface(); if (!IFace || IFace->hasDesignatedInitializers()) return; if (!NSAPIObj->isMacroDefined("NS_DESIGNATED_INITIALIZER")) return; for (const auto *MD : ImplD->instance_methods()) { if (MD->isDeprecated() || MD->getMethodFamily() != OMF_init || MD->isDesignatedInitializerForTheInterface()) continue; const ObjCMethodDecl *IFaceM = IFace->getMethod(MD->getSelector(), /*isInstance=*/true); if (!IFaceM) continue; if (hasSuperInitCall(MD)) { edit::Commit commit(*Editor); commit.insert(IFaceM->getLocEnd(), " NS_DESIGNATED_INITIALIZER"); Editor->commit(commit); } } } bool ObjCMigrateASTConsumer::InsertFoundation(ASTContext &Ctx, SourceLocation Loc) { if (FoundationIncluded) return true; if (Loc.isInvalid()) return false; edit::Commit commit(*Editor); if (Ctx.getLangOpts().Modules) commit.insert(Loc, "#ifndef NS_ENUM\n@import Foundation;\n#endif\n"); else commit.insert(Loc, "#ifndef NS_ENUM\n#import <Foundation/Foundation.h>\n#endif\n"); Editor->commit(commit); FoundationIncluded = true; return true; } namespace { class RewritesReceiver : public edit::EditsReceiver { Rewriter &Rewrite; public: RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { } void insert(SourceLocation loc, StringRef text) override { Rewrite.InsertText(loc, text); } void replace(CharSourceRange range, StringRef text) override { Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text); } }; class JSONEditWriter : public edit::EditsReceiver { SourceManager &SourceMgr; llvm::raw_ostream &OS; public: JSONEditWriter(SourceManager &SM, 
llvm::raw_ostream &OS) : SourceMgr(SM), OS(OS) { OS << "[\n"; } ~JSONEditWriter() override { OS << "]\n"; } private: struct EntryWriter { SourceManager &SourceMgr; llvm::raw_ostream &OS; EntryWriter(SourceManager &SM, llvm::raw_ostream &OS) : SourceMgr(SM), OS(OS) { OS << " {\n"; } ~EntryWriter() { OS << " },\n"; } void writeLoc(SourceLocation Loc) { FileID FID; unsigned Offset; std::tie(FID, Offset) = SourceMgr.getDecomposedLoc(Loc); assert(!FID.isInvalid()); SmallString<200> Path = StringRef(SourceMgr.getFileEntryForID(FID)->getName()); llvm::sys::fs::make_absolute(Path); OS << " \"file\": \""; OS.write_escaped(Path.str()) << "\",\n"; OS << " \"offset\": " << Offset << ",\n"; } void writeRemove(CharSourceRange Range) { assert(Range.isCharRange()); std::pair<FileID, unsigned> Begin = SourceMgr.getDecomposedLoc(Range.getBegin()); std::pair<FileID, unsigned> End = SourceMgr.getDecomposedLoc(Range.getEnd()); assert(Begin.first == End.first); assert(Begin.second <= End.second); unsigned Length = End.second - Begin.second; OS << " \"remove\": " << Length << ",\n"; } void writeText(StringRef Text) { OS << " \"text\": \""; OS.write_escaped(Text) << "\",\n"; } }; void insert(SourceLocation Loc, StringRef Text) override { EntryWriter Writer(SourceMgr, OS); Writer.writeLoc(Loc); Writer.writeText(Text); } void replace(CharSourceRange Range, StringRef Text) override { EntryWriter Writer(SourceMgr, OS); Writer.writeLoc(Range.getBegin()); Writer.writeRemove(Range); Writer.writeText(Text); } void remove(CharSourceRange Range) override { EntryWriter Writer(SourceMgr, OS); Writer.writeLoc(Range.getBegin()); Writer.writeRemove(Range); } }; } void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) { TranslationUnitDecl *TU = Ctx.getTranslationUnitDecl(); if (ASTMigrateActions & FrontendOptions::ObjCMT_MigrateDecls) { for (DeclContext::decl_iterator D = TU->decls_begin(), DEnd = TU->decls_end(); D != DEnd; ++D) { FileID FID = PP.getSourceManager().getFileID((*D)->getLocation()); if (!FID.isInvalid()) if (!FileId.isInvalid() && FileId != FID) { if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) AnnotateImplicitBridging(Ctx); } if (ObjCInterfaceDecl *CDecl = dyn_cast<ObjCInterfaceDecl>(*D)) if (canModify(CDecl)) migrateObjCContainerDecl(Ctx, CDecl); if (ObjCCategoryDecl *CatDecl = dyn_cast<ObjCCategoryDecl>(*D)) { if (canModify(CatDecl)) migrateObjCContainerDecl(Ctx, CatDecl); } else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(*D)) { ObjCProtocolDecls.insert(PDecl->getCanonicalDecl()); if (canModify(PDecl)) migrateObjCContainerDecl(Ctx, PDecl); } else if (const ObjCImplementationDecl *ImpDecl = dyn_cast<ObjCImplementationDecl>(*D)) { if ((ASTMigrateActions & FrontendOptions::ObjCMT_ProtocolConformance) && canModify(ImpDecl)) migrateProtocolConformance(Ctx, ImpDecl); } else if (const EnumDecl *ED = dyn_cast<EnumDecl>(*D)) { if (!(ASTMigrateActions & FrontendOptions::ObjCMT_NsMacros)) continue; if (!canModify(ED)) continue; DeclContext::decl_iterator N = D; if (++N != DEnd) { const TypedefDecl *TD = dyn_cast<TypedefDecl>(*N); if (migrateNSEnumDecl(Ctx, ED, TD) && TD) D++; } else migrateNSEnumDecl(Ctx, ED, /*TypedefDecl */nullptr); } else if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(*D)) { if (!(ASTMigrateActions & FrontendOptions::ObjCMT_NsMacros)) continue; if (!canModify(TD)) continue; DeclContext::decl_iterator N = D; if (++N == DEnd) continue; if (const EnumDecl *ED = dyn_cast<EnumDecl>(*N)) { if (++N != DEnd) if (const TypedefDecl *TDF = dyn_cast<TypedefDecl>(*N)) { // 
prefer typedef-follows-enum to enum-follows-typedef pattern. if (migrateNSEnumDecl(Ctx, ED, TDF)) { ++D; ++D; CacheObjCNSIntegerTypedefed(TD); continue; } } if (migrateNSEnumDecl(Ctx, ED, TD)) { ++D; continue; } } CacheObjCNSIntegerTypedefed(TD); } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*D)) { if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) && canModify(FD)) migrateCFAnnotation(Ctx, FD); } if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(*D)) { bool CanModify = canModify(CDecl); // migrate methods which can have instancetype as their result type. if ((ASTMigrateActions & FrontendOptions::ObjCMT_Instancetype) && CanModify) migrateAllMethodInstaceType(Ctx, CDecl); // annotate methods with CF annotations. if ((ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) && CanModify) migrateARCSafeAnnotation(Ctx, CDecl); } if (const ObjCImplementationDecl * ImplD = dyn_cast<ObjCImplementationDecl>(*D)) { if ((ASTMigrateActions & FrontendOptions::ObjCMT_DesignatedInitializer) && canModify(ImplD)) inferDesignatedInitializers(Ctx, ImplD); } } if (ASTMigrateActions & FrontendOptions::ObjCMT_Annotation) AnnotateImplicitBridging(Ctx); } if (IsOutputFile) { std::error_code EC; llvm::raw_fd_ostream OS(MigrateDir, EC, llvm::sys::fs::F_None); if (EC) { DiagnosticsEngine &Diags = Ctx.getDiagnostics(); Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << EC.message(); return; } JSONEditWriter Writer(Ctx.getSourceManager(), OS); Editor->applyRewrites(Writer); return; } Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts()); RewritesReceiver Rec(rewriter); Editor->applyRewrites(Rec); for (Rewriter::buffer_iterator I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) { FileID FID = I->first; RewriteBuffer &buf = I->second; const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID); assert(file); SmallString<512> newText; llvm::raw_svector_ostream vecOS(newText); buf.write(vecOS); vecOS.flush(); std::unique_ptr<llvm::MemoryBuffer> memBuf( llvm::MemoryBuffer::getMemBufferCopy( StringRef(newText.data(), newText.size()), file->getName())); SmallString<64> filePath(file->getName()); FileMgr.FixupRelativePath(filePath); Remapper.remap(filePath.str(), std::move(memBuf)); } if (IsOutputFile) { Remapper.flushToFile(MigrateDir, Ctx.getDiagnostics()); } else { Remapper.flushToDisk(MigrateDir, Ctx.getDiagnostics()); } } bool MigrateSourceAction::BeginInvocation(CompilerInstance &CI) { CI.getDiagnostics().setIgnoreAllWarnings(true); return true; } static std::vector<std::string> getWhiteListFilenames(StringRef DirPath) { using namespace llvm::sys::fs; using namespace llvm::sys::path; std::vector<std::string> Filenames; if (DirPath.empty() || !is_directory(DirPath)) return Filenames; std::error_code EC; directory_iterator DI = directory_iterator(DirPath, EC); directory_iterator DE; for (; !EC && DI != DE; DI = DI.increment(EC)) { if (is_regular_file(DI->path())) Filenames.push_back(filename(DI->path())); } return Filenames; } std::unique_ptr<ASTConsumer> MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { PPConditionalDirectiveRecord * PPRec = new PPConditionalDirectiveRecord(CI.getSourceManager()); unsigned ObjCMTAction = CI.getFrontendOpts().ObjCMTAction; unsigned ObjCMTOpts = ObjCMTAction; // These are companion flags, they do not enable transformations. 
ObjCMTOpts &= ~(FrontendOptions::ObjCMT_AtomicProperty | FrontendOptions::ObjCMT_NsAtomicIOSOnlyProperty); if (ObjCMTOpts == FrontendOptions::ObjCMT_None) { // If no specific option was given, enable literals+subscripting transforms // by default. ObjCMTAction |= FrontendOptions::ObjCMT_Literals | FrontendOptions::ObjCMT_Subscripting; } CI.getPreprocessor().addPPCallbacks(std::unique_ptr<PPCallbacks>(PPRec)); std::vector<std::string> WhiteList = getWhiteListFilenames(CI.getFrontendOpts().ObjCMTWhiteListPath); return llvm::make_unique<ObjCMigrateASTConsumer>( CI.getFrontendOpts().OutputFile, ObjCMTAction, Remapper, CI.getFileManager(), PPRec, CI.getPreprocessor(), /*isOutputFile=*/true, WhiteList); } namespace { struct EditEntry { const FileEntry *File; unsigned Offset; unsigned RemoveLen; std::string Text; EditEntry() : File(), Offset(), RemoveLen() {} }; } namespace llvm { template<> struct DenseMapInfo<EditEntry> { static inline EditEntry getEmptyKey() { EditEntry Entry; Entry.Offset = unsigned(-1); return Entry; } static inline EditEntry getTombstoneKey() { EditEntry Entry; Entry.Offset = unsigned(-2); return Entry; } static unsigned getHashValue(const EditEntry& Val) { llvm::FoldingSetNodeID ID; ID.AddPointer(Val.File); ID.AddInteger(Val.Offset); ID.AddInteger(Val.RemoveLen); ID.AddString(Val.Text); return ID.ComputeHash(); } static bool isEqual(const EditEntry &LHS, const EditEntry &RHS) { return LHS.File == RHS.File && LHS.Offset == RHS.Offset && LHS.RemoveLen == RHS.RemoveLen && LHS.Text == RHS.Text; } }; } namespace { class RemapFileParser { FileManager &FileMgr; public: RemapFileParser(FileManager &FileMgr) : FileMgr(FileMgr) { } bool parse(StringRef File, SmallVectorImpl<EditEntry> &Entries) { using namespace llvm::yaml; llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileBufOrErr = llvm::MemoryBuffer::getFile(File); if (!FileBufOrErr) return true; llvm::SourceMgr SM; Stream YAMLStream(FileBufOrErr.get()->getMemBufferRef(), SM); document_iterator I = YAMLStream.begin(); if (I == YAMLStream.end()) return true; Node *Root = I->getRoot(); if (!Root) return true; SequenceNode *SeqNode = dyn_cast<SequenceNode>(Root); if (!SeqNode) return true; for (SequenceNode::iterator AI = SeqNode->begin(), AE = SeqNode->end(); AI != AE; ++AI) { MappingNode *MapNode = dyn_cast<MappingNode>(&*AI); if (!MapNode) continue; parseEdit(MapNode, Entries); } return false; } private: void parseEdit(llvm::yaml::MappingNode *Node, SmallVectorImpl<EditEntry> &Entries) { using namespace llvm::yaml; EditEntry Entry; bool Ignore = false; for (MappingNode::iterator KVI = Node->begin(), KVE = Node->end(); KVI != KVE; ++KVI) { ScalarNode *KeyString = dyn_cast<ScalarNode>((*KVI).getKey()); if (!KeyString) continue; SmallString<10> KeyStorage; StringRef Key = KeyString->getValue(KeyStorage); ScalarNode *ValueString = dyn_cast<ScalarNode>((*KVI).getValue()); if (!ValueString) continue; SmallString<64> ValueStorage; StringRef Val = ValueString->getValue(ValueStorage); if (Key == "file") { const FileEntry *FE = FileMgr.getFile(Val); if (!FE) Ignore = true; Entry.File = FE; } else if (Key == "offset") { if (Val.getAsInteger(10, Entry.Offset)) Ignore = true; } else if (Key == "remove") { if (Val.getAsInteger(10, Entry.RemoveLen)) Ignore = true; } else if (Key == "text") { Entry.Text = Val; } } if (!Ignore) Entries.push_back(Entry); } }; } static bool reportDiag(const Twine &Err, DiagnosticsEngine &Diag) { Diag.Report(Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << Err.str(); return true; } static std::string 
applyEditsToTemp(const FileEntry *FE, ArrayRef<EditEntry> Edits, FileManager &FileMgr, DiagnosticsEngine &Diag) { using namespace llvm::sys; SourceManager SM(Diag, FileMgr); FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User); LangOptions LangOpts; edit::EditedSource Editor(SM, LangOpts); for (ArrayRef<EditEntry>::iterator I = Edits.begin(), E = Edits.end(); I != E; ++I) { const EditEntry &Entry = *I; assert(Entry.File == FE); SourceLocation Loc = SM.getLocForStartOfFile(FID).getLocWithOffset(Entry.Offset); CharSourceRange Range; if (Entry.RemoveLen != 0) { Range = CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Entry.RemoveLen)); } edit::Commit commit(Editor); if (Range.isInvalid()) { commit.insert(Loc, Entry.Text); } else if (Entry.Text.empty()) { commit.remove(Range); } else { commit.replace(Range, Entry.Text); } Editor.commit(commit); } Rewriter rewriter(SM, LangOpts); RewritesReceiver Rec(rewriter); Editor.applyRewrites(Rec); const RewriteBuffer *Buf = rewriter.getRewriteBufferFor(FID); SmallString<512> NewText; llvm::raw_svector_ostream OS(NewText); Buf->write(OS); OS.flush(); SmallString<64> TempPath; int FD; if (fs::createTemporaryFile(path::filename(FE->getName()), path::extension(FE->getName()), FD, TempPath)) { reportDiag("Could not create file: " + TempPath.str(), Diag); return std::string(); } llvm::raw_fd_ostream TmpOut(FD, /*shouldClose=*/true); TmpOut.write(NewText.data(), NewText.size()); TmpOut.close(); return TempPath.str(); } bool arcmt::getFileRemappingsFromFileList( std::vector<std::pair<std::string,std::string> > &remap, ArrayRef<StringRef> remapFiles, DiagnosticConsumer *DiagClient) { bool hasErrorOccurred = false; FileSystemOptions FSOpts; FileManager FileMgr(FSOpts); RemapFileParser Parser(FileMgr); IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, new DiagnosticOptions, DiagClient, /*ShouldOwnClient=*/false)); typedef llvm::DenseMap<const FileEntry *, std::vector<EditEntry> > FileEditEntriesTy; FileEditEntriesTy FileEditEntries; llvm::DenseSet<EditEntry> EntriesSet; for (ArrayRef<StringRef>::iterator I = remapFiles.begin(), E = remapFiles.end(); I != E; ++I) { SmallVector<EditEntry, 16> Entries; if (Parser.parse(*I, Entries)) continue; for (SmallVectorImpl<EditEntry>::iterator EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) { EditEntry &Entry = *EI; if (!Entry.File) continue; std::pair<llvm::DenseSet<EditEntry>::iterator, bool> Insert = EntriesSet.insert(Entry); if (!Insert.second) continue; FileEditEntries[Entry.File].push_back(Entry); } } for (FileEditEntriesTy::iterator I = FileEditEntries.begin(), E = FileEditEntries.end(); I != E; ++I) { std::string TempFile = applyEditsToTemp(I->first, I->second, FileMgr, *Diags); if (TempFile.empty()) { hasErrorOccurred = true; continue; } remap.emplace_back(I->first->getName(), TempFile); } return hasErrorOccurred; }
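// A hedged illustration of the remap file round-tripped above: JSONEditWriter
// serializes each edit as a map with "file", "offset", and optional "remove"
// and "text" keys, and RemapFileParser::parseEdit() reads the same shape back.
// The path and values below are made up for the example:
//
//   [
//    {
//     "file": "/tmp/Foo.m",
//     "offset": 120,
//     "remove": 6,
//     "text": "strong",
//    },
//   ]
//
// Per applyEditsToTemp(): an entry whose RemoveLen stays 0 is a pure
// insertion, an entry with a nonzero "remove" but empty "text" is a pure
// removal, and anything else replaces "remove" characters starting at
// "offset" of "file".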
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
//===--- TransGCAttrs.cpp - Transformations to ARC mode --------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Lexer.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Support/SaveAndRestore.h" using namespace clang; using namespace arcmt; using namespace trans; namespace { /// \brief Collects all the places where GC attributes __strong/__weak occur. class GCAttrsCollector : public RecursiveASTVisitor<GCAttrsCollector> { MigrationContext &MigrateCtx; bool FullyMigratable; std::vector<ObjCPropertyDecl *> &AllProps; typedef RecursiveASTVisitor<GCAttrsCollector> base; public: GCAttrsCollector(MigrationContext &ctx, std::vector<ObjCPropertyDecl *> &AllProps) : MigrateCtx(ctx), FullyMigratable(false), AllProps(AllProps) { } bool shouldWalkTypesOfTypeLocs() const { return false; } bool VisitAttributedTypeLoc(AttributedTypeLoc TL) { handleAttr(TL); return true; } bool TraverseDecl(Decl *D) { if (!D || D->isImplicit()) return true; SaveAndRestore<bool> Save(FullyMigratable, isMigratable(D)); if (ObjCPropertyDecl *PropD = dyn_cast<ObjCPropertyDecl>(D)) { lookForAttribute(PropD, PropD->getTypeSourceInfo()); AllProps.push_back(PropD); } else if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) { lookForAttribute(DD, DD->getTypeSourceInfo()); } return base::TraverseDecl(D); } void lookForAttribute(Decl *D, TypeSourceInfo *TInfo) { if (!TInfo) return; TypeLoc TL = TInfo->getTypeLoc(); while (TL) { if (QualifiedTypeLoc QL = TL.getAs<QualifiedTypeLoc>()) { TL = QL.getUnqualifiedLoc(); } else if (AttributedTypeLoc Attr = TL.getAs<AttributedTypeLoc>()) { if (handleAttr(Attr, D)) break; TL = Attr.getModifiedLoc(); } else if (ArrayTypeLoc Arr = TL.getAs<ArrayTypeLoc>()) { TL = Arr.getElementLoc(); } else if (PointerTypeLoc PT = TL.getAs<PointerTypeLoc>()) { TL = PT.getPointeeLoc(); } else if (ReferenceTypeLoc RT = TL.getAs<ReferenceTypeLoc>()) TL = RT.getPointeeLoc(); else break; } } bool handleAttr(AttributedTypeLoc TL, Decl *D = nullptr) { if (TL.getAttrKind() != AttributedType::attr_objc_ownership) return false; SourceLocation Loc = TL.getAttrNameLoc(); unsigned RawLoc = Loc.getRawEncoding(); if (MigrateCtx.AttrSet.count(RawLoc)) return true; ASTContext &Ctx = MigrateCtx.Pass.Ctx; SourceManager &SM = Ctx.getSourceManager(); if (Loc.isMacroID()) Loc = SM.getImmediateExpansionRange(Loc).first; SmallString<32> Buf; bool Invalid = false; StringRef Spell = Lexer::getSpelling( SM.getSpellingLoc(TL.getAttrEnumOperandLoc()), Buf, SM, Ctx.getLangOpts(), &Invalid); if (Invalid) return false; MigrationContext::GCAttrOccurrence::AttrKind Kind; if (Spell == "strong") Kind = MigrationContext::GCAttrOccurrence::Strong; else if (Spell == "weak") Kind = MigrationContext::GCAttrOccurrence::Weak; else return false; MigrateCtx.AttrSet.insert(RawLoc); MigrateCtx.GCAttrs.push_back(MigrationContext::GCAttrOccurrence()); MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs.back(); Attr.Kind = Kind; Attr.Loc = Loc; Attr.ModifiedType = TL.getModifiedLoc().getType(); Attr.Dcl = D; Attr.FullyMigratable = FullyMigratable; return true; } bool isMigratable(Decl *D) { if (isa<TranslationUnitDecl>(D)) return 
false; if (isInMainFile(D)) return true; if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) return FD->hasBody(); if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D)) return hasObjCImpl(ContD); if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) { for (const auto *MI : RD->methods()) { if (MI->isOutOfLine()) return true; } return false; } return isMigratable(cast<Decl>(D->getDeclContext())); } static bool hasObjCImpl(Decl *D) { if (!D) return false; if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D)) { if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ContD)) return ID->getImplementation() != nullptr; if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContD)) return CD->getImplementation() != nullptr; if (isa<ObjCImplDecl>(ContD)) return true; return false; } return false; } bool isInMainFile(Decl *D) { if (!D) return false; for (auto I : D->redecls()) if (!isInMainFile(I->getLocation())) return false; return true; } bool isInMainFile(SourceLocation Loc) { if (Loc.isInvalid()) return false; SourceManager &SM = MigrateCtx.Pass.Ctx.getSourceManager(); return SM.isInFileID(SM.getExpansionLoc(Loc), SM.getMainFileID()); } }; } // anonymous namespace static void errorForGCAttrsOnNonObjC(MigrationContext &MigrateCtx) { TransformActions &TA = MigrateCtx.Pass.TA; for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) { MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i]; if (Attr.FullyMigratable && Attr.Dcl) { if (Attr.ModifiedType.isNull()) continue; if (!Attr.ModifiedType->isObjCRetainableType()) { TA.reportError("GC managed memory will become unmanaged in ARC", Attr.Loc); } } } } static void checkWeakGCAttrs(MigrationContext &MigrateCtx) { TransformActions &TA = MigrateCtx.Pass.TA; for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) { MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i]; if (Attr.Kind == MigrationContext::GCAttrOccurrence::Weak) { if (Attr.ModifiedType.isNull() || !Attr.ModifiedType->isObjCRetainableType()) continue; if (!canApplyWeak(MigrateCtx.Pass.Ctx, Attr.ModifiedType, /*AllowOnUnknownClass=*/true)) { Transaction Trans(TA); if (!MigrateCtx.RemovedAttrSet.count(Attr.Loc.getRawEncoding())) TA.replaceText(Attr.Loc, "__weak", "__unsafe_unretained"); TA.clearDiagnostic(diag::err_arc_weak_no_runtime, diag::err_arc_unsupported_weak_class, Attr.Loc); } } } } typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy; static void checkAllAtProps(MigrationContext &MigrateCtx, SourceLocation AtLoc, IndivPropsTy &IndProps) { if (IndProps.empty()) return; for (IndivPropsTy::iterator PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) { QualType T = (*PI)->getType(); if (T.isNull() || !T->isObjCRetainableType()) return; } SmallVector<std::pair<AttributedTypeLoc, ObjCPropertyDecl *>, 4> ATLs; bool hasWeak = false, hasStrong = false; ObjCPropertyDecl::PropertyAttributeKind Attrs = ObjCPropertyDecl::OBJC_PR_noattr; for (IndivPropsTy::iterator PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) { ObjCPropertyDecl *PD = *PI; Attrs = PD->getPropertyAttributesAsWritten(); TypeSourceInfo *TInfo = PD->getTypeSourceInfo(); if (!TInfo) return; TypeLoc TL = TInfo->getTypeLoc(); if (AttributedTypeLoc ATL = TL.getAs<AttributedTypeLoc>()) { ATLs.push_back(std::make_pair(ATL, PD)); if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Weak) { hasWeak = true; } else if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Strong) hasStrong = true; else return; } } if (ATLs.empty()) return; if (hasWeak && 
hasStrong) return; TransformActions &TA = MigrateCtx.Pass.TA; Transaction Trans(TA); if (GCAttrsCollector::hasObjCImpl( cast<Decl>(IndProps.front()->getDeclContext()))) { if (hasWeak) MigrateCtx.AtPropsWeak.insert(AtLoc.getRawEncoding()); } else { StringRef toAttr = "strong"; if (hasWeak) { if (canApplyWeak(MigrateCtx.Pass.Ctx, IndProps.front()->getType(), /*AllowOnUnkwownClass=*/true)) toAttr = "weak"; else toAttr = "unsafe_unretained"; } if (Attrs & ObjCPropertyDecl::OBJC_PR_assign) MigrateCtx.rewritePropertyAttribute("assign", toAttr, AtLoc); else MigrateCtx.addPropertyAttribute(toAttr, AtLoc); } for (unsigned i = 0, e = ATLs.size(); i != e; ++i) { SourceLocation Loc = ATLs[i].first.getAttrNameLoc(); if (Loc.isMacroID()) Loc = MigrateCtx.Pass.Ctx.getSourceManager() .getImmediateExpansionRange(Loc).first; TA.remove(Loc); TA.clearDiagnostic(diag::err_objc_property_attr_mutually_exclusive, AtLoc); TA.clearDiagnostic(diag::err_arc_inconsistent_property_ownership, ATLs[i].second->getLocation()); MigrateCtx.RemovedAttrSet.insert(Loc.getRawEncoding()); } } static void checkAllProps(MigrationContext &MigrateCtx, std::vector<ObjCPropertyDecl *> &AllProps) { typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy; llvm::DenseMap<unsigned, IndivPropsTy> AtProps; for (unsigned i = 0, e = AllProps.size(); i != e; ++i) { ObjCPropertyDecl *PD = AllProps[i]; if (PD->getPropertyAttributesAsWritten() & (ObjCPropertyDecl::OBJC_PR_assign | ObjCPropertyDecl::OBJC_PR_readonly)) { SourceLocation AtLoc = PD->getAtLoc(); if (AtLoc.isInvalid()) continue; unsigned RawAt = AtLoc.getRawEncoding(); AtProps[RawAt].push_back(PD); } } for (llvm::DenseMap<unsigned, IndivPropsTy>::iterator I = AtProps.begin(), E = AtProps.end(); I != E; ++I) { SourceLocation AtLoc = SourceLocation::getFromRawEncoding(I->first); IndivPropsTy &IndProps = I->second; checkAllAtProps(MigrateCtx, AtLoc, IndProps); } } void GCAttrsTraverser::traverseTU(MigrationContext &MigrateCtx) { std::vector<ObjCPropertyDecl *> AllProps; GCAttrsCollector(MigrateCtx, AllProps).TraverseDecl( MigrateCtx.Pass.Ctx.getTranslationUnitDecl()); errorForGCAttrsOnNonObjC(MigrateCtx); checkAllProps(MigrateCtx, AllProps); checkWeakGCAttrs(MigrateCtx); } void MigrationContext::dumpGCAttrs() { llvm::errs() << "\n################\n"; for (unsigned i = 0, e = GCAttrs.size(); i != e; ++i) { GCAttrOccurrence &Attr = GCAttrs[i]; llvm::errs() << "KIND: " << (Attr.Kind == GCAttrOccurrence::Strong ? "strong" : "weak"); llvm::errs() << "\nLOC: "; Attr.Loc.dump(Pass.Ctx.getSourceManager()); llvm::errs() << "\nTYPE: "; Attr.ModifiedType.dump(); if (Attr.Dcl) { llvm::errs() << "DECL:\n"; Attr.Dcl->dump(); } else { llvm::errs() << "DECL: NONE"; } llvm::errs() << "\nMIGRATABLE: " << Attr.FullyMigratable; llvm::errs() << "\n----------------\n"; } llvm::errs() << "\n################\n"; }
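// A hedged before/after sketch of this GC-attribute migration (the class
// names are hypothetical). A GC __weak spelled on the type of an (assign)
// property is moved into the property attribute list by checkAllAtProps(),
// and a __weak on a class that cannot take ARC weak references is downgraded
// by checkWeakGCAttrs():
//
//   @property (assign) Foo *__weak delegate;
//     -->  @property (weak) Foo *delegate;
//
//   __weak LegacyClass *observer;
//     -->  __unsafe_unretained LegacyClass *observer;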
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
//===--- TransEmptyStatements.cpp - Transformations to ARC mode -----------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // removeEmptyStatementsAndDealloc: // // Removes empty statements that are leftovers from previous transformations. // e.g for // // [x retain]; // // removeRetainReleaseDealloc will leave an empty ";" that removeEmptyStatements // will remove. // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/SourceManager.h" using namespace clang; using namespace arcmt; using namespace trans; static bool isEmptyARCMTMacroStatement(NullStmt *S, std::vector<SourceLocation> &MacroLocs, ASTContext &Ctx) { if (!S->hasLeadingEmptyMacro()) return false; SourceLocation SemiLoc = S->getSemiLoc(); if (SemiLoc.isInvalid() || SemiLoc.isMacroID()) return false; if (MacroLocs.empty()) return false; SourceManager &SM = Ctx.getSourceManager(); std::vector<SourceLocation>::iterator I = std::upper_bound(MacroLocs.begin(), MacroLocs.end(), SemiLoc, BeforeThanCompare<SourceLocation>(SM)); --I; SourceLocation AfterMacroLoc = I->getLocWithOffset(getARCMTMacroName().size()); assert(AfterMacroLoc.isFileID()); if (AfterMacroLoc == SemiLoc) return true; int RelOffs = 0; if (!SM.isInSameSLocAddrSpace(AfterMacroLoc, SemiLoc, &RelOffs)) return false; if (RelOffs < 0) return false; // We make the reasonable assumption that a semicolon after 100 characters // means that it is not the next token after our macro. If this assumption // fails it is not critical, we will just fail to clear out, e.g., an empty // 'if'. if (RelOffs - getARCMTMacroName().size() > 100) return false; SourceLocation AfterMacroSemiLoc = findSemiAfterLocation(AfterMacroLoc, Ctx); return AfterMacroSemiLoc == SemiLoc; } namespace { /// \brief Returns true if the statement became empty due to previous /// transformations. class EmptyChecker : public StmtVisitor<EmptyChecker, bool> { ASTContext &Ctx; std::vector<SourceLocation> &MacroLocs; public: EmptyChecker(ASTContext &ctx, std::vector<SourceLocation> &macroLocs) : Ctx(ctx), MacroLocs(macroLocs) { } bool VisitNullStmt(NullStmt *S) { return isEmptyARCMTMacroStatement(S, MacroLocs, Ctx); } bool VisitCompoundStmt(CompoundStmt *S) { if (S->body_empty()) return false; // was already empty, not because of transformations. 
for (auto *I : S->body()) if (!Visit(I)) return false; return true; } bool VisitIfStmt(IfStmt *S) { if (S->getConditionVariable()) return false; Expr *condE = S->getCond(); if (!condE) return false; if (hasSideEffects(condE, Ctx)) return false; if (!S->getThen() || !Visit(S->getThen())) return false; if (S->getElse() && !Visit(S->getElse())) return false; return true; } bool VisitWhileStmt(WhileStmt *S) { if (S->getConditionVariable()) return false; Expr *condE = S->getCond(); if (!condE) return false; if (hasSideEffects(condE, Ctx)) return false; if (!S->getBody()) return false; return Visit(S->getBody()); } bool VisitDoStmt(DoStmt *S) { Expr *condE = S->getCond(); if (!condE) return false; if (hasSideEffects(condE, Ctx)) return false; if (!S->getBody()) return false; return Visit(S->getBody()); } bool VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) { Expr *Exp = S->getCollection(); if (!Exp) return false; if (hasSideEffects(Exp, Ctx)) return false; if (!S->getBody()) return false; return Visit(S->getBody()); } bool VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) { if (!S->getSubStmt()) return false; return Visit(S->getSubStmt()); } }; class EmptyStatementsRemover : public RecursiveASTVisitor<EmptyStatementsRemover> { MigrationPass &Pass; public: EmptyStatementsRemover(MigrationPass &pass) : Pass(pass) { } bool TraverseStmtExpr(StmtExpr *E) { CompoundStmt *S = E->getSubStmt(); for (CompoundStmt::body_iterator I = S->body_begin(), E = S->body_end(); I != E; ++I) { if (I != E - 1) check(*I); TraverseStmt(*I); } return true; } bool VisitCompoundStmt(CompoundStmt *S) { for (auto *I : S->body()) check(I); return true; } ASTContext &getContext() { return Pass.Ctx; } private: void check(Stmt *S) { if (!S) return; if (EmptyChecker(Pass.Ctx, Pass.ARCMTMacroLocs).Visit(S)) { Transaction Trans(Pass.TA); Pass.TA.removeStmt(S); } } }; } // anonymous namespace static bool isBodyEmpty(CompoundStmt *body, ASTContext &Ctx, std::vector<SourceLocation> &MacroLocs) { for (auto *I : body->body()) if (!EmptyChecker(Ctx, MacroLocs).Visit(I)) return false; return true; } static void cleanupDeallocOrFinalize(MigrationPass &pass) { ASTContext &Ctx = pass.Ctx; TransformActions &TA = pass.TA; DeclContext *DC = Ctx.getTranslationUnitDecl(); Selector FinalizeSel = Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize")); typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl> impl_iterator; for (impl_iterator I = impl_iterator(DC->decls_begin()), E = impl_iterator(DC->decls_end()); I != E; ++I) { ObjCMethodDecl *DeallocM = nullptr; ObjCMethodDecl *FinalizeM = nullptr; for (auto *MD : I->instance_methods()) { if (!MD->hasBody()) continue; if (MD->getMethodFamily() == OMF_dealloc) { DeallocM = MD; } else if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) { FinalizeM = MD; } } if (DeallocM) { if (isBodyEmpty(DeallocM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) { Transaction Trans(TA); TA.remove(DeallocM->getSourceRange()); } if (FinalizeM) { Transaction Trans(TA); TA.remove(FinalizeM->getSourceRange()); } } else if (FinalizeM) { if (isBodyEmpty(FinalizeM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) { Transaction Trans(TA); TA.remove(FinalizeM->getSourceRange()); } else { Transaction Trans(TA); TA.replaceText(FinalizeM->getSelectorStartLoc(), "finalize", "dealloc"); } } } } void trans::removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass) { EmptyStatementsRemover(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl()); cleanupDeallocOrFinalize(pass); for (unsigned 
i = 0, e = pass.ARCMTMacroLocs.size(); i != e; ++i) { Transaction Trans(pass.TA); pass.TA.remove(pass.ARCMTMacroLocs[i]); } }
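// A hedged example of the cleanup above, assuming earlier passes (e.g.
// removeRetainReleaseDealloc) already rewrote the message sends and left only
// empty ';' leftovers behind:
//
//   - (void)dealloc {
//     ;   // leftover from a removed [_ivar release];
//     ;   // leftover from a removed [super dealloc];
//   }
//
// isBodyEmpty() then finds nothing but removable statements, so the whole
// -dealloc is deleted; a GC -finalize with a non-empty body is instead
// renamed to -dealloc by the replaceText() call in cleanupDeallocOrFinalize().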
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
//===--- TransUnusedInitDelegate.cpp - Transformations to ARC mode --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Transformations: //===----------------------------------------------------------------------===// // // rewriteUnusedInitDelegate: // // Rewrites an unused result of calling a delegate initialization, to assigning // the result to self. // e.g // [self init]; // ----> // self = [self init]; // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/Sema/SemaDiagnostic.h" using namespace clang; using namespace arcmt; using namespace trans; namespace { class UnusedInitRewriter : public RecursiveASTVisitor<UnusedInitRewriter> { Stmt *Body; MigrationPass &Pass; ExprSet Removables; public: UnusedInitRewriter(MigrationPass &pass) : Body(nullptr), Pass(pass) { } void transformBody(Stmt *body, Decl *ParentD) { Body = body; collectRemovables(body, Removables); TraverseStmt(body); } bool VisitObjCMessageExpr(ObjCMessageExpr *ME) { if (ME->isDelegateInitCall() && isRemovable(ME) && Pass.TA.hasDiagnostic(diag::err_arc_unused_init_message, ME->getExprLoc())) { Transaction Trans(Pass.TA); Pass.TA.clearDiagnostic(diag::err_arc_unused_init_message, ME->getExprLoc()); SourceRange ExprRange = ME->getSourceRange(); Pass.TA.insert(ExprRange.getBegin(), "if (!(self = "); std::string retStr = ")) return "; retStr += getNilString(Pass); Pass.TA.insertAfterToken(ExprRange.getEnd(), retStr); } return true; } private: bool isRemovable(Expr *E) const { return Removables.count(E); } }; } // anonymous namespace void trans::rewriteUnusedInitDelegate(MigrationPass &pass) { BodyTransform<UnusedInitRewriter> trans(pass); trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl()); }
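// Worth noting: the insertion above produces a guarded assignment, which is
// slightly stronger than the bare "self = [self init]" shown in the header
// comment. A hedged example (the selector is made up):
//
//   [self initWithStyle:0];   // unused delegate-init result, an ARC error
//     -->
//   if (!(self = [self initWithStyle:0])) return nil;
//
// The trailing literal comes from getNilString(Pass), so "nil" is the typical
// spelling rather than a guarantee.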
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransProtectedScope.cpp
//===--- TransProtectedScope.cpp - Transformations to ARC mode ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Adds brackets in case statements that "contain" initialization of retaining // variable, thus emitting the "switch case is in protected scope" error. // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/Sema/SemaDiagnostic.h" using namespace clang; using namespace arcmt; using namespace trans; namespace { class LocalRefsCollector : public RecursiveASTVisitor<LocalRefsCollector> { SmallVectorImpl<DeclRefExpr *> &Refs; public: LocalRefsCollector(SmallVectorImpl<DeclRefExpr *> &refs) : Refs(refs) { } bool VisitDeclRefExpr(DeclRefExpr *E) { if (ValueDecl *D = E->getDecl()) if (D->getDeclContext()->getRedeclContext()->isFunctionOrMethod()) Refs.push_back(E); return true; } }; struct CaseInfo { SwitchCase *SC; SourceRange Range; enum { St_Unchecked, St_CannotFix, St_Fixed } State; CaseInfo() : SC(nullptr), State(St_Unchecked) {} CaseInfo(SwitchCase *S, SourceRange Range) : SC(S), Range(Range), State(St_Unchecked) {} }; class CaseCollector : public RecursiveASTVisitor<CaseCollector> { ParentMap &PMap; SmallVectorImpl<CaseInfo> &Cases; public: CaseCollector(ParentMap &PMap, SmallVectorImpl<CaseInfo> &Cases) : PMap(PMap), Cases(Cases) { } bool VisitSwitchStmt(SwitchStmt *S) { SwitchCase *Curr = S->getSwitchCaseList(); if (!Curr) return true; Stmt *Parent = getCaseParent(Curr); Curr = Curr->getNextSwitchCase(); // Make sure all case statements are in the same scope. while (Curr) { if (getCaseParent(Curr) != Parent) return true; Curr = Curr->getNextSwitchCase(); } SourceLocation NextLoc = S->getLocEnd(); Curr = S->getSwitchCaseList(); // We iterate over case statements in reverse source-order. while (Curr) { Cases.push_back(CaseInfo(Curr,SourceRange(Curr->getLocStart(), NextLoc))); NextLoc = Curr->getLocStart(); Curr = Curr->getNextSwitchCase(); } return true; } Stmt *getCaseParent(SwitchCase *S) { Stmt *Parent = PMap.getParent(S); while (Parent && (isa<SwitchCase>(Parent) || isa<LabelStmt>(Parent))) Parent = PMap.getParent(Parent); return Parent; } }; class ProtectedScopeFixer { MigrationPass &Pass; SourceManager &SM; SmallVector<CaseInfo, 16> Cases; SmallVector<DeclRefExpr *, 16> LocalRefs; public: ProtectedScopeFixer(BodyContext &BodyCtx) : Pass(BodyCtx.getMigrationContext().Pass), SM(Pass.Ctx.getSourceManager()) { CaseCollector(BodyCtx.getParentMap(), Cases) .TraverseStmt(BodyCtx.getTopStmt()); LocalRefsCollector(LocalRefs).TraverseStmt(BodyCtx.getTopStmt()); SourceRange BodyRange = BodyCtx.getTopStmt()->getSourceRange(); const CapturedDiagList &DiagList = Pass.getDiags(); // Copy the diagnostics so we don't have to worry about invaliding iterators // from the diagnostic list. 
SmallVector<StoredDiagnostic, 16> StoredDiags; StoredDiags.append(DiagList.begin(), DiagList.end()); SmallVectorImpl<StoredDiagnostic>::iterator I = StoredDiags.begin(), E = StoredDiags.end(); while (I != E) { if (I->getID() == diag::err_switch_into_protected_scope && isInRange(I->getLocation(), BodyRange)) { handleProtectedScopeError(I, E); continue; } ++I; } } void handleProtectedScopeError( SmallVectorImpl<StoredDiagnostic>::iterator &DiagI, SmallVectorImpl<StoredDiagnostic>::iterator DiagE){ Transaction Trans(Pass.TA); assert(DiagI->getID() == diag::err_switch_into_protected_scope); SourceLocation ErrLoc = DiagI->getLocation(); bool handledAllNotes = true; ++DiagI; for (; DiagI != DiagE && DiagI->getLevel() == DiagnosticsEngine::Note; ++DiagI) { if (!handleProtectedNote(*DiagI)) handledAllNotes = false; } if (handledAllNotes) Pass.TA.clearDiagnostic(diag::err_switch_into_protected_scope, ErrLoc); } bool handleProtectedNote(const StoredDiagnostic &Diag) { assert(Diag.getLevel() == DiagnosticsEngine::Note); for (unsigned i = 0; i != Cases.size(); i++) { CaseInfo &info = Cases[i]; if (isInRange(Diag.getLocation(), info.Range)) { if (info.State == CaseInfo::St_Unchecked) tryFixing(info); assert(info.State != CaseInfo::St_Unchecked); if (info.State == CaseInfo::St_Fixed) { Pass.TA.clearDiagnostic(Diag.getID(), Diag.getLocation()); return true; } return false; } } return false; } void tryFixing(CaseInfo &info) { assert(info.State == CaseInfo::St_Unchecked); if (hasVarReferencedOutside(info)) { info.State = CaseInfo::St_CannotFix; return; } Pass.TA.insertAfterToken(info.SC->getColonLoc(), " {"); Pass.TA.insert(info.Range.getEnd(), "}\n"); info.State = CaseInfo::St_Fixed; } bool hasVarReferencedOutside(CaseInfo &info) { for (unsigned i = 0, e = LocalRefs.size(); i != e; ++i) { DeclRefExpr *DRE = LocalRefs[i]; if (isInRange(DRE->getDecl()->getLocation(), info.Range) && !isInRange(DRE->getLocation(), info.Range)) return true; } return false; } bool isInRange(SourceLocation Loc, SourceRange R) { if (Loc.isInvalid()) return false; return !SM.isBeforeInTranslationUnit(Loc, R.getBegin()) && SM.isBeforeInTranslationUnit(Loc, R.getEnd()); } }; } // anonymous namespace void ProtectedScopeTraverser::traverseBody(BodyContext &BodyCtx) { ProtectedScopeFixer Fix(BodyCtx); }
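// A hedged before/after example of the fix applied by tryFixing() above
// (identifiers are made up). Declaring an ObjC pointer inside an unbraced
// case triggers the "switch case is in protected scope" error under ARC:
//
//   case 0:
//     NSString *name = @"x";
//     break;
//     -->
//   case 0: {
//     NSString *name = @"x";
//     break;
//   }
//
// The closing brace lands just before the next case label (the CaseInfo range
// built by CaseCollector), and the braces are only inserted when
// hasVarReferencedOutside() shows no other case references the local.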
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
//===--- TransBlockObjCVariable.cpp - Transformations to ARC mode ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // rewriteBlockObjCVariable: // // Adding __block to an obj-c variable could be either because the variable // is used for output storage or the user wanted to break a retain cycle. // This transformation checks whether a reference of the variable for the block // is actually needed (it is assigned to or its address is taken) or not. // If the reference is not needed it will assume __block was added to break a // cycle so it will remove '__block' and add __weak/__unsafe_unretained. // e.g // // __block Foo *x; // bar(^ { [x cake]; }); // ----> // __weak Foo *x; // bar(^ { [x cake]; }); // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/Basic/SourceManager.h" using namespace clang; using namespace arcmt; using namespace trans; namespace { class RootBlockObjCVarRewriter : public RecursiveASTVisitor<RootBlockObjCVarRewriter> { llvm::DenseSet<VarDecl *> &VarsToChange; class BlockVarChecker : public RecursiveASTVisitor<BlockVarChecker> { VarDecl *Var; typedef RecursiveASTVisitor<BlockVarChecker> base; public: BlockVarChecker(VarDecl *var) : Var(var) { } bool TraverseImplicitCastExpr(ImplicitCastExpr *castE) { if (DeclRefExpr * ref = dyn_cast<DeclRefExpr>(castE->getSubExpr())) { if (ref->getDecl() == Var) { if (castE->getCastKind() == CK_LValueToRValue) return true; // Using the value of the variable. if (castE->getCastKind() == CK_NoOp && castE->isLValue() && Var->getASTContext().getLangOpts().CPlusPlus) return true; // Binding to const C++ reference. } } return base::TraverseImplicitCastExpr(castE); } bool VisitDeclRefExpr(DeclRefExpr *E) { if (E->getDecl() == Var) return false; // The reference of the variable, and not just its value, // is needed. 
return true; } }; public: RootBlockObjCVarRewriter(llvm::DenseSet<VarDecl *> &VarsToChange) : VarsToChange(VarsToChange) { } bool VisitBlockDecl(BlockDecl *block) { SmallVector<VarDecl *, 4> BlockVars; for (const auto &I : block->captures()) { VarDecl *var = I.getVariable(); if (I.isByRef() && var->getType()->isObjCObjectPointerType() && isImplicitStrong(var->getType())) { BlockVars.push_back(var); } } for (unsigned i = 0, e = BlockVars.size(); i != e; ++i) { VarDecl *var = BlockVars[i]; BlockVarChecker checker(var); bool onlyValueOfVarIsNeeded = checker.TraverseStmt(block->getBody()); if (onlyValueOfVarIsNeeded) VarsToChange.insert(var); else VarsToChange.erase(var); } return true; } private: bool isImplicitStrong(QualType ty) { if (isa<AttributedType>(ty.getTypePtr())) return false; return ty.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong; } }; class BlockObjCVarRewriter : public RecursiveASTVisitor<BlockObjCVarRewriter> { llvm::DenseSet<VarDecl *> &VarsToChange; public: BlockObjCVarRewriter(llvm::DenseSet<VarDecl *> &VarsToChange) : VarsToChange(VarsToChange) { } bool TraverseBlockDecl(BlockDecl *block) { RootBlockObjCVarRewriter(VarsToChange).TraverseDecl(block); return true; } }; } // anonymous namespace void BlockObjCVariableTraverser::traverseBody(BodyContext &BodyCtx) { MigrationPass &Pass = BodyCtx.getMigrationContext().Pass; llvm::DenseSet<VarDecl *> VarsToChange; BlockObjCVarRewriter trans(VarsToChange); trans.TraverseStmt(BodyCtx.getTopStmt()); for (llvm::DenseSet<VarDecl *>::iterator I = VarsToChange.begin(), E = VarsToChange.end(); I != E; ++I) { VarDecl *var = *I; BlocksAttr *attr = var->getAttr<BlocksAttr>(); if(!attr) continue; bool useWeak = canApplyWeak(Pass.Ctx, var->getType()); SourceManager &SM = Pass.Ctx.getSourceManager(); Transaction Trans(Pass.TA); Pass.TA.replaceText(SM.getExpansionLoc(attr->getLocation()), "__block", useWeak ? "__weak" : "__unsafe_unretained"); } }
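// Two hedged examples of the decision made above (the types are
// hypothetical). When a block only reads the captured variable, __block is
// assumed to exist to break a retain cycle and is rewritten, falling back to
// __unsafe_unretained when the class cannot be __weak; when the block assigns
// to the variable, its reference is genuinely needed and it is left alone:
//
//   __block Legacy *obs; use(^{ [obs ping]; });
//     -->  __unsafe_unretained Legacy *obs;    // value-only use, no weak support
//
//   __block Foo *cur; use(^{ cur = [Foo new]; });
//     (unchanged)                               // assigned inside the block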
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransProperties.cpp
//===--- TransProperties.cpp - Transformations to ARC mode ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// rewriteProperties:
//
// - Adds strong/weak/unsafe_unretained ownership specifier to properties that
//   are missing one.
// - Migrates properties from (retain) to (strong) and (assign) to
//   (unsafe_unretained/weak).
// - If a property is synthesized, adds the ownership specifier in the ivar
//   backing the property.
//
//  @interface Foo : NSObject {
//      NSObject *x;
//  }
//  @property (assign) id x;
//  @end
// ---->
//  @interface Foo : NSObject {
//      NSObject *__weak x;
//  }
//  @property (weak) id x;
//  @end
//
//===----------------------------------------------------------------------===//

#include "Transforms.h"
#include "Internals.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Sema/SemaDiagnostic.h"
#include <map>

using namespace clang;
using namespace arcmt;
using namespace trans;

namespace {

class PropertiesRewriter {
  MigrationContext &MigrateCtx;
  MigrationPass &Pass;
  ObjCImplementationDecl *CurImplD;

  enum PropActionKind {
    PropAction_None,
    PropAction_RetainReplacedWithStrong,
    PropAction_AssignRemoved,
    PropAction_AssignRewritten,
    PropAction_MaybeAddWeakOrUnsafe
  };

  struct PropData {
    ObjCPropertyDecl *PropD;
    ObjCIvarDecl *IvarD;
    ObjCPropertyImplDecl *ImplD;

    PropData(ObjCPropertyDecl *propD)
      : PropD(propD), IvarD(nullptr), ImplD(nullptr) {}
  };

  typedef SmallVector<PropData, 2> PropsTy;
  typedef std::map<unsigned, PropsTy> AtPropDeclsTy;
  AtPropDeclsTy AtProps;
  llvm::DenseMap<IdentifierInfo *, PropActionKind> ActionOnProp;

public:
  explicit PropertiesRewriter(MigrationContext &MigrateCtx)
    : MigrateCtx(MigrateCtx), Pass(MigrateCtx.Pass) { }

  static void collectProperties(ObjCContainerDecl *D, AtPropDeclsTy &AtProps,
                                AtPropDeclsTy *PrevAtProps = nullptr) {
    for (auto *Prop : D->properties()) {
      if (Prop->getAtLoc().isInvalid())
        continue;
      unsigned RawLoc = Prop->getAtLoc().getRawEncoding();
      if (PrevAtProps)
        if (PrevAtProps->find(RawLoc) != PrevAtProps->end())
          continue;
      PropsTy &props = AtProps[RawLoc];
      props.push_back(Prop);
    }
  }

  void doTransform(ObjCImplementationDecl *D) {
    CurImplD = D;
    ObjCInterfaceDecl *iface = D->getClassInterface();
    if (!iface)
      return;

    collectProperties(iface, AtProps);

    typedef DeclContext::specific_decl_iterator<ObjCPropertyImplDecl>
        prop_impl_iterator;
    for (prop_impl_iterator I = prop_impl_iterator(D->decls_begin()),
                            E = prop_impl_iterator(D->decls_end());
         I != E; ++I) {
      ObjCPropertyImplDecl *implD = *I;
      if (implD->getPropertyImplementation() !=
          ObjCPropertyImplDecl::Synthesize)
        continue;
      ObjCPropertyDecl *propD = implD->getPropertyDecl();
      if (!propD || propD->isInvalidDecl())
        continue;
      ObjCIvarDecl *ivarD = implD->getPropertyIvarDecl();
      if (!ivarD || ivarD->isInvalidDecl())
        continue;
      unsigned rawAtLoc = propD->getAtLoc().getRawEncoding();
      AtPropDeclsTy::iterator findAtLoc = AtProps.find(rawAtLoc);
      if (findAtLoc == AtProps.end())
        continue;

      PropsTy &props = findAtLoc->second;
      for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
        if (I->PropD == propD) {
          I->IvarD = ivarD;
          I->ImplD = implD;
          break;
        }
      }
    }

    for (AtPropDeclsTy::iterator I = AtProps.begin(), E = AtProps.end();
         I != E; ++I) {
      SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
      PropsTy &props = I->second;
      if (!getPropertyType(props)->isObjCRetainableType())
        continue;
      if (hasIvarWithExplicitARCOwnership(props))
        continue;

      Transaction Trans(Pass.TA);
      rewriteProperty(props, atLoc);
    }

    AtPropDeclsTy AtExtProps;
    // Look through extensions.
    for (auto *Ext : iface->visible_extensions())
      collectProperties(Ext, AtExtProps, &AtProps);

    for (AtPropDeclsTy::iterator I = AtExtProps.begin(), E = AtExtProps.end();
         I != E; ++I) {
      SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
      PropsTy &props = I->second;
      Transaction Trans(Pass.TA);
      doActionForExtensionProp(props, atLoc);
    }
  }

private:
  void doPropAction(PropActionKind kind,
                    PropsTy &props, SourceLocation atLoc,
                    bool markAction = true) {
    if (markAction)
      for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
        ActionOnProp[I->PropD->getIdentifier()] = kind;

    switch (kind) {
    case PropAction_None:
      return;
    case PropAction_RetainReplacedWithStrong: {
      StringRef toAttr = "strong";
      MigrateCtx.rewritePropertyAttribute("retain", toAttr, atLoc);
      return;
    }
    case PropAction_AssignRemoved:
      return removeAssignForDefaultStrong(props, atLoc);
    case PropAction_AssignRewritten:
      return rewriteAssign(props, atLoc);
    case PropAction_MaybeAddWeakOrUnsafe:
      return maybeAddWeakOrUnsafeUnretainedAttr(props, atLoc);
    }
  }

  void doActionForExtensionProp(PropsTy &props, SourceLocation atLoc) {
    llvm::DenseMap<IdentifierInfo *, PropActionKind>::iterator I;
    I = ActionOnProp.find(props[0].PropD->getIdentifier());
    if (I == ActionOnProp.end())
      return;

    doPropAction(I->second, props, atLoc, false);
  }

  void rewriteProperty(PropsTy &props, SourceLocation atLoc) {
    ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);

    if (propAttrs & (ObjCPropertyDecl::OBJC_PR_copy |
                     ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
                     ObjCPropertyDecl::OBJC_PR_strong |
                     ObjCPropertyDecl::OBJC_PR_weak))
      return;

    if (propAttrs & ObjCPropertyDecl::OBJC_PR_retain) {
      // strong is the default.
      return doPropAction(PropAction_RetainReplacedWithStrong, props, atLoc);
    }

    bool HasIvarAssignedAPlusOneObject = hasIvarAssignedAPlusOneObject(props);

    if (propAttrs & ObjCPropertyDecl::OBJC_PR_assign) {
      if (HasIvarAssignedAPlusOneObject)
        return doPropAction(PropAction_AssignRemoved, props, atLoc);
      return doPropAction(PropAction_AssignRewritten, props, atLoc);
    }

    if (HasIvarAssignedAPlusOneObject ||
        (Pass.isGCMigration() && !hasGCWeak(props, atLoc)))
      return; // 'strong' by default.

    return doPropAction(PropAction_MaybeAddWeakOrUnsafe, props, atLoc);
  }

  void removeAssignForDefaultStrong(PropsTy &props,
                                    SourceLocation atLoc) const {
    removeAttribute("retain", atLoc);
    if (!removeAttribute("assign", atLoc))
      return;

    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
      if (I->ImplD)
        Pass.TA.clearDiagnostic(diag::err_arc_strong_property_ownership,
                                diag::err_arc_assign_property_ownership,
                                diag::err_arc_inconsistent_property_ownership,
                                I->IvarD->getLocation());
    }
  }

  void rewriteAssign(PropsTy &props, SourceLocation atLoc) const {
    bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
                                  /*AllowOnUnknownClass=*/Pass.isGCMigration());
    const char *toWhich =
      (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "strong" :
      (canUseWeak ? "weak" : "unsafe_unretained");

    bool rewroteAttr = rewriteAttribute("assign", toWhich, atLoc);
    if (!rewroteAttr)
      canUseWeak = false;

    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
      if (isUserDeclared(I->IvarD)) {
        if (I->IvarD &&
            I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak) {
          const char *toWhich =
            (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "__strong " :
              (canUseWeak ? "__weak " : "__unsafe_unretained ");
          Pass.TA.insert(I->IvarD->getLocation(), toWhich);
        }
      }
      if (I->ImplD)
        Pass.TA.clearDiagnostic(diag::err_arc_strong_property_ownership,
                                diag::err_arc_assign_property_ownership,
                                diag::err_arc_inconsistent_property_ownership,
                                I->IvarD->getLocation());
    }
  }

  void maybeAddWeakOrUnsafeUnretainedAttr(PropsTy &props,
                                          SourceLocation atLoc) const {
    bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
                                  /*AllowOnUnknownClass=*/Pass.isGCMigration());

    bool addedAttr = addAttribute(canUseWeak ? "weak" : "unsafe_unretained",
                                  atLoc);
    if (!addedAttr)
      canUseWeak = false;

    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
      if (isUserDeclared(I->IvarD)) {
        if (I->IvarD &&
            I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
          Pass.TA.insert(I->IvarD->getLocation(),
                         canUseWeak ? "__weak " : "__unsafe_unretained ");
      }
      if (I->ImplD) {
        Pass.TA.clearDiagnostic(diag::err_arc_strong_property_ownership,
                                diag::err_arc_assign_property_ownership,
                                diag::err_arc_inconsistent_property_ownership,
                                I->IvarD->getLocation());
        Pass.TA.clearDiagnostic(
                           diag::err_arc_objc_property_default_assign_on_object,
                           I->ImplD->getLocation());
      }
    }
  }

  bool removeAttribute(StringRef fromAttr, SourceLocation atLoc) const {
    return MigrateCtx.removePropertyAttribute(fromAttr, atLoc);
  }

  bool rewriteAttribute(StringRef fromAttr, StringRef toAttr,
                        SourceLocation atLoc) const {
    return MigrateCtx.rewritePropertyAttribute(fromAttr, toAttr, atLoc);
  }

  bool addAttribute(StringRef attr, SourceLocation atLoc) const {
    return MigrateCtx.addPropertyAttribute(attr, atLoc);
  }

  class PlusOneAssign : public RecursiveASTVisitor<PlusOneAssign> {
    ObjCIvarDecl *Ivar;
  public:
    PlusOneAssign(ObjCIvarDecl *D) : Ivar(D) {}

    bool VisitBinAssign(BinaryOperator *E) {
      Expr *lhs = E->getLHS()->IgnoreParenImpCasts();
      if (ObjCIvarRefExpr *RE = dyn_cast<ObjCIvarRefExpr>(lhs)) {
        if (RE->getDecl() != Ivar)
          return true;

        if (isPlusOneAssign(E))
          return false;
      }

      return true;
    }
  };

  bool hasIvarAssignedAPlusOneObject(PropsTy &props) const {
    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
      PlusOneAssign oneAssign(I->IvarD);
      bool notFound = oneAssign.TraverseDecl(CurImplD);
      if (!notFound)
        return true;
    }

    return false;
  }

  bool hasIvarWithExplicitARCOwnership(PropsTy &props) const {
    if (Pass.isGCMigration())
      return false;

    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
      if (isUserDeclared(I->IvarD)) {
        if (isa<AttributedType>(I->IvarD->getType()))
          return true;
        if (I->IvarD->getType().getLocalQualifiers().getObjCLifetime()
              != Qualifiers::OCL_Strong)
          return true;
      }
    }

    return false;
  }

  // \brief Returns true if all declarations in the @property have GC __weak.
  bool hasGCWeak(PropsTy &props, SourceLocation atLoc) const {
    if (!Pass.isGCMigration())
      return false;
    if (props.empty())
      return false;
    return MigrateCtx.AtPropsWeak.count(atLoc.getRawEncoding());
  }

  bool isUserDeclared(ObjCIvarDecl *ivarD) const {
    return ivarD && !ivarD->getSynthesize();
  }

  QualType getPropertyType(PropsTy &props) const {
    assert(!props.empty());
    QualType ty = props[0].PropD->getType().getUnqualifiedType();

#ifndef NDEBUG
    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
      assert(ty == I->PropD->getType().getUnqualifiedType());
#endif

    return ty;
  }

  ObjCPropertyDecl::PropertyAttributeKind
  getPropertyAttrs(PropsTy &props) const {
    assert(!props.empty());
    ObjCPropertyDecl::PropertyAttributeKind
      attrs = props[0].PropD->getPropertyAttributesAsWritten();

#ifndef NDEBUG
    for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
      assert(attrs == I->PropD->getPropertyAttributesAsWritten());
#endif

    return attrs;
  }
};

} // anonymous namespace

void PropertyRewriteTraverser::traverseObjCImplementation(
                                           ObjCImplementationContext &ImplCtx) {
  PropertiesRewriter(ImplCtx.getMigrationContext())
      .doTransform(ImplCtx.getImplementationDecl());
}
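A minimal sketch (not part of the original file) of how PropertyRewriteTraverser is typically driven; it assumes a fully initialized MigrationPass named `pass` and mirrors the traverser registration done elsewhere in this library (Transforms.cpp):

  MigrationContext MigrateCtx(pass);
  // The context takes ownership of the traverser and invokes
  // traverseObjCImplementation() for each @implementation in the TU.
  MigrateCtx.addTraverser(new PropertyRewriteTraverser());
  MigrateCtx.traverse(pass.Ctx.getTranslationUnitDecl());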
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
//===--- TransARCAssign.cpp - Transformations to ARC mode -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// makeAssignARCSafe:
//
// Add '__strong' where appropriate.
//
//  for (id x in collection) {
//    x = 0;
//  }
// ---->
//  for (__strong id x in collection) {
//    x = 0;
//  }
//
//===----------------------------------------------------------------------===//

#include "Transforms.h"
#include "Internals.h"
#include "clang/AST/ASTContext.h"
#include "clang/Sema/SemaDiagnostic.h"

using namespace clang;
using namespace arcmt;
using namespace trans;

namespace {

class ARCAssignChecker : public RecursiveASTVisitor<ARCAssignChecker> {
  MigrationPass &Pass;
  llvm::DenseSet<VarDecl *> ModifiedVars;

public:
  ARCAssignChecker(MigrationPass &pass) : Pass(pass) { }

  bool VisitBinaryOperator(BinaryOperator *Exp) {
    if (Exp->getType()->isDependentType())
      return true;

    Expr *E = Exp->getLHS();
    SourceLocation OrigLoc = E->getExprLoc();
    SourceLocation Loc = OrigLoc;
    DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
    if (declRef && isa<VarDecl>(declRef->getDecl())) {
      ASTContext &Ctx = Pass.Ctx;
      Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(Ctx, &Loc);
      if (IsLV != Expr::MLV_ConstQualified)
        return true;
      VarDecl *var = cast<VarDecl>(declRef->getDecl());
      if (var->isARCPseudoStrong()) {
        Transaction Trans(Pass.TA);
        if (Pass.TA.clearDiagnostic(diag::err_typecheck_arr_assign_enumeration,
                                    Exp->getOperatorLoc())) {
          if (!ModifiedVars.count(var)) {
            TypeLoc TLoc = var->getTypeSourceInfo()->getTypeLoc();
            Pass.TA.insert(TLoc.getBeginLoc(), "__strong ");
            ModifiedVars.insert(var);
          }
        }
      }
    }

    return true;
  }
};

} // anonymous namespace

void trans::makeAssignARCSafe(MigrationPass &pass) {
  ARCAssignChecker assignCheck(pass);
  assignCheck.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
}
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
//===--- TransAutoreleasePool.cpp - Transformations to ARC mode -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// rewriteAutoreleasePool:
//
// Calls to NSAutoreleasePools will be rewritten as an @autoreleasepool scope.
//
//  NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
//  ...
//  [pool release];
// ---->
//  @autoreleasepool {
//  ...
//  }
//
// An NSAutoreleasePool will not be touched if:
// - There is not a corresponding -release/-drain in the same scope
// - Not all references of the NSAutoreleasePool variable can be removed
// - There is a variable that is declared inside the intended @autoreleasepool
//   scope which is also used outside it.
//
//===----------------------------------------------------------------------===//

#include "Transforms.h"
#include "Internals.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Sema/SemaDiagnostic.h"
#include <map>

using namespace clang;
using namespace arcmt;
using namespace trans;

namespace {

class ReleaseCollector : public RecursiveASTVisitor<ReleaseCollector> {
  Decl *Dcl;
  SmallVectorImpl<ObjCMessageExpr *> &Releases;

public:
  ReleaseCollector(Decl *D, SmallVectorImpl<ObjCMessageExpr *> &releases)
    : Dcl(D), Releases(releases) { }

  bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
    if (!E->isInstanceMessage())
      return true;
    if (E->getMethodFamily() != OMF_release)
      return true;
    Expr *instance = E->getInstanceReceiver()->IgnoreParenCasts();
    if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(instance)) {
      if (DE->getDecl() == Dcl)
        Releases.push_back(E);
    }
    return true;
  }
};

}

namespace {

class AutoreleasePoolRewriter
                         : public RecursiveASTVisitor<AutoreleasePoolRewriter> {
public:
  AutoreleasePoolRewriter(MigrationPass &pass)
    : Body(nullptr), Pass(pass) {
    PoolII = &pass.Ctx.Idents.get("NSAutoreleasePool");
    DrainSel = pass.Ctx.Selectors.getNullarySelector(
                                                 &pass.Ctx.Idents.get("drain"));
  }

  void transformBody(Stmt *body, Decl *ParentD) {
    Body = body;
    TraverseStmt(body);
  }

  ~AutoreleasePoolRewriter() {
    SmallVector<VarDecl *, 8> VarsToHandle;

    for (std::map<VarDecl *, PoolVarInfo>::iterator
           I = PoolVars.begin(), E = PoolVars.end(); I != E; ++I) {
      VarDecl *var = I->first;
      PoolVarInfo &info = I->second;

      // Check that we can handle/rewrite all references of the pool.

      clearRefsIn(info.Dcl, info.Refs);
      for (SmallVectorImpl<PoolScope>::iterator
             scpI = info.Scopes.begin(),
             scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
        PoolScope &scope = *scpI;
        clearRefsIn(*scope.Begin, info.Refs);
        clearRefsIn(*scope.End, info.Refs);
        clearRefsIn(scope.Releases.begin(), scope.Releases.end(), info.Refs);
      }

      // Even if one reference is not handled we will not do anything about
      // that pool variable.
      if (info.Refs.empty())
        VarsToHandle.push_back(var);
    }

    for (unsigned i = 0, e = VarsToHandle.size(); i != e; ++i) {
      PoolVarInfo &info = PoolVars[VarsToHandle[i]];

      Transaction Trans(Pass.TA);

      clearUnavailableDiags(info.Dcl);
      Pass.TA.removeStmt(info.Dcl);

      // Add "@autoreleasepool { }"
      for (SmallVectorImpl<PoolScope>::iterator
             scpI = info.Scopes.begin(),
             scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
        PoolScope &scope = *scpI;
        clearUnavailableDiags(*scope.Begin);
        clearUnavailableDiags(*scope.End);
        if (scope.IsFollowedBySimpleReturnStmt) {
          // Include the return in the scope.
          Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
          Pass.TA.removeStmt(*scope.End);
          Stmt::child_iterator retI = scope.End;
          ++retI;
          SourceLocation afterSemi = findLocationAfterSemi((*retI)->getLocEnd(),
                                                           Pass.Ctx);
          assert(afterSemi.isValid() &&
                 "Didn't we check before setting IsFollowedBySimpleReturnStmt "
                 "to true?");
          Pass.TA.insertAfterToken(afterSemi, "\n}");
          Pass.TA.increaseIndentation(
                                SourceRange(scope.getIndentedRange().getBegin(),
                                            (*retI)->getLocEnd()),
                                      scope.CompoundParent->getLocStart());
        } else {
          Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
          Pass.TA.replaceStmt(*scope.End, "}");
          Pass.TA.increaseIndentation(scope.getIndentedRange(),
                                      scope.CompoundParent->getLocStart());
        }
      }

      // Remove rest of pool var references.
      for (SmallVectorImpl<PoolScope>::iterator
             scpI = info.Scopes.begin(),
             scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
        PoolScope &scope = *scpI;
        for (SmallVectorImpl<ObjCMessageExpr *>::iterator
               relI = scope.Releases.begin(),
               relE = scope.Releases.end(); relI != relE; ++relI) {
          clearUnavailableDiags(*relI);
          Pass.TA.removeStmt(*relI);
        }
      }
    }
  }

  bool VisitCompoundStmt(CompoundStmt *S) {
    SmallVector<PoolScope, 4> Scopes;

    for (Stmt::child_iterator
           I = S->body_begin(), E = S->body_end(); I != E; ++I) {
      Stmt *child = getEssential(*I);
      if (DeclStmt *DclS = dyn_cast<DeclStmt>(child)) {
        if (DclS->isSingleDecl()) {
          if (VarDecl *VD = dyn_cast<VarDecl>(DclS->getSingleDecl())) {
            if (isNSAutoreleasePool(VD->getType())) {
              PoolVarInfo &info = PoolVars[VD];
              info.Dcl = DclS;
              collectRefs(VD, S, info.Refs);
              // Does this statement follow the pattern:
              // NSAutoreleasePool * pool = [NSAutoreleasePool new];
              if (isPoolCreation(VD->getInit())) {
                Scopes.push_back(PoolScope());
                Scopes.back().PoolVar = VD;
                Scopes.back().CompoundParent = S;
                Scopes.back().Begin = I;
              }
            }
          }
        }
      } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(child)) {
        if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(bop->getLHS())) {
          if (VarDecl *VD = dyn_cast<VarDecl>(dref->getDecl())) {
            // Does this statement follow the pattern:
            // pool = [NSAutoreleasePool new];
            if (isNSAutoreleasePool(VD->getType()) &&
                isPoolCreation(bop->getRHS())) {
              Scopes.push_back(PoolScope());
              Scopes.back().PoolVar = VD;
              Scopes.back().CompoundParent = S;
              Scopes.back().Begin = I;
            }
          }
        }
      }

      if (Scopes.empty())
        continue;

      if (isPoolDrain(Scopes.back().PoolVar, child)) {
        PoolScope &scope = Scopes.back();
        scope.End = I;
        handlePoolScope(scope, S);
        Scopes.pop_back();
      }
    }
    return true;
  }

private:
  void clearUnavailableDiags(Stmt *S) {
    if (S)
      Pass.TA.clearDiagnostic(diag::err_unavailable,
                              diag::err_unavailable_message,
                              S->getSourceRange());
  }

  struct PoolScope {
    VarDecl *PoolVar;
    CompoundStmt *CompoundParent;
    Stmt::child_iterator Begin;
    Stmt::child_iterator End;
    bool IsFollowedBySimpleReturnStmt;
    SmallVector<ObjCMessageExpr *, 4> Releases;

    PoolScope() : PoolVar(nullptr), CompoundParent(nullptr), Begin(), End(),
                  IsFollowedBySimpleReturnStmt(false) { }

    SourceRange getIndentedRange() const {
      Stmt::child_iterator rangeS = Begin;
      ++rangeS;
      if (rangeS == End)
        return SourceRange();
      Stmt::child_iterator rangeE = Begin;
      for (Stmt::child_iterator I = rangeS; I != End; ++I)
        ++rangeE;
      return SourceRange((*rangeS)->getLocStart(), (*rangeE)->getLocEnd());
    }
  };

  class NameReferenceChecker : public RecursiveASTVisitor<NameReferenceChecker>{
    ASTContext &Ctx;
    SourceRange ScopeRange;
    SourceLocation &referenceLoc, &declarationLoc;

  public:
    NameReferenceChecker(ASTContext &ctx, PoolScope &scope,
                         SourceLocation &referenceLoc,
                         SourceLocation &declarationLoc)
      : Ctx(ctx), referenceLoc(referenceLoc),
        declarationLoc(declarationLoc) {
      ScopeRange = SourceRange((*scope.Begin)->getLocStart(),
                               (*scope.End)->getLocStart());
    }

    bool VisitDeclRefExpr(DeclRefExpr *E) {
      return checkRef(E->getLocation(), E->getDecl()->getLocation());
    }

    bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
      return checkRef(TL.getBeginLoc(), TL.getTypedefNameDecl()->getLocation());
    }

    bool VisitTagTypeLoc(TagTypeLoc TL) {
      return checkRef(TL.getBeginLoc(), TL.getDecl()->getLocation());
    }

  private:
    bool checkRef(SourceLocation refLoc, SourceLocation declLoc) {
      if (isInScope(declLoc)) {
        referenceLoc = refLoc;
        declarationLoc = declLoc;
        return false;
      }
      return true;
    }

    bool isInScope(SourceLocation loc) {
      if (loc.isInvalid())
        return false;

      SourceManager &SM = Ctx.getSourceManager();
      if (SM.isBeforeInTranslationUnit(loc, ScopeRange.getBegin()))
        return false;
      return SM.isBeforeInTranslationUnit(loc, ScopeRange.getEnd());
    }
  };

  void handlePoolScope(PoolScope &scope, CompoundStmt *compoundS) {
    // Check that all names declared inside the scope are not used
    // outside the scope.
    {
      bool nameUsedOutsideScope = false;
      SourceLocation referenceLoc, declarationLoc;

      Stmt::child_iterator SI = scope.End, SE = compoundS->body_end();
      ++SI;
      // Check if the autoreleasepool scope is followed by a simple return
      // statement, in which case we will include the return in the scope.
      if (SI != SE)
        if (ReturnStmt *retS = dyn_cast<ReturnStmt>(*SI))
          if ((retS->getRetValue() == nullptr ||
               isa<DeclRefExpr>(retS->getRetValue()->IgnoreParenCasts())) &&
              findLocationAfterSemi(retS->getLocEnd(), Pass.Ctx).isValid()) {
            scope.IsFollowedBySimpleReturnStmt = true;
            ++SI; // the return will be included in scope, don't check it.
          }

      for (; SI != SE; ++SI) {
        nameUsedOutsideScope = !NameReferenceChecker(Pass.Ctx, scope,
                                                     referenceLoc,
                                              declarationLoc).TraverseStmt(*SI);
        if (nameUsedOutsideScope)
          break;
      }

      // If not all references were cleared it means some
      // variables/typenames/etc declared inside the pool scope are used
      // outside of it. We won't try to rewrite the pool.
      if (nameUsedOutsideScope) {
        Pass.TA.reportError("a name is referenced outside the "
            "NSAutoreleasePool scope that it was declared in", referenceLoc);
        Pass.TA.reportNote("name declared here", declarationLoc);
        Pass.TA.reportNote("intended @autoreleasepool scope begins here",
                           (*scope.Begin)->getLocStart());
        Pass.TA.reportNote("intended @autoreleasepool scope ends here",
                           (*scope.End)->getLocStart());
        return;
      }
    }

    // Collect all releases of the pool; they will be removed.
    {
      ReleaseCollector releaseColl(scope.PoolVar, scope.Releases);
      Stmt::child_iterator I = scope.Begin;
      ++I;
      for (; I != scope.End; ++I)
        releaseColl.TraverseStmt(*I);
    }

    PoolVars[scope.PoolVar].Scopes.push_back(scope);
  }

  bool isPoolCreation(Expr *E) {
    if (!E) return false;
    E = getEssential(E);
    ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
    if (!ME) return false;
    if (ME->getMethodFamily() == OMF_new &&
        ME->getReceiverKind() == ObjCMessageExpr::Class &&
        isNSAutoreleasePool(ME->getReceiverInterface()))
      return true;
    if (ME->getReceiverKind() == ObjCMessageExpr::Instance &&
        ME->getMethodFamily() == OMF_init) {
      Expr *rec = getEssential(ME->getInstanceReceiver());
      if (ObjCMessageExpr *recME = dyn_cast_or_null<ObjCMessageExpr>(rec)) {
        if (recME->getMethodFamily() == OMF_alloc &&
            recME->getReceiverKind() == ObjCMessageExpr::Class &&
            isNSAutoreleasePool(recME->getReceiverInterface()))
          return true;
      }
    }

    return false;
  }

  bool isPoolDrain(VarDecl *poolVar, Stmt *S) {
    if (!S) return false;
    S = getEssential(S);
    ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
    if (!ME) return false;
    if (ME->getReceiverKind() == ObjCMessageExpr::Instance) {
      Expr *rec = getEssential(ME->getInstanceReceiver());
      if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(rec))
        if (dref->getDecl() == poolVar)
          return ME->getMethodFamily() == OMF_release ||
                 ME->getSelector() == DrainSel;
    }

    return false;
  }

  bool isNSAutoreleasePool(ObjCInterfaceDecl *IDecl) {
    return IDecl && IDecl->getIdentifier() == PoolII;
  }

  bool isNSAutoreleasePool(QualType Ty) {
    QualType pointee = Ty->getPointeeType();
    if (pointee.isNull())
      return false;
    if (const ObjCInterfaceType *interT = pointee->getAs<ObjCInterfaceType>())
      return isNSAutoreleasePool(interT->getDecl());
    return false;
  }

  static Expr *getEssential(Expr *E) {
    return cast<Expr>(getEssential((Stmt*)E));
  }
  static Stmt *getEssential(Stmt *S) {
    if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S))
      S = EWC->getSubExpr();
    if (Expr *E = dyn_cast<Expr>(S))
      S = E->IgnoreParenCasts();
    return S;
  }

  Stmt *Body;
  MigrationPass &Pass;

  IdentifierInfo *PoolII;
  Selector DrainSel;

  struct PoolVarInfo {
    DeclStmt *Dcl;
    ExprSet Refs;
    SmallVector<PoolScope, 2> Scopes;

    PoolVarInfo() : Dcl(nullptr) { }
  };

  std::map<VarDecl *, PoolVarInfo> PoolVars;
};

} // anonymous namespace

void trans::rewriteAutoreleasePool(MigrationPass &pass) {
  BodyTransform<AutoreleasePoolRewriter> trans(pass);
  trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
}
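An illustrative input/output pair (hypothetical code, not from the original sources) for the IsFollowedBySimpleReturnStmt case handled above; when the pool scope is immediately followed by a simple return of a plain variable, the return is pulled inside the new scope:

  NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
  id result = [self compute];
  [pool drain];
  return result;
// ---->
  @autoreleasepool {
    id result = [self compute];
    return result;
  }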
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
//===--- TransZeroOutPropsInDealloc.cpp - Transformations to ARC mode -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// removeZeroOutPropsInDealloc:
//
// Removes zero'ing out "strong" @synthesized properties in a -dealloc method.
//
//===----------------------------------------------------------------------===//

#include "Transforms.h"
#include "Internals.h"
#include "clang/AST/ASTContext.h"

using namespace clang;
using namespace arcmt;
using namespace trans;

namespace {

class ZeroOutInDeallocRemover :
                           public RecursiveASTVisitor<ZeroOutInDeallocRemover> {
  typedef RecursiveASTVisitor<ZeroOutInDeallocRemover> base;

  MigrationPass &Pass;

  llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*> SynthesizedProperties;
  ImplicitParamDecl *SelfD;
  ExprSet Removables;
  Selector FinalizeSel;

public:
  ZeroOutInDeallocRemover(MigrationPass &pass) : Pass(pass), SelfD(nullptr) {
    FinalizeSel =
        Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
  }

  bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
    ASTContext &Ctx = Pass.Ctx;
    TransformActions &TA = Pass.TA;

    if (ME->getReceiverKind() != ObjCMessageExpr::Instance)
      return true;
    Expr *receiver = ME->getInstanceReceiver();
    if (!receiver)
      return true;

    DeclRefExpr *refE = dyn_cast<DeclRefExpr>(receiver->IgnoreParenCasts());
    if (!refE || refE->getDecl() != SelfD)
      return true;

    bool BackedBySynthesizeSetter = false;
    for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
         P = SynthesizedProperties.begin(),
         E = SynthesizedProperties.end(); P != E; ++P) {
      ObjCPropertyDecl *PropDecl = P->first;
      if (PropDecl->getSetterName() == ME->getSelector()) {
        BackedBySynthesizeSetter = true;
        break;
      }
    }
    if (!BackedBySynthesizeSetter)
      return true;

    // Remove the setter message if RHS is null
    Transaction Trans(TA);
    Expr *RHS = ME->getArg(0);
    bool RHSIsNull =
      RHS->isNullPointerConstant(Ctx,
                                 Expr::NPC_ValueDependentIsNull);
    if (RHSIsNull && isRemovable(ME))
      TA.removeStmt(ME);

    return true;
  }

  bool VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
    if (isZeroingPropIvar(POE) && isRemovable(POE)) {
      Transaction Trans(Pass.TA);
      Pass.TA.removeStmt(POE);
    }

    return true;
  }

  bool VisitBinaryOperator(BinaryOperator *BOE) {
    if (isZeroingPropIvar(BOE) && isRemovable(BOE)) {
      Transaction Trans(Pass.TA);
      Pass.TA.removeStmt(BOE);
    }

    return true;
  }

  bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
    if (D->getMethodFamily() != OMF_dealloc &&
        !(D->isInstanceMethod() && D->getSelector() == FinalizeSel))
      return true;
    if (!D->hasBody())
      return true;

    ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(D->getDeclContext());
    if (!IMD)
      return true;

    SelfD = D->getSelfDecl();
    collectRemovables(D->getBody(), Removables);

    // For a 'dealloc' method use, find all property implementations in
    // this class implementation.
    for (auto *PID : IMD->property_impls()) {
      if (PID->getPropertyImplementation() ==
          ObjCPropertyImplDecl::Synthesize) {
        ObjCPropertyDecl *PD = PID->getPropertyDecl();
        ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
        if (!(setterM && setterM->isDefined())) {
          ObjCPropertyDecl::PropertyAttributeKind AttrKind =
            PD->getPropertyAttributes();
          if (AttrKind & (ObjCPropertyDecl::OBJC_PR_retain |
                          ObjCPropertyDecl::OBJC_PR_copy |
                          ObjCPropertyDecl::OBJC_PR_strong))
            SynthesizedProperties[PD] = PID;
        }
      }
    }

    // Now, remove all zeroing of ivars etc.
    base::TraverseObjCMethodDecl(D);

    // clear out for next method.
    SynthesizedProperties.clear();
    SelfD = nullptr;
    Removables.clear();
    return true;
  }

  bool TraverseFunctionDecl(FunctionDecl *D) { return true; }
  bool TraverseBlockDecl(BlockDecl *block) { return true; }
  bool TraverseBlockExpr(BlockExpr *block) { return true; }

private:
  bool isRemovable(Expr *E) const {
    return Removables.count(E);
  }

  bool isZeroingPropIvar(Expr *E) {
    E = E->IgnoreParens();
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E))
      return isZeroingPropIvar(BO);
    if (PseudoObjectExpr *PO = dyn_cast<PseudoObjectExpr>(E))
      return isZeroingPropIvar(PO);
    return false;
  }

  bool isZeroingPropIvar(BinaryOperator *BOE) {
    if (BOE->getOpcode() == BO_Comma)
      return isZeroingPropIvar(BOE->getLHS()) &&
             isZeroingPropIvar(BOE->getRHS());
    if (BOE->getOpcode() != BO_Assign)
      return false;

    Expr *LHS = BOE->getLHS();
    if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(LHS)) {
      ObjCIvarDecl *IVDecl = IV->getDecl();
      if (!IVDecl->getType()->isObjCObjectPointerType())
        return false;
      bool IvarBacksPropertySynthesis = false;
      for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
           P = SynthesizedProperties.begin(),
           E = SynthesizedProperties.end(); P != E; ++P) {
        ObjCPropertyImplDecl *PropImpDecl = P->second;
        if (PropImpDecl && PropImpDecl->getPropertyIvarDecl() == IVDecl) {
          IvarBacksPropertySynthesis = true;
          break;
        }
      }
      if (!IvarBacksPropertySynthesis)
        return false;
    } else
      return false;

    return isZero(BOE->getRHS());
  }

  bool isZeroingPropIvar(PseudoObjectExpr *PO) {
    BinaryOperator *BO = dyn_cast<BinaryOperator>(PO->getSyntacticForm());
    if (!BO) return false;
    if (BO->getOpcode() != BO_Assign) return false;

    ObjCPropertyRefExpr *PropRefExp =
      dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParens());
    if (!PropRefExp) return false;

    // TODO: Using implicit property decl.
    if (PropRefExp->isImplicitProperty())
      return false;

    if (ObjCPropertyDecl *PDecl = PropRefExp->getExplicitProperty()) {
      if (!SynthesizedProperties.count(PDecl))
        return false;
    }

    return isZero(cast<OpaqueValueExpr>(BO->getRHS())->getSourceExpr());
  }

  bool isZero(Expr *E) {
    if (E->isNullPointerConstant(Pass.Ctx, Expr::NPC_ValueDependentIsNull))
      return true;

    return isZeroingPropIvar(E);
  }
};

} // anonymous namespace

void trans::removeZeroOutPropsInDeallocFinalize(MigrationPass &pass) {
  ZeroOutInDeallocRemover trans(pass);
  trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
}
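An illustrative before/after (hypothetical code, not from the original sources) for this pass: the nil assignment to a strong synthesized property inside -dealloc is deleted, while the rest of the method is left alone:

  // Before:
  - (void)dealloc {
    self.title = nil;   // zero'ing out a strong @synthesized property
    [super dealloc];
  }
  // After:
  - (void)dealloc {
    [super dealloc];
  }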
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
//===--- TransUnbridgedCasts.cpp - Transformations to ARC mode ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// rewriteUnbridgedCasts:
//
// A cast of a non-objc pointer to an objc one is checked. If the non-objc
// pointer comes from a file-level variable, a __bridge cast is used to convert
// it. For the result of a function call that we know is +1/+0,
// __bridge/CFBridgingRelease is used.
//
//  NSString *str = (NSString *)kUTTypePlainText;
//  str = b ? kUTTypeRTF : kUTTypePlainText;
//  NSString *_uuidString = (NSString *)CFUUIDCreateString(kCFAllocatorDefault,
//                                                         _uuid);
// ---->
//  NSString *str = (__bridge NSString *)kUTTypePlainText;
//  str = (__bridge NSString *)(b ? kUTTypeRTF : kUTTypePlainText);
//  NSString *_uuidString = (NSString *)
//            CFBridgingRelease(CFUUIDCreateString(kCFAllocatorDefault, _uuid));
//
// For a C pointer to ObjC, for casting 'self', __bridge is used.
//
//  CFStringRef str = (CFStringRef)self;
// ---->
//  CFStringRef str = (__bridge CFStringRef)self;
//
// Uses of Block_copy/Block_release macros are rewritten:
//
//  c = Block_copy(b);
//  Block_release(c);
// ---->
//  c = [b copy];
//  <removed>
//
//===----------------------------------------------------------------------===//

#include "Transforms.h"
#include "Internals.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ParentMap.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/ADT/SmallString.h"

using namespace clang;
using namespace arcmt;
using namespace trans;

namespace {

class UnbridgedCastRewriter : public RecursiveASTVisitor<UnbridgedCastRewriter>{
  MigrationPass &Pass;
  IdentifierInfo *SelfII;
  std::unique_ptr<ParentMap> StmtMap;
  Decl *ParentD;
  Stmt *Body;
  mutable std::unique_ptr<ExprSet> Removables;

public:
  UnbridgedCastRewriter(MigrationPass &pass)
    : Pass(pass), ParentD(nullptr), Body(nullptr) {
    SelfII = &Pass.Ctx.Idents.get("self");
  }

  void transformBody(Stmt *body, Decl *ParentD) {
    this->ParentD = ParentD;
    Body = body;
    StmtMap.reset(new ParentMap(body));
    TraverseStmt(body);
  }

  bool TraverseBlockDecl(BlockDecl *D) {
    // ParentMap does not enter into a BlockDecl to record its stmts, so use a
    // new UnbridgedCastRewriter to handle the block.
    UnbridgedCastRewriter(Pass).transformBody(D->getBody(), D);
    return true;
  }

  bool VisitCastExpr(CastExpr *E) {
    if (E->getCastKind() != CK_CPointerToObjCPointerCast &&
        E->getCastKind() != CK_BitCast &&
        E->getCastKind() != CK_AnyPointerToBlockPointerCast)
      return true;

    QualType castType = E->getType();
    Expr *castExpr = E->getSubExpr();
    QualType castExprType = castExpr->getType();

    if (castType->isObjCRetainableType() == castExprType->isObjCRetainableType())
      return true;

    bool exprRetainable = castExprType->isObjCIndirectLifetimeType();
    bool castRetainable = castType->isObjCIndirectLifetimeType();
    if (exprRetainable == castRetainable) return true;

    if (castExpr->isNullPointerConstant(Pass.Ctx,
                                        Expr::NPC_ValueDependentIsNull))
      return true;

    SourceLocation loc = castExpr->getExprLoc();
    if (loc.isValid() && Pass.Ctx.getSourceManager().isInSystemHeader(loc))
      return true;

    if (castType->isObjCRetainableType())
      transformNonObjCToObjCCast(E);
    else
      transformObjCToNonObjCCast(E);

    return true;
  }

private:
  void transformNonObjCToObjCCast(CastExpr *E) {
    if (!E) return;

    // Global vars are assumed to be cast as unretained.
    if (isGlobalVar(E))
      if (E->getSubExpr()->getType()->isPointerType()) {
        castToObjCObject(E, /*retained=*/false);
        return;
      }

    // If the cast is directly over the result of a Core Foundation function
    // try to figure out whether it should be cast as retained or unretained.
    Expr *inner = E->IgnoreParenCasts();
    if (CallExpr *callE = dyn_cast<CallExpr>(inner)) {
      if (FunctionDecl *FD = callE->getDirectCallee()) {
        if (FD->hasAttr<CFReturnsRetainedAttr>()) {
          castToObjCObject(E, /*retained=*/true);
          return;
        }
        if (FD->hasAttr<CFReturnsNotRetainedAttr>()) {
          castToObjCObject(E, /*retained=*/false);
          return;
        }
        if (FD->isGlobal() &&
            FD->getIdentifier() &&
            ento::cocoa::isRefType(E->getSubExpr()->getType(), "CF",
                                   FD->getIdentifier()->getName())) {
          StringRef fname = FD->getIdentifier()->getName();
          if (fname.endswith("Retain") ||
              fname.find("Create") != StringRef::npos ||
              fname.find("Copy") != StringRef::npos) {
            // Do not migrate to a pair of bridge transfer casts that would
            // cancel each other out. Leave it unchanged so the error gets the
            // user's attention instead.
            if (FD->getName() == "CFRetain" &&
                FD->getNumParams() == 1 &&
                FD->getParent()->isTranslationUnit() &&
                FD->isExternallyVisible()) {
              Expr *Arg = callE->getArg(0);
              if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
                const Expr *sub = ICE->getSubExpr();
                QualType T = sub->getType();
                if (T->isObjCObjectPointerType())
                  return;
              }
            }
            castToObjCObject(E, /*retained=*/true);
            return;
          }

          if (fname.find("Get") != StringRef::npos) {
            castToObjCObject(E, /*retained=*/false);
            return;
          }
        }
      }
    }

    // If returning an ivar or a member of an ivar from a +0 method, use
    // a __bridge cast.
    Expr *base = inner->IgnoreParenImpCasts();
    while (isa<MemberExpr>(base))
      base = cast<MemberExpr>(base)->getBase()->IgnoreParenImpCasts();
    if (isa<ObjCIvarRefExpr>(base) &&
        isa<ReturnStmt>(StmtMap->getParentIgnoreParenCasts(E))) {
      if (ObjCMethodDecl *method = dyn_cast_or_null<ObjCMethodDecl>(ParentD)) {
        if (!method->hasAttr<NSReturnsRetainedAttr>()) {
          castToObjCObject(E, /*retained=*/false);
          return;
        }
      }
    }
  }

  void castToObjCObject(CastExpr *E, bool retained) {
    rewriteToBridgedCast(E, retained ? OBC_BridgeTransfer : OBC_Bridge);
  }

  void rewriteToBridgedCast(CastExpr *E, ObjCBridgeCastKind Kind) {
    Transaction Trans(Pass.TA);
    rewriteToBridgedCast(E, Kind, Trans);
  }

  void rewriteToBridgedCast(CastExpr *E, ObjCBridgeCastKind Kind,
                            Transaction &Trans) {
    TransformActions &TA = Pass.TA;

    // We will remove the compiler diagnostic.
    if (!TA.hasDiagnostic(diag::err_arc_mismatched_cast,
                          diag::err_arc_cast_requires_bridge,
                          E->getLocStart())) {
      Trans.abort();
      return;
    }

    StringRef bridge;
    switch(Kind) {
    case OBC_Bridge:
      bridge = "__bridge "; break;
    case OBC_BridgeTransfer:
      bridge = "__bridge_transfer "; break;
    case OBC_BridgeRetained:
      bridge = "__bridge_retained "; break;
    }

    TA.clearDiagnostic(diag::err_arc_mismatched_cast,
                       diag::err_arc_cast_requires_bridge,
                       E->getLocStart());
    if (Kind == OBC_Bridge || !Pass.CFBridgingFunctionsDefined()) {
      if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(E)) {
        TA.insertAfterToken(CCE->getLParenLoc(), bridge);
      } else {
        SourceLocation insertLoc = E->getSubExpr()->getLocStart();
        SmallString<128> newCast;
        newCast += '(';
        newCast += bridge;
        newCast += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
        newCast += ')';

        if (isa<ParenExpr>(E->getSubExpr())) {
          TA.insert(insertLoc, newCast.str());
        } else {
          newCast += '(';
          TA.insert(insertLoc, newCast.str());
          TA.insertAfterToken(E->getLocEnd(), ")");
        }
      }
    } else {
      assert(Kind == OBC_BridgeTransfer || Kind == OBC_BridgeRetained);
      SmallString<32> BridgeCall;

      Expr *WrapE = E->getSubExpr();
      SourceLocation InsertLoc = WrapE->getLocStart();

      SourceManager &SM = Pass.Ctx.getSourceManager();
      char PrevChar = *SM.getCharacterData(InsertLoc.getLocWithOffset(-1));
      if (Lexer::isIdentifierBodyChar(PrevChar, Pass.Ctx.getLangOpts()))
        BridgeCall += ' ';

      if (Kind == OBC_BridgeTransfer)
        BridgeCall += "CFBridgingRelease";
      else
        BridgeCall += "CFBridgingRetain";

      if (isa<ParenExpr>(WrapE)) {
        TA.insert(InsertLoc, BridgeCall);
      } else {
        BridgeCall += '(';
        TA.insert(InsertLoc, BridgeCall);
        TA.insertAfterToken(WrapE->getLocEnd(), ")");
      }
    }
  }

  void rewriteCastForCFRetain(CastExpr *castE, CallExpr *callE) {
    Transaction Trans(Pass.TA);
    Pass.TA.replace(callE->getSourceRange(), callE->getArg(0)->getSourceRange());
    rewriteToBridgedCast(castE, OBC_BridgeRetained, Trans);
  }

  void getBlockMacroRanges(CastExpr *E, SourceRange &Outer, SourceRange &Inner) {
    SourceManager &SM = Pass.Ctx.getSourceManager();
    SourceLocation Loc = E->getExprLoc();
    assert(Loc.isMacroID());
    SourceLocation MacroBegin, MacroEnd;
    std::tie(MacroBegin, MacroEnd) = SM.getImmediateExpansionRange(Loc);
    SourceRange SubRange = E->getSubExpr()->IgnoreParenImpCasts()->getSourceRange();
    SourceLocation InnerBegin = SM.getImmediateMacroCallerLoc(SubRange.getBegin());
    SourceLocation InnerEnd = SM.getImmediateMacroCallerLoc(SubRange.getEnd());

    Outer = SourceRange(MacroBegin, MacroEnd);
    Inner = SourceRange(InnerBegin, InnerEnd);
  }

  void rewriteBlockCopyMacro(CastExpr *E) {
    SourceRange OuterRange, InnerRange;
    getBlockMacroRanges(E, OuterRange, InnerRange);

    Transaction Trans(Pass.TA);
    Pass.TA.replace(OuterRange, InnerRange);
    Pass.TA.insert(InnerRange.getBegin(), "[");
    Pass.TA.insertAfterToken(InnerRange.getEnd(), " copy]");
    Pass.TA.clearDiagnostic(diag::err_arc_mismatched_cast,
                            diag::err_arc_cast_requires_bridge,
                            OuterRange);
  }

  void removeBlockReleaseMacro(CastExpr *E) {
    SourceRange OuterRange, InnerRange;
    getBlockMacroRanges(E, OuterRange, InnerRange);

    Transaction Trans(Pass.TA);
    Pass.TA.clearDiagnostic(diag::err_arc_mismatched_cast,
                            diag::err_arc_cast_requires_bridge,
                            OuterRange);
    if (!hasSideEffects(E, Pass.Ctx)) {
      if (tryRemoving(cast<Expr>(StmtMap->getParentIgnoreParenCasts(E))))
        return;
    }
    Pass.TA.replace(OuterRange, InnerRange);
  }

  bool tryRemoving(Expr *E) const {
    if (!Removables) {
      Removables.reset(new ExprSet);
      collectRemovables(Body, *Removables);
    }

    if (Removables->count(E)) {
      Pass.TA.removeStmt(E);
      return true;
    }

    return false;
  }

  void transformObjCToNonObjCCast(CastExpr *E) {
    SourceLocation CastLoc = E->getExprLoc();
    if (CastLoc.isMacroID()) {
      StringRef MacroName = Lexer::getImmediateMacroName(CastLoc,
                                                    Pass.Ctx.getSourceManager(),
                                                    Pass.Ctx.getLangOpts());
      if (MacroName == "Block_copy") {
        rewriteBlockCopyMacro(E);
        return;
      }
      if (MacroName == "Block_release") {
        removeBlockReleaseMacro(E);
        return;
      }
    }

    if (isSelf(E->getSubExpr()))
      return rewriteToBridgedCast(E, OBC_Bridge);

    CallExpr *callE;
    if (isPassedToCFRetain(E, callE))
      return rewriteCastForCFRetain(E, callE);

    ObjCMethodFamily family = getFamilyOfMessage(E->getSubExpr());
    if (family == OMF_retain)
      return rewriteToBridgedCast(E, OBC_BridgeRetained);

    if (family == OMF_autorelease || family == OMF_release) {
      std::string err = "it is not safe to cast to '";
      err += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
      err += "' the result of '";
      err += family == OMF_autorelease ? "autorelease" : "release";
      err += "' message; a __bridge cast may result in a pointer to a "
          "destroyed object and a __bridge_retained may leak the object";
      Pass.TA.reportError(err, E->getLocStart(),
                          E->getSubExpr()->getSourceRange());
      Stmt *parent = E;
      do {
        parent = StmtMap->getParentIgnoreParenImpCasts(parent);
      } while (parent && isa<ExprWithCleanups>(parent));

      if (ReturnStmt *retS = dyn_cast_or_null<ReturnStmt>(parent)) {
        std::string note = "remove the cast and change return type of function "
            "to '";
        note += E->getSubExpr()->getType().getAsString(
                                                 Pass.Ctx.getPrintingPolicy());
        note += "' to have the object automatically autoreleased";
        Pass.TA.reportNote(note, retS->getLocStart());
      }
    }

    Expr *subExpr = E->getSubExpr();

    // Look through pseudo-object expressions.
    if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(subExpr)) {
      subExpr = pseudo->getResultExpr();
      assert(subExpr && "no result for pseudo-object of non-void type?");
    }

    if (ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(subExpr)) {
      if (implCE->getCastKind() == CK_ARCConsumeObject)
        return rewriteToBridgedCast(E, OBC_BridgeRetained);
      if (implCE->getCastKind() == CK_ARCReclaimReturnedObject)
        return rewriteToBridgedCast(E, OBC_Bridge);
    }

    bool isConsumed = false;
    if (isPassedToCParamWithKnownOwnership(E, isConsumed))
      return rewriteToBridgedCast(E, isConsumed ? OBC_BridgeRetained
                                                : OBC_Bridge);
  }

  static ObjCMethodFamily getFamilyOfMessage(Expr *E) {
    E = E->IgnoreParenCasts();
    if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E))
      return ME->getMethodFamily();

    return OMF_None;
  }

  bool isPassedToCFRetain(Expr *E, CallExpr *&callE) const {
    if ((callE = dyn_cast_or_null<CallExpr>(
                                     StmtMap->getParentIgnoreParenImpCasts(E))))
      if (FunctionDecl *
            FD = dyn_cast_or_null<FunctionDecl>(callE->getCalleeDecl()))
        if (FD->getName() == "CFRetain" && FD->getNumParams() == 1 &&
            FD->getParent()->isTranslationUnit() &&
            FD->isExternallyVisible())
          return true;

    return false;
  }

  bool isPassedToCParamWithKnownOwnership(Expr *E, bool &isConsumed) const {
    if (CallExpr *callE = dyn_cast_or_null<CallExpr>(
                                     StmtMap->getParentIgnoreParenImpCasts(E)))
      if (FunctionDecl *
            FD = dyn_cast_or_null<FunctionDecl>(callE->getCalleeDecl())) {
        unsigned i = 0;
        for (unsigned e = callE->getNumArgs(); i != e; ++i) {
          Expr *arg = callE->getArg(i);
          if (arg == E || arg->IgnoreParenImpCasts() == E)
            break;
        }
        if (i < callE->getNumArgs() && i < FD->getNumParams()) {
          ParmVarDecl *PD = FD->getParamDecl(i);
          if (PD->hasAttr<CFConsumedAttr>()) {
            isConsumed = true;
            return true;
          }
        }
      }

    return false;
  }

  bool isSelf(Expr *E) const {
    E = E->IgnoreParenLValueCasts();
    if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
      if (ImplicitParamDecl *IPD = dyn_cast<ImplicitParamDecl>(DRE->getDecl()))
        if (IPD->getIdentifier() == SelfII)
          return true;

    return false;
  }
};

} // end anonymous namespace

void trans::rewriteUnbridgedCasts(MigrationPass &pass) {
  BodyTransform<UnbridgedCastRewriter> trans(pass);
  trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
}
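An additional illustrative rewrite (hypothetical input, not from the original header comment) for the rewriteCastForCFRetain path above: a cast of an Objective-C object passed directly to CFRetain collapses into a single __bridge_retained cast, dropping the now-redundant call:

  NSString *string = [self name];
  CFStringRef ref = CFRetain((CFStringRef)string);
// ---->
  CFStringRef ref = (__bridge_retained CFStringRef)string;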
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/Transforms.h
//===-- Transforms.h - Transformations to ARC mode --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
#define LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H

#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/SaveAndRestore.h"

namespace clang {
  class Decl;
  class Stmt;
  class BlockDecl;
  class ObjCMethodDecl;
  class FunctionDecl;

namespace arcmt {
  class MigrationPass;

namespace trans {

  class MigrationContext;

//===----------------------------------------------------------------------===//
// Transformations.
//===----------------------------------------------------------------------===//

void rewriteAutoreleasePool(MigrationPass &pass);
void rewriteUnbridgedCasts(MigrationPass &pass);
void makeAssignARCSafe(MigrationPass &pass);
void removeRetainReleaseDeallocFinalize(MigrationPass &pass);
void removeZeroOutPropsInDeallocFinalize(MigrationPass &pass);
void rewriteUnusedInitDelegate(MigrationPass &pass);
void checkAPIUses(MigrationPass &pass);

void removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass);

class BodyContext {
  MigrationContext &MigrateCtx;
  ParentMap PMap;
  Stmt *TopStmt;

public:
  BodyContext(MigrationContext &MigrateCtx, Stmt *S)
    : MigrateCtx(MigrateCtx), PMap(S), TopStmt(S) {}

  MigrationContext &getMigrationContext() { return MigrateCtx; }
  ParentMap &getParentMap() { return PMap; }
  Stmt *getTopStmt() { return TopStmt; }
};

class ObjCImplementationContext {
  MigrationContext &MigrateCtx;
  ObjCImplementationDecl *ImpD;

public:
  ObjCImplementationContext(MigrationContext &MigrateCtx,
                            ObjCImplementationDecl *D)
    : MigrateCtx(MigrateCtx), ImpD(D) {}

  MigrationContext &getMigrationContext() { return MigrateCtx; }
  ObjCImplementationDecl *getImplementationDecl() { return ImpD; }
};

class ASTTraverser {
public:
  virtual ~ASTTraverser();
  virtual void traverseTU(MigrationContext &MigrateCtx) { }
  virtual void traverseBody(BodyContext &BodyCtx) { }
  virtual void traverseObjCImplementation(ObjCImplementationContext &ImplCtx) {}
};

class MigrationContext {
  std::vector<ASTTraverser *> Traversers;

public:
  MigrationPass &Pass;

  struct GCAttrOccurrence {
    enum AttrKind { Weak, Strong } Kind;
    SourceLocation Loc;
    QualType ModifiedType;
    Decl *Dcl;
    /// \brief true if the attribute is owned, e.g. it is in a body and not
    /// just in an interface.
    bool FullyMigratable;
  };
  std::vector<GCAttrOccurrence> GCAttrs;
  llvm::DenseSet<unsigned> AttrSet;
  llvm::DenseSet<unsigned> RemovedAttrSet;

  /// \brief Set of raw '@' locations for 'assign' properties group that
  /// contain GC __weak.
  llvm::DenseSet<unsigned> AtPropsWeak;

  explicit MigrationContext(MigrationPass &pass) : Pass(pass) {}
  ~MigrationContext();

  typedef std::vector<ASTTraverser *>::iterator traverser_iterator;
  traverser_iterator traversers_begin() { return Traversers.begin(); }
  traverser_iterator traversers_end() { return Traversers.end(); }

  void addTraverser(ASTTraverser *traverser) {
    Traversers.push_back(traverser);
  }

  bool isGCOwnedNonObjC(QualType T);
  bool removePropertyAttribute(StringRef fromAttr, SourceLocation atLoc) {
    return rewritePropertyAttribute(fromAttr, StringRef(), atLoc);
  }
  bool rewritePropertyAttribute(StringRef fromAttr, StringRef toAttr,
                                SourceLocation atLoc);
  bool addPropertyAttribute(StringRef attr, SourceLocation atLoc);

  void traverse(TranslationUnitDecl *TU);

  void dumpGCAttrs();
};

class PropertyRewriteTraverser : public ASTTraverser {
public:
  void traverseObjCImplementation(ObjCImplementationContext &ImplCtx) override;
};

class BlockObjCVariableTraverser : public ASTTraverser {
public:
  void traverseBody(BodyContext &BodyCtx) override;
};

class ProtectedScopeTraverser : public ASTTraverser {
public:
  void traverseBody(BodyContext &BodyCtx) override;
};

// GC transformations

class GCAttrsTraverser : public ASTTraverser {
public:
  void traverseTU(MigrationContext &MigrateCtx) override;
};

class GCCollectableCallsTraverser : public ASTTraverser {
public:
  void traverseBody(BodyContext &BodyCtx) override;
};

//===----------------------------------------------------------------------===//
// Helpers.
//===----------------------------------------------------------------------===//

/// \brief Determine whether we can add weak to the given type.
bool canApplyWeak(ASTContext &Ctx, QualType type,
                  bool AllowOnUnknownClass = false);

bool isPlusOneAssign(const BinaryOperator *E);
bool isPlusOne(const Expr *E);

/// \brief 'Loc' is the end of a statement range. This returns the location
/// immediately after the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
SourceLocation findLocationAfterSemi(SourceLocation loc, ASTContext &Ctx,
                                     bool IsDecl = false);

/// \brief 'Loc' is the end of a statement range. This returns the location
/// of the semicolon following the statement.
/// If no semicolon is found or the location is inside a macro, the returned
/// source location will be invalid.
SourceLocation findSemiAfterLocation(SourceLocation loc, ASTContext &Ctx,
                                     bool IsDecl = false);

bool hasSideEffects(Expr *E, ASTContext &Ctx);
bool isGlobalVar(Expr *E);
/// \brief Returns "nil" or "0" if 'nil' macro is not actually defined.
StringRef getNilString(MigrationPass &Pass);

template <typename BODY_TRANS>
class BodyTransform : public RecursiveASTVisitor<BodyTransform<BODY_TRANS> > {
  MigrationPass &Pass;
  Decl *ParentD;

  typedef RecursiveASTVisitor<BodyTransform<BODY_TRANS> > base;
public:
  BodyTransform(MigrationPass &pass) : Pass(pass), ParentD(nullptr) { }

  bool TraverseStmt(Stmt *rootS) {
    if (rootS)
      BODY_TRANS(Pass).transformBody(rootS, ParentD);
    return true;
  }

  bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
    SaveAndRestore<Decl *> SetParent(ParentD, D);
    return base::TraverseObjCMethodDecl(D);
  }
};

typedef llvm::DenseSet<Expr *> ExprSet;

void clearRefsIn(Stmt *S, ExprSet &refs);
template <typename iterator>
void clearRefsIn(iterator begin, iterator end, ExprSet &refs) {
  for (; begin != end; ++begin)
    clearRefsIn(*begin, refs);
}

void collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs);

void collectRemovables(Stmt *S, ExprSet &exprs);

} // end namespace trans

} // end namespace arcmt

} // end namespace clang

#endif
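A minimal usage sketch for the BodyTransform adapter declared above; MyBodyRewriter is a hypothetical name, standing in for any class that exposes transformBody(Stmt *, Decl *), as the rewriters in this library do (see rewriteAutoreleasePool and rewriteUnbridgedCasts):

  void runMyBodyRewriter(MigrationPass &pass) {
    // BodyTransform instantiates a fresh MyBodyRewriter per traversed body,
    // passing along the enclosing ObjCMethodDecl (if any) as the parent decl.
    BodyTransform<MyBodyRewriter> transform(pass);
    transform.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
  }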
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/CMakeLists.txt
set(LLVM_LINK_COMPONENTS
  Support
  )

add_clang_library(clangARCMigrate
  ARCMT.cpp
  ARCMTActions.cpp
  FileRemapper.cpp
  ObjCMT.cpp
  PlistReporter.cpp
  TransAPIUses.cpp
  TransARCAssign.cpp
  TransAutoreleasePool.cpp
  TransBlockObjCVariable.cpp
  TransEmptyStatementsAndDealloc.cpp
  TransGCAttrs.cpp
  TransGCCalls.cpp
  TransProperties.cpp
  TransProtectedScope.cpp
  TransRetainReleaseDealloc.cpp
  TransUnbridgedCasts.cpp
  TransUnusedInitDelegate.cpp
  TransZeroOutPropsInDealloc.cpp
  TransformActions.cpp
  Transforms.cpp

  LINK_LIBS
  clangAST
  clangAnalysis
  clangBasic
  clangEdit
  clangFrontend
  clangLex
  clangRewrite
  clangSema
  clangSerialization
  clangStaticAnalyzerCheckers
  )
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
//===--- ARCMTActions.cpp - ARC Migrate Tool Frontend Actions ---*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "clang/ARCMigrate/ARCMTActions.h"
#include "clang/ARCMigrate/ARCMT.h"
#include "clang/Frontend/CompilerInstance.h"

using namespace clang;
using namespace arcmt;

bool CheckAction::BeginInvocation(CompilerInstance &CI) {
  if (arcmt::checkForManualIssues(CI.getInvocation(), getCurrentInput(),
                                  CI.getPCHContainerOperations(),
                                  CI.getDiagnostics().getClient()))
    return false; // errors, stop the action.

  // We only want to see warnings reported from arcmt::checkForManualIssues.
  CI.getDiagnostics().setIgnoreAllWarnings(true);
  return true;
}

CheckAction::CheckAction(FrontendAction *WrappedAction)
  : WrapperFrontendAction(WrappedAction) {}

bool ModifyAction::BeginInvocation(CompilerInstance &CI) {
  return !arcmt::applyTransformations(CI.getInvocation(), getCurrentInput(),
                                      CI.getPCHContainerOperations(),
                                      CI.getDiagnostics().getClient());
}

ModifyAction::ModifyAction(FrontendAction *WrappedAction)
  : WrapperFrontendAction(WrappedAction) {}

bool MigrateAction::BeginInvocation(CompilerInstance &CI) {
  if (arcmt::migrateWithTemporaryFiles(
          CI.getInvocation(), getCurrentInput(), CI.getPCHContainerOperations(),
          CI.getDiagnostics().getClient(), MigrateDir,
          EmitPremigrationARCErros, PlistOut))
    return false; // errors, stop the action.

  // We only want to see diagnostics emitted by migrateWithTemporaryFiles.
  CI.getDiagnostics().setIgnoreAllWarnings(true);
  return true;
}

MigrateAction::MigrateAction(FrontendAction *WrappedAction,
                             StringRef migrateDir,
                             StringRef plistOut,
                             bool emitPremigrationARCErrors)
  : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir),
    PlistOut(plistOut), EmitPremigrationARCErros(emitPremigrationARCErrors) {
  if (MigrateDir.empty())
    MigrateDir = "."; // user current directory if none is given.
}
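A rough sketch (hypothetical wiring, not from the original sources; the actual driver code in clang's FrontendTool differs) of how these wrapper actions are meant to be used, decorating whatever frontend action would otherwise run:

  // Run the ARC migration checker around a normal -emit-obj compilation;
  // requires clang/CodeGen/CodeGenAction.h for EmitObjAction.
  FrontendAction *Act = new EmitObjAction();
  Act = new arcmt::CheckAction(Act);
  // Act can now be executed by the CompilerInstance as usual.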
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/FileRemapper.cpp
//===--- FileRemapper.cpp - File Remapping Helper -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "clang/ARCMigrate/FileRemapper.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/FileManager.h" #include "clang/Lex/PreprocessorOptions.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/Path.h" #include "llvm/Support/raw_ostream.h" #include <fstream> using namespace clang; using namespace arcmt; FileRemapper::FileRemapper() { FileMgr.reset(new FileManager(FileSystemOptions())); } FileRemapper::~FileRemapper() { clear(); } void FileRemapper::clear(StringRef outputDir) { for (MappingsTy::iterator I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) resetTarget(I->second); FromToMappings.clear(); assert(ToFromMappings.empty()); if (!outputDir.empty()) { std::string infoFile = getRemapInfoFile(outputDir); llvm::sys::fs::remove(infoFile); } } std::string FileRemapper::getRemapInfoFile(StringRef outputDir) { assert(!outputDir.empty()); SmallString<128> InfoFile = outputDir; llvm::sys::path::append(InfoFile, "remap"); return InfoFile.str(); } bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag, bool ignoreIfFilesChanged) { std::string infoFile = getRemapInfoFile(outputDir); return initFromFile(infoFile, Diag, ignoreIfFilesChanged); } bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag, bool ignoreIfFilesChanged) { assert(FromToMappings.empty() && "initFromDisk should be called before any remap calls"); std::string infoFile = filePath; if (!llvm::sys::fs::exists(infoFile)) return false; std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs; llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileBuf = llvm::MemoryBuffer::getFile(infoFile.c_str()); if (!fileBuf) return report("Error opening file: " + infoFile, Diag); SmallVector<StringRef, 64> lines; fileBuf.get()->getBuffer().split(lines, "\n"); for (unsigned idx = 0; idx+3 <= lines.size(); idx += 3) { StringRef fromFilename = lines[idx]; unsigned long long timeModified; if (lines[idx+1].getAsInteger(10, timeModified)) return report("Invalid file data: '" + lines[idx+1] + "' not a number", Diag); StringRef toFilename = lines[idx+2]; const FileEntry *origFE = FileMgr->getFile(fromFilename); if (!origFE) { if (ignoreIfFilesChanged) continue; return report("File does not exist: " + fromFilename, Diag); } const FileEntry *newFE = FileMgr->getFile(toFilename); if (!newFE) { if (ignoreIfFilesChanged) continue; return report("File does not exist: " + toFilename, Diag); } if ((uint64_t)origFE->getModificationTime() != timeModified) { if (ignoreIfFilesChanged) continue; return report("File was modified: " + fromFilename, Diag); } pairs.push_back(std::make_pair(origFE, newFE)); } for (unsigned i = 0, e = pairs.size(); i != e; ++i) remap(pairs[i].first, pairs[i].second); return false; } bool FileRemapper::flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag) { using namespace llvm::sys; if (fs::create_directory(outputDir)) return report("Could not create directory: " + outputDir, Diag); std::string infoFile = getRemapInfoFile(outputDir); return flushToFile(infoFile, Diag); } bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) { using namespace llvm::sys; 
std::error_code EC; std::string infoFile = outputPath; llvm::raw_fd_ostream infoOut(infoFile, EC, llvm::sys::fs::F_None); if (EC) return report(EC.message(), Diag); for (MappingsTy::iterator I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) { const FileEntry *origFE = I->first; SmallString<200> origPath = StringRef(origFE->getName()); fs::make_absolute(origPath); infoOut << origPath << '\n'; infoOut << (uint64_t)origFE->getModificationTime() << '\n'; if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) { SmallString<200> newPath = StringRef(FE->getName()); fs::make_absolute(newPath); infoOut << newPath << '\n'; } else { SmallString<64> tempPath; int fd; if (fs::createTemporaryFile(path::filename(origFE->getName()), path::extension(origFE->getName()), fd, tempPath)) return report("Could not create file: " + tempPath.str(), Diag); llvm::raw_fd_ostream newOut(fd, /*shouldClose=*/true); llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>(); newOut.write(mem->getBufferStart(), mem->getBufferSize()); newOut.close(); const FileEntry *newE = FileMgr->getFile(tempPath); remap(origFE, newE); infoOut << newE->getName() << '\n'; } } infoOut.close(); return false; } bool FileRemapper::overwriteOriginal(DiagnosticsEngine &Diag, StringRef outputDir) { using namespace llvm::sys; for (MappingsTy::iterator I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) { const FileEntry *origFE = I->first; assert(I->second.is<llvm::MemoryBuffer *>()); if (!fs::exists(origFE->getName())) return report(StringRef("File does not exist: ") + origFE->getName(), Diag); std::error_code EC; llvm::raw_fd_ostream Out(origFE->getName(), EC, llvm::sys::fs::F_None); if (EC) return report(EC.message(), Diag); llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>(); Out.write(mem->getBufferStart(), mem->getBufferSize()); Out.close(); } clear(outputDir); return false; } void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const { for (MappingsTy::const_iterator I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) { if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) { PPOpts.addRemappedFile(I->first->getName(), FE->getName()); } else { llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>(); PPOpts.addRemappedFile(I->first->getName(), mem); } } PPOpts.RetainRemappedFileBuffers = true; } void FileRemapper::remap(StringRef filePath, std::unique_ptr<llvm::MemoryBuffer> memBuf) { remap(getOriginalFile(filePath), std::move(memBuf)); } void FileRemapper::remap(const FileEntry *file, std::unique_ptr<llvm::MemoryBuffer> memBuf) { assert(file); Target &targ = FromToMappings[file]; resetTarget(targ); targ = memBuf.release(); } void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) { assert(file && newfile); Target &targ = FromToMappings[file]; resetTarget(targ); targ = newfile; ToFromMappings[newfile] = file; } const FileEntry *FileRemapper::getOriginalFile(StringRef filePath) { const FileEntry *file = FileMgr->getFile(filePath); // If we are updating a file that overriden an original file, // actually update the original file. 
llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator I = ToFromMappings.find(file); if (I != ToFromMappings.end()) { file = I->second; assert(FromToMappings.find(file) != FromToMappings.end() && "Original file not in mappings!"); } return file; } void FileRemapper::resetTarget(Target &targ) { if (!targ) return; if (llvm::MemoryBuffer *oldmem = targ.dyn_cast<llvm::MemoryBuffer *>()) { delete oldmem; } else { const FileEntry *toFE = targ.get<const FileEntry *>(); ToFromMappings.erase(toFE); } } bool FileRemapper::report(const Twine &err, DiagnosticsEngine &Diag) { Diag.Report(Diag.getCustomDiagID(DiagnosticsEngine::Error, "%0")) << err.str(); return true; }
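For orientation, the "remap" info file that flushToFile() writes above (and that initFromFile() reads back) is a flat sequence of three-line records: original path, decimal modification time, remapped path. A minimal self-contained sketch of a writer for that layout follows; the RemapRecord struct and the writeRemapInfo name are illustrative, not part of the tool.

#include <fstream>
#include <string>
#include <vector>

struct RemapRecord {
  std::string FromPath;        // original file
  unsigned long long ModTime;  // mtime captured when the mapping was made
  std::string ToPath;          // file holding the remapped content
};

// Emits the same line-oriented triplets that initFromFile() parses
// (it walks the lines three at a time and rejects a non-numeric mtime).
void writeRemapInfo(const std::string &InfoFile,
                    const std::vector<RemapRecord> &Records) {
  std::ofstream OS(InfoFile);
  for (const RemapRecord &R : Records)
    OS << R.FromPath << '\n' << R.ModTime << '\n' << R.ToPath << '\n';
}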
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
//===--- TransAPIUses.cpp - Transformations to ARC mode -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // checkAPIUses: // // Emits error/fix with some API uses that are obsolete or not safe in ARC mode: // // - NSInvocation's [get/set]ReturnValue and [get/set]Argument are only safe // with __unsafe_unretained objects. // - Calling -zone gets replaced with 'nil'. // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/Sema/SemaDiagnostic.h" using namespace clang; using namespace arcmt; using namespace trans; namespace { class APIChecker : public RecursiveASTVisitor<APIChecker> { MigrationPass &Pass; Selector getReturnValueSel, setReturnValueSel; Selector getArgumentSel, setArgumentSel; Selector zoneSel; public: APIChecker(MigrationPass &pass) : Pass(pass) { SelectorTable &sels = Pass.Ctx.Selectors; IdentifierTable &ids = Pass.Ctx.Idents; getReturnValueSel = sels.getUnarySelector(&ids.get("getReturnValue")); setReturnValueSel = sels.getUnarySelector(&ids.get("setReturnValue")); IdentifierInfo *selIds[2]; selIds[0] = &ids.get("getArgument"); selIds[1] = &ids.get("atIndex"); getArgumentSel = sels.getSelector(2, selIds); selIds[0] = &ids.get("setArgument"); setArgumentSel = sels.getSelector(2, selIds); zoneSel = sels.getNullarySelector(&ids.get("zone")); } bool VisitObjCMessageExpr(ObjCMessageExpr *E) { // NSInvocation. if (E->isInstanceMessage() && E->getReceiverInterface() && E->getReceiverInterface()->getName() == "NSInvocation") { StringRef selName; if (E->getSelector() == getReturnValueSel) selName = "getReturnValue"; else if (E->getSelector() == setReturnValueSel) selName = "setReturnValue"; else if (E->getSelector() == getArgumentSel) selName = "getArgument"; else if (E->getSelector() == setArgumentSel) selName = "setArgument"; else return true; Expr *parm = E->getArg(0)->IgnoreParenCasts(); QualType pointee = parm->getType()->getPointeeType(); if (pointee.isNull()) return true; if (pointee.getObjCLifetime() > Qualifiers::OCL_ExplicitNone) Pass.TA.report(parm->getLocStart(), diag::err_arcmt_nsinvocation_ownership, parm->getSourceRange()) << selName; return true; } // -zone. if (E->isInstanceMessage() && E->getInstanceReceiver() && E->getSelector() == zoneSel && Pass.TA.hasDiagnostic(diag::err_unavailable, diag::err_unavailable_message, E->getSelectorLoc(0))) { // Calling -zone is meaningless in ARC, change it to nil. Transaction Trans(Pass.TA); Pass.TA.clearDiagnostic(diag::err_unavailable, diag::err_unavailable_message, E->getSelectorLoc(0)); Pass.TA.replace(E->getSourceRange(), getNilString(Pass)); } return true; } }; } // anonymous namespace void trans::checkAPIUses(MigrationPass &pass) { APIChecker(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl()); }
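The NSInvocation check above reduces to an ordering comparison on clang's lifetime qualifier: anything stronger than __unsafe_unretained is diagnosed. A standalone sketch with the enumerators inlined as a plain enum (mirroring the order of clang::Qualifiers::ObjCLifetime) so the predicate can be read in isolation:

// Mirrors the ordering of clang::Qualifiers::ObjCLifetime.
enum ObjCLifetime {
  OCL_None,           // no lifetime qualifier on the pointee
  OCL_ExplicitNone,   // __unsafe_unretained
  OCL_Strong,         // __strong
  OCL_Weak,           // __weak
  OCL_Autoreleasing   // __autoreleasing
};

// True when a pointer passed to [get/set]ReturnValue or
// [get/set]Argument:atIndex: must be diagnosed, as in the visitor above.
bool isUnsafeInvocationPointee(ObjCLifetime L) {
  return L > OCL_ExplicitNone;
}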
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
//===--- TransRetainReleaseDealloc.cpp - Transformations to ARC mode ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // removeRetainReleaseDealloc: // // Removes retain/release/autorelease/dealloc messages. // // return [[foo retain] autorelease]; // ----> // return foo; // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/AST/ParentMap.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Lexer.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/StringSwitch.h" using namespace clang; using namespace arcmt; using namespace trans; namespace { class RetainReleaseDeallocRemover : public RecursiveASTVisitor<RetainReleaseDeallocRemover> { Stmt *Body; MigrationPass &Pass; ExprSet Removables; std::unique_ptr<ParentMap> StmtMap; Selector DelegateSel, FinalizeSel; public: RetainReleaseDeallocRemover(MigrationPass &pass) : Body(nullptr), Pass(pass) { DelegateSel = Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("delegate")); FinalizeSel = Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize")); } void transformBody(Stmt *body, Decl *ParentD) { Body = body; collectRemovables(body, Removables); StmtMap.reset(new ParentMap(body)); TraverseStmt(body); } bool VisitObjCMessageExpr(ObjCMessageExpr *E) { switch (E->getMethodFamily()) { default: if (E->isInstanceMessage() && E->getSelector() == FinalizeSel) break; return true; case OMF_autorelease: if (isRemovable(E)) { if (!isCommonUnusedAutorelease(E)) { // An unused autorelease is badness. If we remove it the receiver // will likely die immediately while previously it was kept alive // by the autorelease pool. This is bad practice in general, leave it // and emit an error to force the user to restructure their code. Pass.TA.reportError("it is not safe to remove an unused 'autorelease' " "message; its receiver may be destroyed immediately", E->getLocStart(), E->getSourceRange()); return true; } } // Pass through. 
case OMF_retain: case OMF_release: if (E->getReceiverKind() == ObjCMessageExpr::Instance) if (Expr *rec = E->getInstanceReceiver()) { rec = rec->IgnoreParenImpCasts(); if (rec->getType().getObjCLifetime() == Qualifiers::OCL_ExplicitNone && (E->getMethodFamily() != OMF_retain || isRemovable(E))) { std::string err = "it is not safe to remove '"; err += E->getSelector().getAsString() + "' message on " "an __unsafe_unretained type"; Pass.TA.reportError(err, rec->getLocStart()); return true; } if (isGlobalVar(rec) && (E->getMethodFamily() != OMF_retain || isRemovable(E))) { std::string err = "it is not safe to remove '"; err += E->getSelector().getAsString() + "' message on " "a global variable"; Pass.TA.reportError(err, rec->getLocStart()); return true; } if (E->getMethodFamily() == OMF_release && isDelegateMessage(rec)) { Pass.TA.reportError("it is not safe to remove 'retain' " "message on the result of a 'delegate' message; " "the object that was passed to 'setDelegate:' may not be " "properly retained", rec->getLocStart()); return true; } } case OMF_dealloc: break; } switch (E->getReceiverKind()) { default: return true; case ObjCMessageExpr::SuperInstance: { Transaction Trans(Pass.TA); clearDiagnostics(E->getSelectorLoc(0)); if (tryRemoving(E)) return true; Pass.TA.replace(E->getSourceRange(), "self"); return true; } case ObjCMessageExpr::Instance: break; } Expr *rec = E->getInstanceReceiver(); if (!rec) return true; Transaction Trans(Pass.TA); clearDiagnostics(E->getSelectorLoc(0)); ObjCMessageExpr *Msg = E; Expr *RecContainer = Msg; SourceRange RecRange = rec->getSourceRange(); checkForGCDOrXPC(Msg, RecContainer, rec, RecRange); if (Msg->getMethodFamily() == OMF_release && isRemovable(RecContainer) && isInAtFinally(RecContainer)) { // Change the -release to "receiver = nil" in a finally to avoid a leak // when an exception is thrown. Pass.TA.replace(RecContainer->getSourceRange(), RecRange); std::string str = " = "; str += getNilString(Pass); Pass.TA.insertAfterToken(RecRange.getEnd(), str); return true; } if (!hasSideEffects(rec, Pass.Ctx)) { if (tryRemoving(RecContainer)) return true; } Pass.TA.replace(RecContainer->getSourceRange(), RecRange); return true; } private: /// \brief Checks for idioms where an unused -autorelease is common. /// /// Returns true for this idiom which is common in property /// setters: /// /// [backingValue autorelease]; /// backingValue = [newValue retain]; // in general a +1 assign /// /// For these as well: /// /// [[var retain] autorelease]; /// return var; /// bool isCommonUnusedAutorelease(ObjCMessageExpr *E) { if (isPlusOneAssignBeforeOrAfterAutorelease(E)) return true; if (isReturnedAfterAutorelease(E)) return true; return false; } bool isReturnedAfterAutorelease(ObjCMessageExpr *E) { Expr *Rec = E->getInstanceReceiver(); if (!Rec) return false; Decl *RefD = getReferencedDecl(Rec); if (!RefD) return false; Stmt *nextStmt = getNextStmt(E); if (!nextStmt) return false; // Check for "return <variable>;". 
if (ReturnStmt *RetS = dyn_cast<ReturnStmt>(nextStmt)) return RefD == getReferencedDecl(RetS->getRetValue()); return false; } bool isPlusOneAssignBeforeOrAfterAutorelease(ObjCMessageExpr *E) { Expr *Rec = E->getInstanceReceiver(); if (!Rec) return false; Decl *RefD = getReferencedDecl(Rec); if (!RefD) return false; Stmt *prevStmt, *nextStmt; std::tie(prevStmt, nextStmt) = getPreviousAndNextStmt(E); return isPlusOneAssignToVar(prevStmt, RefD) || isPlusOneAssignToVar(nextStmt, RefD); } bool isPlusOneAssignToVar(Stmt *S, Decl *RefD) { if (!S) return false; // Check for "RefD = [+1 retained object];". if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(S)) { if (RefD != getReferencedDecl(Bop->getLHS())) return false; if (isPlusOneAssign(Bop)) return true; return false; } if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) { if (DS->isSingleDecl() && DS->getSingleDecl() == RefD) { if (VarDecl *VD = dyn_cast<VarDecl>(RefD)) return isPlusOne(VD->getInit()); } return false; } return false; } Stmt *getNextStmt(Expr *E) { return getPreviousAndNextStmt(E).second; } std::pair<Stmt *, Stmt *> getPreviousAndNextStmt(Expr *E) { Stmt *prevStmt = nullptr, *nextStmt = nullptr; if (!E) return std::make_pair(prevStmt, nextStmt); Stmt *OuterS = E, *InnerS; do { InnerS = OuterS; OuterS = StmtMap->getParent(InnerS); } while (OuterS && (isa<ParenExpr>(OuterS) || isa<CastExpr>(OuterS) || isa<ExprWithCleanups>(OuterS))); if (!OuterS) return std::make_pair(prevStmt, nextStmt); Stmt::child_iterator currChildS = OuterS->child_begin(); Stmt::child_iterator childE = OuterS->child_end(); Stmt::child_iterator prevChildS = childE; for (; currChildS != childE; ++currChildS) { if (*currChildS == InnerS) break; prevChildS = currChildS; } if (prevChildS != childE) { prevStmt = *prevChildS; if (prevStmt) prevStmt = prevStmt->IgnoreImplicit(); } if (currChildS == childE) return std::make_pair(prevStmt, nextStmt); ++currChildS; if (currChildS == childE) return std::make_pair(prevStmt, nextStmt); nextStmt = *currChildS; if (nextStmt) nextStmt = nextStmt->IgnoreImplicit(); return std::make_pair(prevStmt, nextStmt); } Decl *getReferencedDecl(Expr *E) { if (!E) return nullptr; E = E->IgnoreParenCasts(); if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) { switch (ME->getMethodFamily()) { case OMF_copy: case OMF_autorelease: case OMF_release: case OMF_retain: return getReferencedDecl(ME->getInstanceReceiver()); default: return nullptr; } } if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) return DRE->getDecl(); if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) return ME->getMemberDecl(); if (ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(E)) return IRE->getDecl(); return nullptr; } /// \brief Check if the retain/release is due to a GCD/XPC macro that are /// defined as: /// /// #define dispatch_retain(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); (void)[_o retain]; }) /// #define dispatch_release(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); [_o release]; }) /// #define xpc_retain(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o retain]; }) /// #define xpc_release(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o release]; }) /// /// and return the top container which is the StmtExpr and the macro argument /// expression. 
void checkForGCDOrXPC(ObjCMessageExpr *Msg, Expr *&RecContainer, Expr *&Rec, SourceRange &RecRange) { SourceLocation Loc = Msg->getExprLoc(); if (!Loc.isMacroID()) return; SourceManager &SM = Pass.Ctx.getSourceManager(); StringRef MacroName = Lexer::getImmediateMacroName(Loc, SM, Pass.Ctx.getLangOpts()); bool isGCDOrXPC = llvm::StringSwitch<bool>(MacroName) .Case("dispatch_retain", true) .Case("dispatch_release", true) .Case("xpc_retain", true) .Case("xpc_release", true) .Default(false); if (!isGCDOrXPC) return; StmtExpr *StmtE = nullptr; Stmt *S = Msg; while (S) { if (StmtExpr *SE = dyn_cast<StmtExpr>(S)) { StmtE = SE; break; } S = StmtMap->getParent(S); } if (!StmtE) return; Stmt::child_range StmtExprChild = StmtE->children(); if (!StmtExprChild) return; CompoundStmt *CompS = dyn_cast_or_null<CompoundStmt>(*StmtExprChild); if (!CompS) return; Stmt::child_range CompStmtChild = CompS->children(); if (!CompStmtChild) return; DeclStmt *DeclS = dyn_cast_or_null<DeclStmt>(*CompStmtChild); if (!DeclS) return; if (!DeclS->isSingleDecl()) return; VarDecl *VD = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()); if (!VD) return; Expr *Init = VD->getInit(); if (!Init) return; RecContainer = StmtE; Rec = Init->IgnoreParenImpCasts(); if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Rec)) Rec = EWC->getSubExpr()->IgnoreParenImpCasts(); RecRange = Rec->getSourceRange(); if (SM.isMacroArgExpansion(RecRange.getBegin())) RecRange.setBegin(SM.getImmediateSpellingLoc(RecRange.getBegin())); if (SM.isMacroArgExpansion(RecRange.getEnd())) RecRange.setEnd(SM.getImmediateSpellingLoc(RecRange.getEnd())); } void clearDiagnostics(SourceLocation loc) const { Pass.TA.clearDiagnostic(diag::err_arc_illegal_explicit_message, diag::err_unavailable, diag::err_unavailable_message, loc); } bool isDelegateMessage(Expr *E) const { if (!E) return false; E = E->IgnoreParenCasts(); // Also look through property-getter sugar. if (PseudoObjectExpr *pseudoOp = dyn_cast<PseudoObjectExpr>(E)) E = pseudoOp->getResultExpr()->IgnoreImplicit(); if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) return (ME->isInstanceMessage() && ME->getSelector() == DelegateSel); return false; } bool isInAtFinally(Expr *E) const { assert(E); Stmt *S = E; while (S) { if (isa<ObjCAtFinallyStmt>(S)) return true; S = StmtMap->getParent(S); } return false; } bool isRemovable(Expr *E) const { return Removables.count(E); } bool tryRemoving(Expr *E) const { if (isRemovable(E)) { Pass.TA.removeStmt(E); return true; } Stmt *parent = StmtMap->getParent(E); if (ImplicitCastExpr *castE = dyn_cast_or_null<ImplicitCastExpr>(parent)) return tryRemoving(castE); if (ParenExpr *parenE = dyn_cast_or_null<ParenExpr>(parent)) return tryRemoving(parenE); if (BinaryOperator * bopE = dyn_cast_or_null<BinaryOperator>(parent)) { if (bopE->getOpcode() == BO_Comma && bopE->getLHS() == E && isRemovable(bopE)) { Pass.TA.replace(bopE->getSourceRange(), bopE->getRHS()->getSourceRange()); return true; } } return false; } }; } // anonymous namespace void trans::removeRetainReleaseDeallocFinalize(MigrationPass &pass) { BodyTransform<RetainReleaseDeallocRemover> trans(pass); trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl()); }
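Several of the checks above (isReturnedAfterAutorelease, isPlusOneAssignBeforeOrAfterAutorelease) come down to locating an expression's previous and next sibling statements through the ParentMap. A minimal standalone version of that sibling scan over a flat child list, assuming children are held in a vector; the Node type is a stand-in for clang::Stmt:

#include <cstddef>
#include <utility>
#include <vector>

struct Node; // stand-in for clang::Stmt

// Returns the statements immediately before and after Target among
// Children, or nullptr when Target is first/last/absent -- the same
// shape of result getPreviousAndNextStmt() produces.
std::pair<Node *, Node *>
siblingsAround(const std::vector<Node *> &Children, const Node *Target) {
  Node *Prev = nullptr, *Next = nullptr;
  for (std::size_t I = 0; I != Children.size(); ++I) {
    if (Children[I] == Target) {
      if (I + 1 < Children.size())
        Next = Children[I + 1];
      return {Prev, Next};
    }
    Prev = Children[I];
  }
  return {nullptr, nullptr}; // Target not found among Children
}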
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/ARCMT.cpp
//===--- ARCMT.cpp - Migration to ARC mode --------------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Internals.h" #include "clang/AST/ASTConsumer.h" #include "clang/Basic/DiagnosticCategories.h" #include "clang/Frontend/ASTUnit.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendAction.h" #include "clang/Frontend/TextDiagnosticPrinter.h" #include "clang/Frontend/Utils.h" #include "clang/Lex/Preprocessor.h" #include "clang/Rewrite/Core/Rewriter.h" #include "clang/Sema/SemaDiagnostic.h" #include "clang/Serialization/ASTReader.h" #include "llvm/ADT/Triple.h" #include "llvm/Support/MemoryBuffer.h" using namespace clang; using namespace arcmt; bool CapturedDiagList::clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) { if (range.isInvalid()) return false; bool cleared = false; ListTy::iterator I = List.begin(); while (I != List.end()) { FullSourceLoc diagLoc = I->getLocation(); if ((IDs.empty() || // empty means clear all diagnostics in the range. std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) && !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) && (diagLoc == range.getEnd() || diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) { cleared = true; ListTy::iterator eraseS = I++; if (eraseS->getLevel() != DiagnosticsEngine::Note) while (I != List.end() && I->getLevel() == DiagnosticsEngine::Note) ++I; // Clear the diagnostic and any notes following it. I = List.erase(eraseS, I); continue; } ++I; } return cleared; } bool CapturedDiagList::hasDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) const { if (range.isInvalid()) return false; ListTy::const_iterator I = List.begin(); while (I != List.end()) { FullSourceLoc diagLoc = I->getLocation(); if ((IDs.empty() || // empty means any diagnostic in the range. std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) && !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) && (diagLoc == range.getEnd() || diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) { return true; } ++I; } return false; } void CapturedDiagList::reportDiagnostics(DiagnosticsEngine &Diags) const { for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I) Diags.Report(*I); } bool CapturedDiagList::hasErrors() const { for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I) if (I->getLevel() >= DiagnosticsEngine::Error) return true; return false; } namespace { class CaptureDiagnosticConsumer : public DiagnosticConsumer { DiagnosticsEngine &Diags; DiagnosticConsumer &DiagClient; CapturedDiagList &CapturedDiags; bool HasBegunSourceFile; public: CaptureDiagnosticConsumer(DiagnosticsEngine &diags, DiagnosticConsumer &client, CapturedDiagList &capturedDiags) : Diags(diags), DiagClient(client), CapturedDiags(capturedDiags), HasBegunSourceFile(false) { } void BeginSourceFile(const LangOptions &Opts, const Preprocessor *PP) override { // Pass BeginSourceFile message onto DiagClient on first call. // The corresponding EndSourceFile call will be made from an // explicit call to FinishCapture. 
if (!HasBegunSourceFile) { DiagClient.BeginSourceFile(Opts, PP); HasBegunSourceFile = true; } } void FinishCapture() { // Call EndSourceFile on DiagClient on completion of capture to // enable VerifyDiagnosticConsumer to check diagnostics *after* // it has received the diagnostic list. if (HasBegunSourceFile) { DiagClient.EndSourceFile(); HasBegunSourceFile = false; } } ~CaptureDiagnosticConsumer() override { assert(!HasBegunSourceFile && "FinishCapture not called!"); } void HandleDiagnostic(DiagnosticsEngine::Level level, const Diagnostic &Info) override { if (DiagnosticIDs::isARCDiagnostic(Info.getID()) || level >= DiagnosticsEngine::Error || level == DiagnosticsEngine::Note) { if (Info.getLocation().isValid()) CapturedDiags.push_back(StoredDiagnostic(level, Info)); return; } // Non-ARC warnings are ignored. Diags.setLastDiagnosticIgnored(); } }; } // end anonymous namespace static bool HasARCRuntime(CompilerInvocation &origCI) { // This duplicates some functionality from Darwin::AddDeploymentTarget // but this function is well defined, so keep it decoupled from the driver // and avoid unrelated complications. llvm::Triple triple(origCI.getTargetOpts().Triple); if (triple.isiOS()) return triple.getOSMajorVersion() >= 5; if (triple.getOS() == llvm::Triple::Darwin) return triple.getOSMajorVersion() >= 11; if (triple.getOS() == llvm::Triple::MacOSX) { unsigned Major, Minor, Micro; triple.getOSVersion(Major, Minor, Micro); return Major > 10 || (Major == 10 && Minor >= 7); } return false; } static CompilerInvocation * createInvocationForMigration(CompilerInvocation &origCI, const PCHContainerReader &PCHContainerRdr) { std::unique_ptr<CompilerInvocation> CInvok; CInvok.reset(new CompilerInvocation(origCI)); PreprocessorOptions &PPOpts = CInvok->getPreprocessorOpts(); if (!PPOpts.ImplicitPCHInclude.empty()) { // We can't use a PCH because it was likely built in non-ARC mode and we // want to parse in ARC. Include the original header. FileManager FileMgr(origCI.getFileSystemOpts()); IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(), new IgnoringDiagConsumer())); std::string OriginalFile = ASTReader::getOriginalSourceFile( PPOpts.ImplicitPCHInclude, FileMgr, PCHContainerRdr, *Diags); if (!OriginalFile.empty()) PPOpts.Includes.insert(PPOpts.Includes.begin(), OriginalFile); PPOpts.ImplicitPCHInclude.clear(); } // FIXME: Get the original header of a PTH as well. CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear(); std::string define = getARCMTMacroName(); define += '='; CInvok->getPreprocessorOpts().addMacroDef(define); CInvok->getLangOpts()->ObjCAutoRefCount = true; CInvok->getLangOpts()->setGC(LangOptions::NonGC); CInvok->getDiagnosticOpts().ErrorLimit = 0; CInvok->getDiagnosticOpts().PedanticErrors = 0; // Ignore -Werror flags when migrating. 
std::vector<std::string> WarnOpts; for (std::vector<std::string>::iterator I = CInvok->getDiagnosticOpts().Warnings.begin(), E = CInvok->getDiagnosticOpts().Warnings.end(); I != E; ++I) { if (!StringRef(*I).startswith("error")) WarnOpts.push_back(*I); } WarnOpts.push_back("error=arc-unsafe-retained-assign"); CInvok->getDiagnosticOpts().Warnings = std::move(WarnOpts); CInvok->getLangOpts()->ObjCARCWeak = HasARCRuntime(origCI); return CInvok.release(); } static void emitPremigrationErrors(const CapturedDiagList &arcDiags, DiagnosticOptions *diagOpts, Preprocessor &PP) { TextDiagnosticPrinter printer(llvm::errs(), diagOpts); IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, diagOpts, &printer, /*ShouldOwnClient=*/false)); Diags->setSourceManager(&PP.getSourceManager()); printer.BeginSourceFile(PP.getLangOpts(), &PP); arcDiags.reportDiagnostics(*Diags); printer.EndSourceFile(); } //===----------------------------------------------------------------------===// // checkForManualIssues. //===----------------------------------------------------------------------===// bool arcmt::checkForManualIssues( CompilerInvocation &origCI, const FrontendInputFile &Input, std::shared_ptr<PCHContainerOperations> PCHContainerOps, DiagnosticConsumer *DiagClient, bool emitPremigrationARCErrors, StringRef plistOut) { if (!origCI.getLangOpts()->ObjC1) return false; LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC(); bool NoNSAllocReallocError = origCI.getMigratorOpts().NoNSAllocReallocError; bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval; std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode, NoFinalizeRemoval); assert(!transforms.empty()); std::unique_ptr<CompilerInvocation> CInvok; CInvok.reset( createInvocationForMigration(origCI, PCHContainerOps->getRawReader())); CInvok->getFrontendOpts().Inputs.clear(); CInvok->getFrontendOpts().Inputs.push_back(Input); CapturedDiagList capturedDiags; assert(DiagClient); IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(), DiagClient, /*ShouldOwnClient=*/false)); // Filter of all diagnostics. CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags); Diags->setClient(&errRec, /*ShouldOwnClient=*/false); std::unique_ptr<ASTUnit> Unit(ASTUnit::LoadFromCompilerInvocationAction( CInvok.release(), PCHContainerOps, Diags)); if (!Unit) { errRec.FinishCapture(); return true; } // Don't filter diagnostics anymore. Diags->setClient(DiagClient, /*ShouldOwnClient=*/false); ASTContext &Ctx = Unit->getASTContext(); if (Diags->hasFatalErrorOccurred()) { Diags->Reset(); DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor()); capturedDiags.reportDiagnostics(*Diags); DiagClient->EndSourceFile(); errRec.FinishCapture(); return true; } if (emitPremigrationARCErrors) emitPremigrationErrors(capturedDiags, &origCI.getDiagnosticOpts(), Unit->getPreprocessor()); if (!plistOut.empty()) { SmallVector<StoredDiagnostic, 8> arcDiags; for (CapturedDiagList::iterator I = capturedDiags.begin(), E = capturedDiags.end(); I != E; ++I) arcDiags.push_back(*I); writeARCDiagsToPlist(plistOut, arcDiags, Ctx.getSourceManager(), Ctx.getLangOpts()); } // After parsing of source files ended, we want to reuse the // diagnostics objects to emit further diagnostics. 
// We call BeginSourceFile because DiagnosticConsumer requires that // diagnostics with source range information are emitted only in between // BeginSourceFile() and EndSourceFile(). DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor()); // No macros will be added since we are just checking and we won't modify // source code. std::vector<SourceLocation> ARCMTMacroLocs; TransformActions testAct(*Diags, capturedDiags, Ctx, Unit->getPreprocessor()); MigrationPass pass(Ctx, OrigGCMode, Unit->getSema(), testAct, capturedDiags, ARCMTMacroLocs); pass.setNoFinalizeRemoval(NoFinalizeRemoval); if (!NoNSAllocReallocError) Diags->setSeverity(diag::warn_arcmt_nsalloc_realloc, diag::Severity::Error, SourceLocation()); for (unsigned i=0, e = transforms.size(); i != e; ++i) transforms[i](pass); capturedDiags.reportDiagnostics(*Diags); DiagClient->EndSourceFile(); errRec.FinishCapture(); return capturedDiags.hasErrors() || testAct.hasReportedErrors(); } //===----------------------------------------------------------------------===// // applyTransformations. //===----------------------------------------------------------------------===// static bool applyTransforms(CompilerInvocation &origCI, const FrontendInputFile &Input, std::shared_ptr<PCHContainerOperations> PCHContainerOps, DiagnosticConsumer *DiagClient, StringRef outputDir, bool emitPremigrationARCErrors, StringRef plistOut) { if (!origCI.getLangOpts()->ObjC1) return false; LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC(); // Make sure checking is successful first. CompilerInvocation CInvokForCheck(origCI); if (arcmt::checkForManualIssues(CInvokForCheck, Input, PCHContainerOps, DiagClient, emitPremigrationARCErrors, plistOut)) return true; CompilerInvocation CInvok(origCI); CInvok.getFrontendOpts().Inputs.clear(); CInvok.getFrontendOpts().Inputs.push_back(Input); MigrationProcess migration(CInvok, PCHContainerOps, DiagClient, outputDir); bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval; std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode, NoFinalizeRemoval); assert(!transforms.empty()); for (unsigned i=0, e = transforms.size(); i != e; ++i) { bool err = migration.applyTransform(transforms[i]); if (err) return true; } IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, &origCI.getDiagnosticOpts(), DiagClient, /*ShouldOwnClient=*/false)); if (outputDir.empty()) { origCI.getLangOpts()->ObjCAutoRefCount = true; return migration.getRemapper().overwriteOriginal(*Diags); } else { return migration.getRemapper().flushToDisk(outputDir, *Diags); } } bool arcmt::applyTransformations( CompilerInvocation &origCI, const FrontendInputFile &Input, std::shared_ptr<PCHContainerOperations> PCHContainerOps, DiagnosticConsumer *DiagClient) { return applyTransforms(origCI, Input, PCHContainerOps, DiagClient, StringRef(), false, StringRef()); } bool arcmt::migrateWithTemporaryFiles( CompilerInvocation &origCI, const FrontendInputFile &Input, std::shared_ptr<PCHContainerOperations> PCHContainerOps, DiagnosticConsumer *DiagClient, StringRef outputDir, bool emitPremigrationARCErrors, StringRef plistOut) { assert(!outputDir.empty() && "Expected output directory path"); return applyTransforms(origCI, Input, PCHContainerOps, DiagClient, outputDir, emitPremigrationARCErrors, plistOut); } bool arcmt::getFileRemappings(std::vector<std::pair<std::string,std::string> > & remap, StringRef outputDir, DiagnosticConsumer 
*DiagClient) { assert(!outputDir.empty()); IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, new DiagnosticOptions, DiagClient, /*ShouldOwnClient=*/false)); FileRemapper remapper; bool err = remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanged=*/true); if (err) return true; PreprocessorOptions PPOpts; remapper.applyMappings(PPOpts); remap = PPOpts.RemappedFiles; return false; } //===----------------------------------------------------------------------===// // CollectTransformActions. //===----------------------------------------------------------------------===// namespace { class ARCMTMacroTrackerPPCallbacks : public PPCallbacks { std::vector<SourceLocation> &ARCMTMacroLocs; public: ARCMTMacroTrackerPPCallbacks(std::vector<SourceLocation> &ARCMTMacroLocs) : ARCMTMacroLocs(ARCMTMacroLocs) { } void MacroExpands(const Token &MacroNameTok, const MacroDefinition &MD, SourceRange Range, const MacroArgs *Args) override { if (MacroNameTok.getIdentifierInfo()->getName() == getARCMTMacroName()) ARCMTMacroLocs.push_back(MacroNameTok.getLocation()); } }; class ARCMTMacroTrackerAction : public ASTFrontendAction { std::vector<SourceLocation> &ARCMTMacroLocs; public: ARCMTMacroTrackerAction(std::vector<SourceLocation> &ARCMTMacroLocs) : ARCMTMacroLocs(ARCMTMacroLocs) { } std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI, StringRef InFile) override { CI.getPreprocessor().addPPCallbacks( llvm::make_unique<ARCMTMacroTrackerPPCallbacks>(ARCMTMacroLocs)); return llvm::make_unique<ASTConsumer>(); } }; class RewritesApplicator : public TransformActions::RewriteReceiver { Rewriter &rewriter; MigrationProcess::RewriteListener *Listener; public: RewritesApplicator(Rewriter &rewriter, ASTContext &ctx, MigrationProcess::RewriteListener *listener) : rewriter(rewriter), Listener(listener) { if (Listener) Listener->start(ctx); } ~RewritesApplicator() override { if (Listener) Listener->finish(); } void insert(SourceLocation loc, StringRef text) override { bool err = rewriter.InsertText(loc, text, /*InsertAfter=*/true, /*indentNewLines=*/true); if (!err && Listener) Listener->insert(loc, text); } void remove(CharSourceRange range) override { Rewriter::RewriteOptions removeOpts; removeOpts.IncludeInsertsAtBeginOfRange = false; removeOpts.IncludeInsertsAtEndOfRange = false; removeOpts.RemoveLineIfEmpty = true; bool err = rewriter.RemoveText(range, removeOpts); if (!err && Listener) Listener->remove(range); } void increaseIndentation(CharSourceRange range, SourceLocation parentIndent) override { rewriter.IncreaseIndentation(range, parentIndent); } }; } // end anonymous namespace. /// \brief Anchor for VTable. 
MigrationProcess::RewriteListener::~RewriteListener() { } MigrationProcess::MigrationProcess( const CompilerInvocation &CI, std::shared_ptr<PCHContainerOperations> PCHContainerOps, DiagnosticConsumer *diagClient, StringRef outputDir) : OrigCI(CI), PCHContainerOps(PCHContainerOps), DiagClient(diagClient), HadARCErrors(false) { if (!outputDir.empty()) { IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, &CI.getDiagnosticOpts(), DiagClient, /*ShouldOwnClient=*/false)); Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanged=*/true); } } bool MigrationProcess::applyTransform(TransformFn trans, RewriteListener *listener) { std::unique_ptr<CompilerInvocation> CInvok; CInvok.reset( createInvocationForMigration(OrigCI, PCHContainerOps->getRawReader())); CInvok->getDiagnosticOpts().IgnoreWarnings = true; Remapper.applyMappings(CInvok->getPreprocessorOpts()); CapturedDiagList capturedDiags; std::vector<SourceLocation> ARCMTMacroLocs; assert(DiagClient); IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs()); IntrusiveRefCntPtr<DiagnosticsEngine> Diags( new DiagnosticsEngine(DiagID, new DiagnosticOptions, DiagClient, /*ShouldOwnClient=*/false)); // Filter of all diagnostics. CaptureDiagnosticConsumer errRec(*Diags, *DiagClient, capturedDiags); Diags->setClient(&errRec, /*ShouldOwnClient=*/false); std::unique_ptr<ARCMTMacroTrackerAction> ASTAction; ASTAction.reset(new ARCMTMacroTrackerAction(ARCMTMacroLocs)); std::unique_ptr<ASTUnit> Unit(ASTUnit::LoadFromCompilerInvocationAction( CInvok.release(), PCHContainerOps, Diags, ASTAction.get())); if (!Unit) { errRec.FinishCapture(); return true; } Unit->setOwnsRemappedFileBuffers(false); // FileRemapper manages that. HadARCErrors = HadARCErrors || capturedDiags.hasErrors(); // Don't filter diagnostics anymore. Diags->setClient(DiagClient, /*ShouldOwnClient=*/false); ASTContext &Ctx = Unit->getASTContext(); if (Diags->hasFatalErrorOccurred()) { Diags->Reset(); DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor()); capturedDiags.reportDiagnostics(*Diags); DiagClient->EndSourceFile(); errRec.FinishCapture(); return true; } // After parsing of source files ended, we want to reuse the // diagnostics objects to emit further diagnostics. // We call BeginSourceFile because DiagnosticConsumer requires that // diagnostics with source range information are emitted only in between // BeginSourceFile() and EndSourceFile(). 
DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor()); Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts()); TransformActions TA(*Diags, capturedDiags, Ctx, Unit->getPreprocessor()); MigrationPass pass(Ctx, OrigCI.getLangOpts()->getGC(), Unit->getSema(), TA, capturedDiags, ARCMTMacroLocs); trans(pass); { RewritesApplicator applicator(rewriter, Ctx, listener); TA.applyRewrites(applicator); } DiagClient->EndSourceFile(); errRec.FinishCapture(); if (DiagClient->getNumErrors()) return true; for (Rewriter::buffer_iterator I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) { FileID FID = I->first; RewriteBuffer &buf = I->second; const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID); assert(file); std::string newFname = file->getName(); newFname += "-trans"; SmallString<512> newText; llvm::raw_svector_ostream vecOS(newText); buf.write(vecOS); vecOS.flush(); std::unique_ptr<llvm::MemoryBuffer> memBuf( llvm::MemoryBuffer::getMemBufferCopy( StringRef(newText.data(), newText.size()), newFname)); SmallString<64> filePath(file->getName()); Unit->getFileManager().FixupRelativePath(filePath); Remapper.remap(filePath.str(), std::move(memBuf)); } return false; }
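Taken together, the entry points in this file let a client drive a whole in-place migration with two calls: the check-only pass first, then the transformation pipeline. A sketch under that reading, eliding construction of the invocation and diagnostic client; migrateInPlace is a hypothetical wrapper, while the two arcmt calls are the real entry points defined above:

#include "clang/ARCMigrate/ARCMT.h"
#include <memory>

// Returns true on failure, matching the convention used in this file.
bool migrateInPlace(clang::CompilerInvocation &CI,
                    const clang::FrontendInputFile &Input,
                    std::shared_ptr<clang::PCHContainerOperations> PCHOps,
                    clang::DiagnosticConsumer *DiagClient) {
  // Surface constructs that need manual attention before rewriting.
  if (clang::arcmt::checkForManualIssues(CI, Input, PCHOps, DiagClient,
                                         /*emitPremigrationARCErrors=*/false,
                                         /*plistOut=*/llvm::StringRef()))
    return true;
  // Re-checks internally, applies every transform, and overwrites the
  // original files since no output directory is involved.
  return clang::arcmt::applyTransformations(CI, Input, PCHOps, DiagClient);
}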
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransformActions.cpp
//===--- TransformActions.cpp - Migration to ARC mode ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Expr.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Preprocessor.h" #include "llvm/ADT/DenseSet.h" #include <map> using namespace clang; using namespace arcmt; namespace { /// \brief Collects transformations and merges them before applying them /// with applyRewrites(). E.g. if the same source range /// is requested to be removed twice, only one rewriter remove will be invoked. /// Rewrites happen in "transactions"; if one rewrite in the transaction cannot /// be done (e.g. it resides in a macro) all rewrites in the transaction are /// aborted. /// FIXME: "Transactional" rewrites support should be baked into the Rewriter. class TransformActionsImpl { CapturedDiagList &CapturedDiags; ASTContext &Ctx; Preprocessor &PP; bool IsInTransaction; enum ActionKind { Act_Insert, Act_InsertAfterToken, Act_Remove, Act_RemoveStmt, Act_Replace, Act_ReplaceText, Act_IncreaseIndentation, Act_ClearDiagnostic }; struct ActionData { ActionKind Kind; SourceLocation Loc; SourceRange R1, R2; StringRef Text1, Text2; Stmt *S; SmallVector<unsigned, 2> DiagIDs; }; std::vector<ActionData> CachedActions; enum RangeComparison { Range_Before, Range_After, Range_Contains, Range_Contained, Range_ExtendsBegin, Range_ExtendsEnd }; /// \brief A range to remove. It is a character range. struct CharRange { FullSourceLoc Begin, End; CharRange(CharSourceRange range, SourceManager &srcMgr, Preprocessor &PP) { SourceLocation beginLoc = range.getBegin(), endLoc = range.getEnd(); assert(beginLoc.isValid() && endLoc.isValid()); if (range.isTokenRange()) { Begin = FullSourceLoc(srcMgr.getExpansionLoc(beginLoc), srcMgr); End = FullSourceLoc(getLocForEndOfToken(endLoc, srcMgr, PP), srcMgr); } else { Begin = FullSourceLoc(srcMgr.getExpansionLoc(beginLoc), srcMgr); End = FullSourceLoc(srcMgr.getExpansionLoc(endLoc), srcMgr); } assert(Begin.isValid() && End.isValid()); } RangeComparison compareWith(const CharRange &RHS) const { if (End.isBeforeInTranslationUnitThan(RHS.Begin)) return Range_Before; if (RHS.End.isBeforeInTranslationUnitThan(Begin)) return Range_After; if (!Begin.isBeforeInTranslationUnitThan(RHS.Begin) && !RHS.End.isBeforeInTranslationUnitThan(End)) return Range_Contained; if (Begin.isBeforeInTranslationUnitThan(RHS.Begin) && RHS.End.isBeforeInTranslationUnitThan(End)) return Range_Contains; if (Begin.isBeforeInTranslationUnitThan(RHS.Begin)) return Range_ExtendsBegin; else return Range_ExtendsEnd; } static RangeComparison compare(SourceRange LHS, SourceRange RHS, SourceManager &SrcMgr, Preprocessor &PP) { return CharRange(CharSourceRange::getTokenRange(LHS), SrcMgr, PP) .compareWith(CharRange(CharSourceRange::getTokenRange(RHS), SrcMgr, PP)); } }; typedef SmallVector<StringRef, 2> TextsVec; typedef std::map<FullSourceLoc, TextsVec, FullSourceLoc::BeforeThanCompare> InsertsMap; InsertsMap Inserts; /// \brief A list of ranges to remove. They are always sorted and they never /// intersect with each other. std::list<CharRange> Removals; llvm::DenseSet<Stmt *> StmtRemovals; std::vector<std::pair<CharRange, SourceLocation> > IndentationRanges; /// \brief Keeps text passed to transformation methods. 
llvm::StringMap<bool> UniqueText; public: TransformActionsImpl(CapturedDiagList &capturedDiags, ASTContext &ctx, Preprocessor &PP) : CapturedDiags(capturedDiags), Ctx(ctx), PP(PP), IsInTransaction(false) { } ASTContext &getASTContext() { return Ctx; } void startTransaction(); bool commitTransaction(); void abortTransaction(); bool isInTransaction() const { return IsInTransaction; } void insert(SourceLocation loc, StringRef text); void insertAfterToken(SourceLocation loc, StringRef text); void remove(SourceRange range); void removeStmt(Stmt *S); void replace(SourceRange range, StringRef text); void replace(SourceRange range, SourceRange replacementRange); void replaceStmt(Stmt *S, StringRef text); void replaceText(SourceLocation loc, StringRef text, StringRef replacementText); void increaseIndentation(SourceRange range, SourceLocation parentIndent); bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range); void applyRewrites(TransformActions::RewriteReceiver &receiver); private: bool canInsert(SourceLocation loc); bool canInsertAfterToken(SourceLocation loc); bool canRemoveRange(SourceRange range); bool canReplaceRange(SourceRange range, SourceRange replacementRange); bool canReplaceText(SourceLocation loc, StringRef text); void commitInsert(SourceLocation loc, StringRef text); void commitInsertAfterToken(SourceLocation loc, StringRef text); void commitRemove(SourceRange range); void commitRemoveStmt(Stmt *S); void commitReplace(SourceRange range, SourceRange replacementRange); void commitReplaceText(SourceLocation loc, StringRef text, StringRef replacementText); void commitIncreaseIndentation(SourceRange range,SourceLocation parentIndent); void commitClearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range); void addRemoval(CharSourceRange range); void addInsertion(SourceLocation loc, StringRef text); /// \brief Stores text passed to the transformation methods to keep the string /// "alive". Since the vast majority of text will be the same, we also unique /// the strings using a StringMap. StringRef getUniqueText(StringRef text); /// \brief Computes the source location just past the end of the token at /// the given source location. If the location points at a macro, the whole /// macro expansion is skipped. static SourceLocation getLocForEndOfToken(SourceLocation loc, SourceManager &SM,Preprocessor &PP); }; } // anonymous namespace void TransformActionsImpl::startTransaction() { assert(!IsInTransaction && "Cannot start a transaction in the middle of another one"); IsInTransaction = true; } bool TransformActionsImpl::commitTransaction() { assert(IsInTransaction && "No transaction started"); if (CachedActions.empty()) { IsInTransaction = false; return false; } // Verify that all actions are possible otherwise abort the whole transaction. 
bool AllActionsPossible = true; for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) { ActionData &act = CachedActions[i]; switch (act.Kind) { case Act_Insert: if (!canInsert(act.Loc)) AllActionsPossible = false; break; case Act_InsertAfterToken: if (!canInsertAfterToken(act.Loc)) AllActionsPossible = false; break; case Act_Remove: if (!canRemoveRange(act.R1)) AllActionsPossible = false; break; case Act_RemoveStmt: assert(act.S); if (!canRemoveRange(act.S->getSourceRange())) AllActionsPossible = false; break; case Act_Replace: if (!canReplaceRange(act.R1, act.R2)) AllActionsPossible = false; break; case Act_ReplaceText: if (!canReplaceText(act.Loc, act.Text1)) AllActionsPossible = false; break; case Act_IncreaseIndentation: // This is not important, we don't care if it will fail. break; case Act_ClearDiagnostic: // We are just checking source rewrites. break; } if (!AllActionsPossible) break; } if (!AllActionsPossible) { abortTransaction(); return true; } for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) { ActionData &act = CachedActions[i]; switch (act.Kind) { case Act_Insert: commitInsert(act.Loc, act.Text1); break; case Act_InsertAfterToken: commitInsertAfterToken(act.Loc, act.Text1); break; case Act_Remove: commitRemove(act.R1); break; case Act_RemoveStmt: commitRemoveStmt(act.S); break; case Act_Replace: commitReplace(act.R1, act.R2); break; case Act_ReplaceText: commitReplaceText(act.Loc, act.Text1, act.Text2); break; case Act_IncreaseIndentation: commitIncreaseIndentation(act.R1, act.Loc); break; case Act_ClearDiagnostic: commitClearDiagnostic(act.DiagIDs, act.R1); break; } } CachedActions.clear(); IsInTransaction = false; return false; } void TransformActionsImpl::abortTransaction() { assert(IsInTransaction && "No transaction started"); CachedActions.clear(); IsInTransaction = false; } void TransformActionsImpl::insert(SourceLocation loc, StringRef text) { assert(IsInTransaction && "Actions only allowed during a transaction"); text = getUniqueText(text); ActionData data; data.Kind = Act_Insert; data.Loc = loc; data.Text1 = text; CachedActions.push_back(data); } void TransformActionsImpl::insertAfterToken(SourceLocation loc, StringRef text) { assert(IsInTransaction && "Actions only allowed during a transaction"); text = getUniqueText(text); ActionData data; data.Kind = Act_InsertAfterToken; data.Loc = loc; data.Text1 = text; CachedActions.push_back(data); } void TransformActionsImpl::remove(SourceRange range) { assert(IsInTransaction && "Actions only allowed during a transaction"); ActionData data; data.Kind = Act_Remove; data.R1 = range; CachedActions.push_back(data); } void TransformActionsImpl::removeStmt(Stmt *S) { assert(IsInTransaction && "Actions only allowed during a transaction"); ActionData data; data.Kind = Act_RemoveStmt; data.S = S->IgnoreImplicit(); // important for uniquing CachedActions.push_back(data); } void TransformActionsImpl::replace(SourceRange range, StringRef text) { assert(IsInTransaction && "Actions only allowed during a transaction"); text = getUniqueText(text); remove(range); insert(range.getBegin(), text); } void TransformActionsImpl::replace(SourceRange range, SourceRange replacementRange) { assert(IsInTransaction && "Actions only allowed during a transaction"); ActionData data; data.Kind = Act_Replace; data.R1 = range; data.R2 = replacementRange; CachedActions.push_back(data); } void TransformActionsImpl::replaceText(SourceLocation loc, StringRef text, StringRef replacementText) { text = getUniqueText(text); replacementText = 
getUniqueText(replacementText); ActionData data; data.Kind = Act_ReplaceText; data.Loc = loc; data.Text1 = text; data.Text2 = replacementText; CachedActions.push_back(data); } void TransformActionsImpl::replaceStmt(Stmt *S, StringRef text) { assert(IsInTransaction && "Actions only allowed during a transaction"); text = getUniqueText(text); insert(S->getLocStart(), text); removeStmt(S); } void TransformActionsImpl::increaseIndentation(SourceRange range, SourceLocation parentIndent) { if (range.isInvalid()) return; assert(IsInTransaction && "Actions only allowed during a transaction"); ActionData data; data.Kind = Act_IncreaseIndentation; data.R1 = range; data.Loc = parentIndent; CachedActions.push_back(data); } bool TransformActionsImpl::clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) { assert(IsInTransaction && "Actions only allowed during a transaction"); if (!CapturedDiags.hasDiagnostic(IDs, range)) return false; ActionData data; data.Kind = Act_ClearDiagnostic; data.R1 = range; data.DiagIDs.append(IDs.begin(), IDs.end()); CachedActions.push_back(data); return true; } bool TransformActionsImpl::canInsert(SourceLocation loc) { if (loc.isInvalid()) return false; SourceManager &SM = Ctx.getSourceManager(); if (SM.isInSystemHeader(SM.getExpansionLoc(loc))) return false; if (loc.isFileID()) return true; return PP.isAtStartOfMacroExpansion(loc); } bool TransformActionsImpl::canInsertAfterToken(SourceLocation loc) { if (loc.isInvalid()) return false; SourceManager &SM = Ctx.getSourceManager(); if (SM.isInSystemHeader(SM.getExpansionLoc(loc))) return false; if (loc.isFileID()) return true; return PP.isAtEndOfMacroExpansion(loc); } bool TransformActionsImpl::canRemoveRange(SourceRange range) { return canInsert(range.getBegin()) && canInsertAfterToken(range.getEnd()); } bool TransformActionsImpl::canReplaceRange(SourceRange range, SourceRange replacementRange) { return canRemoveRange(range) && canRemoveRange(replacementRange); } bool TransformActionsImpl::canReplaceText(SourceLocation loc, StringRef text) { if (!canInsert(loc)) return false; SourceManager &SM = Ctx.getSourceManager(); loc = SM.getExpansionLoc(loc); // Break down the source location. std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc); // Try to load the file buffer. bool invalidTemp = false; StringRef file = SM.getBufferData(locInfo.first, &invalidTemp); if (invalidTemp) return false; return file.substr(locInfo.second).startswith(text); } void TransformActionsImpl::commitInsert(SourceLocation loc, StringRef text) { addInsertion(loc, text); } void TransformActionsImpl::commitInsertAfterToken(SourceLocation loc, StringRef text) { addInsertion(getLocForEndOfToken(loc, Ctx.getSourceManager(), PP), text); } void TransformActionsImpl::commitRemove(SourceRange range) { addRemoval(CharSourceRange::getTokenRange(range)); } void TransformActionsImpl::commitRemoveStmt(Stmt *S) { assert(S); if (StmtRemovals.count(S)) return; // already removed. if (Expr *E = dyn_cast<Expr>(S)) { commitRemove(E->getSourceRange()); commitInsert(E->getSourceRange().getBegin(), getARCMTMacroName()); } else commitRemove(S->getSourceRange()); StmtRemovals.insert(S); } void TransformActionsImpl::commitReplace(SourceRange range, SourceRange replacementRange) { RangeComparison comp = CharRange::compare(replacementRange, range, Ctx.getSourceManager(), PP); assert(comp == Range_Contained); if (comp != Range_Contained) return; // Although we asserted, be extra safe for release build. 
if (range.getBegin() != replacementRange.getBegin()) addRemoval(CharSourceRange::getCharRange(range.getBegin(), replacementRange.getBegin())); if (replacementRange.getEnd() != range.getEnd()) addRemoval(CharSourceRange::getTokenRange( getLocForEndOfToken(replacementRange.getEnd(), Ctx.getSourceManager(), PP), range.getEnd())); } void TransformActionsImpl::commitReplaceText(SourceLocation loc, StringRef text, StringRef replacementText) { SourceManager &SM = Ctx.getSourceManager(); loc = SM.getExpansionLoc(loc); // canReplaceText already checked if loc points at text. SourceLocation afterText = loc.getLocWithOffset(text.size()); addRemoval(CharSourceRange::getCharRange(loc, afterText)); commitInsert(loc, replacementText); } void TransformActionsImpl::commitIncreaseIndentation(SourceRange range, SourceLocation parentIndent) { SourceManager &SM = Ctx.getSourceManager(); IndentationRanges.push_back( std::make_pair(CharRange(CharSourceRange::getTokenRange(range), SM, PP), SM.getExpansionLoc(parentIndent))); } void TransformActionsImpl::commitClearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) { CapturedDiags.clearDiagnostic(IDs, range); } void TransformActionsImpl::addInsertion(SourceLocation loc, StringRef text) { SourceManager &SM = Ctx.getSourceManager(); loc = SM.getExpansionLoc(loc); for (std::list<CharRange>::reverse_iterator I = Removals.rbegin(), E = Removals.rend(); I != E; ++I) { if (!SM.isBeforeInTranslationUnit(loc, I->End)) break; if (I->Begin.isBeforeInTranslationUnitThan(loc)) return; } Inserts[FullSourceLoc(loc, SM)].push_back(text); } void TransformActionsImpl::addRemoval(CharSourceRange range) { CharRange newRange(range, Ctx.getSourceManager(), PP); if (newRange.Begin == newRange.End) return; Inserts.erase(Inserts.upper_bound(newRange.Begin), Inserts.lower_bound(newRange.End)); std::list<CharRange>::iterator I = Removals.end(); while (I != Removals.begin()) { std::list<CharRange>::iterator RI = I; --RI; RangeComparison comp = newRange.compareWith(*RI); switch (comp) { case Range_Before: --I; break; case Range_After: Removals.insert(I, newRange); return; case Range_Contained: return; case Range_Contains: RI->End = newRange.End; case Range_ExtendsBegin: newRange.End = RI->End; Removals.erase(RI); break; case Range_ExtendsEnd: RI->End = newRange.End; return; } } Removals.insert(Removals.begin(), newRange); } void TransformActionsImpl::applyRewrites( TransformActions::RewriteReceiver &receiver) { for (InsertsMap::iterator I = Inserts.begin(), E = Inserts.end(); I!=E; ++I) { SourceLocation loc = I->first; for (TextsVec::iterator TI = I->second.begin(), TE = I->second.end(); TI != TE; ++TI) { receiver.insert(loc, *TI); } } for (std::vector<std::pair<CharRange, SourceLocation> >::iterator I = IndentationRanges.begin(), E = IndentationRanges.end(); I!=E; ++I) { CharSourceRange range = CharSourceRange::getCharRange(I->first.Begin, I->first.End); receiver.increaseIndentation(range, I->second); } for (std::list<CharRange>::iterator I = Removals.begin(), E = Removals.end(); I != E; ++I) { CharSourceRange range = CharSourceRange::getCharRange(I->Begin, I->End); receiver.remove(range); } } /// \brief Stores text passed to the transformation methods to keep the string /// "alive". Since the vast majority of text will be the same, we also unique /// the strings using a StringMap. 
StringRef TransformActionsImpl::getUniqueText(StringRef text) { return UniqueText.insert(std::make_pair(text, false)).first->first(); } /// \brief Computes the source location just past the end of the token at /// the given source location. If the location points at a macro, the whole /// macro expansion is skipped. SourceLocation TransformActionsImpl::getLocForEndOfToken(SourceLocation loc, SourceManager &SM, Preprocessor &PP) { if (loc.isMacroID()) loc = SM.getExpansionRange(loc).second; return PP.getLocForEndOfToken(loc); } TransformActions::RewriteReceiver::~RewriteReceiver() { } TransformActions::TransformActions(DiagnosticsEngine &diag, CapturedDiagList &capturedDiags, ASTContext &ctx, Preprocessor &PP) : Diags(diag), CapturedDiags(capturedDiags) { Impl = new TransformActionsImpl(capturedDiags, ctx, PP); } TransformActions::~TransformActions() { delete static_cast<TransformActionsImpl*>(Impl); } void TransformActions::startTransaction() { static_cast<TransformActionsImpl*>(Impl)->startTransaction(); } bool TransformActions::commitTransaction() { return static_cast<TransformActionsImpl*>(Impl)->commitTransaction(); } void TransformActions::abortTransaction() { static_cast<TransformActionsImpl*>(Impl)->abortTransaction(); } void TransformActions::insert(SourceLocation loc, StringRef text) { static_cast<TransformActionsImpl*>(Impl)->insert(loc, text); } void TransformActions::insertAfterToken(SourceLocation loc, StringRef text) { static_cast<TransformActionsImpl*>(Impl)->insertAfterToken(loc, text); } void TransformActions::remove(SourceRange range) { static_cast<TransformActionsImpl*>(Impl)->remove(range); } void TransformActions::removeStmt(Stmt *S) { static_cast<TransformActionsImpl*>(Impl)->removeStmt(S); } void TransformActions::replace(SourceRange range, StringRef text) { static_cast<TransformActionsImpl*>(Impl)->replace(range, text); } void TransformActions::replace(SourceRange range, SourceRange replacementRange) { static_cast<TransformActionsImpl*>(Impl)->replace(range, replacementRange); } void TransformActions::replaceStmt(Stmt *S, StringRef text) { static_cast<TransformActionsImpl*>(Impl)->replaceStmt(S, text); } void TransformActions::replaceText(SourceLocation loc, StringRef text, StringRef replacementText) { static_cast<TransformActionsImpl*>(Impl)->replaceText(loc, text, replacementText); } void TransformActions::increaseIndentation(SourceRange range, SourceLocation parentIndent) { static_cast<TransformActionsImpl*>(Impl)->increaseIndentation(range, parentIndent); } bool TransformActions::clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) { return static_cast<TransformActionsImpl*>(Impl)->clearDiagnostic(IDs, range); } void TransformActions::applyRewrites(RewriteReceiver &receiver) { static_cast<TransformActionsImpl*>(Impl)->applyRewrites(receiver); } DiagnosticBuilder TransformActions::report(SourceLocation loc, unsigned diagId, SourceRange range) { assert(!static_cast<TransformActionsImpl *>(Impl)->isInTransaction() && "Errors should be emitted out of a transaction"); return Diags.Report(loc, diagId) << range; } void TransformActions::reportError(StringRef message, SourceLocation loc, SourceRange range) { report(loc, diag::err_mt_message, range) << message; } void TransformActions::reportWarning(StringRef message, SourceLocation loc, SourceRange range) { report(loc, diag::warn_mt_message, range) << message; } void TransformActions::reportNote(StringRef message, SourceLocation loc, SourceRange range) { report(loc, diag::note_mt_message, range) << message; }
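Clients use the class above through a start/commit bracket: actions queue during the transaction, and if any one of them turns out to be impossible (for instance, its range sits inside a macro body), commitTransaction() drops the whole batch, so no partial rewrite escapes. A minimal sketch of that usage, assuming the surrounding arcmt headers; the Transaction RAII helper used elsewhere in this directory brackets the same two calls:

// Queues a replacement and commits it atomically. Returns true if the
// transaction was aborted and nothing was rewritten.
bool replaceStmtAtomically(TransformActions &TA, Stmt *S, StringRef Text) {
  TA.startTransaction();
  TA.replaceStmt(S, Text);        // queued, validated only at commit
  return TA.commitTransaction();  // true => aborted, no partial edits
}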
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/PlistReporter.cpp
//===--- PlistReporter.cpp - ARC Migrate Tool Plist Reporter ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Internals.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/PlistSupport.h" #include "clang/Basic/SourceManager.h" #include "clang/Lex/Lexer.h" using namespace clang; using namespace arcmt; using namespace markup; static StringRef getLevelName(DiagnosticsEngine::Level Level) { switch (Level) { case DiagnosticsEngine::Ignored: llvm_unreachable("ignored"); case DiagnosticsEngine::Note: return "note"; case DiagnosticsEngine::Remark: case DiagnosticsEngine::Warning: return "warning"; case DiagnosticsEngine::Fatal: case DiagnosticsEngine::Error: return "error"; } llvm_unreachable("Invalid DiagnosticsEngine level!"); } void arcmt::writeARCDiagsToPlist(const std::string &outPath, ArrayRef<StoredDiagnostic> diags, SourceManager &SM, const LangOptions &LangOpts) { DiagnosticIDs DiagIDs; // Build up a set of FIDs that we use by scanning the locations and // ranges of the diagnostics. FIDMap FM; SmallVector<FileID, 10> Fids; for (ArrayRef<StoredDiagnostic>::iterator I = diags.begin(), E = diags.end(); I != E; ++I) { const StoredDiagnostic &D = *I; AddFID(FM, Fids, SM, D.getLocation()); for (StoredDiagnostic::range_iterator RI = D.range_begin(), RE = D.range_end(); RI != RE; ++RI) { AddFID(FM, Fids, SM, RI->getBegin()); AddFID(FM, Fids, SM, RI->getEnd()); } } std::error_code EC; llvm::raw_fd_ostream o(outPath, EC, llvm::sys::fs::F_Text); if (EC) { llvm::errs() << "error: could not create file: " << outPath << '\n'; return; } EmitPlistHeader(o); // Write the root object: a <dict> containing... // - "files", an <array> mapping from FIDs to file names // - "diagnostics", an <array> containing the diagnostics o << "<dict>\n" " <key>files</key>\n" " <array>\n"; for (FileID FID : Fids) EmitString(o << " ", SM.getFileEntryForID(FID)->getName()) << '\n'; o << " </array>\n" " <key>diagnostics</key>\n" " <array>\n"; for (ArrayRef<StoredDiagnostic>::iterator DI = diags.begin(), DE = diags.end(); DI != DE; ++DI) { const StoredDiagnostic &D = *DI; if (D.getLevel() == DiagnosticsEngine::Ignored) continue; o << " <dict>\n"; // Output the diagnostic. o << " <key>description</key>"; EmitString(o, D.getMessage()) << '\n'; o << " <key>category</key>"; EmitString(o, DiagIDs.getCategoryNameFromID( DiagIDs.getCategoryNumberForDiag(D.getID()))) << '\n'; o << " <key>type</key>"; EmitString(o, getLevelName(D.getLevel())) << '\n'; // Output the location of the bug. o << " <key>location</key>\n"; EmitLocation(o, SM, D.getLocation(), FM, 2); // Output the ranges (if any). if (!D.getRanges().empty()) { o << " <key>ranges</key>\n"; o << " <array>\n"; for (auto &R : D.getRanges()) { CharSourceRange ExpansionRange(SM.getExpansionRange(R.getAsRange()), R.isTokenRange()); EmitRange(o, SM, Lexer::getAsCharRange(ExpansionRange, SM, LangOpts), FM, 4); } o << " </array>\n"; } // Close up the entry. o << " </dict>\n"; } o << " </array>\n"; // Finish. o << "</dict>\n</plist>"; }
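For reference, the overall shape of the plist emitted above for a single warning, with paths and values illustrative and the location/ranges entries standing in for what EmitLocation() and EmitRange() produce:

<?xml version="1.0" encoding="UTF-8"?>
<plist version="1.0">
<dict>
 <key>files</key>
 <array>
  <string>/path/to/test.m</string>
 </array>
 <key>diagnostics</key>
 <array>
  <dict>
   <key>description</key><string>diagnostic message</string>
   <key>category</key><string>category name</string>
   <key>type</key><string>warning</string>
   <key>location</key>
   <dict><!-- line/col/file keys from EmitLocation() --></dict>
   <key>ranges</key>
   <array><!-- start/end location pairs from EmitRange() --></array>
  </dict>
 </array>
</dict>
</plist>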
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
//===--- TransGCCalls.cpp - Transformations to ARC mode -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "Transforms.h" #include "Internals.h" #include "clang/AST/ASTContext.h" #include "clang/Sema/SemaDiagnostic.h" using namespace clang; using namespace arcmt; using namespace trans; namespace { class GCCollectableCallsChecker : public RecursiveASTVisitor<GCCollectableCallsChecker> { MigrationContext &MigrateCtx; IdentifierInfo *NSMakeCollectableII; IdentifierInfo *CFMakeCollectableII; public: GCCollectableCallsChecker(MigrationContext &ctx) : MigrateCtx(ctx) { IdentifierTable &Ids = MigrateCtx.Pass.Ctx.Idents; NSMakeCollectableII = &Ids.get("NSMakeCollectable"); CFMakeCollectableII = &Ids.get("CFMakeCollectable"); } bool shouldWalkTypesOfTypeLocs() const { return false; } bool VisitCallExpr(CallExpr *E) { TransformActions &TA = MigrateCtx.Pass.TA; if (MigrateCtx.isGCOwnedNonObjC(E->getType())) { TA.report(E->getLocStart(), diag::warn_arcmt_nsalloc_realloc, E->getSourceRange()); return true; } Expr *CEE = E->getCallee()->IgnoreParenImpCasts(); if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) { if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DRE->getDecl())) { if (!FD->getDeclContext()->getRedeclContext()->isFileContext()) return true; if (FD->getIdentifier() == NSMakeCollectableII) { Transaction Trans(TA); TA.clearDiagnostic(diag::err_unavailable, diag::err_unavailable_message, diag::err_ovl_deleted_call, // ObjC++ DRE->getSourceRange()); TA.replace(DRE->getSourceRange(), "CFBridgingRelease"); } else if (FD->getIdentifier() == CFMakeCollectableII) { TA.reportError("CFMakeCollectable will leak the object that it " "receives in ARC", DRE->getLocation(), DRE->getSourceRange()); } } } return true; } }; } // anonymous namespace void GCCollectableCallsTraverser::traverseBody(BodyContext &BodyCtx) { GCCollectableCallsChecker(BodyCtx.getMigrationContext()) .TraverseStmt(BodyCtx.getTopStmt()); }
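Note how the checker above compares FD->getIdentifier() against IdentifierInfo pointers fetched once from the identifier table: because Clang interns each spelling to a single IdentifierInfo, a name test becomes a pointer comparison rather than a string compare. A toy version of that interning, under invented names:

#include <cassert>
#include <string>
#include <unordered_map>

struct Identifier {}; // stands in for clang::IdentifierInfo

class IdentifierTable {
  std::unordered_map<std::string, Identifier> Table;

public:
  // unordered_map is node-based, so element addresses survive rehashing;
  // each spelling therefore maps to one stable Identifier address.
  Identifier *get(const std::string &Name) { return &Table[Name]; }
};

int main() {
  IdentifierTable Ids;
  Identifier *NSMakeCollectableII = Ids.get("NSMakeCollectable");

  // Later lookups of the same spelling return the same interned pointer.
  assert(Ids.get("NSMakeCollectable") == NSMakeCollectableII);
  assert(Ids.get("CFMakeCollectable") != NSMakeCollectableII);
  return 0;
}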
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenTypes.cpp
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This is the code that handles AST -> LLVM type lowering. // //===----------------------------------------------------------------------===// #include "CodeGenTypes.h" #include "CGCXXABI.h" #include "CGCall.h" #include "CGOpenCLRuntime.h" #include "CGRecordLayout.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/RecordLayout.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Module.h" #include "CodeGenModule.h" // HLSL Change #include "CGHLSLRuntime.h" // HLSL Change using namespace clang; using namespace CodeGen; CodeGenTypes::CodeGenTypes(CodeGenModule &cgm) : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()), TheDataLayout(cgm.getDataLayout()), Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()), TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) { SkippedLayout = false; } CodeGenTypes::~CodeGenTypes() { llvm::DeleteContainerSeconds(CGRecordLayouts); for (llvm::FoldingSet<CGFunctionInfo>::iterator I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; ) delete &*I++; } void CodeGenTypes::addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty, StringRef suffix) { SmallString<256> TypeName; llvm::raw_svector_ostream OS(TypeName); OS << RD->getKindName() << '.'; // Name the codegen type after the typedef name // if there is no tag type name available if (RD->getIdentifier()) { // FIXME: We should not have to check for a null decl context here. // Right now we do it because the implicit Obj-C decls don't have one. if (RD->getDeclContext()) RD->printQualifiedName(OS); else RD->printName(OS); } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) { // FIXME: We should not have to check for a null decl context here. // Right now we do it because the implicit Obj-C decls don't have one. if (TDD->getDeclContext()) TDD->printQualifiedName(OS); else TDD->printName(OS); } else OS << "anon"; if (!suffix.empty()) OS << suffix; // HLSL Change Starts const clang::PrintingPolicy &policy = RD->getASTContext().getPrintingPolicy(); // Note that this check, like the 'abs' check, shouldn't happen by simple // string comparison; instead we can map the types and functions we // inject and look them up by a simple pointer value check. bool isMatrix = RD->getIdentifier() == &(getContext().Idents.get(StringRef("matrix"))); if (isMatrix) { // Encode additional information into the type. const ClassTemplateSpecializationDecl* templateDecl = dyn_cast<ClassTemplateSpecializationDecl>(RD); QualType CompType = templateDecl->getTemplateArgs().get(0).getAsType(); OS << "."; CompType.print(OS, policy); OS << "." << templateDecl->getTemplateArgs().get(1).getAsIntegral().toString(10) << "." 
<< templateDecl->getTemplateArgs().get(2).getAsIntegral().toString(10); } else if (const ClassTemplateSpecializationDecl *Spec = dyn_cast<ClassTemplateSpecializationDecl>(RD)) { const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs(); TemplateSpecializationType::PrintTemplateArgumentList(OS, TemplateArgs.data(), TemplateArgs.size(), policy); } // HLSL Change Ends Ty->setName(OS.str()); } /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from /// ConvertType in that it is used to convert to the memory representation for /// a type. For example, the scalar representation for _Bool is i1, but the /// memory representation is usually i8 or i32, depending on the target. llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) { // HLSL Change Starts if (hlsl::IsHLSLVecType(T)) { // Vectors of bools in memory should become vectors of // the memory representation of the elements. // Clang doesn't do this for plain VectorTypes, // which is fine; otherwise a bool1x1 matrix would become // [n x <m x i32>] since array elements always have memory representation. QualType ElemT = hlsl::GetElementTypeOrType(T); return llvm::VectorType::get(ConvertTypeForMem(ElemT), hlsl::GetHLSLVecSize(T)); } llvm::Type *R = ConvertType(T); if (R->isIntegerTy(1)) { // Bools have a different representation in memory return llvm::IntegerType::get(getLLVMContext(), (unsigned)Context.getTypeSize(T)); } return R; // HLSL Change Ends } /// isRecordLayoutComplete - Return true if the specified type is already /// completely laid out. bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const { llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I = RecordDeclTypes.find(Ty); return I != RecordDeclTypes.end() && !I->second->isOpaque(); } static bool isSafeToConvert(QualType T, CodeGenTypes &CGT, llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked); /// isSafeToConvert - Return true if it is safe to convert the specified record /// decl to IR and lay it out, false if doing so would cause us to get into a /// recursive compilation mess. static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT, llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { // If we have already checked this type (maybe the same type is used by-value // multiple times in multiple structure fields), don't check again. if (!AlreadyChecked.insert(RD).second) return true; const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr(); // If this type is already laid out, converting it is a noop. if (CGT.isRecordLayoutComplete(Key)) return true; // If this type is currently being laid out, we can't recursively compile it. if (CGT.isRecordBeingLaidOut(Key)) return false; // If this type would require laying out bases that are currently being laid // out, don't do it. This includes virtual base classes which get laid out // when a class is translated, even though they aren't embedded by-value into // the class. if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { for (const auto &I : CRD->bases()) if (!isSafeToConvert(I.getType()->getAs<RecordType>()->getDecl(), CGT, AlreadyChecked)) return false; } // If this type would require laying out members that are currently being laid // out, don't do it. for (const auto *I : RD->fields()) if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked)) return false; // If there are no problems, let's do it.
return true; } /// isSafeToConvert - Return true if it is safe to convert this field type, /// which requires the structure elements contained by-value to all be /// recursively safe to convert. static bool isSafeToConvert(QualType T, CodeGenTypes &CGT, llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { // Strip off atomic type sugar. if (const auto *AT = T->getAs<AtomicType>()) T = AT->getValueType(); // If this is a record, check it. if (const auto *RT = T->getAs<RecordType>()) return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked); // If this is an array, check the elements, which are embedded inline. if (const auto *AT = CGT.getContext().getAsArrayType(T)) return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked); // Otherwise, there is no concern about transforming this. We only care about // things that are contained by-value in a structure that can have another // structure as a member. return true; } /// isSafeToConvert - Return true if it is safe to convert the specified record /// decl to IR and lay it out, false if doing so would cause us to get into a /// recursive compilation mess. static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) { // If no structs are being laid out, we can certainly do this one. if (CGT.noRecordsBeingLaidOut()) return true; llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked; return isSafeToConvert(RD, CGT, AlreadyChecked); } /// isFuncParamTypeConvertible - Return true if the specified type in a /// function parameter or result position can be converted to an IR type at this /// point. This boils down to being whether it is complete, as well as whether /// we've temporarily deferred expanding the type because we're in a recursive /// context. bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) { // Some ABIs cannot have their member pointers represented in IR unless // certain circumstances have been reached. if (const auto *MPT = Ty->getAs<MemberPointerType>()) return getCXXABI().isMemberPointerConvertible(MPT); // If this isn't a tagged type, we can convert it! const TagType *TT = Ty->getAs<TagType>(); if (!TT) return true; // Incomplete types cannot be converted. if (TT->isIncompleteType()) return false; // If this is an enum, then it is always safe to convert. const RecordType *RT = dyn_cast<RecordType>(TT); if (!RT) return true; // Otherwise, we have to be careful. If it is a struct that we're in the // process of expanding, then we can't convert the function type. That's ok // though because we must be in a pointer context under the struct, so we can // just convert it to a dummy type. // // We decide this by checking whether ConvertRecordDeclType returns us an // opaque type for a struct that we know is defined. return isSafeToConvert(RT->getDecl(), *this); } /// Code to verify a given function type is complete, i.e. the return type /// and all of the parameter types are complete. Also check to see if we are in /// a RS_StructPointer context, and if so whether any struct types have been /// pended. If so, we don't want to ask the ABI lowering code to handle a type /// that cannot be converted to an IR type. 
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) { if (!isFuncParamTypeConvertible(FT->getReturnType())) return false; if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++) if (!isFuncParamTypeConvertible(FPT->getParamType(i))) return false; return true; } /// UpdateCompletedType - When we find the full definition for a TagDecl, /// replace the 'opaque' type we previously made for it if applicable. void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { // If this is an enum being completed, then we flush all non-struct types from // the cache. This allows function types and other things that may be derived // from the enum to be recomputed. if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) { // Only flush the cache if we've actually already converted this type. if (TypeCache.count(ED->getTypeForDecl())) { // Okay, we formed some types based on this. We speculated that the enum // would be lowered to i32, so we only need to flush the cache if this // didn't happen. if (!ConvertType(ED->getIntegerType())->isIntegerTy(32)) TypeCache.clear(); } // If necessary, provide the full definition of a type only used with a // declaration so far. if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) DI->completeType(ED); return; } // If we completed a RecordDecl that we previously used and converted to an // anonymous type, then go ahead and complete it now. const RecordDecl *RD = cast<RecordDecl>(TD); if (RD->isDependentType()) return; // Only complete it if we converted it already. If we haven't converted it // yet, we'll just do it lazily. if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr())) ConvertRecordDeclType(RD); // If necessary, provide the full definition of a type only used with a // declaration so far. if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) DI->completeType(RD); } static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext, const llvm::fltSemantics &format, bool UseNativeHalf = false) { if (&format == &llvm::APFloat::IEEEhalf) { if (UseNativeHalf) return llvm::Type::getHalfTy(VMContext); else return llvm::Type::getInt16Ty(VMContext); } if (&format == &llvm::APFloat::IEEEsingle) return llvm::Type::getFloatTy(VMContext); if (&format == &llvm::APFloat::IEEEdouble) return llvm::Type::getDoubleTy(VMContext); if (&format == &llvm::APFloat::IEEEquad) return llvm::Type::getFP128Ty(VMContext); if (&format == &llvm::APFloat::PPCDoubleDouble) return llvm::Type::getPPC_FP128Ty(VMContext); if (&format == &llvm::APFloat::x87DoubleExtended) return llvm::Type::getX86_FP80Ty(VMContext); llvm_unreachable("Unknown float format!"); } /// ConvertType - Convert the specified type to its LLVM form. llvm::Type *CodeGenTypes::ConvertType(QualType T) { T = Context.getCanonicalType(T); const Type *Ty = T.getTypePtr(); // RecordTypes are cached and processed specially. if (const RecordType *RT = dyn_cast<RecordType>(Ty)) { // HLSL Change Starts if (getContext().getLangOpts().HLSL && !hlsl::IsHLSLMatType(T)) { if (const ExtVectorType *extVecTy = hlsl::ConvertHLSLVecMatTypeToExtVectorType(getContext(), T)) { Ty = getContext() .getExtVectorType(extVecTy->getElementType(), extVecTy->getNumElements()) .getTypePtr(); } else if (hlsl::IsHLSLOutputPatchType(T)) { // Convert OutputPatch to array. 
QualType eltTy = hlsl::GetHLSLOutputPatchElementType(T); unsigned count = hlsl::GetHLSLOutputPatchCount(T); Ty = getContext() .getConstantArrayType(eltTy, llvm::APInt(32, count), ArrayType::ArraySizeModifier::Normal, 0) .getTypePtr(); } else if (hlsl::IsHLSLInputPatchType(T)) { // Convert InputPatch to array. QualType eltTy = hlsl::GetHLSLInputPatchElementType(T); unsigned count = hlsl::GetHLSLInputPatchCount(T); Ty = getContext() .getConstantArrayType(eltTy, llvm::APInt(32, count), ArrayType::ArraySizeModifier::Normal, 0) .getTypePtr(); } else return ConvertRecordDeclType(RT->getDecl()); } // HLSL Change Ends else return ConvertRecordDeclType(RT->getDecl()); } // See if type is already cached. llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty); // If type is found in map then use it. Otherwise, convert type T. if (TCI != TypeCache.end()) return TCI->second; // If we don't have it in the cache, convert it now. llvm::Type *ResultType = nullptr; switch (Ty->getTypeClass()) { case Type::Record: // Handled above. #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: #define DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.def" llvm_unreachable("Non-canonical or dependent types aren't possible."); case Type::Builtin: { switch (cast<BuiltinType>(Ty)->getKind()) { case BuiltinType::Void: case BuiltinType::ObjCId: case BuiltinType::ObjCClass: case BuiltinType::ObjCSel: // LLVM void type can only be used as the result of a function call. Just // map to the same as char. ResultType = llvm::Type::getInt8Ty(getLLVMContext()); break; case BuiltinType::Bool: // Note that we always return bool as i1 for use as a scalar type. ResultType = llvm::Type::getInt1Ty(getLLVMContext()); break; case BuiltinType::Char_S: case BuiltinType::Char_U: case BuiltinType::SChar: case BuiltinType::UChar: case BuiltinType::Short: case BuiltinType::UShort: case BuiltinType::Int: case BuiltinType::UInt: case BuiltinType::Long: case BuiltinType::ULong: case BuiltinType::LongLong: case BuiltinType::ULongLong: case BuiltinType::WChar_S: case BuiltinType::WChar_U: // HLSL Change Starts case BuiltinType::Min12Int: case BuiltinType::Min16Int: case BuiltinType::Min16UInt: case BuiltinType::LitInt: case BuiltinType::Int8_4Packed: case BuiltinType::UInt8_4Packed: // HLSL Change Ends case BuiltinType::Char16: case BuiltinType::Char32: ResultType = llvm::IntegerType::get(getLLVMContext(), static_cast<unsigned>(Context.getTypeSize(T))); break; // HLSL Change Starts case BuiltinType::Min10Float: case BuiltinType::Min16Float: // OACR error 6287 #pragma prefast(disable: __WARNING_REDUNDANTTEST, "language options are constants, by design") // HLSL Change Ends case BuiltinType::Half: // Half FP can either be storage-only (lowered to i16) or native.
ResultType = getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T), Context.getLangOpts().NativeHalfType || Context.getLangOpts().HalfArgsAndReturns); break; case BuiltinType::LitFloat: // HLSL Change case BuiltinType::HalfFloat: // HLSL Change case BuiltinType::Float: case BuiltinType::Double: case BuiltinType::LongDouble: ResultType = getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T), /* UseNativeHalf = */ false); break; case BuiltinType::NullPtr: // Model std::nullptr_t as i8* ResultType = llvm::Type::getInt8PtrTy(getLLVMContext()); break; case BuiltinType::UInt128: case BuiltinType::Int128: ResultType = llvm::IntegerType::get(getLLVMContext(), 128); break; case BuiltinType::OCLImage1d: case BuiltinType::OCLImage1dArray: case BuiltinType::OCLImage1dBuffer: case BuiltinType::OCLImage2d: case BuiltinType::OCLImage2dArray: case BuiltinType::OCLImage3d: case BuiltinType::OCLSampler: case BuiltinType::OCLEvent: ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty); break; case BuiltinType::Dependent: #define BUILTIN_TYPE(Id, SingletonId) #define PLACEHOLDER_TYPE(Id, SingletonId) \ case BuiltinType::Id: #include "clang/AST/BuiltinTypes.def" llvm_unreachable("Unexpected placeholder builtin type!"); } break; } case Type::Auto: llvm_unreachable("Unexpected undeduced auto type!"); case Type::Complex: { llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType()); ResultType = llvm::StructType::get(EltTy, EltTy, nullptr); break; } case Type::LValueReference: case Type::RValueReference: { const ReferenceType *RTy = cast<ReferenceType>(Ty); QualType ETy = RTy->getPointeeType(); llvm::Type *PointeeType = ConvertTypeForMem(ETy); unsigned AS = Context.getTargetAddressSpace(ETy); ResultType = llvm::PointerType::get(PointeeType, AS); break; } case Type::Pointer: { const PointerType *PTy = cast<PointerType>(Ty); QualType ETy = PTy->getPointeeType(); llvm::Type *PointeeType = ConvertTypeForMem(ETy); if (PointeeType->isVoidTy()) PointeeType = llvm::Type::getInt8Ty(getLLVMContext()); unsigned AS = Context.getTargetAddressSpace(ETy); ResultType = llvm::PointerType::get(PointeeType, AS); break; } case Type::VariableArray: { const VariableArrayType *A = cast<VariableArrayType>(Ty); assert(A->getIndexTypeCVRQualifiers() == 0 && "FIXME: We only handle trivial array types so far!"); // VLAs resolve to the innermost element type; this matches // the return of alloca, and there isn't any obviously better choice. ResultType = ConvertTypeForMem(A->getElementType()); break; } case Type::IncompleteArray: { const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty); assert(A->getIndexTypeCVRQualifiers() == 0 && "FIXME: We only handle trivial array types so far!"); // int X[] -> [0 x int], unless the element type is not sized. If it is // unsized (e.g. an incomplete struct) just use [0 x i8]. ResultType = ConvertTypeForMem(A->getElementType()); if (!ResultType->isSized()) { SkippedLayout = true; ResultType = llvm::Type::getInt8Ty(getLLVMContext()); } ResultType = llvm::ArrayType::get(ResultType, 0); break; } case Type::ConstantArray: { const ConstantArrayType *A = cast<ConstantArrayType>(Ty); llvm::Type *EltTy = ConvertTypeForMem(A->getElementType()); // Lower arrays of undefined struct type to arrays of i8 just to have a // concrete type. 
if (!EltTy->isSized()) { SkippedLayout = true; EltTy = llvm::Type::getInt8Ty(getLLVMContext()); } ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue()); break; } case Type::ExtVector: case Type::Vector: { const VectorType *VT = cast<VectorType>(Ty); ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()), VT->getNumElements()); break; } case Type::FunctionNoProto: case Type::FunctionProto: { const FunctionType *FT = cast<FunctionType>(Ty); // First, check whether we can build the full function type. If the // function type depends on an incomplete type (e.g. a struct or enum), we // cannot lower the function type. if (!isFuncTypeConvertible(FT)) { // This function's type depends on an incomplete tag type. // Force conversion of all the relevant record types, to make sure // we re-convert the FunctionType when appropriate. if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>()) ConvertRecordDeclType(RT->getDecl()); if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++) if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>()) ConvertRecordDeclType(RT->getDecl()); // Return a placeholder type. ResultType = llvm::StructType::get(getLLVMContext()); SkippedLayout = true; break; } // While we're converting the parameter types for a function, we don't want // to recursively convert any pointed-to structs. Converting directly-used // structs is ok though. if (!RecordsBeingLaidOut.insert(Ty).second) { ResultType = llvm::StructType::get(getLLVMContext()); SkippedLayout = true; break; } // The function type can be built; call the appropriate routines to // build it. const CGFunctionInfo *FI; if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) { FI = &arrangeFreeFunctionType( CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0))); } else { const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT); FI = &arrangeFreeFunctionType( CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0))); } // If there is something higher level prodding our CGFunctionInfo, then // don't recurse into it again. if (FunctionsBeingProcessed.count(FI)) { ResultType = llvm::StructType::get(getLLVMContext()); SkippedLayout = true; } else { // Otherwise, we're good to go, go ahead and convert it. ResultType = GetFunctionType(*FI); } RecordsBeingLaidOut.erase(Ty); if (SkippedLayout) TypeCache.clear(); if (RecordsBeingLaidOut.empty()) while (!DeferredRecords.empty()) ConvertRecordDeclType(DeferredRecords.pop_back_val()); break; } case Type::ObjCObject: ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType()); break; case Type::ObjCInterface: { // Objective-C interfaces are always opaque (outside of the // runtime, which can do whatever it likes); we never refine // these. llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)]; if (!T) T = llvm::StructType::create(getLLVMContext()); ResultType = T; break; } case Type::ObjCObjectPointer: { // Protocol qualifications do not influence the LLVM type, we just return a // pointer to the underlying interface type. We don't need to worry about // recursive conversion. llvm::Type *T = ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); ResultType = T->getPointerTo(); break; } case Type::Enum: { const EnumDecl *ED = cast<EnumType>(Ty)->getDecl(); if (ED->isCompleteDefinition() || ED->isFixed()) return ConvertType(ED->getIntegerType()); // Return a placeholder 'i32' type. 
This can be changed later when the // type is defined (see UpdateCompletedType), but is likely to be the // "right" answer. ResultType = llvm::Type::getInt32Ty(getLLVMContext()); break; } case Type::BlockPointer: { const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType(); llvm::Type *PointeeType = ConvertTypeForMem(FTy); unsigned AS = Context.getTargetAddressSpace(FTy); ResultType = llvm::PointerType::get(PointeeType, AS); break; } case Type::MemberPointer: { if (!getCXXABI().isMemberPointerConvertible(cast<MemberPointerType>(Ty))) return llvm::StructType::create(getLLVMContext()); ResultType = getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty)); break; } case Type::Atomic: { QualType valueType = cast<AtomicType>(Ty)->getValueType(); ResultType = ConvertTypeForMem(valueType); // Pad out to the inflated size if necessary. uint64_t valueSize = Context.getTypeSize(valueType); uint64_t atomicSize = Context.getTypeSize(Ty); if (valueSize != atomicSize) { assert(valueSize < atomicSize); llvm::Type *elts[] = { ResultType, llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8) }; ResultType = llvm::StructType::get(getLLVMContext(), llvm::makeArrayRef(elts)); } break; } } assert(ResultType && "Didn't convert a type?"); TypeCache[Ty] = ResultType; return ResultType; } bool CodeGenModule::isPaddedAtomicType(QualType type) { return isPaddedAtomicType(type->castAs<AtomicType>()); } bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) { return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType()); } /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union. llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) { // TagDecl's are not necessarily unique, instead use the (clang) // type connected to the decl. const Type *Key = Context.getTagDeclType(RD).getTypePtr(); llvm::StructType *&Entry = RecordDeclTypes[Key]; // If we don't have a StructType at all yet, create the forward declaration. if (!Entry) { Entry = llvm::StructType::create(getLLVMContext()); addRecordTypeName(RD, Entry, ""); } llvm::StructType *Ty = Entry; // If this is still a forward declaration, or the LLVM type is already // complete, there's nothing more to do. RD = RD->getDefinition(); if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque()) return Ty; // If converting this type would cause us to infinitely loop, don't do it! if (!isSafeToConvert(RD, *this)) { DeferredRecords.push_back(RD); return Ty; } // Okay, this is a definition of a type. Compile the implementation now. bool InsertResult = RecordsBeingLaidOut.insert(Key).second; (void)InsertResult; assert(InsertResult && "Recursively compiling a struct?"); // Force conversion of non-virtual base classes recursively. if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { for (const auto &I : CRD->bases()) { if (I.isVirtual()) continue; ConvertRecordDeclType(I.getType()->getAs<RecordType>()->getDecl()); } } // Layout fields. CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty); CGRecordLayouts[Key] = Layout; // We're done laying out this struct. bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult; assert(EraseResult && "struct not in RecordsBeingLaidOut set?"); // If this struct blocked a FunctionType conversion, then recompute whatever // was derived from that. // FIXME: This is hugely overconservative. if (SkippedLayout) TypeCache.clear(); // If we're done converting the outer-most record, then convert any deferred // structs as well. 
if (RecordsBeingLaidOut.empty()) while (!DeferredRecords.empty()) ConvertRecordDeclType(DeferredRecords.pop_back_val()); return Ty; } /// getCGRecordLayout - Return record layout info for the given record decl. const CGRecordLayout & CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) { const Type *Key = Context.getTagDeclType(RD).getTypePtr(); const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key); if (!Layout) { // Compute the type information. ConvertRecordDeclType(RD); // Now try again. Layout = CGRecordLayouts.lookup(Key); } assert(Layout && "Unable to find record layout information for type"); return *Layout; } bool CodeGenTypes::isZeroInitializable(QualType T) { // No need to check for member pointers when not compiling C++. if (!Context.getLangOpts().CPlusPlus) return true; if (const auto *AT = Context.getAsArrayType(T)) { if (isa<IncompleteArrayType>(AT)) return true; if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) if (Context.getConstantArrayElementCount(CAT) == 0) return true; T = Context.getBaseElementType(T); } // Records are non-zero-initializable if they contain any // non-zero-initializable subobjects. if (const RecordType *RT = T->getAs<RecordType>()) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); return isZeroInitializable(RD); } // We have to ask the ABI about member pointers. if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) return getCXXABI().isZeroInitializable(MPT); // Everything else is okay. return true; } bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) { return getCGRecordLayout(RD).isZeroInitializable(); }
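ConvertRecordDeclType above leans on three pieces of bookkeeping: a cache of finished conversions (RecordDeclTypes, plus TypeCache for everything else), a set of keys currently being laid out (RecordsBeingLaidOut), and a deferred list retried once the outermost conversion completes (DeferredRecords). The toy converter below shows only that control flow, with strings standing in for types and an "opaque" placeholder standing in for the forward-declared StructType; it is illustrative, not the real algorithm.

#include <cstdio>
#include <map>
#include <set>
#include <string>
#include <vector>

struct Converter {
  std::map<std::string, std::string> Cache;               // finished layouts
  std::set<std::string> BeingLaidOut;                     // in-progress keys
  std::vector<std::string> Deferred;                      // retry later
  std::map<std::string, std::vector<std::string>> Fields; // toy type graph

  std::string convert(const std::string &Name) {
    auto It = Cache.find(Name);
    if (It != Cache.end())
      return It->second; // already converted
    if (BeingLaidOut.count(Name)) {
      Deferred.push_back(Name); // recursive use: defer and hand back
      return "opaque." + Name;  // a placeholder, like the opaque struct
    }
    BeingLaidOut.insert(Name);
    std::string Body;
    for (const std::string &F : Fields[Name])
      Body += convert(F) + ";";
    BeingLaidOut.erase(Name);
    Cache[Name] = "{" + Body + "}";
    // Once nothing is in flight, reconvert whatever we deferred.
    if (BeingLaidOut.empty())
      while (!Deferred.empty()) {
        std::string R = Deferred.back();
        Deferred.pop_back();
        Cache.erase(R);
        convert(R);
      }
    return Cache[Name];
  }
};

int main() {
  Converter C;
  C.Fields["A"] = {"B"};
  C.Fields["B"] = {"A"}; // mutually referential, like structs via pointers
  std::printf("A = %s\n", C.convert("A").c_str());
  std::printf("B = %s\n", C.convert("B").c_str());
  return 0;
}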
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/ModuleBuilder.cpp
//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This builds an AST and converts it to LLVM Code. // //===----------------------------------------------------------------------===// #include "clang/CodeGen/ModuleBuilder.h" #include "CGDebugInfo.h" #include "CodeGenModule.h" #include "dxc/DXIL/DxilMetadataHelper.h" // HLSL Change - dx source info #include "dxc/DxcBindingTable/DxcBindingTable.h" // HLSL Change #include "dxc/Support/Path.h" // HLSL Change #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Support/Path.h" #include <memory> using namespace clang; namespace { class CodeGeneratorImpl : public CodeGenerator { DiagnosticsEngine &Diags; std::unique_ptr<const llvm::DataLayout> TD; ASTContext *Ctx; const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info. const PreprocessorOptions &PreprocessorOpts; // Only used for debug info. const CodeGenOptions CodeGenOpts; // Intentionally copied in. unsigned HandlingTopLevelDecls; struct HandlingTopLevelDeclRAII { CodeGeneratorImpl &Self; HandlingTopLevelDeclRAII(CodeGeneratorImpl &Self) : Self(Self) { ++Self.HandlingTopLevelDecls; } ~HandlingTopLevelDeclRAII() { if (--Self.HandlingTopLevelDecls == 0) Self.EmitDeferredDecls(); } }; CoverageSourceInfo *CoverageInfo; protected: std::unique_ptr<llvm::Module> M; std::unique_ptr<CodeGen::CodeGenModule> Builder; private: SmallVector<CXXMethodDecl *, 8> DeferredInlineMethodDefinitions; public: CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string &ModuleName, const HeaderSearchOptions &HSO, const PreprocessorOptions &PPO, const CodeGenOptions &CGO, llvm::LLVMContext &C, CoverageSourceInfo *CoverageInfo = nullptr) : Diags(diags), Ctx(nullptr), HeaderSearchOpts(HSO), PreprocessorOpts(PPO), CodeGenOpts(CGO), HandlingTopLevelDecls(0), CoverageInfo(CoverageInfo), M(new llvm::Module(ModuleName, C)) {} ~CodeGeneratorImpl() override { // There should normally not be any leftover inline method definitions. 
assert(DeferredInlineMethodDefinitions.empty() || Diags.hasErrorOccurred()); } llvm::Module* GetModule() override { return M.get(); } const Decl *GetDeclForMangledName(StringRef MangledName) override { GlobalDecl Result; if (!Builder->lookupRepresentativeDecl(MangledName, Result)) return nullptr; const Decl *D = Result.getCanonicalDecl().getDecl(); if (auto FD = dyn_cast<FunctionDecl>(D)) { if (FD->hasBody(FD)) return FD; } else if (auto TD = dyn_cast<TagDecl>(D)) { if (auto Def = TD->getDefinition()) return Def; } return D; } llvm::Module *ReleaseModule() override { return M.release(); } void Initialize(ASTContext &Context) override { Ctx = &Context; M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple()); M->setDataLayout(Ctx->getTargetInfo().getTargetDescription()); TD.reset( new llvm::DataLayout(Ctx->getTargetInfo().getTargetDescription())); Builder.reset(new CodeGen::CodeGenModule(Context, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, *TD, Diags, CoverageInfo)); for (size_t i = 0, e = CodeGenOpts.DependentLibraries.size(); i < e; ++i) HandleDependentLibrary(CodeGenOpts.DependentLibraries[i]); } void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override { if (Diags.hasErrorOccurred()) return; Builder->HandleCXXStaticMemberVarInstantiation(VD); } bool HandleTopLevelDecl(DeclGroupRef DG) override { if (Diags.hasErrorOccurred()) return true; HandlingTopLevelDeclRAII HandlingDecl(*this); // Make sure to emit all elements of a Decl. for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) Builder->EmitTopLevelDecl(*I); return true; } void EmitDeferredDecls() { if (DeferredInlineMethodDefinitions.empty()) return; // Emit any deferred inline method definitions. Note that more deferred // methods may be added during this loop, since ASTConsumer callbacks // can be invoked if AST inspection results in declarations being added. HandlingTopLevelDeclRAII HandlingDecl(*this); for (unsigned I = 0; I != DeferredInlineMethodDefinitions.size(); ++I) Builder->EmitTopLevelDecl(DeferredInlineMethodDefinitions[I]); DeferredInlineMethodDefinitions.clear(); } void HandleInlineMethodDefinition(CXXMethodDecl *D) override { if (Diags.hasErrorOccurred()) return; assert(D->doesThisDeclarationHaveABody()); // We may want to emit this definition. However, that decision might be // based on computing the linkage, and we have to defer that in case we // are inside of something that will change the method's final linkage, // e.g. // typedef struct { // void bar(); // void foo() { bar(); } // } A; DeferredInlineMethodDefinitions.push_back(D); // Provide some coverage mapping even for methods that aren't emitted. // Don't do this for templated classes though, as they may not be // instantiable. if (!D->getParent()->getDescribedClassTemplate()) Builder->AddDeferredUnusedCoverageMapping(D); } /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl /// (e.g. struct, union, enum, class) is completed. This allows the /// client to hack on the type, which can occur at any point in the file /// (because these can be defined in declspecs). void HandleTagDeclDefinition(TagDecl *D) override { if (Diags.hasErrorOccurred()) return; Builder->UpdateCompletedType(D); // For MSVC compatibility, treat declarations of static data members with // inline initializers as definitions.
if (Ctx->getLangOpts().MSVCCompat) { for (Decl *Member : D->decls()) { if (VarDecl *VD = dyn_cast<VarDecl>(Member)) { if (Ctx->isMSStaticDataMemberInlineDefinition(VD) && Ctx->DeclMustBeEmitted(VD)) { Builder->EmitGlobal(VD); } } } } } void HandleTagDeclRequiredDefinition(const TagDecl *D) override { if (Diags.hasErrorOccurred()) return; if (CodeGen::CGDebugInfo *DI = Builder->getModuleDebugInfo()) if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) DI->completeRequiredType(RD); } void HandleTranslationUnit(ASTContext &Ctx) override { if (Diags.hasErrorOccurred()) { if (Builder) Builder->clear(); M.reset(); return; } // HLSL Change Begins. if (&Ctx == this->Ctx) { if (Builder) { // Add semantic defines for extensions if any are available. auto &CodeGenOpts = const_cast<CodeGenOptions &>(Builder->getCodeGenOpts()); if (CodeGenOpts.HLSLExtensionsCodegen) { CodeGenOpts.HLSLExtensionsCodegen->WriteSemanticDefines( M.get()); // Builder->CodeGenOpts is a copy. So update it for every Builder. CodeGenOpts.HLSLExtensionsCodegen->UpdateCodeGenOptions( CodeGenOpts); } } } // HLSL Change Ends. if (Builder) Builder->Release(); // HLSL Change Begins if (CodeGenOpts.BindingTableParser) { hlsl::DxcBindingTable bindingTable; std::string errors; llvm::raw_string_ostream os(errors); if (!CodeGenOpts.BindingTableParser->Parse(os, &bindingTable)) { os.flush(); unsigned DiagID = Diags.getCustomDiagID( DiagnosticsEngine::Error, "%0"); Diags.Report(DiagID) << errors; } else { hlsl::WriteBindingTableToMetadata(*M, bindingTable); } } else { // Add resource binding overrides to the metadata. hlsl::WriteBindingTableToMetadata(*M, CodeGenOpts.HLSLBindingTable); } // Error may happen in Builder->Release for HLSL if (CodeGenOpts.HLSLEmbedSourcesInModule) { llvm::LLVMContext &LLVMCtx = M->getContext(); // Add all file contents in a list of filename/content pairs. llvm::NamedMDNode *pContents = nullptr; auto AddFile = [&](StringRef name, StringRef content) { if (pContents == nullptr) { pContents = M->getOrInsertNamedMetadata( hlsl::DxilMDHelper::kDxilSourceContentsMDName); } llvm::MDTuple *pFileInfo = llvm::MDNode::get( LLVMCtx, { llvm::MDString::get(LLVMCtx, name), llvm::MDString::get(LLVMCtx, content) }); pContents->addOperand(pFileInfo); }; std::map<std::string, StringRef> filesMap; bool bFoundMainFile = false; for (SourceManager::fileinfo_iterator it = Ctx.getSourceManager().fileinfo_begin(), end = Ctx.getSourceManager().fileinfo_end(); it != end; ++it) { if (it->first->isValid() && !it->second->IsSystemFile) { llvm::SmallString<256> path = StringRef(it->first->getName()); llvm::sys::path::native(path); StringRef contentBuffer = it->second->getRawBuffer()->getBuffer(); // If main file, write that to metadata first. // Add the rest to filesMap to sort by name. if (CodeGenOpts.MainFileName.compare(it->first->getName()) == 0) { assert(!bFoundMainFile && "otherwise, more than one file matches main filename"); AddFile(path, contentBuffer); bFoundMainFile = true; } else { // We want the include file paths to match the values passed into // the include handlers exactly. The SourceManager entries should // match it except the call to MakeAbsoluteOrCurDirRelative. filesMap[path.str()] = contentBuffer; } } } assert(bFoundMainFile && "otherwise, no file found matches main filename"); // Emit the rest of the files in sorted order. 
for (auto it : filesMap) { AddFile(it.first, it.second); } // Add Defines to Debug Info llvm::NamedMDNode *pDefines = M->getOrInsertNamedMetadata( hlsl::DxilMDHelper::kDxilSourceDefinesMDName); std::vector<llvm::Metadata *> vecDefines; vecDefines.resize(CodeGenOpts.HLSLDefines.size()); std::transform(CodeGenOpts.HLSLDefines.begin(), CodeGenOpts.HLSLDefines.end(), vecDefines.begin(), [&LLVMCtx](const std::string &str) { return llvm::MDString::get(LLVMCtx, str); }); llvm::MDTuple *pDefinesInfo = llvm::MDNode::get(LLVMCtx, vecDefines); pDefines->addOperand(pDefinesInfo); // Add main file name to debug info llvm::NamedMDNode *pSourceFilename = M->getOrInsertNamedMetadata( hlsl::DxilMDHelper::kDxilSourceMainFileNameMDName); llvm::SmallString<128> NormalizedPath; llvm::sys::path::native(CodeGenOpts.MainFileName, NormalizedPath); llvm::MDTuple *pFileName = llvm::MDNode::get( LLVMCtx, llvm::MDString::get(LLVMCtx, NormalizedPath)); pSourceFilename->addOperand(pFileName); // Pass in any other arguments to debug info llvm::NamedMDNode *pArgs = M->getOrInsertNamedMetadata( hlsl::DxilMDHelper::kDxilSourceArgsMDName); std::vector<llvm::Metadata *> vecArguments; vecArguments.resize(CodeGenOpts.HLSLArguments.size()); std::transform(CodeGenOpts.HLSLArguments.begin(), CodeGenOpts.HLSLArguments.end(), vecArguments.begin(), [&LLVMCtx](const std::string &str) { return llvm::MDString::get(LLVMCtx, str); }); llvm::MDTuple *pArgumentsInfo = llvm::MDNode::get(LLVMCtx, vecArguments); pArgs->addOperand(pArgumentsInfo); } if (Diags.hasErrorOccurred()) M.reset(); // HLSL Change Ends } void CompleteTentativeDefinition(VarDecl *D) override { if (Diags.hasErrorOccurred()) return; Builder->EmitTentativeDefinition(D); } void HandleVTable(CXXRecordDecl *RD) override { if (Diags.hasErrorOccurred()) return; Builder->EmitVTable(RD); } void HandleLinkerOptionPragma(llvm::StringRef Opts) override { Builder->AppendLinkerOptions(Opts); } void HandleDetectMismatch(llvm::StringRef Name, llvm::StringRef Value) override { Builder->AddDetectMismatch(Name, Value); } void HandleDependentLibrary(llvm::StringRef Lib) override { Builder->AddDependentLib(Lib); } }; } void CodeGenerator::anchor() { } CodeGenerator *clang::CreateLLVMCodeGen( DiagnosticsEngine &Diags, const std::string &ModuleName, const HeaderSearchOptions &HeaderSearchOpts, const PreprocessorOptions &PreprocessorOpts, const CodeGenOptions &CGO, llvm::LLVMContext &C, CoverageSourceInfo *CoverageInfo) { return new CodeGeneratorImpl(Diags, ModuleName, HeaderSearchOpts, PreprocessorOpts, CGO, C, CoverageInfo); }
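The HLSLEmbedSourcesInModule block above records each source file as a (name, content) pair of MDStrings under a named metadata node, and the defines, main file name, and arguments follow the same recipe. Below is a minimal standalone sketch of that recipe against the LLVM C++ API; the key "demo.source.contents" is invented for the example (the real code uses DxilMDHelper::kDxilSourceContentsMDName), and the file name and body are placeholders.

#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("embed-demo", Ctx);

  // One named node holds the list; each operand is a (name, content) tuple,
  // mirroring the AddFile lambda above.
  NamedMDNode *Contents = M.getOrInsertNamedMetadata("demo.source.contents");
  Metadata *Pair[] = {
      MDString::get(Ctx, "main.hlsl"), // hypothetical file name
      MDString::get(Ctx, "float4 main() : SV_Target { return 0; }")};
  Contents->addOperand(MDNode::get(Ctx, Pair));

  M.print(outs(), nullptr); // the !demo.source.contents node appears in IR
  return 0;
}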
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGAtomic.cpp
//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the code for emitting atomic operations. // //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" #include "CGCall.h" #include "CGRecordLayout.h" #include "CodeGenModule.h" #include "clang/AST/ASTContext.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Operator.h" using namespace clang; using namespace CodeGen; namespace { class AtomicInfo { CodeGenFunction &CGF; QualType AtomicTy; QualType ValueTy; uint64_t AtomicSizeInBits; uint64_t ValueSizeInBits; CharUnits AtomicAlign; CharUnits ValueAlign; CharUnits LValueAlign; TypeEvaluationKind EvaluationKind; bool UseLibcall; LValue LVal; CGBitFieldInfo BFI; public: AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0), EvaluationKind(TEK_Scalar), UseLibcall(true) { assert(!lvalue.isGlobalReg()); ASTContext &C = CGF.getContext(); if (lvalue.isSimple()) { AtomicTy = lvalue.getType(); if (auto *ATy = AtomicTy->getAs<AtomicType>()) ValueTy = ATy->getValueType(); else ValueTy = AtomicTy; EvaluationKind = CGF.getEvaluationKind(ValueTy); uint64_t ValueAlignInBits; uint64_t AtomicAlignInBits; TypeInfo ValueTI = C.getTypeInfo(ValueTy); ValueSizeInBits = ValueTI.Width; ValueAlignInBits = ValueTI.Align; TypeInfo AtomicTI = C.getTypeInfo(AtomicTy); AtomicSizeInBits = AtomicTI.Width; AtomicAlignInBits = AtomicTI.Align; assert(ValueSizeInBits <= AtomicSizeInBits); assert(ValueAlignInBits <= AtomicAlignInBits); AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits); ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits); if (lvalue.getAlignment().isZero()) lvalue.setAlignment(AtomicAlign); LVal = lvalue; } else if (lvalue.isBitField()) { ValueTy = lvalue.getType(); ValueSizeInBits = C.getTypeSize(ValueTy); auto &OrigBFI = lvalue.getBitFieldInfo(); auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment()); AtomicSizeInBits = C.toBits( C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1) .RoundUpToAlignment(lvalue.getAlignment())); auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldAddr()); auto OffsetInChars = (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) * lvalue.getAlignment(); VoidPtrAddr = CGF.Builder.CreateConstGEP1_64( VoidPtrAddr, OffsetInChars.getQuantity()); auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( VoidPtrAddr, CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(), "atomic_bitfield_base"); BFI = OrigBFI; BFI.Offset = Offset; BFI.StorageSize = AtomicSizeInBits; BFI.StorageOffset += OffsetInChars; LVal = LValue::MakeBitfield(Addr, BFI, lvalue.getType(), lvalue.getAlignment()); LVal.setTBAAInfo(lvalue.getTBAAInfo()); AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned); if (AtomicTy.isNull()) { llvm::APInt Size( /*numBits=*/32, C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity()); AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal, /*IndexTypeQuals=*/0); } AtomicAlign = ValueAlign = lvalue.getAlignment(); } else if (lvalue.isVectorElt()) { ValueTy = 
lvalue.getType()->getAs<VectorType>()->getElementType(); ValueSizeInBits = C.getTypeSize(ValueTy); AtomicTy = lvalue.getType(); AtomicSizeInBits = C.getTypeSize(AtomicTy); AtomicAlign = ValueAlign = lvalue.getAlignment(); LVal = lvalue; } else { assert(lvalue.isExtVectorElt()); ValueTy = lvalue.getType(); ValueSizeInBits = C.getTypeSize(ValueTy); AtomicTy = ValueTy = CGF.getContext().getExtVectorType( lvalue.getType(), lvalue.getExtVectorAddr() ->getType() ->getPointerElementType() ->getVectorNumElements()); AtomicSizeInBits = C.getTypeSize(AtomicTy); AtomicAlign = ValueAlign = lvalue.getAlignment(); LVal = lvalue; } UseLibcall = !C.getTargetInfo().hasBuiltinAtomic( AtomicSizeInBits, C.toBits(lvalue.getAlignment())); } QualType getAtomicType() const { return AtomicTy; } QualType getValueType() const { return ValueTy; } CharUnits getAtomicAlignment() const { return AtomicAlign; } CharUnits getValueAlignment() const { return ValueAlign; } uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; } uint64_t getValueSizeInBits() const { return ValueSizeInBits; } TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; } bool shouldUseLibcall() const { return UseLibcall; } const LValue &getAtomicLValue() const { return LVal; } llvm::Value *getAtomicAddress() const { if (LVal.isSimple()) return LVal.getAddress(); else if (LVal.isBitField()) return LVal.getBitFieldAddr(); else if (LVal.isVectorElt()) return LVal.getVectorAddr(); assert(LVal.isExtVectorElt()); return LVal.getExtVectorAddr(); } /// Is the atomic size larger than the underlying value type? /// /// Note that the absence of padding does not mean that atomic /// objects are completely interchangeable with non-atomic /// objects: we might have promoted the alignment of a type /// without making it bigger. bool hasPadding() const { return (ValueSizeInBits != AtomicSizeInBits); } bool emitMemSetZeroIfNecessary() const; llvm::Value *getAtomicSizeValue() const { CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits); return CGF.CGM.getSize(size); } /// Cast the given pointer to an integer pointer suitable for /// atomic operations. llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const; /// Turn an atomic-layout object into an r-value. RValue convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot, SourceLocation loc, bool AsValue) const; /// \brief Converts a rvalue to integer value. llvm::Value *convertRValueToInt(RValue RVal) const; RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot, SourceLocation Loc, bool AsValue) const; /// Copy an atomic r-value into atomic-layout memory. void emitCopyIntoMemory(RValue rvalue) const; /// Project an l-value down to the value field. LValue projectValue() const { assert(LVal.isSimple()); llvm::Value *addr = getAtomicAddress(); if (hasPadding()) addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0); return LValue::MakeAddr(addr, getValueType(), LVal.getAlignment(), CGF.getContext(), LVal.getTBAAInfo()); } /// \brief Emits atomic load. /// \returns Loaded value. RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc, bool AsValue, llvm::AtomicOrdering AO, bool IsVolatile); /// \brief Emits atomic compare-and-exchange sequence. /// \param Expected Expected value. /// \param Desired Desired value. /// \param Success Atomic ordering for success operation. /// \param Failure Atomic ordering for failed operation. /// \param IsWeak true if atomic operation is weak, false otherwise. 
/// \returns Pair of values: previous value from storage (value type) and /// boolean flag (i1 type) with true if success and false otherwise. std::pair<RValue, llvm::Value *> EmitAtomicCompareExchange( RValue Expected, RValue Desired, llvm::AtomicOrdering Success = llvm::SequentiallyConsistent, llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent, bool IsWeak = false); /// \brief Emits atomic update. /// \param AO Atomic ordering. /// \param UpdateOp Update operation for the current lvalue. void EmitAtomicUpdate(llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile); /// \brief Emits atomic update. /// \param AO Atomic ordering. void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, bool IsVolatile); /// Materialize an atomic r-value in atomic-layout memory. llvm::Value *materializeRValue(RValue rvalue) const; /// \brief Translates LLVM atomic ordering to GNU atomic ordering for /// libcalls. static AtomicExpr::AtomicOrderingKind translateAtomicOrdering(const llvm::AtomicOrdering AO); private: bool requiresMemSetZero(llvm::Type *type) const; /// \brief Creates temp alloca for intermediate operations on atomic value. llvm::Value *CreateTempAlloca() const; /// \brief Emits atomic load as a libcall. void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded, llvm::AtomicOrdering AO, bool IsVolatile); /// \brief Emits atomic load as LLVM instruction. llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile); /// \brief Emits atomic compare-and-exchange op as a libcall. llvm::Value *EmitAtomicCompareExchangeLibcall( llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr, llvm::AtomicOrdering Success = llvm::SequentiallyConsistent, llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent); /// \brief Emits atomic compare-and-exchange op as LLVM instruction. std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp( llvm::Value *ExpectedVal, llvm::Value *DesiredVal, llvm::AtomicOrdering Success = llvm::SequentiallyConsistent, llvm::AtomicOrdering Failure = llvm::SequentiallyConsistent, bool IsWeak = false); /// \brief Emit atomic update as libcalls. void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile); /// \brief Emit atomic update as LLVM instructions. void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile); /// \brief Emit atomic update as libcalls. void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal, bool IsVolatile); /// \brief Emit atomic update as LLVM instructions. void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal, bool IsVolatile); }; } AtomicExpr::AtomicOrderingKind AtomicInfo::translateAtomicOrdering(const llvm::AtomicOrdering AO) { switch (AO) { case llvm::Unordered: case llvm::NotAtomic: case llvm::Monotonic: return AtomicExpr::AO_ABI_memory_order_relaxed; case llvm::Acquire: return AtomicExpr::AO_ABI_memory_order_acquire; case llvm::Release: return AtomicExpr::AO_ABI_memory_order_release; case llvm::AcquireRelease: return AtomicExpr::AO_ABI_memory_order_acq_rel; case llvm::SequentiallyConsistent: return AtomicExpr::AO_ABI_memory_order_seq_cst; } llvm_unreachable("Unhandled AtomicOrdering"); } llvm::Value *AtomicInfo::CreateTempAlloca() const { auto *TempAlloca = CGF.CreateMemTemp( (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? 
ValueTy : AtomicTy, "atomic-temp"); TempAlloca->setAlignment(getAtomicAlignment().getQuantity()); // Cast to pointer to value type for bitfields. if (LVal.isBitField()) return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast( TempAlloca, getAtomicAddress()->getType()); return TempAlloca; } static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, CallArgList &args) { const CGFunctionInfo &fnInfo = CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args, FunctionType::ExtInfo(), RequiredArgs::All); llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo); llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName); return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args); } /// Does a store of the given IR type modify the full expected width? static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type, uint64_t expectedSize) { return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize); } /// Does the atomic type require memsetting to zero before initialization? /// /// The IR type is provided as a way of making certain queries faster. bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const { // If the atomic type has size padding, we definitely need a memset. if (hasPadding()) return true; // Otherwise, do some simple heuristics to try to avoid it: switch (getEvaluationKind()) { // For scalars and complexes, check whether the store size of the // type uses the full size. case TEK_Scalar: return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits); case TEK_Complex: return !isFullSizeType(CGF.CGM, type->getStructElementType(0), AtomicSizeInBits / 2); // Padding in structs has an undefined bit pattern. User beware. case TEK_Aggregate: return false; } llvm_unreachable("bad evaluation kind"); } bool AtomicInfo::emitMemSetZeroIfNecessary() const { assert(LVal.isSimple()); llvm::Value *addr = LVal.getAddress(); if (!requiresMemSetZero(addr->getType()->getPointerElementType())) return false; CGF.Builder.CreateMemSet( addr, llvm::ConstantInt::get(CGF.Int8Ty, 0), CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(), LVal.getAlignment().getQuantity()); return true; } static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, llvm::Value *Dest, llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, uint64_t Size, unsigned Align, llvm::AtomicOrdering SuccessOrder, llvm::AtomicOrdering FailureOrder) { // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment. llvm::LoadInst *Expected = CGF.Builder.CreateLoad(Val1); Expected->setAlignment(Align); llvm::LoadInst *Desired = CGF.Builder.CreateLoad(Val2); Desired->setAlignment(Align); llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg( Ptr, Expected, Desired, SuccessOrder, FailureOrder); Pair->setVolatile(E->isVolatile()); Pair->setWeak(IsWeak); // Cmp holds the result of the compare-exchange operation: true on success, // false on failure. llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0); llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1); // This basic block is used to hold the store instruction if the operation // failed. llvm::BasicBlock *StoreExpectedBB = CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn); // This basic block is the exit point of the operation, we should end up // here regardless of whether or not the operation succeeded. 
llvm::BasicBlock *ContinueBB = CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn); // Update Expected if Expected isn't equal to Old, otherwise branch to the // exit point. CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB); CGF.Builder.SetInsertPoint(StoreExpectedBB); // Update the memory at Expected with Old's value. llvm::StoreInst *StoreExpected = CGF.Builder.CreateStore(Old, Val1); StoreExpected->setAlignment(Align); // Finally, branch to the exit point. CGF.Builder.CreateBr(ContinueBB); CGF.Builder.SetInsertPoint(ContinueBB); // Update the memory at Dest with Cmp's value. CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType())); return; } /// Given an ordering required on success, emit all possible cmpxchg /// instructions to cope with the provided (but possibly only dynamically known) /// FailureOrder. static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, llvm::Value *Dest, llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, llvm::Value *FailureOrderVal, uint64_t Size, unsigned Align, llvm::AtomicOrdering SuccessOrder) { llvm::AtomicOrdering FailureOrder; if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) { switch (FO->getSExtValue()) { default: FailureOrder = llvm::Monotonic; break; case AtomicExpr::AO_ABI_memory_order_consume: case AtomicExpr::AO_ABI_memory_order_acquire: FailureOrder = llvm::Acquire; break; case AtomicExpr::AO_ABI_memory_order_seq_cst: FailureOrder = llvm::SequentiallyConsistent; break; } if (FailureOrder >= SuccessOrder) { // Don't assert on undefined behaviour. FailureOrder = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder); } emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder, FailureOrder); return; } // Create all the relevant BB's llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr, *SeqCstBB = nullptr; MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn); if (SuccessOrder != llvm::Monotonic && SuccessOrder != llvm::Release) AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn); if (SuccessOrder == llvm::SequentiallyConsistent) SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn); llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn); llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB); // Emit all the different atomics // MonotonicBB is arbitrarily chosen as the default case; in practice, this // doesn't matter unless someone is crazy enough to use something that // doesn't fold to a constant for the ordering. 
CGF.Builder.SetInsertPoint(MonotonicBB); emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder, llvm::Monotonic); CGF.Builder.CreateBr(ContBB); if (AcquireBB) { CGF.Builder.SetInsertPoint(AcquireBB); emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder, llvm::Acquire); CGF.Builder.CreateBr(ContBB); SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume), AcquireBB); SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire), AcquireBB); } if (SeqCstBB) { CGF.Builder.SetInsertPoint(SeqCstBB); emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, Align, SuccessOrder, llvm::SequentiallyConsistent); CGF.Builder.CreateBr(ContBB); SI->addCase(CGF.Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst), SeqCstBB); } CGF.Builder.SetInsertPoint(ContBB); } static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest, llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2, llvm::Value *IsWeak, llvm::Value *FailureOrder, uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) { llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add; llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0; switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: llvm_unreachable("Already handled!"); case AtomicExpr::AO__c11_atomic_compare_exchange_strong: emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, FailureOrder, Size, Align, Order); return; case AtomicExpr::AO__c11_atomic_compare_exchange_weak: emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2, FailureOrder, Size, Align, Order); return; case AtomicExpr::AO__atomic_compare_exchange: case AtomicExpr::AO__atomic_compare_exchange_n: { if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) { emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr, Val1, Val2, FailureOrder, Size, Align, Order); } else { // Create all the relevant BB's llvm::BasicBlock *StrongBB = CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn); llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn); llvm::BasicBlock *ContBB = CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn); llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB); SI->addCase(CGF.Builder.getInt1(false), StrongBB); CGF.Builder.SetInsertPoint(StrongBB); emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2, FailureOrder, Size, Align, Order); CGF.Builder.CreateBr(ContBB); CGF.Builder.SetInsertPoint(WeakBB); emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2, FailureOrder, Size, Align, Order); CGF.Builder.CreateBr(ContBB); CGF.Builder.SetInsertPoint(ContBB); } return; } case AtomicExpr::AO__c11_atomic_load: case AtomicExpr::AO__atomic_load_n: case AtomicExpr::AO__atomic_load: { llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr); Load->setAtomic(Order); Load->setAlignment(Size); Load->setVolatile(E->isVolatile()); llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest); StoreDest->setAlignment(Align); return; } case AtomicExpr::AO__c11_atomic_store: case AtomicExpr::AO__atomic_store: case AtomicExpr::AO__atomic_store_n: { assert(!Dest && "Store does not return a value"); llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); LoadVal1->setAlignment(Align); llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr); Store->setAtomic(Order); Store->setAlignment(Size); Store->setVolatile(E->isVolatile()); return; } case AtomicExpr::AO__c11_atomic_exchange: case 
AtomicExpr::AO__atomic_exchange_n: case AtomicExpr::AO__atomic_exchange: Op = llvm::AtomicRMWInst::Xchg; break; case AtomicExpr::AO__atomic_add_fetch: PostOp = llvm::Instruction::Add; LLVM_FALLTHROUGH; // HLSL Change case AtomicExpr::AO__c11_atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_add: Op = llvm::AtomicRMWInst::Add; break; case AtomicExpr::AO__atomic_sub_fetch: PostOp = llvm::Instruction::Sub; LLVM_FALLTHROUGH; // HLSL Change case AtomicExpr::AO__c11_atomic_fetch_sub: case AtomicExpr::AO__atomic_fetch_sub: Op = llvm::AtomicRMWInst::Sub; break; case AtomicExpr::AO__atomic_and_fetch: PostOp = llvm::Instruction::And; LLVM_FALLTHROUGH; // HLSL Change case AtomicExpr::AO__c11_atomic_fetch_and: case AtomicExpr::AO__atomic_fetch_and: Op = llvm::AtomicRMWInst::And; break; case AtomicExpr::AO__atomic_or_fetch: PostOp = llvm::Instruction::Or; LLVM_FALLTHROUGH; // HLSL Change case AtomicExpr::AO__c11_atomic_fetch_or: case AtomicExpr::AO__atomic_fetch_or: Op = llvm::AtomicRMWInst::Or; break; case AtomicExpr::AO__atomic_xor_fetch: PostOp = llvm::Instruction::Xor; LLVM_FALLTHROUGH; // HLSL Change case AtomicExpr::AO__c11_atomic_fetch_xor: case AtomicExpr::AO__atomic_fetch_xor: Op = llvm::AtomicRMWInst::Xor; break; case AtomicExpr::AO__atomic_nand_fetch: PostOp = llvm::Instruction::And; LLVM_FALLTHROUGH; // HLSL Change case AtomicExpr::AO__atomic_fetch_nand: Op = llvm::AtomicRMWInst::Nand; break; } llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1); LoadVal1->setAlignment(Align); llvm::AtomicRMWInst *RMWI = CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order); RMWI->setVolatile(E->isVolatile()); // For __atomic_*_fetch operations, perform the operation again to // determine the value which was written. llvm::Value *Result = RMWI; if (PostOp) Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1); if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch) Result = CGF.Builder.CreateNot(Result); llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest); StoreDest->setAlignment(Align); } // This function emits any expression (scalar, complex, or aggregate) // into a temporary alloca. static llvm::Value * EmitValToTemp(CodeGenFunction &CGF, Expr *E) { llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp"); CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(), /*Init*/ true); return DeclPtr; } static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args, bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy, SourceLocation Loc, CharUnits SizeInChars) { if (UseOptimizedLibcall) { // Load value and pass it to the function directly. unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity(); int64_t SizeInBits = CGF.getContext().toBits(SizeInChars); ValTy = CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false); llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits)->getPointerTo(); Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false, Align, CGF.getContext().getPointerType(ValTy), Loc); // Coerce the value into an appropriately sized integer type. Args.add(RValue::get(Val), ValTy); } else { // Non-optimized functions always take a reference. 
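    // For reference, the two libcall shapes this distinction maps to (a
    // sketch following the GCC atomic library design; the exact prototypes
    // live in the runtime library):
    //   optimized: T    __atomic_fetch_add_4(T *mem, T val, int order);
    //   generic:   void __atomic_store(size_t size, void *mem, void *val,
    //                                  int order);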
Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)), CGF.getContext().VoidPtrTy); } } RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) { QualType AtomicTy = E->getPtr()->getType()->getPointeeType(); QualType MemTy = AtomicTy; if (const AtomicType *AT = AtomicTy->getAs<AtomicType>()) MemTy = AT->getValueType(); CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy); uint64_t Size = sizeChars.getQuantity(); CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy); unsigned Align = alignChars.getQuantity(); unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth(); bool UseLibcall = (Size != Align || getContext().toBits(sizeChars) > MaxInlineWidthInBits); llvm::Value *IsWeak = nullptr, *OrderFail = nullptr, *Val1 = nullptr, *Val2 = nullptr; llvm::Value *Ptr = EmitScalarExpr(E->getPtr()); if (E->getOp() == AtomicExpr::AO__c11_atomic_init) { assert(!Dest && "Init does not return a value"); LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext()); EmitAtomicInit(E->getVal1(), lvalue); return RValue::get(nullptr); } llvm::Value *Order = EmitScalarExpr(E->getOrder()); switch (E->getOp()) { case AtomicExpr::AO__c11_atomic_init: llvm_unreachable("Already handled!"); case AtomicExpr::AO__c11_atomic_load: case AtomicExpr::AO__atomic_load_n: break; case AtomicExpr::AO__atomic_load: Dest = EmitScalarExpr(E->getVal1()); break; case AtomicExpr::AO__atomic_store: Val1 = EmitScalarExpr(E->getVal1()); break; case AtomicExpr::AO__atomic_exchange: Val1 = EmitScalarExpr(E->getVal1()); Dest = EmitScalarExpr(E->getVal2()); break; case AtomicExpr::AO__c11_atomic_compare_exchange_strong: case AtomicExpr::AO__c11_atomic_compare_exchange_weak: case AtomicExpr::AO__atomic_compare_exchange_n: case AtomicExpr::AO__atomic_compare_exchange: Val1 = EmitScalarExpr(E->getVal1()); if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange) Val2 = EmitScalarExpr(E->getVal2()); else Val2 = EmitValToTemp(*this, E->getVal2()); OrderFail = EmitScalarExpr(E->getOrderFail()); if (E->getNumSubExprs() == 6) IsWeak = EmitScalarExpr(E->getWeak()); break; case AtomicExpr::AO__c11_atomic_fetch_add: case AtomicExpr::AO__c11_atomic_fetch_sub: if (MemTy->isPointerType()) { // For pointer arithmetic, we're required to do a bit of math: // adding 1 to an int* is not the same as adding 1 to a uintptr_t. // ... but only for the C11 builtins. The GNU builtins expect the // user to multiply by sizeof(T). 
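    // Illustration (hypothetical source, added for clarity):
    //   _Atomic(int *) p;  int *q;
    //   atomic_fetch_add(&p, 1);                     // C11: adds sizeof(int)
    //   __atomic_fetch_add(&q, 1, __ATOMIC_SEQ_CST); // GNU: adds exactly 1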
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    LLVM_FALLTHROUGH; // HLSL Change
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  auto GetDest = [&] {
    if (!RValTy->isVoidType() && !Dest) {
      Dest = CreateMemTemp(RValTy, ".atomicdst");
    }
    return Dest;
  };

  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);

    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
// bool __atomic_compare_exchange(size_t size, void *mem, void *expected, // void *desired, int success, int failure) // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired, // int success, int failure) case AtomicExpr::AO__c11_atomic_compare_exchange_weak: case AtomicExpr::AO__c11_atomic_compare_exchange_strong: case AtomicExpr::AO__atomic_compare_exchange: case AtomicExpr::AO__atomic_compare_exchange_n: LibCallName = "__atomic_compare_exchange"; RetTy = getContext().BoolTy; HaveRetTy = true; Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy); AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy, E->getExprLoc(), sizeChars); Args.add(RValue::get(Order), getContext().IntTy); Order = OrderFail; break; // void __atomic_exchange(size_t size, void *mem, void *val, void *return, // int order) // T __atomic_exchange_N(T *mem, T val, int order) case AtomicExpr::AO__c11_atomic_exchange: case AtomicExpr::AO__atomic_exchange_n: case AtomicExpr::AO__atomic_exchange: LibCallName = "__atomic_exchange"; AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy, E->getExprLoc(), sizeChars); break; // void __atomic_store(size_t size, void *mem, void *val, int order) // void __atomic_store_N(T *mem, T val, int order) case AtomicExpr::AO__c11_atomic_store: case AtomicExpr::AO__atomic_store: case AtomicExpr::AO__atomic_store_n: LibCallName = "__atomic_store"; RetTy = getContext().VoidTy; HaveRetTy = true; AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy, E->getExprLoc(), sizeChars); break; // void __atomic_load(size_t size, void *mem, void *return, int order) // T __atomic_load_N(T *mem, int order) case AtomicExpr::AO__c11_atomic_load: case AtomicExpr::AO__atomic_load: case AtomicExpr::AO__atomic_load_n: LibCallName = "__atomic_load"; break; // T __atomic_fetch_add_N(T *mem, T val, int order) case AtomicExpr::AO__c11_atomic_fetch_add: case AtomicExpr::AO__atomic_fetch_add: LibCallName = "__atomic_fetch_add"; AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy, E->getExprLoc(), sizeChars); break; // T __atomic_fetch_and_N(T *mem, T val, int order) case AtomicExpr::AO__c11_atomic_fetch_and: case AtomicExpr::AO__atomic_fetch_and: LibCallName = "__atomic_fetch_and"; AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy, E->getExprLoc(), sizeChars); break; // T __atomic_fetch_or_N(T *mem, T val, int order) case AtomicExpr::AO__c11_atomic_fetch_or: case AtomicExpr::AO__atomic_fetch_or: LibCallName = "__atomic_fetch_or"; AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy, E->getExprLoc(), sizeChars); break; // T __atomic_fetch_sub_N(T *mem, T val, int order) case AtomicExpr::AO__c11_atomic_fetch_sub: case AtomicExpr::AO__atomic_fetch_sub: LibCallName = "__atomic_fetch_sub"; AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy, E->getExprLoc(), sizeChars); break; // T __atomic_fetch_xor_N(T *mem, T val, int order) case AtomicExpr::AO__c11_atomic_fetch_xor: case AtomicExpr::AO__atomic_fetch_xor: LibCallName = "__atomic_fetch_xor"; AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy, E->getExprLoc(), sizeChars); break; default: return EmitUnsupportedRValue(E, "atomic library call"); } // Optimized functions have the size in their name. if (UseOptimizedLibcall) LibCallName += "_" + llvm::utostr(Size); // By default, assume we return a value of the atomic type. if (!HaveRetTy) { if (UseOptimizedLibcall) { // Value is returned directly. 
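      // (For example, a 4-byte fetch-add resolves to "__atomic_fetch_add_4"
      // via the size suffix appended above, and its result comes back in a
      // register; a sketch of the convention from the GCC atomic library
      // design rather than an exhaustive rule.)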
      // The function returns an appropriately sized integer type.
      RetTy = getContext().getIntTypeForBitwidth(
          getContext().toBits(sizeChars), /*Signed=*/false);
      } else {
        // Value is returned through a parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order), getContext().IntTy);

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (HaveRetTy && !RetTy->isVoidType())
      return Res;
    // The value is returned via an explicit out param.
    if (RetTy->isVoidType())
      return RValue::get(nullptr);
    // The value is returned directly for optimized libcalls but the caller
    // expects an out-param.
    if (UseOptimizedLibcall) {
      llvm::Value *ResVal = Res.getScalarVal();
      llvm::StoreInst *StoreDest = Builder.CreateStore(
          ResVal,
          Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
      StoreDest->setAlignment(Align);
    }
    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

  llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(), Size * 8);

  llvm::Value *OrigDest = GetDest();
  Ptr = Builder.CreateBitCast(
      Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
  if (Val1)
    Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
  if (Val2)
    Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
  if (Dest && !E->isCmpXChg())
    Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());

  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AtomicExpr::AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::Monotonic);
      break;
    case AtomicExpr::AO_ABI_memory_order_consume:
    case AtomicExpr::AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::Acquire);
      break;
    case AtomicExpr::AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::Release);
      break;
    case AtomicExpr::AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::AcquireRelease);
      break;
    case AtomicExpr::AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                   Align, llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.
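  // Shape of what follows (annotation only): switch on the dynamic ordering,
  // emit one copy of the atomic op per legal ordering, and rejoin:
  //
  //   switch (order) -> monotonic / acquire / release / acqrel / seqcst
  //   each block: EmitAtomicOp(..., <ordering>); br atomic.continue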
// Create all the relevant BB's llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr, *ReleaseBB = nullptr, *AcqRelBB = nullptr, *SeqCstBB = nullptr; MonotonicBB = createBasicBlock("monotonic", CurFn); if (!IsStore) AcquireBB = createBasicBlock("acquire", CurFn); if (!IsLoad) ReleaseBB = createBasicBlock("release", CurFn); if (!IsLoad && !IsStore) AcqRelBB = createBasicBlock("acqrel", CurFn); SeqCstBB = createBasicBlock("seqcst", CurFn); llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn); // Create the switch for the split // MonotonicBB is arbitrarily chosen as the default case; in practice, this // doesn't matter unless someone is crazy enough to use something that // doesn't fold to a constant for the ordering. Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false); llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB); // Emit all the different atomics Builder.SetInsertPoint(MonotonicBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, Align, llvm::Monotonic); Builder.CreateBr(ContBB); if (!IsStore) { Builder.SetInsertPoint(AcquireBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, Align, llvm::Acquire); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_consume), AcquireBB); SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acquire), AcquireBB); } if (!IsLoad) { Builder.SetInsertPoint(ReleaseBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, Align, llvm::Release); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_release), ReleaseBB); } if (!IsLoad && !IsStore) { Builder.SetInsertPoint(AcqRelBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, Align, llvm::AcquireRelease); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_acq_rel), AcqRelBB); } Builder.SetInsertPoint(SeqCstBB); EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size, Align, llvm::SequentiallyConsistent); Builder.CreateBr(ContBB); SI->addCase(Builder.getInt32(AtomicExpr::AO_ABI_memory_order_seq_cst), SeqCstBB); // Cleanup and return Builder.SetInsertPoint(ContBB); if (RValTy->isVoidType()) return RValue::get(nullptr); return convertTempToRValue(OrigDest, RValTy, E->getExprLoc()); } llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const { unsigned addrspace = cast<llvm::PointerType>(addr->getType())->getAddressSpace(); llvm::IntegerType *ty = llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits); return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace)); } RValue AtomicInfo::convertTempToRValue(llvm::Value *addr, AggValueSlot resultSlot, SourceLocation loc, bool AsValue) const { if (LVal.isSimple()) { if (EvaluationKind == TEK_Aggregate) return resultSlot.asRValue(); // Drill into the padding structure if we have one. if (hasPadding()) addr = CGF.Builder.CreateStructGEP(nullptr, addr, 0); // Otherwise, just convert the temporary to an r-value using the // normal conversion routine. 
return CGF.convertTempToRValue(addr, getValueType(), loc); } if (!AsValue) // Get RValue from temp memory as atomic for non-simple lvalues return RValue::get( CGF.Builder.CreateAlignedLoad(addr, AtomicAlign.getQuantity())); if (LVal.isBitField()) return CGF.EmitLoadOfBitfieldLValue(LValue::MakeBitfield( addr, LVal.getBitFieldInfo(), LVal.getType(), LVal.getAlignment())); if (LVal.isVectorElt()) return CGF.EmitLoadOfLValue(LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(), LVal.getAlignment()), loc); assert(LVal.isExtVectorElt()); return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt( addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getAlignment())); } RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot, SourceLocation Loc, bool AsValue) const { // Try not to in some easy cases. assert(IntVal->getType()->isIntegerTy() && "Expected integer value"); if (getEvaluationKind() == TEK_Scalar && (((!LVal.isBitField() || LVal.getBitFieldInfo().Size == ValueSizeInBits) && !hasPadding()) || !AsValue)) { auto *ValTy = AsValue ? CGF.ConvertTypeForMem(ValueTy) : getAtomicAddress()->getType()->getPointerElementType(); if (ValTy->isIntegerTy()) { assert(IntVal->getType() == ValTy && "Different integer types."); return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy)); } else if (ValTy->isPointerTy()) return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy)); else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy)) return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy)); } // Create a temporary. This needs to be big enough to hold the // atomic integer. llvm::Value *Temp; bool TempIsVolatile = false; CharUnits TempAlignment; if (AsValue && getEvaluationKind() == TEK_Aggregate) { assert(!ResultSlot.isIgnored()); Temp = ResultSlot.getAddr(); TempAlignment = getValueAlignment(); TempIsVolatile = ResultSlot.isVolatile(); } else { Temp = CreateTempAlloca(); TempAlignment = getAtomicAlignment(); } // Slam the integer into the temporary. llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp); CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity()) ->setVolatile(TempIsVolatile); return convertTempToRValue(Temp, ResultSlot, Loc, AsValue); } void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded, llvm::AtomicOrdering AO, bool) { // void __atomic_load(size_t size, void *mem, void *return, int order); CallArgList Args; Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType()); Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())), CGF.getContext().VoidPtrTy); Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)), CGF.getContext().VoidPtrTy); Args.add(RValue::get( llvm::ConstantInt::get(CGF.IntTy, translateAtomicOrdering(AO))), CGF.getContext().IntTy); emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args); } llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile) { // Okay, we're doing this natively. llvm::Value *Addr = emitCastToAtomicIntPointer(getAtomicAddress()); llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load"); Load->setAtomic(AO); // Other decoration. 
  Load->setAlignment(getAtomicAlignment().getQuantity());
  if (IsVolatile)
    Load->setVolatile(true);
  if (LVal.getTBAAInfo())
    CGF.CGM.DecorateInstruction(Load, LVal.getTBAAInfo());
  return Load;
}

/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

/// A type is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* we know the access is volatile
/// and such an operation can be performed without a libcall.
bool CodeGenFunction::typeIsSuitableForInlineAtomic(QualType Ty,
                                                    bool IsVolatile) const {
  // An atomic is inline if we don't need to use a libcall (e.g. it is
  // builtin).
  bool AtomicIsInline = getContext().getTargetInfo().hasBuiltinAtomic(
      getContext().getTypeSize(Ty), getContext().getTypeAlign(Ty));
  return CGM.getCodeGenOpts().MSVolatile && IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::SequentiallyConsistent;
  } else {
    AO = llvm::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    llvm::Value *TempAddr;
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddr();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr, AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(nullptr, false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
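  // Example of such padding (hypothetical): an _Atomic struct of 3 bytes may
  // be widened to a 4-byte atomic representation, so the trailing byte exists
  // in the atomic object but not in the value and must be zeroed explicitly.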
if (rvalue.isAggregate()) { CGF.EmitAggregateCopy(getAtomicAddress(), rvalue.getAggregateAddr(), getAtomicType(), (rvalue.isVolatileQualified() || LVal.isVolatileQualified()), LVal.getAlignment()); return; } // Okay, otherwise we're copying stuff. // Zero out the buffer if necessary. emitMemSetZeroIfNecessary(); // Drill past the padding if present. LValue TempLVal = projectValue(); // Okay, store the rvalue in. if (rvalue.isScalar()) { CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true); } else { CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true); } } /// Materialize an r-value into memory for the purposes of storing it /// to an atomic type. llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const { // Aggregate r-values are already in memory, and EmitAtomicStore // requires them to be values of the atomic type. if (rvalue.isAggregate()) return rvalue.getAggregateAddr(); // Otherwise, make a temporary and materialize into it. LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType(), getAtomicAlignment()); AtomicInfo Atomics(CGF, TempLV); Atomics.emitCopyIntoMemory(rvalue); return TempLV.getAddress(); } llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const { // If we've got a scalar value of the right size, try to avoid going // through memory. if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) { llvm::Value *Value = RVal.getScalarVal(); if (isa<llvm::IntegerType>(Value->getType())) return CGF.EmitToMemory(Value, ValueTy); else { llvm::IntegerType *InputIntTy = llvm::IntegerType::get( CGF.getLLVMContext(), LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits()); if (isa<llvm::PointerType>(Value->getType())) return CGF.Builder.CreatePtrToInt(Value, InputIntTy); else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy)) return CGF.Builder.CreateBitCast(Value, InputIntTy); } } // Otherwise, we need to go through memory. // Put the r-value in memory. llvm::Value *Addr = materializeRValue(RVal); // Cast the temporary to the atomic int type and pull a value out. Addr = emitCastToAtomicIntPointer(Addr); return CGF.Builder.CreateAlignedLoad(Addr, getAtomicAlignment().getQuantity()); } std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp( llvm::Value *ExpectedVal, llvm::Value *DesiredVal, llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) { // Do the atomic store. auto *Addr = emitCastToAtomicIntPointer(getAtomicAddress()); auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal, Success, Failure); // Other decoration. Inst->setVolatile(LVal.isVolatileQualified()); Inst->setWeak(IsWeak); // Okay, turn that back into the original value type. 
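  // cmpxchg yields a pair { old value, success flag }: index 0 is the loaded
  // value and index 1 is an i1 that is true iff the exchange happened.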
auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0); auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1); return std::make_pair(PreviousVal, SuccessFailureVal); } llvm::Value * AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr, llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure) { // bool __atomic_compare_exchange(size_t size, void *obj, void *expected, // void *desired, int success, int failure); CallArgList Args; Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType()); Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicAddress())), CGF.getContext().VoidPtrTy); Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)), CGF.getContext().VoidPtrTy); Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)), CGF.getContext().VoidPtrTy); Args.add(RValue::get(llvm::ConstantInt::get( CGF.IntTy, translateAtomicOrdering(Success))), CGF.getContext().IntTy); Args.add(RValue::get(llvm::ConstantInt::get( CGF.IntTy, translateAtomicOrdering(Failure))), CGF.getContext().IntTy); auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange", CGF.getContext().BoolTy, Args); return SuccessFailureRVal.getScalarVal(); } std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange( RValue Expected, RValue Desired, llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) { if (Failure >= Success) // Don't assert on undefined behavior. Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success); // Check whether we should use a library call. if (shouldUseLibcall()) { // Produce a source address. auto *ExpectedAddr = materializeRValue(Expected); auto *DesiredAddr = materializeRValue(Desired); auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, Success, Failure); return std::make_pair( convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(), SourceLocation(), /*AsValue=*/false), Res); } // If we've got a scalar value of the right size, try to avoid going // through memory. 
auto *ExpectedVal = convertRValueToInt(Expected); auto *DesiredVal = convertRValueToInt(Desired); auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success, Failure, IsWeak); return std::make_pair( ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(), SourceLocation(), /*AsValue=*/false), Res.second); } static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, const llvm::function_ref<RValue(RValue)> &UpdateOp, llvm::Value *DesiredAddr) { llvm::Value *Ptr = nullptr; LValue UpdateLVal; RValue UpRVal; LValue AtomicLVal = Atomics.getAtomicLValue(); LValue DesiredLVal; if (AtomicLVal.isSimple()) { UpRVal = OldRVal; DesiredLVal = LValue::MakeAddr(DesiredAddr, AtomicLVal.getType(), AtomicLVal.getAlignment(), CGF.CGM.getContext()); } else { // Build new lvalue for temp address Ptr = Atomics.materializeRValue(OldRVal); if (AtomicLVal.isBitField()) { UpdateLVal = LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(), AtomicLVal.getType(), AtomicLVal.getAlignment()); DesiredLVal = LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(), AtomicLVal.getType(), AtomicLVal.getAlignment()); } else if (AtomicLVal.isVectorElt()) { UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(), AtomicLVal.getAlignment()); DesiredLVal = LValue::MakeVectorElt( DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(), AtomicLVal.getAlignment()); } else { assert(AtomicLVal.isExtVectorElt()); UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(), AtomicLVal.getAlignment()); DesiredLVal = LValue::MakeExtVectorElt( DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(), AtomicLVal.getAlignment()); } UpdateLVal.setTBAAInfo(AtomicLVal.getTBAAInfo()); DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo()); UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation()); } // Store new value in the corresponding memory area RValue NewRVal = UpdateOp(UpRVal); if (NewRVal.isScalar()) { CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal); } else { assert(NewRVal.isComplex()); CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal, /*isInit=*/false); } } void AtomicInfo::EmitAtomicUpdateLibcall( llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) { auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); llvm::Value *ExpectedAddr = CreateTempAlloca(); EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile); auto *ContBB = CGF.createBasicBlock("atomic_cont"); auto *ExitBB = CGF.createBasicBlock("atomic_exit"); CGF.EmitBlock(ContBB); auto *DesiredAddr = CreateTempAlloca(); if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || requiresMemSetZero( getAtomicAddress()->getType()->getPointerElementType())) { auto *OldVal = CGF.Builder.CreateAlignedLoad( ExpectedAddr, getAtomicAlignment().getQuantity()); CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr, getAtomicAlignment().getQuantity()); } auto OldRVal = convertTempToRValue(ExpectedAddr, AggValueSlot::ignored(), SourceLocation(), /*AsValue=*/false); EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr); auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure); CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); CGF.EmitBlock(ExitBB, /*IsFinished=*/true); } void AtomicInfo::EmitAtomicUpdateOp( llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) { auto Failure = 
llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); // Do the atomic load. auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile); // For non-simple lvalues perform compare-and-swap procedure. auto *ContBB = CGF.createBasicBlock("atomic_cont"); auto *ExitBB = CGF.createBasicBlock("atomic_exit"); auto *CurBB = CGF.Builder.GetInsertBlock(); CGF.EmitBlock(ContBB); llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(), /*NumReservedValues=*/2); PHI->addIncoming(OldVal, CurBB); auto *NewAtomicAddr = CreateTempAlloca(); auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr); if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || requiresMemSetZero( getAtomicAddress()->getType()->getPointerElementType())) { CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr, getAtomicAlignment().getQuantity()); } auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(), SourceLocation(), /*AsValue=*/false); EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr); auto *DesiredVal = CGF.Builder.CreateAlignedLoad( NewAtomicIntAddr, getAtomicAlignment().getQuantity()); // Try to write new value using cmpxchg operation auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure); PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock()); CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB); CGF.EmitBlock(ExitBB, /*IsFinished=*/true); } static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue UpdateRVal, llvm::Value *DesiredAddr) { LValue AtomicLVal = Atomics.getAtomicLValue(); LValue DesiredLVal; // Build new lvalue for temp address if (AtomicLVal.isBitField()) { DesiredLVal = LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(), AtomicLVal.getType(), AtomicLVal.getAlignment()); } else if (AtomicLVal.isVectorElt()) { DesiredLVal = LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(), AtomicLVal.getAlignment()); } else { assert(AtomicLVal.isExtVectorElt()); DesiredLVal = LValue::MakeExtVectorElt( DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(), AtomicLVal.getAlignment()); } DesiredLVal.setTBAAInfo(AtomicLVal.getTBAAInfo()); // Store new value in the corresponding memory area assert(UpdateRVal.isScalar()); CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal); } void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal, bool IsVolatile) { auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); llvm::Value *ExpectedAddr = CreateTempAlloca(); EmitAtomicLoadLibcall(ExpectedAddr, AO, IsVolatile); auto *ContBB = CGF.createBasicBlock("atomic_cont"); auto *ExitBB = CGF.createBasicBlock("atomic_exit"); CGF.EmitBlock(ContBB); auto *DesiredAddr = CreateTempAlloca(); if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || requiresMemSetZero( getAtomicAddress()->getType()->getPointerElementType())) { auto *OldVal = CGF.Builder.CreateAlignedLoad( ExpectedAddr, getAtomicAlignment().getQuantity()); CGF.Builder.CreateAlignedStore(OldVal, DesiredAddr, getAtomicAlignment().getQuantity()); } EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr); auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr, DesiredAddr, AO, Failure); CGF.Builder.CreateCondBr(Res, ExitBB, ContBB); CGF.EmitBlock(ExitBB, /*IsFinished=*/true); } void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal, bool IsVolatile) { auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO); // Do the atomic load. 
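  // In pseudo-IR, the loop built below is roughly (annotation only, names
  // illustrative):
  //   entry:        %old = <atomic load as integer>
  //   atomic_cont:  %cur = phi [%old, entry], [%prev, atomic_cont]
  //                 %desired = <%cur with UpdateRVal stored into the field>
  //                 {%prev, %ok} = cmpxchg %ptr, %cur, %desired
  //                 br %ok, atomic_exit, atomic_cont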
auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile); // For non-simple lvalues perform compare-and-swap procedure. auto *ContBB = CGF.createBasicBlock("atomic_cont"); auto *ExitBB = CGF.createBasicBlock("atomic_exit"); auto *CurBB = CGF.Builder.GetInsertBlock(); CGF.EmitBlock(ContBB); llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(), /*NumReservedValues=*/2); PHI->addIncoming(OldVal, CurBB); auto *NewAtomicAddr = CreateTempAlloca(); auto *NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr); if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) || requiresMemSetZero( getAtomicAddress()->getType()->getPointerElementType())) { CGF.Builder.CreateAlignedStore(PHI, NewAtomicIntAddr, getAtomicAlignment().getQuantity()); } EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr); auto *DesiredVal = CGF.Builder.CreateAlignedLoad( NewAtomicIntAddr, getAtomicAlignment().getQuantity()); // Try to write new value using cmpxchg operation auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure); PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock()); CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB); CGF.EmitBlock(ExitBB, /*IsFinished=*/true); } void AtomicInfo::EmitAtomicUpdate( llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) { if (shouldUseLibcall()) { EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile); } else { EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile); } } void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal, bool IsVolatile) { if (shouldUseLibcall()) { EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile); } else { EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile); } } void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit) { bool IsVolatile = lvalue.isVolatileQualified(); llvm::AtomicOrdering AO; if (lvalue.getType()->isAtomicType()) { AO = llvm::SequentiallyConsistent; } else { AO = llvm::Release; IsVolatile = true; } return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit); } /// Emit a store to an l-value of atomic type. /// /// Note that the r-value is expected to be an r-value *of the atomic /// type*; this means that for aggregate r-values, it should include /// storage for any padding that was necessary. void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, llvm::AtomicOrdering AO, bool IsVolatile, bool isInit) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. assert(!rvalue.isAggregate() || rvalue.getAggregateAddr()->getType()->getPointerElementType() == dest.getAddress()->getType()->getPointerElementType()); AtomicInfo atomics(*this, dest); LValue LVal = atomics.getAtomicLValue(); // If this is an initialization, just put the value there normally. if (LVal.isSimple()) { if (isInit) { atomics.emitCopyIntoMemory(rvalue); return; } // Check whether we should use a library call. if (atomics.shouldUseLibcall()) { // Produce a source address. 
llvm::Value *srcAddr = atomics.materializeRValue(rvalue); // void __atomic_store(size_t size, void *mem, void *val, int order) CallArgList args; args.add(RValue::get(atomics.getAtomicSizeValue()), getContext().getSizeType()); args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicAddress())), getContext().VoidPtrTy); args.add(RValue::get(EmitCastToVoidPtr(srcAddr)), getContext().VoidPtrTy); args.add(RValue::get(llvm::ConstantInt::get( IntTy, AtomicInfo::translateAtomicOrdering(AO))), getContext().IntTy); emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args); return; } // Okay, we're doing this natively. llvm::Value *intValue = atomics.convertRValueToInt(rvalue); // Do the atomic store. llvm::Value *addr = atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress()); intValue = Builder.CreateIntCast( intValue, addr->getType()->getPointerElementType(), /*isSigned=*/false); llvm::StoreInst *store = Builder.CreateStore(intValue, addr); // Initializations don't need to be atomic. if (!isInit) store->setAtomic(AO); // Other decoration. store->setAlignment(dest.getAlignment().getQuantity()); if (IsVolatile) store->setVolatile(true); if (dest.getTBAAInfo()) CGM.DecorateInstruction(store, dest.getTBAAInfo()); return; } // Emit simple atomic update operation. atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile); } /// Emit a compare-and-exchange op for atomic type. /// std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange( LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak, AggValueSlot Slot) { // If this is an aggregate r-value, it should agree in type except // maybe for address-space qualification. assert(!Expected.isAggregate() || Expected.getAggregateAddr()->getType()->getPointerElementType() == Obj.getAddress()->getType()->getPointerElementType()); assert(!Desired.isAggregate() || Desired.getAggregateAddr()->getType()->getPointerElementType() == Obj.getAddress()->getType()->getPointerElementType()); AtomicInfo Atomics(*this, Obj); return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure, IsWeak); } void CodeGenFunction::EmitAtomicUpdate( LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) { AtomicInfo Atomics(*this, LVal); Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile); } void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) { AtomicInfo atomics(*this, dest); switch (atomics.getEvaluationKind()) { case TEK_Scalar: { llvm::Value *value = EmitScalarExpr(init); atomics.emitCopyIntoMemory(RValue::get(value)); return; } case TEK_Complex: { ComplexPairTy value = EmitComplexExpr(init); atomics.emitCopyIntoMemory(RValue::getComplex(value)); return; } case TEK_Aggregate: { // Fix up the destination if the initializer isn't an expression // of atomic type. bool Zeroed = false; if (!init->getType()->isAtomicType()) { Zeroed = atomics.emitMemSetZeroIfNecessary(); dest = atomics.projectValue(); } // Evaluate the expression directly into the destination. AggValueSlot slot = AggValueSlot::forLValue(dest, AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased, Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed); EmitAggExpr(init, slot); return; } } llvm_unreachable("bad evaluation kind"); }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGHLSLMSFinishCodeGen.cpp
///////////////////////////////////////////////////////////////////////////////
//                                                                           //
// CGHLSLMSFinishCodeGen.cpp                                                 //
// Copyright (C) Microsoft Corporation. All rights reserved.                 //
// This file is distributed under the University of Illinois Open Source     //
// License. See LICENSE.TXT for details.                                     //
//                                                                           //
// Implement FinishCodeGen.                                                  //
//                                                                           //
///////////////////////////////////////////////////////////////////////////////

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/DxilValueCache.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Parse/ParseHLSL.h" // root sig would be in Parser if part of lang
#include "clang/Sema/SemaDiagnostic.h"

#include "dxc/DXIL/DxilConstants.h"
#include "dxc/DXIL/DxilOperations.h"
#include "dxc/DXIL/DxilResourceProperties.h"
#include "dxc/DXIL/DxilTypeSystem.h"
#include "dxc/DXIL/DxilUtil.h"
#include "dxc/DxilRootSignature/DxilRootSignature.h"
#include "dxc/HLSL/DxilExportMap.h"
#include "dxc/HLSL/DxilGenerationPass.h"
#include "dxc/HLSL/HLMatrixType.h"
#include "dxc/HLSL/HLModule.h"
#include "dxc/HLSL/HLSLExtensionsCodegenHelper.h"
#include "dxc/HlslIntrinsicOp.h"

#include <fenv.h>
#include <memory>
#include <vector>

#include "CGHLSLMSHelper.h"

using namespace llvm;
using namespace hlsl;
using namespace CGHLSLMSHelper;

namespace {
template <typename BuilderTy>
Value *EmitHLOperationCall(HLModule &HLM, BuilderTy &Builder,
                           HLOpcodeGroup group, unsigned opcode, Type *RetType,
                           ArrayRef<Value *> paramList, llvm::Module &M) {
  // Add the opcode param.
  llvm::Type *opcodeTy = llvm::Type::getInt32Ty(M.getContext());
  Function *opFunc =
      HLM.GetHLOperationFunction(group, opcode, RetType, paramList, M);
  SmallVector<Value *, 4> opcodeParamList;
  Value *opcodeConst = Constant::getIntegerValue(opcodeTy, APInt(32, opcode));
  opcodeParamList.emplace_back(opcodeConst);
  opcodeParamList.append(paramList.begin(), paramList.end());
  return Builder.CreateCall(opFunc, opcodeParamList);
}

template <typename BuilderTy>
Value *CreateHandleFromResPtr(Value *ResPtr, HLModule &HLM,
                              llvm::Type *HandleTy, BuilderTy &Builder) {
  Module &M = *HLM.GetModule();
  // Load to make sure resources only have Ld/St uses so mem2reg can remove the
  // temp resource.
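  // Schematically, the helper builds (names illustrative; the real mangled
  // intrinsic name comes from GetHLOperationFunction):
  //   %obj    = load %ResTy, %ResTy* %ResPtr
  //   %handle = call @dx.hl.createhandle(i32 <opcode>, %ResTy %obj)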
Value *ldObj = Builder.CreateLoad(ResPtr); Value *args[] = {ldObj}; Value *Handle = EmitHLOperationCall<BuilderTy>( HLM, Builder, HLOpcodeGroup::HLCreateHandle, 0, HandleTy, args, M); return Handle; } Value *CreateNodeOutputHandle(HLModule &HLM, llvm::Type *HandleTy, IRBuilder<> &Builder, unsigned index) { Module &M = *HLM.GetModule(); HLOpcodeGroup opCodeGroup = HLOpcodeGroup::HLCreateNodeOutputHandle; unsigned opCode = static_cast<unsigned>(HLOpcodeGroup::HLCreateNodeOutputHandle); Value *mdIndex = Builder.getInt32(index); Value *args[] = {mdIndex}; CallInst *Handle = HLM.EmitHLOperationCall(Builder, opCodeGroup, opCode, HandleTy, args, M); return Handle; } Value *CreateAnnotateNodeHandle(HLModule &HLM, Value *NodeHandle, IRBuilder<> &Builder, NodeInfo Info) { llvm::Type *NodeHandleTy = HLM.GetOP()->GetNodeHandleType(); llvm::Type *NodeInfoTy = HLM.GetOP()->GetNodePropertiesType(); Module &M = *HLM.GetModule(); StructType *ST = cast<StructType>(NodeInfoTy); Constant *NodeProperties[] = { ConstantInt::get(ST->getElementType(0), Info.IOFlags), ConstantInt::get(ST->getElementType(1), Info.RecordSize)}; Constant *NodeInfo = ConstantStruct::get(ST, NodeProperties); Value *args[] = {NodeHandle, NodeInfo}; Value *Handle = HLM.EmitHLOperationCall( Builder, HLOpcodeGroup::HLAnnotateNodeHandle, static_cast<unsigned>(HLOpcodeGroup::HLAnnotateNodeHandle), NodeHandleTy, args, M); return Handle; } Value *CreateNodeInputRecordHandle(Value *NodeArg, HLModule &HLM, llvm::Type *HandleTy, IRBuilder<> &Builder, unsigned index) { Module &M = *HLM.GetModule(); HLOpcodeGroup opCodeGroup = HLOpcodeGroup::HLCreateNodeInputRecordHandle; auto opCode = static_cast<unsigned>(HLOpcodeGroup::HLCreateNodeInputRecordHandle); DXASSERT_NOMSG(index == 0); Value *mdIndex = Builder.getInt32(index); Value *args[] = {mdIndex}; CallInst *Handle = HLM.EmitHLOperationCall(Builder, opCodeGroup, opCode, HandleTy, args, M); return Handle; } Value *CreateAnnotateNodeRecordHandle(HLModule &HLM, Value *NodeRecordHandle, IRBuilder<> &Builder, NodeRecordInfo Info) { llvm::Type *NodeRecordHandleTy = HLM.GetOP()->GetNodeRecordHandleType(); llvm::Type *NodeRecordInfoTy = HLM.GetOP()->GetNodeRecordPropertiesType(); Module &M = *HLM.GetModule(); StructType *ST = cast<StructType>(NodeRecordInfoTy); Constant *NodeRecordProperties[] = { ConstantInt::get(ST->getElementType(0), Info.IOFlags), ConstantInt::get(ST->getElementType(1), Info.RecordSize)}; Constant *NodeRecordInfo = ConstantStruct::get(ST, NodeRecordProperties); Value *args[] = {NodeRecordHandle, NodeRecordInfo}; Value *Handle = HLM.EmitHLOperationCall( Builder, HLOpcodeGroup::HLAnnotateNodeRecordHandle, static_cast<unsigned>(HLOpcodeGroup::HLAnnotateNodeRecordHandle), NodeRecordHandleTy, args, M); return Handle; } template <typename BuilderTy> Value *CreateAnnotateHandle(HLModule &HLM, Value *Handle, DxilResourceProperties &RP, llvm::Type *ResTy, BuilderTy &Builder) { Constant *RPConstant = resource_helper::getAsConstant( RP, HLM.GetOP()->GetResourcePropertiesType(), *HLM.GetShaderModel()); return EmitHLOperationCall<BuilderTy>( HLM, Builder, HLOpcodeGroup::HLAnnotateHandle, (unsigned)HLOpcodeGroup::HLAnnotateHandle, Handle->getType(), {Handle, RPConstant, UndefValue::get(ResTy)}, *HLM.GetModule()); } template <typename BuilderTy> Value *CastHandleToRes(HLModule &HLM, Value *Handle, llvm::Type *ResTy, BuilderTy &Builder) { Value *Res = EmitHLOperationCall<BuilderTy>(HLM, Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToResCast, ResTy, {Handle}, *HLM.GetModule()); return 
Res; } // Lower CBV bitcast use to handle use. // Leave the load/store. void LowerDynamicCBVUseToHandle(HLModule &HLM, DxilObjectProperties &objectProperties) { Type *HandleTy = HLM.GetOP()->GetHandleType(); Module &M = *HLM.GetModule(); // Collect BitCast use of CBV. SmallVector<std::pair<BitCastInst *, DxilResourceProperties>, 4> BitCasts; for (auto it : objectProperties.resMap) { DxilResourceProperties RP = it.second; if (RP.getResourceKind() != DXIL::ResourceKind::CBuffer && RP.getResourceKind() != DXIL::ResourceKind::TBuffer) continue; Value *V = it.first; // Skip external globals. if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) { if (GV->getLinkage() != GlobalValue::LinkageTypes::InternalLinkage) continue; } for (auto UserIt = V->user_begin(); UserIt != V->user_end();) { User *U = *(UserIt++); if (U->user_empty()) continue; if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { BitCasts.emplace_back(std::make_pair(BCI, RP)); continue; } DXASSERT((!isa<BitCastOperator>(U) || U->user_empty()), "all BitCast should be BitCastInst"); } } for (auto it : BitCasts) { BitCastInst *BCI = it.first; DxilResourceProperties RP = it.second; IRBuilder<> B(BCI); B.AllowFolding = false; Value *ObjV = BCI->getOperand(0); Value *Handle = CreateHandleFromResPtr(ObjV, HLM, HandleTy, B); Type *ResTy = ObjV->getType()->getPointerElementType(); Handle = CreateAnnotateHandle(HLM, Handle, RP, ResTy, B); // Create cb subscript. llvm::Type *opcodeTy = B.getInt32Ty(); llvm::Type *idxTy = opcodeTy; Constant *zeroIdx = ConstantInt::get(opcodeTy, 0); Type *cbTy = BCI->getType(); llvm::FunctionType *SubscriptFuncTy = llvm::FunctionType::get(cbTy, {opcodeTy, HandleTy, idxTy}, false); Function *subscriptFunc = GetOrCreateHLFunction(M, SubscriptFuncTy, HLOpcodeGroup::HLSubscript, (unsigned)HLSubscriptOpcode::CBufferSubscript); Constant *opArg = ConstantInt::get( opcodeTy, (unsigned)HLSubscriptOpcode::CBufferSubscript); Value *args[] = {opArg, Handle, zeroIdx}; Instruction *cbSubscript = cast<Instruction>(B.CreateCall(subscriptFunc, {args})); BCI->replaceAllUsesWith(cbSubscript); BCI->eraseFromParent(); } } bool IsHLSLSamplerDescType(llvm::Type *Ty) { if (llvm::StructType *ST = dyn_cast<llvm::StructType>(Ty)) { if (!ST->hasName()) return false; StringRef name = ST->getName(); if (name == "struct..Sampler") return true; } return false; } #ifndef NDEBUG static bool ConsumePrefix(StringRef &Str, StringRef Prefix) { if (!Str.startswith(Prefix)) return false; Str = Str.substr(Prefix.size()); return true; } bool IsHLSLBufferViewType(llvm::Type *Ty) { if (llvm::StructType *ST = dyn_cast<llvm::StructType>(Ty)) { if (!ST->hasName()) return false; StringRef name = ST->getName(); if (!(ConsumePrefix(name, "class.") || ConsumePrefix(name, "struct."))) return false; if (name.startswith("ConstantBuffer<") || name.startswith("TextureBuffer<")) return true; } return false; } #endif void LowerGetResourceFromHeap( HLModule &HLM, std::vector<std::pair<Function *, unsigned>> &intrinsicMap) { llvm::Module &M = *HLM.GetModule(); llvm::Type *HandleTy = HLM.GetOP()->GetHandleType(); unsigned GetResFromHeapOp = static_cast<unsigned>(IntrinsicOp::IOP_CreateResourceFromHeap); DenseMap<Instruction *, Instruction *> ResourcePtrToHandlePtrMap; for (auto it : intrinsicMap) { unsigned opcode = it.second; if (opcode != GetResFromHeapOp) continue; Function *F = it.first; HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(F); if (group != HLOpcodeGroup::HLIntrinsic) continue; for (auto uit = F->user_begin(); uit != F->user_end();) { CallInst *CI = 
cast<CallInst>(*(uit++));
      // Arg 0 is this pointer.
      unsigned ArgIdx = 1;
      Instruction *ResPtr = cast<Instruction>(CI->getArgOperand(ArgIdx));
      Value *Index = CI->getArgOperand(ArgIdx + 1);

      IRBuilder<> Builder(CI);
      // Make a handle from GetResFromHeap.
      Value *IsSampler = Builder.getInt1(
          IsHLSLSamplerDescType(ResPtr->getType()->getPointerElementType()));
      Value *Handle = HLM.EmitHLOperationCall(
          Builder, HLOpcodeGroup::HLIntrinsic, GetResFromHeapOp, HandleTy,
          {Index, IsSampler}, M);
      // Find the handle ptr for the res ptr.
      auto it = ResourcePtrToHandlePtrMap.find(ResPtr);
      Instruction *HandlePtr = nullptr;
      if (it != ResourcePtrToHandlePtrMap.end()) {
        HandlePtr = it->second;
      } else {
        IRBuilder<> AllocaBuilder(
            ResPtr->getParent()->getParent()->getEntryBlock().begin());
        HandlePtr = AllocaBuilder.CreateAlloca(HandleTy);
        ResourcePtrToHandlePtrMap[ResPtr] = HandlePtr;
      }
      // Store the handle to the handle ptr.
      Builder.CreateStore(Handle, HandlePtr);
      CI->eraseFromParent();
    }
  }

  // Replace loads of the resource ptr with loads of the handle ptr.
  for (auto it : ResourcePtrToHandlePtrMap) {
    Instruction *resPtr = it.first;
    Instruction *handlePtr = it.second;
    for (auto uit = resPtr->user_begin(); uit != resPtr->user_end();) {
      User *U = *(uit++);
      BitCastInst *BCI = cast<BitCastInst>(U);
      DXASSERT(
          dxilutil::IsHLSLResourceType(
              BCI->getType()->getPointerElementType()) ||
              IsHLSLBufferViewType(BCI->getType()->getPointerElementType()),
          "illegal cast of resource ptr");
      for (auto cuit = BCI->user_begin(); cuit != BCI->user_end();) {
        LoadInst *LI = cast<LoadInst>(*(cuit++));
        IRBuilder<> Builder(LI);
        Value *Handle = Builder.CreateLoad(handlePtr);
        Value *Res = HLM.EmitHLOperationCall(
            Builder, HLOpcodeGroup::HLCast,
            (unsigned)HLCastOpcode::HandleToResCast, LI->getType(), {Handle},
            M);
        LI->replaceAllUsesWith(Res);
        LI->eraseFromParent();
      }
      BCI->eraseFromParent();
    }
    resPtr->eraseFromParent();
  }
}

void ReplaceBoolVectorSubscript(CallInst *CI) {
  Value *Ptr = CI->getArgOperand(0);
  Value *Idx = CI->getArgOperand(1);
  Value *IdxList[] = {ConstantInt::get(Idx->getType(), 0), Idx};

  for (auto It = CI->user_begin(), E = CI->user_end(); It != E;) {
    Instruction *user = cast<Instruction>(*(It++));

    IRBuilder<> Builder(user);
    Value *GEP = Builder.CreateInBoundsGEP(Ptr, IdxList);

    if (LoadInst *LI = dyn_cast<LoadInst>(user)) {
      Value *NewLd = Builder.CreateLoad(GEP);
      Value *cast = Builder.CreateZExt(NewLd, LI->getType());
      LI->replaceAllUsesWith(cast);
      LI->eraseFromParent();
    } else {
      // Must be a store inst here.
      StoreInst *SI = cast<StoreInst>(user);
      Value *V = SI->getValueOperand();
      Value *cast =
          Builder.CreateICmpNE(V, llvm::ConstantInt::get(V->getType(), 0));
      Builder.CreateStore(cast, GEP);
      SI->eraseFromParent();
    }
  }
  CI->eraseFromParent();
}

void ReplaceBoolVectorSubscript(Function *F) {
  for (auto It = F->user_begin(), E = F->user_end(); It != E;) {
    User *user = *(It++);
    CallInst *CI = cast<CallInst>(user);
    ReplaceBoolVectorSubscript(CI);
  }
}

// Returns a valid field annotation (if present) for the matrix type of a
// resource templated on a matrix type. Example: for
//   AppendStructuredBuffer<float4x4> abuf;
// return the field annotation of the matrix type in the above decl.
static DxilFieldAnnotation * GetTemplatedResMatAnnotation(Function *F, unsigned argOpIdx, unsigned matAnnotationIdx) { for (User *U : F->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { if (argOpIdx >= CI->getNumArgOperands()) continue; Value *resArg = CI->getArgOperand(argOpIdx); Type *resArgTy = resArg->getType(); if (resArgTy->isPointerTy()) resArgTy = cast<PointerType>(resArgTy)->getPointerElementType(); if (isa<StructType>(resArgTy)) { DxilTypeSystem &TS = F->getParent()->GetHLModule().GetTypeSystem(); auto *SA = TS.GetStructAnnotation(cast<StructType>(resArgTy)); auto *FA = &(SA->GetFieldAnnotation(matAnnotationIdx)); if (FA && FA->HasMatrixAnnotation()) { return FA; } } } } return nullptr; } // Add function body for intrinsic if possible. Function *CreateOpFunction(llvm::Module &M, Function *F, llvm::FunctionType *funcTy, HLOpcodeGroup group, unsigned opcode) { Function *opFunc = nullptr; AttributeSet attribs = F->getAttributes().getFnAttributes(); llvm::Type *opcodeTy = llvm::Type::getInt32Ty(M.getContext()); if (group == HLOpcodeGroup::HLIntrinsic) { IntrinsicOp intriOp = static_cast<IntrinsicOp>(opcode); switch (intriOp) { case IntrinsicOp::MOP_Append: case IntrinsicOp::MOP_Consume: { bool bAppend = intriOp == IntrinsicOp::MOP_Append; llvm::Type *handleTy = funcTy->getParamType(HLOperandIndex::kHandleOpIdx); // Don't generate body for OutputStream::Append. if (bAppend && HLModule::IsStreamOutputPtrType(handleTy)) { opFunc = GetOrCreateHLFunction(M, funcTy, group, opcode, attribs); break; } opFunc = GetOrCreateHLFunctionWithBody(M, funcTy, group, opcode, bAppend ? "append" : "consume"); llvm::Type *counterTy = llvm::Type::getInt32Ty(M.getContext()); llvm::FunctionType *IncCounterFuncTy = llvm::FunctionType::get(counterTy, {opcodeTy, handleTy}, false); unsigned counterOpcode = bAppend ? (unsigned)IntrinsicOp::MOP_IncrementCounter : (unsigned)IntrinsicOp::MOP_DecrementCounter; Function *incCounterFunc = GetOrCreateHLFunction( M, IncCounterFuncTy, group, counterOpcode, attribs); llvm::Type *idxTy = counterTy; llvm::Type *valTy = bAppend ? funcTy->getParamType(HLOperandIndex::kAppendValOpIndex) : funcTy->getReturnType(); // Return type for subscript should be pointer type, hence in memory // representation llvm::Type *subscriptTy = valTy; bool isBoolScalarOrVector = false; if (!subscriptTy->isPointerTy()) { if (subscriptTy->getScalarType()->isIntegerTy(1)) { isBoolScalarOrVector = true; llvm::Type *memReprType = llvm::IntegerType::get(subscriptTy->getContext(), 32); subscriptTy = subscriptTy->isVectorTy() ? llvm::VectorType::get(memReprType, subscriptTy->getVectorNumElements()) : memReprType; } subscriptTy = llvm::PointerType::get(subscriptTy, 0); } llvm::FunctionType *SubscriptFuncTy = llvm::FunctionType::get( subscriptTy, {opcodeTy, handleTy, idxTy}, false); Function *subscriptFunc = GetOrCreateHLFunction( M, SubscriptFuncTy, HLOpcodeGroup::HLSubscript, (unsigned)HLSubscriptOpcode::DefaultSubscript, attribs); BasicBlock *BB = BasicBlock::Create(opFunc->getContext(), "Entry", opFunc); IRBuilder<> Builder(BB); auto argIter = opFunc->args().begin(); // Skip the opcode arg. 
argIter++; Argument *thisArg = argIter++; // int counter = IncrementCounter/DecrementCounter(Buf); Value *incCounterOpArg = ConstantInt::get(idxTy, counterOpcode); Value *counter = Builder.CreateCall(incCounterFunc, {incCounterOpArg, thisArg}); // Buf[counter]; Value *subscriptOpArg = ConstantInt::get( idxTy, (unsigned)HLSubscriptOpcode::DefaultSubscript); Value *subscript = Builder.CreateCall(subscriptFunc, {subscriptOpArg, thisArg, counter}); constexpr unsigned kArgIdx = 0; constexpr unsigned kMatAnnotationIdx = 0; DxilFieldAnnotation *MatAnnotation = HLMatrixType::isa(valTy) ? GetTemplatedResMatAnnotation(F, kArgIdx, kMatAnnotationIdx) : nullptr; if (bAppend) { Argument *valArg = argIter; // Buf[counter] = val; if (valTy->isPointerTy()) { unsigned size = M.getDataLayout().getTypeAllocSize( subscript->getType()->getPointerElementType()); Builder.CreateMemCpy(subscript, valArg, size, 1); } else if (MatAnnotation) { // If the to-be-stored value is a matrix then we need to generate // an HL matrix store which is then handled appropriately in // HLMatrixLowerPass. bool isRowMajor = MatAnnotation->GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor; Value *matStoreVal = valArg; // The in-reg matrix orientation is always row-major. // If the in-memory matrix orientation is col-major, then we // need to change the orientation to col-major before storing // to memory if (!isRowMajor) { unsigned castOpCode = (unsigned)HLCastOpcode::RowMatrixToColMatrix; // Construct signature of the function that is used for converting // orientation of a matrix from row-major to col-major. FunctionType *MatCastFnType = FunctionType::get( matStoreVal->getType(), {Builder.getInt32Ty(), matStoreVal->getType()}, /* isVarArg */ false); // Create the conversion function. Function *MatCastFn = GetOrCreateHLFunction( M, MatCastFnType, HLOpcodeGroup::HLCast, castOpCode); Value *MatCastOpCode = ConstantInt::get(Builder.getInt32Ty(), castOpCode); // Insert call to the conversion function. matStoreVal = Builder.CreateCall(MatCastFn, {MatCastOpCode, matStoreVal}); } unsigned storeOpCode = isRowMajor ? (unsigned)HLMatLoadStoreOpcode::RowMatStore : (unsigned)HLMatLoadStoreOpcode::ColMatStore; // Construct signature of the function that is used for storing // the matrix value to the memory. FunctionType *MatStFnType = FunctionType::get( Builder.getVoidTy(), {Builder.getInt32Ty(), subscriptTy, matStoreVal->getType()}, /* isVarArg */ false); // Create the matrix store function. Function *MatStFn = GetOrCreateHLFunction( M, MatStFnType, HLOpcodeGroup::HLMatLoadStore, storeOpCode); Value *MatStOpCode = ConstantInt::get(Builder.getInt32Ty(), storeOpCode); // Insert call to the matrix store function. Builder.CreateCall(MatStFn, {MatStOpCode, subscript, matStoreVal}); } else { Value *storedVal = valArg; // Convert to memory representation if (isBoolScalarOrVector) storedVal = Builder.CreateZExt( storedVal, subscriptTy->getPointerElementType(), "frombool"); Builder.CreateStore(storedVal, subscript); } Builder.CreateRetVoid(); } else { // return Buf[counter]; if (valTy->isPointerTy()) Builder.CreateRet(subscript); else if (MatAnnotation) { // If the to-be-loaded value is a matrix then we need to generate // an HL matrix load which is then handled appropriately in // HLMatrixLowerPass. bool isRowMajor = MatAnnotation->GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor; unsigned loadOpCode = isRowMajor ? 
(unsigned)HLMatLoadStoreOpcode::RowMatLoad : (unsigned)HLMatLoadStoreOpcode::ColMatLoad; // Construct signature of the function that is used for loading // the matrix value from the memory. FunctionType *MatLdFnType = FunctionType::get(valTy, {Builder.getInt32Ty(), subscriptTy}, /* isVarArg */ false); // Create the matrix load function. Function *MatLdFn = GetOrCreateHLFunction( M, MatLdFnType, HLOpcodeGroup::HLMatLoadStore, loadOpCode); Value *MatStOpCode = ConstantInt::get(Builder.getInt32Ty(), loadOpCode); // Insert call to the matrix load function. Value *matLdVal = Builder.CreateCall(MatLdFn, {MatStOpCode, subscript}); // The in-reg matrix orientation is always row-major. // If the in-memory matrix orientation is col-major, then we // need to change the orientation to row-major after loading // from memory. if (!isRowMajor) { unsigned castOpCode = (unsigned)HLCastOpcode::ColMatrixToRowMatrix; // Construct signature of the function that is used for converting // orientation of a matrix from col-major to row-major. FunctionType *MatCastFnType = FunctionType::get(matLdVal->getType(), {Builder.getInt32Ty(), matLdVal->getType()}, /* isVarArg */ false); // Create the conversion function. Function *MatCastFn = GetOrCreateHLFunction( M, MatCastFnType, HLOpcodeGroup::HLCast, castOpCode); Value *MatCastOpCode = ConstantInt::get(Builder.getInt32Ty(), castOpCode); // Insert call to the conversion function. matLdVal = Builder.CreateCall(MatCastFn, {MatCastOpCode, matLdVal}); } Builder.CreateRet(matLdVal); } else { Value *retVal = Builder.CreateLoad(subscript); // Convert to register representation if (isBoolScalarOrVector) retVal = Builder.CreateICmpNE( retVal, Constant::getNullValue(retVal->getType()), "tobool"); Builder.CreateRet(retVal); } } } break; case IntrinsicOp::IOP_sincos: { opFunc = GetOrCreateHLFunctionWithBody(M, funcTy, group, opcode, "sincos"); llvm::Type *valTy = funcTy->getParamType(HLOperandIndex::kTrinaryOpSrc0Idx); llvm::FunctionType *sinFuncTy = llvm::FunctionType::get(valTy, {opcodeTy, valTy}, false); unsigned sinOp = static_cast<unsigned>(IntrinsicOp::IOP_sin); unsigned cosOp = static_cast<unsigned>(IntrinsicOp::IOP_cos); Function *sinFunc = GetOrCreateHLFunction(M, sinFuncTy, group, sinOp, attribs); Function *cosFunc = GetOrCreateHLFunction(M, sinFuncTy, group, cosOp, attribs); BasicBlock *BB = BasicBlock::Create(opFunc->getContext(), "Entry", opFunc); IRBuilder<> Builder(BB); auto argIter = opFunc->args().begin(); // Skip the opcode arg. argIter++; Argument *valArg = argIter++; Argument *sinPtrArg = argIter++; Argument *cosPtrArg = argIter++; Value *sinOpArg = ConstantInt::get(opcodeTy, sinOp); Value *sinVal = Builder.CreateCall(sinFunc, {sinOpArg, valArg}); Builder.CreateStore(sinVal, sinPtrArg); Value *cosOpArg = ConstantInt::get(opcodeTy, cosOp); Value *cosVal = Builder.CreateCall(cosFunc, {cosOpArg, valArg}); Builder.CreateStore(cosVal, cosPtrArg); // Ret. 
Builder.CreateRetVoid(); } break; default: opFunc = GetOrCreateHLFunction(M, funcTy, group, opcode, attribs); break; } } else if (group == HLOpcodeGroup::HLExtIntrinsic) { llvm::StringRef fnName = F->getName(); llvm::StringRef groupName = GetHLOpcodeGroupNameByAttr(F); opFunc = GetOrCreateHLFunction(M, funcTy, group, &groupName, &fnName, opcode, attribs); } else { opFunc = GetOrCreateHLFunction(M, funcTy, group, opcode, attribs); } return opFunc; } DxilResourceProperties GetResourcePropsFromIntrinsicObjectArg(Value *arg, HLModule &HLM, DxilTypeSystem &typeSys, DxilObjectProperties &objectProperties) { DxilResourceProperties RP = objectProperties.GetResource(arg); if (RP.isValid()) return RP; // Must be GEP. GEPOperator *GEP = cast<GEPOperator>(arg); // Find RP from GEP. Value *Ptr = GEP->getPointerOperand(); // When Ptr is array of resource, check if it is another GEP. while ( dxilutil::IsHLSLResourceType(dxilutil::GetArrayEltTy(Ptr->getType()))) { if (GEPOperator *ParentGEP = dyn_cast<GEPOperator>(Ptr)) { GEP = ParentGEP; Ptr = GEP->getPointerOperand(); } else { break; } } // When ptr is array of resource, ptr could be in // objectProperties. RP = objectProperties.GetResource(Ptr); if (RP.isValid()) return RP; DxilStructAnnotation *Anno = nullptr; for (auto gepIt = gep_type_begin(GEP), E = gep_type_end(GEP); gepIt != E; ++gepIt) { if (StructType *ST = dyn_cast<StructType>(*gepIt)) { Anno = typeSys.GetStructAnnotation(ST); DXASSERT(Anno, "missing type annotation"); unsigned Index = cast<ConstantInt>(gepIt.getOperand())->getLimitedValue(); DxilFieldAnnotation &fieldAnno = Anno->GetFieldAnnotation(Index); if (fieldAnno.HasResourceProperties()) { RP = fieldAnno.GetResourceProperties(); break; } } } DXASSERT(RP.isValid(), "invalid resource properties"); return RP; } void AddOpcodeParamForIntrinsic(HLModule &HLM, Function *F, unsigned opcode, DxilObjectProperties &objectProperties) { llvm::Module &M = *HLM.GetModule(); llvm::FunctionType *oldFuncTy = F->getFunctionType(); llvm::Type *HandleTy = HLM.GetOP()->GetHandleType(); llvm::Type *NodeRecordHandleTy = HLM.GetOP()->GetNodeRecordHandleType(); llvm::Type *NodeOutputHandleTy = HLM.GetOP()->GetNodeHandleType(); HLOpcodeGroup group = hlsl::GetHLOpcodeGroup(F); SmallVector<llvm::Type *, 4> paramTyList; // Add the opcode param. llvm::Type *opcodeTy = llvm::Type::getInt32Ty(M.getContext()); paramTyList.emplace_back(opcodeTy); // Create a vector of the types of the original intrinsic's // parameters. When an intrinsic returns a struct, the return value is // passed back through an sret-style pointer argument; the flags below // detect that argument so it can be stripped from the new signature. bool bSRetHandle = false; bool bNodeRecordRet = false; bool bNodeOutputRet = false; bool bNodeOutputMethod = false; bool bNodeOutputArrayMethod = false; unsigned bRetArgIndex = 0; for (unsigned i = 0; i < oldFuncTy->getNumParams(); i++) { llvm::Type *Ty = oldFuncTy->getParamType(i); if (i == 0 && Ty->isPointerTy() && dxilutil::IsHLSLNodeOutputType(Ty->getPointerElementType())) bNodeOutputMethod = true; if (i == 0 && Ty->isPointerTy() && dxilutil::IsHLSLNodeOutputArrayType(Ty->getPointerElementType())) bNodeOutputArrayMethod = true; if (i == 1 && bNodeOutputMethod && Ty->isPointerTy() && dxilutil::IsHLSLNodeOutputRecordType(Ty->getPointerElementType())) { // Skip for return type from a method.
// NodeOutput<recType>::GetGroupNodeOutputRecords // NodeOutput<recType>::GetThreadNodeOutputRecords bSRetHandle = true; bNodeRecordRet = true; bRetArgIndex = 1; continue; } if (i == 1 && bNodeOutputArrayMethod && Ty->isPointerTy() && dxilutil::IsHLSLNodeOutputType(Ty->getPointerElementType())) { // Skip for return type from a method. bSRetHandle = true; bNodeOutputRet = true; bRetArgIndex = 1; continue; } if (Ty->isPointerTy()) { llvm::Type *PtrEltTy = Ty->getPointerElementType(); if (dxilutil::IsHLSLResourceType(PtrEltTy) || dxilutil::IsHLSLNodeRecordType(PtrEltTy) || dxilutil::IsHLSLNodeOutputType(PtrEltTy) || dxilutil::IsHLSLNodeOutputArrayType(PtrEltTy)) { // Skip for return type. if (i == 0 && F->arg_begin()->hasStructRetAttr()) { bSRetHandle = true; if (dxilutil::IsHLSLNodeRecordType(PtrEltTy)) { bNodeRecordRet = true; } continue; } // Use handle type for resource, Node(Input/Output) Record type. // This will make sure temp object variable only used by createHandle. if (dxilutil::IsHLSLResourceType(PtrEltTy)) Ty = HandleTy; else if (dxilutil::IsHLSLNodeRecordType(PtrEltTy)) Ty = NodeRecordHandleTy; else if (dxilutil::IsHLSLNodeOutputType(PtrEltTy)) Ty = NodeOutputHandleTy; else if (dxilutil::IsHLSLNodeOutputArrayType(PtrEltTy)) Ty = NodeOutputHandleTy; } } paramTyList.emplace_back(Ty); } if (group == HLOpcodeGroup::HLSubscript && opcode == static_cast<unsigned>(HLSubscriptOpcode::VectorSubscript)) { llvm::FunctionType *FT = F->getFunctionType(); llvm::Type *VecArgTy = FT->getParamType(0); llvm::VectorType *VType = cast<llvm::VectorType>(VecArgTy->getPointerElementType()); llvm::Type *Ty = VType->getElementType(); DXASSERT(Ty->isIntegerTy(), "Only bool could use VectorSubscript"); llvm::IntegerType *ITy = cast<IntegerType>(Ty); DXASSERT_LOCALVAR(ITy, ITy->getBitWidth() == 1, "Only bool could use VectorSubscript"); // The return type is i8*. // Replace all uses with i1*. ReplaceBoolVectorSubscript(F); return; } bool isDoubleSubscriptFunc = group == HLOpcodeGroup::HLSubscript && opcode == static_cast<unsigned>(HLSubscriptOpcode::DoubleSubscript); llvm::Type *RetTy = oldFuncTy->getReturnType(); if (bSRetHandle) { DXASSERT(RetTy->isVoidTy(), "else invalid return type"); RetTy = HandleTy; if (bNodeRecordRet) RetTy = NodeRecordHandleTy; else if (bNodeOutputRet) RetTy = NodeOutputHandleTy; } if (isDoubleSubscriptFunc) { CallInst *doubleSub = cast<CallInst>(*F->user_begin()); // Change currentIdx type into coord type. auto U = doubleSub->user_begin(); Value *user = *U; CallInst *secSub = cast<CallInst>(user); unsigned coordIdx = HLOperandIndex::kSubscriptIndexOpIdx; // The opcode operand is not added yet, so the index needs -1. if (GetHLOpcodeGroupByName(secSub->getCalledFunction()) == HLOpcodeGroup::NotHL) coordIdx -= 1; Value *coord = secSub->getArgOperand(coordIdx); llvm::Type *coordTy = coord->getType(); paramTyList[HLOperandIndex::kSubscriptIndexOpIdx] = coordTy; // Add the sampleIdx or mipLevel parameter to the end. paramTyList.emplace_back(opcodeTy); // Change return type to be resource ret type. // The opcode operand is not added yet, so the index needs -1. Value *objPtr = doubleSub->getArgOperand(HLOperandIndex::kSubscriptObjectOpIdx - 1); // Must be a GEP. GEPOperator *objGEP = cast<GEPOperator>(objPtr); gep_type_iterator GEPIt = gep_type_begin(objGEP), E = gep_type_end(objGEP); llvm::Type *resTy = nullptr; while (GEPIt != E) { if (dxilutil::IsHLSLResourceType(*GEPIt)) { resTy = *GEPIt; break; } GEPIt++; } DXASSERT(resTy, "must find the resource type"); // Change object type to handle type.
paramTyList[HLOperandIndex::kSubscriptObjectOpIdx] = HandleTy; // Change RetTy into pointer of resource return type. RetTy = cast<StructType>(resTy)->getElementType(0)->getPointerTo(); } llvm::FunctionType *funcTy = llvm::FunctionType::get(RetTy, paramTyList, oldFuncTy->isVarArg()); Function *opFunc = CreateOpFunction(M, F, funcTy, group, opcode); StringRef lower = hlsl::GetHLLowerStrategy(F); if (!lower.empty()) hlsl::SetHLLowerStrategy(opFunc, lower); DxilTypeSystem &typeSys = HLM.GetTypeSystem(); for (auto user = F->user_begin(); user != F->user_end();) { // User must be a call. CallInst *oldCI = cast<CallInst>(*(user++)); SmallVector<Value *, 4> opcodeParamList; Value *opcodeConst = Constant::getIntegerValue(opcodeTy, APInt(32, opcode)); opcodeParamList.emplace_back(opcodeConst); Value *retHandleArg = nullptr; Value *nodeRecordArg = nullptr; if (!bSRetHandle) { opcodeParamList.append(oldCI->arg_operands().begin(), oldCI->arg_operands().end()); } else { auto it = oldCI->arg_operands().begin(); unsigned argIndex = 0; auto start = it; while (argIndex < bRetArgIndex) { it++; argIndex++; } opcodeParamList.append(start, it); retHandleArg = *(it++); opcodeParamList.append(it, oldCI->arg_operands().end()); } IRBuilder<> Builder(oldCI); if (isDoubleSubscriptFunc) { // Change obj to the resource pointer. Value *objVal = opcodeParamList[HLOperandIndex::kSubscriptObjectOpIdx]; GEPOperator *objGEP = cast<GEPOperator>(objVal); SmallVector<Value *, 8> IndexList; IndexList.append(objGEP->idx_begin(), objGEP->idx_end()); Value *lastIndex = IndexList.back(); ConstantInt *constIndex = cast<ConstantInt>(lastIndex); DXASSERT_LOCALVAR(constIndex, constIndex->getLimitedValue() == 1, "last index must be 1"); // Remove the last index. IndexList.pop_back(); objVal = objGEP->getPointerOperand(); DxilResourceProperties RP = GetResourcePropsFromIntrinsicObjectArg( objVal, HLM, typeSys, objectProperties); if (IndexList.size() > 1) objVal = Builder.CreateInBoundsGEP(objVal, IndexList); Value *Handle = CreateHandleFromResPtr(objVal, HLM, HandleTy, Builder); Type *ResTy = objVal->getType()->getPointerElementType(); Handle = CreateAnnotateHandle(HLM, Handle, RP, ResTy, Builder); // Replace obj with the annotated handle. opcodeParamList[HLOperandIndex::kSubscriptObjectOpIdx] = Handle; // Set idx and mipIdx. Value *mipIdx = opcodeParamList[HLOperandIndex::kSubscriptIndexOpIdx]; auto U = oldCI->user_begin(); Value *user = *U; CallInst *secSub = cast<CallInst>(user); unsigned idxOpIndex = HLOperandIndex::kSubscriptIndexOpIdx; if (GetHLOpcodeGroupByName(secSub->getCalledFunction()) == HLOpcodeGroup::NotHL) idxOpIndex--; Value *idx = secSub->getArgOperand(idxOpIndex); DXASSERT(secSub->hasOneUse(), "subscript should only have one use"); // Add the sampleIdx or mipLevel parameter to the end. opcodeParamList[HLOperandIndex::kSubscriptIndexOpIdx] = idx; opcodeParamList.emplace_back(mipIdx); // Insert new call before secSub to make sure idx is ready to use. Builder.SetInsertPoint(secSub); } unsigned recordSizeWAR = 0; for (unsigned i = 1; i < opcodeParamList.size(); i++) { Value *arg = opcodeParamList[i]; llvm::Type *Ty = arg->getType(); if (Ty->isPointerTy()) { Ty = Ty->getPointerElementType(); if (dxilutil::IsHLSLResourceType(Ty)) { DxilResourceProperties RP = GetResourcePropsFromIntrinsicObjectArg( arg, HLM, typeSys, objectProperties); // Use object type directly, not by pointer. // This will make sure temp object variable only used by ld/st.
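// Note: when the argument arrives as a constant GEP expression (a
// GEPOperator that is not an instruction), an equivalent GetElementPtrInst
// is materialized below so the handle-creation calls have a concrete
// instruction to consume.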
if (GEPOperator *argGEP = dyn_cast<GEPOperator>(arg)) { std::vector<Value *> idxList(argGEP->idx_begin(), argGEP->idx_end()); // Create instruction to avoid GEPOperator. GetElementPtrInst *GEP = GetElementPtrInst::CreateInBounds( argGEP->getPointerOperand(), idxList); Builder.Insert(GEP); arg = GEP; } llvm::Type *ResTy = arg->getType()->getPointerElementType(); Value *Handle = CreateHandleFromResPtr(arg, HLM, HandleTy, Builder); Handle = CreateAnnotateHandle(HLM, Handle, RP, ResTy, Builder); opcodeParamList[i] = Handle; } if (dxilutil::IsHLSLNodeRecordType(Ty)) { nodeRecordArg = arg; Value *ldObj = Builder.CreateLoad(arg); Value *Handle = EmitHLOperationCall( HLM, Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::NodeRecordToHandleCast, NodeRecordHandleTy, {ldObj}, *HLM.GetModule()); opcodeParamList[i] = Handle; } if (dxilutil::IsHLSLNodeOutputArrayType(Ty)) { Value *ldObj = Builder.CreateLoad(arg); Value *Handle = EmitHLOperationCall( HLM, Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::NodeOutputToHandleCast, NodeOutputHandleTy, {ldObj}, *HLM.GetModule()); opcodeParamList[i] = Handle; // WAR for record size computation if (!dxilutil::IsHLSLEmptyNodeOutputArrayType(Ty)) { DxilStructAnnotation *pAnno = HLM.GetTypeSystem().GetStructAnnotation( dyn_cast<llvm::StructType>(Ty)); assert(pAnno != nullptr && pAnno->GetNumTemplateArgs() == 1 && "otherwise the node template is not declared properly"); llvm::Type *pRecType = const_cast<llvm::Type *>( pAnno->GetTemplateArgAnnotation(0).GetType()); recordSizeWAR = M.getDataLayout().getTypeAllocSize(pRecType); } } if (dxilutil::IsHLSLNodeOutputType(Ty)) { Value *ldObj = Builder.CreateLoad(arg); Value *Handle = EmitHLOperationCall( HLM, Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::NodeOutputToHandleCast, NodeOutputHandleTy, {ldObj}, *HLM.GetModule()); opcodeParamList[i] = Handle; } } } Value *CI = Builder.CreateCall(opFunc, opcodeParamList); if (group == HLOpcodeGroup::HLIntrinsic && opcode == static_cast<unsigned>(IntrinsicOp::MOP_OutputComplete)) { DXASSERT_NOMSG(nodeRecordArg->getType()->isPointerTy()); Type *T = nodeRecordArg->getType()->getPointerElementType(); Builder.CreateStore(Constant::getNullValue(T), nodeRecordArg); } if (retHandleArg) { Type *ResTy = retHandleArg->getType()->getPointerElementType(); if (dxilutil::IsHLSLNodeOutputRecordType(ResTy)) { CallInst *GetOpRecordCall = cast<CallInst>(CI); DXASSERT_NOMSG(group == HLOpcodeGroup::HLIntrinsic); IntrinsicOp opcode = static_cast<IntrinsicOp>(hlsl::GetHLOpcode(GetOpRecordCall)); DXIL::NodeIOKind kind; if (opcode == IntrinsicOp::MOP_GetGroupNodeOutputRecords) kind = DXIL::NodeIOKind::GroupNodeOutputRecords; else kind = DXIL::NodeIOKind::ThreadNodeOutputRecords; // Get record size from the node output record template argument DxilStructAnnotation *pAnno = HLM.GetTypeSystem().GetStructAnnotation( dyn_cast<llvm::StructType>(ResTy)); assert(pAnno != nullptr && pAnno->GetNumTemplateArgs() == 1 && "otherwise the node record template is not declared properly"); llvm::Type *pRecType = const_cast<llvm::Type *>( pAnno->GetTemplateArgAnnotation(0).GetType()); unsigned recordSize = M.getDataLayout().getTypeAllocSize(pRecType); NodeRecordInfo Info(kind, recordSize); CI = CreateAnnotateNodeRecordHandle(HLM, CI, Builder, Info); Value *Res = HLM.EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToNodeRecordCast, ResTy, {CI}, M); Builder.CreateStore(Res, retHandleArg); } else if (dxilutil::IsHLSLNodeOutputType(ResTy)) { DXASSERT_NOMSG(group == 
HLOpcodeGroup::HLIndexNodeHandle); DXIL::NodeIOKind kind = DXIL::NodeIOKind::NodeOutputArray; unsigned recordSize; if (dxilutil::IsHLSLEmptyNodeOutputType(ResTy)) { kind = DXIL::NodeIOKind::EmptyOutputArray; recordSize = 0; } else { // TODO FIX AddStructAnnotation /*DxilStructAnnotation* pAnno = HLM.GetTypeSystem().GetStructAnnotation(dyn_cast<llvm::StructType>(ResTy)); assert(pAnno != nullptr && pAnno->GetNumTemplateArgs() == 1 && "otherwise the node template is not declared properly"); llvm::Type* pRecType = (llvm::Type*)pAnno->GetTemplateArgAnnotation(0).GetType(); recordSize = M.getDataLayout().getTypeAllocSize(pRecType);*/ recordSize = recordSizeWAR; } NodeInfo Info(kind, recordSize); CI = CreateAnnotateNodeHandle(HLM, CI, Builder, Info); Value *Res = HLM.EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToNodeOutputCast, ResTy, {CI}, M); Builder.CreateStore(Res, retHandleArg); } else { Value *Res = HLM.EmitHLOperationCall( Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToResCast, ResTy, {CI}, M); Builder.CreateStore(Res, retHandleArg); } oldCI->eraseFromParent(); continue; } if (!isDoubleSubscriptFunc) { // Replace with the new call and delete the old call. oldCI->replaceAllUsesWith(CI); oldCI->eraseFromParent(); } else { // For double subscript. // Replace the single user's use with the new CI. auto U = oldCI->user_begin(); Value *user = *U; CallInst *secSub = cast<CallInst>(user); secSub->replaceAllUsesWith(CI); secSub->eraseFromParent(); oldCI->eraseFromParent(); } } // Delete the function. F->eraseFromParent(); } void AddOpcodeParamForIntrinsics( HLModule &HLM, std::vector<std::pair<Function *, unsigned>> &intrinsicMap, DxilObjectProperties &objectProperties) { for (auto mapIter : intrinsicMap) { Function *F = mapIter.first; if (F->user_empty()) { // Delete the function. F->eraseFromParent(); continue; } unsigned opcode = mapIter.second; AddOpcodeParamForIntrinsic(HLM, F, opcode, objectProperties); } } // Returns whether the first argument of CI is NaN or not. If the argument is // a vector, returns a vector of boolean values. Constant *IsNaN(CallInst *CI) { Value *V = CI->getArgOperand(0); llvm::Type *Ty = V->getType(); if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Ty)) { Constant *CV = cast<Constant>(V); SmallVector<Constant *, 4> ConstVec; llvm::Type *CIElemTy = cast<llvm::VectorType>(CI->getType())->getElementType(); for (unsigned i = 0; i < VT->getNumElements(); i++) { ConstantFP *fpV = cast<ConstantFP>(CV->getAggregateElement(i)); bool isNan = fpV->getValueAPF().isNaN(); ConstVec.push_back(ConstantInt::get(CIElemTy, isNan ? 1 : 0)); } return ConstantVector::get(ConstVec); } else { ConstantFP *fV = cast<ConstantFP>(V); bool isNan = fV->getValueAPF().isNaN(); return ConstantInt::get(CI->getType(), isNan ? 1 : 0); } } // Returns a constant for atan2() intrinsic function for scalars. Constant *Atan2ForScalar(llvm::Type *ResultTy, ConstantFP *fpV0, ConstantFP *fpV1) { if (ResultTy->isDoubleTy()) { double dV0 = fpV0->getValueAPF().convertToDouble(); double dV1 = fpV1->getValueAPF().convertToDouble(); return ConstantFP::get(ResultTy, atan2(dV0, dV1)); } else { DXASSERT_NOMSG(ResultTy->isFloatTy()); float fV0 = fpV0->getValueAPF().convertToFloat(); float fV1 = fpV1->getValueAPF().convertToFloat(); return ConstantFP::get(ResultTy, atan2f(fV0, fV1)); } } // Returns Value for atan2() intrinsic function. If the argument of CI has // a vector type, it returns the vector value of atan2().
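// Worked example (constant folding only; no atan2 survives to runtime):
//   atan2(float2(1.0, 0.0), float2(1.0, 1.0))
// folds element-wise to
//   float2(atan2f(1.0f, 1.0f), atan2f(0.0f, 1.0f)) == float2(0.78539816f, 0.0f)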
Value *Atan2(CallInst *CI) { Value *V0 = CI->getArgOperand(0); Value *V1 = CI->getArgOperand(1); if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(V0->getType())) { Constant *CV0 = cast<Constant>(V0); Constant *CV1 = cast<Constant>(V1); SmallVector<Constant *, 4> ConstVec; llvm::Type *CIElemTy = cast<llvm::VectorType>(CI->getType())->getElementType(); for (unsigned i = 0; i < VT->getNumElements(); i++) { ConstantFP *fpV0 = cast<ConstantFP>(CV0->getAggregateElement(i)); ConstantFP *fpV1 = cast<ConstantFP>(CV1->getAggregateElement(i)); ConstVec.push_back(Atan2ForScalar(CIElemTy, fpV0, fpV1)); } return ConstantVector::get(ConstVec); } else { ConstantFP *fpV0 = cast<ConstantFP>(V0); ConstantFP *fpV1 = cast<ConstantFP>(V1); return Atan2ForScalar(CI->getType(), fpV0, fpV1); } } } // namespace namespace CGHLSLMSHelper { void CopyAndAnnotateResourceArgument(llvm::Value *Src, llvm::Value *Dest, DxilResourceProperties &RP, hlsl::HLModule &HLM, clang::CodeGen::CodeGenFunction &CGF) { Type *ResTy = Src->getType()->getPointerElementType(); Type *HandleTy = HLM.GetOP()->GetHandleType(); // Load resource -> annotateHandle -> turn back to resource. Value *Handle = CreateHandleFromResPtr(Src, HLM, HandleTy, CGF.Builder); Handle = CreateAnnotateHandle(HLM, Handle, RP, ResTy, CGF.Builder); Value *Res = CastHandleToRes(HLM, Handle, ResTy, CGF.Builder); CGF.Builder.CreateStore(Res, Dest); } } // namespace CGHLSLMSHelper namespace { // Returns true if a global value is being updated. bool GlobalHasStoreUserRec(Value *V, std::set<Value *> &visited) { bool isWriteEnabled = false; if (V && visited.find(V) == visited.end()) { visited.insert(V); for (User *U : V->users()) { if (isa<StoreInst>(U)) { return true; } else if (CallInst *CI = dyn_cast<CallInst>(U)) { Function *F = CI->getCalledFunction(); if (!F->isIntrinsic()) { HLOpcodeGroup hlGroup = GetHLOpcodeGroup(F); switch (hlGroup) { case HLOpcodeGroup::NotHL: return true; case HLOpcodeGroup::HLMatLoadStore: { HLMatLoadStoreOpcode opCode = static_cast<HLMatLoadStoreOpcode>(hlsl::GetHLOpcode(CI)); if (opCode == HLMatLoadStoreOpcode::ColMatStore || opCode == HLMatLoadStoreOpcode::RowMatStore) return true; break; } case HLOpcodeGroup::HLCast: case HLOpcodeGroup::HLSubscript: if (GlobalHasStoreUserRec(U, visited)) return true; break; default: break; } } } else if (isa<GEPOperator>(U) || isa<PHINode>(U) || isa<SelectInst>(U)) { if (GlobalHasStoreUserRec(U, visited)) return true; } } } return isWriteEnabled; } // Returns true if any of the direct users of a global is a store inst; // otherwise recurses through the remaining users and checks whether any GEP // exists which in turn has a store inst as a user.
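// Illustrative HLSL example (hypothetical names, assuming default $Globals
// placement):
//   float g_readOnly; // only read -> no store user, stays marked constant
//   float g_scratch;  // assigned in the shader -> store user found, so a
//                     // writable ".static.copy" clone is created below
// CreateWriteEnabledStaticGlobals uses this predicate to decide which
// globals need that static copy.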
bool GlobalHasStoreUser(GlobalVariable *GV) { std::set<Value *> visited; Value *V = cast<Value>(GV); return GlobalHasStoreUserRec(V, visited); } GlobalVariable *CreateStaticGlobal(llvm::Module *M, GlobalVariable *GV) { Constant *GC = M->getOrInsertGlobal(GV->getName().str() + ".static.copy", GV->getType()->getPointerElementType()); GlobalVariable *NGV = cast<GlobalVariable>(GC); if (GV->hasInitializer()) { NGV->setInitializer(GV->getInitializer()); } else { // The copy being static, it should be initialized per llvm rules. NGV->setInitializer( Constant::getNullValue(GV->getType()->getPointerElementType())); } // Static global should have internal linkage. NGV->setLinkage(GlobalValue::InternalLinkage); return NGV; } void CreateWriteEnabledStaticGlobals(llvm::Module *M, llvm::Function *EF) { std::vector<GlobalVariable *> worklist; for (GlobalVariable &GV : M->globals()) { if (!GV.isConstant() && GV.getLinkage() != GlobalValue::InternalLinkage && // skip globals which are HLSL objects or group shared !dxilutil::IsHLSLObjectType(GV.getType()->getPointerElementType()) && !dxilutil::IsSharedMemoryGlobal(&GV)) { if (GlobalHasStoreUser(&GV)) worklist.emplace_back(&GV); // TODO: Ensure that constant globals aren't using initializer GV.setConstant(true); } } IRBuilder<> Builder( dxilutil::FirstNonAllocaInsertionPt(&EF->getEntryBlock())); for (GlobalVariable *GV : worklist) { GlobalVariable *NGV = CreateStaticGlobal(M, GV); GV->replaceAllUsesWith(NGV); // Insert memcpy in the entry block. uint64_t size = M->getDataLayout().getTypeAllocSize( GV->getType()->getPointerElementType()); Builder.CreateMemCpy(NGV, GV, size, 1); } } } // namespace namespace { void SetEntryFunction(HLModule &HLM, Function *Entry, clang::CodeGen::CodeGenModule &CGM) { if (Entry == nullptr) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Error, "cannot find entry function %0"); Diags.Report(DiagID) << CGM.getCodeGenOpts().HLSLEntryFunction; return; } HLM.SetEntryFunction(Entry); } Function *CloneFunction(Function *Orig, const llvm::Twine &Name, llvm::Module *llvmModule, hlsl::DxilTypeSystem &TypeSys, hlsl::DxilTypeSystem &SrcTypeSys) { Function *F = Function::Create(Orig->getFunctionType(), GlobalValue::LinkageTypes::ExternalLinkage, Name, llvmModule); SmallVector<ReturnInst *, 2> Returns; ValueToValueMapTy vmap; // Map params. auto entryParamIt = F->arg_begin(); for (Argument &param : Orig->args()) { vmap[&param] = (entryParamIt++); } llvm::CloneFunctionInto(F, Orig, vmap, /*ModuleLevelChanges*/ false, Returns); TypeSys.CopyFunctionAnnotation(F, Orig, SrcTypeSys); return F; } // Clone shader entry function to be called by other functions. // The original function will be used as shader entry. void CloneShaderEntry(Function *ShaderF, StringRef EntryName, HLModule &HLM) { Function *F = CloneFunction(ShaderF, "", HLM.GetModule(), HLM.GetTypeSystem(), HLM.GetTypeSystem()); F->takeName(ShaderF); F->setLinkage(GlobalValue::LinkageTypes::InternalLinkage); // Set to the name before mangling. ShaderF->setName(EntryName); DxilFunctionAnnotation *annot = HLM.GetFunctionAnnotation(F); DxilParameterAnnotation &cloneRetAnnot = annot->GetRetTypeAnnotation(); // Clear semantic for cloned one. cloneRetAnnot.SetSemanticString(""); cloneRetAnnot.SetSemanticIndexVec({}); for (unsigned i = 0; i < annot->GetNumParameters(); i++) { DxilParameterAnnotation &cloneParamAnnot = annot->GetParameterAnnotation(i); // Clear semantic for cloned one.
cloneParamAnnot.SetSemanticString(""); cloneParamAnnot.SetSemanticIndexVec({}); } } } // namespace namespace { bool IsPatchConstantFunction( const Function *F, StringMap<PatchConstantInfo> &patchConstantFunctionMap) { DXASSERT_NOMSG(F != nullptr); for (auto &&p : patchConstantFunctionMap) { if (p.second.Func == F) return true; } return false; } void SetPatchConstantFunctionWithAttr( const EntryFunctionInfo &EntryFunc, const clang::HLSLPatchConstantFuncAttr *PatchConstantFuncAttr, StringMap<PatchConstantInfo> &patchConstantFunctionMap, std::unordered_map<Function *, std::unique_ptr<DxilFunctionProps>> &patchConstantFunctionPropsMap, HLModule &HLM, clang::CodeGen::CodeGenModule &CGM) { StringRef funcName = PatchConstantFuncAttr->getFunctionName(); auto Entry = patchConstantFunctionMap.find(funcName); if (Entry == patchConstantFunctionMap.end()) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID( clang::DiagnosticsEngine::Error, "Cannot find patchconstantfunc %0."); Diags.Report(PatchConstantFuncAttr->getLocation(), DiagID) << funcName; return; } if (Entry->second.NumOverloads != 1) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Warning, "Multiple overloads of patchconstantfunc %0."); unsigned NoteID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Note, "This overload was selected."); Diags.Report(PatchConstantFuncAttr->getLocation(), DiagID) << funcName; Diags.Report(Entry->second.SL, NoteID); } Function *patchConstFunc = Entry->second.Func; DXASSERT( HLM.HasDxilFunctionProps(EntryFunc.Func), " else AddHLSLFunctionInfo did not save the dxil function props for the " "HS entry."); HLM.SetPatchConstantFunctionForHS(EntryFunc.Func, patchConstFunc); DXASSERT_NOMSG(patchConstantFunctionPropsMap.count(patchConstFunc)); // Check no inout parameter for patch constant function. DxilFunctionAnnotation *patchConstFuncAnnotation = HLM.GetFunctionAnnotation(patchConstFunc); for (unsigned i = 0; i < patchConstFuncAnnotation->GetNumParameters(); i++) { if (patchConstFuncAnnotation->GetParameterAnnotation(i) .GetParamInputQual() == DxilParamInputQual::Inout) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID( clang::DiagnosticsEngine::Error, "Patch Constant function %0 should not have inout param."); Diags.Report(Entry->second.SL, DiagID) << funcName; } } } void SetPatchConstantFunction( const EntryFunctionInfo &EntryFunc, std::unordered_map<Function *, const clang::HLSLPatchConstantFuncAttr *> &HSEntryPatchConstantFuncAttr, StringMap<PatchConstantInfo> &patchConstantFunctionMap, std::unordered_map<Function *, std::unique_ptr<DxilFunctionProps>> &patchConstantFunctionPropsMap, HLModule &HLM, clang::CodeGen::CodeGenModule &CGM) { auto AttrsIter = HSEntryPatchConstantFuncAttr.find(EntryFunc.Func); DXASSERT(AttrsIter != HSEntryPatchConstantFuncAttr.end(), "we have checked this in AddHLSLFunctionInfo()"); SetPatchConstantFunctionWithAttr(EntryFunc, AttrsIter->second, patchConstantFunctionMap, patchConstantFunctionPropsMap, HLM, CGM); } } // namespace namespace { // For case like: // cbuffer A { // float a; // int b; //} // // const static struct { // float a; // int b; //} ST = { a, b }; // Replace user of ST with a and b. bool ReplaceConstStaticGlobalUser(GEPOperator *GEP, std::vector<Constant *> &InitList, IRBuilder<> &Builder) { if (GEP->getNumIndices() < 2) { // Don't use sub element. 
return false; } SmallVector<Value *, 4> idxList; auto iter = GEP->idx_begin(); idxList.emplace_back(*(iter++)); ConstantInt *subIdx = dyn_cast<ConstantInt>(*(iter++)); DXASSERT(subIdx, "else dynamic indexing on struct field"); unsigned subIdxImm = subIdx->getLimitedValue(); DXASSERT(subIdxImm < InitList.size(), "else struct index out of bound"); Constant *subPtr = InitList[subIdxImm]; // Move every idx to idxList except idx for InitList. while (iter != GEP->idx_end()) { idxList.emplace_back(*(iter++)); } Value *NewGEP = Builder.CreateGEP(subPtr, idxList); GEP->replaceAllUsesWith(NewGEP); return true; } } // namespace namespace CGHLSLMSHelper { void ReplaceConstStaticGlobals( std::unordered_map<GlobalVariable *, std::vector<Constant *>> &staticConstGlobalInitListMap, std::unordered_map<GlobalVariable *, Function *> &staticConstGlobalCtorMap) { for (auto &iter : staticConstGlobalInitListMap) { GlobalVariable *GV = iter.first; std::vector<Constant *> &InitList = iter.second; LLVMContext &Ctx = GV->getContext(); // Do the replace. bool bPass = true; for (User *U : GV->users()) { IRBuilder<> Builder(Ctx); if (GetElementPtrInst *GEPInst = dyn_cast<GetElementPtrInst>(U)) { Builder.SetInsertPoint(GEPInst); bPass &= ReplaceConstStaticGlobalUser(cast<GEPOperator>(GEPInst), InitList, Builder); } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { bPass &= ReplaceConstStaticGlobalUser(GEP, InitList, Builder); } else { DXASSERT(false, "invalid user of const static global"); } } // Clear the Ctor which is useless now. if (bPass) { Function *Ctor = staticConstGlobalCtorMap[GV]; Ctor->getBasicBlockList().clear(); BasicBlock *Entry = BasicBlock::Create(Ctx, "", Ctor); IRBuilder<> Builder(Entry); Builder.CreateRetVoid(); } } } } // namespace CGHLSLMSHelper namespace { Value *CastLdValue(Value *Ptr, llvm::Type *FromTy, llvm::Type *ToTy, IRBuilder<> &Builder) { if (ToTy->isVectorTy()) { unsigned vecSize = ToTy->getVectorNumElements(); if (vecSize == 1 && ToTy->getVectorElementType() == FromTy) { Value *V = Builder.CreateLoad(Ptr); // ScalarToVec1Splat // Change scalar into vec1. Value *Vec1 = UndefValue::get(ToTy); return Builder.CreateInsertElement(Vec1, V, (uint64_t)0); } else if (vecSize == 1 && FromTy->isIntegerTy() && ToTy->getVectorElementType()->isIntegerTy(1)) { // load(bitcast i32* to <1 x i1>*) // Rewrite to // insertelement(icmp ne (load i32*), 0) Value *IntV = Builder.CreateLoad(Ptr); Value *BoolV = Builder.CreateICmpNE( IntV, ConstantInt::get(IntV->getType(), 0), "tobool"); Value *Vec1 = UndefValue::get(ToTy); return Builder.CreateInsertElement(Vec1, BoolV, (uint64_t)0); } else if (FromTy->isVectorTy() && vecSize == 1) { Value *V = Builder.CreateLoad(Ptr); // VectorTrunc // Change vector into vec1. int mask[] = {0}; return Builder.CreateShuffleVector(V, V, mask); } else if (FromTy->isArrayTy()) { llvm::Type *FromEltTy = FromTy->getArrayElementType(); llvm::Type *ToEltTy = ToTy->getVectorElementType(); if (FromTy->getArrayNumElements() == vecSize && FromEltTy == ToEltTy) { // ArrayToVector. 
Value *NewLd = UndefValue::get(ToTy); Value *zeroIdx = Builder.getInt32(0); for (unsigned i = 0; i < vecSize; i++) { Value *GEP = Builder.CreateInBoundsGEP(Ptr, {zeroIdx, Builder.getInt32(i)}); Value *Elt = Builder.CreateLoad(GEP); NewLd = Builder.CreateInsertElement(NewLd, Elt, i); } return NewLd; } } } else if (FromTy == Builder.getInt1Ty()) { Value *V = Builder.CreateLoad(Ptr); // BoolCast DXASSERT_NOMSG(ToTy->isIntegerTy()); return Builder.CreateZExt(V, ToTy); } return nullptr; } Value *CastStValue(Value *Ptr, Value *V, llvm::Type *FromTy, llvm::Type *ToTy, IRBuilder<> &Builder) { if (ToTy->isVectorTy()) { unsigned vecSize = ToTy->getVectorNumElements(); if (vecSize == 1 && ToTy->getVectorElementType() == FromTy) { // ScalarToVec1Splat // Change vec1 back to scalar. Value *Elt = Builder.CreateExtractElement(V, (uint64_t)0); return Elt; } else if (FromTy->isVectorTy() && vecSize == 1) { // VectorTrunc // Change vec1 into vector. // Should not happen. // Reported error at Sema::ImpCastExprToType. DXASSERT_NOMSG(0); } else if (FromTy->isArrayTy()) { llvm::Type *FromEltTy = FromTy->getArrayElementType(); llvm::Type *ToEltTy = ToTy->getVectorElementType(); if (FromTy->getArrayNumElements() == vecSize && FromEltTy == ToEltTy) { // ArrayToVector. Value *zeroIdx = Builder.getInt32(0); for (unsigned i = 0; i < vecSize; i++) { Value *Elt = Builder.CreateExtractElement(V, i); Value *GEP = Builder.CreateInBoundsGEP(Ptr, {zeroIdx, Builder.getInt32(i)}); Builder.CreateStore(Elt, GEP); } // The store is already done. // Return null to ignore use of the return value. return nullptr; } } } else if (FromTy == Builder.getInt1Ty()) { // BoolCast // Change i1 to ToTy. DXASSERT_NOMSG(ToTy->isIntegerTy()); Value *CastV = Builder.CreateICmpNE(V, ConstantInt::get(V->getType(), 0)); return CastV; } return nullptr; } bool SimplifyBitCastLoad(LoadInst *LI, llvm::Type *FromTy, llvm::Type *ToTy, Value *Ptr) { IRBuilder<> Builder(LI); // Cast FromLd to ToTy. Value *CastV = CastLdValue(Ptr, FromTy, ToTy, Builder); if (CastV) { LI->replaceAllUsesWith(CastV); return true; } else { return false; } } bool SimplifyBitCastStore(StoreInst *SI, llvm::Type *FromTy, llvm::Type *ToTy, Value *Ptr) { IRBuilder<> Builder(SI); Value *V = SI->getValueOperand(); // Cast Val to FromTy. Value *CastV = CastStValue(Ptr, V, FromTy, ToTy, Builder); if (CastV) { Builder.CreateStore(CastV, Ptr); return true; } else { return false; } } bool SimplifyBitCastGEP(GEPOperator *GEP, llvm::Type *FromTy, llvm::Type *ToTy, Value *Ptr) { if (ToTy->isVectorTy()) { unsigned vecSize = ToTy->getVectorNumElements(); if (vecSize == 1 && ToTy->getVectorElementType() == FromTy) { // ScalarToVec1Splat GEP->replaceAllUsesWith(Ptr); return true; } else if (FromTy->isVectorTy() && vecSize == 1) { // VectorTrunc DXASSERT_NOMSG( !isa<llvm::VectorType>(GEP->getType()->getPointerElementType())); IRBuilder<> Builder(FromTy->getContext()); if (Instruction *I = dyn_cast<Instruction>(GEP)) Builder.SetInsertPoint(I); std::vector<Value *> idxList(GEP->idx_begin(), GEP->idx_end()); Value *NewGEP = Builder.CreateInBoundsGEP(Ptr, idxList); GEP->replaceAllUsesWith(NewGEP); return true; } else if (FromTy->isArrayTy()) { llvm::Type *FromEltTy = FromTy->getArrayElementType(); llvm::Type *ToEltTy = ToTy->getVectorElementType(); if (FromTy->getArrayNumElements() == vecSize && FromEltTy == ToEltTy) { // ArrayToVector.
} } } else if (FromTy == llvm::Type::getInt1Ty(FromTy->getContext())) { // BoolCast } return false; } typedef SmallPtrSet<Instruction *, 4> SmallInstSet; void SimplifyBitCast(BitCastOperator *BC, SmallInstSet &deadInsts) { Value *Ptr = BC->getOperand(0); llvm::Type *FromTy = Ptr->getType(); llvm::Type *ToTy = BC->getType(); if (!FromTy->isPointerTy() || !ToTy->isPointerTy()) return; FromTy = FromTy->getPointerElementType(); ToTy = ToTy->getPointerElementType(); // Take care of cases like %2 = bitcast %struct.T* %1 to <1 x float>*. bool GEPCreated = false; if (FromTy->isStructTy()) { IRBuilder<> Builder(FromTy->getContext()); if (Instruction *I = dyn_cast<Instruction>(BC)) Builder.SetInsertPoint(I); Value *zeroIdx = Builder.getInt32(0); unsigned nestLevel = 1; while (llvm::StructType *ST = dyn_cast<llvm::StructType>(FromTy)) { if (ST->getNumElements() == 0) break; FromTy = ST->getElementType(0); nestLevel++; } std::vector<Value *> idxList(nestLevel, zeroIdx); Ptr = Builder.CreateGEP(Ptr, idxList); GEPCreated = true; } for (User *U : BC->users()) { if (LoadInst *LI = dyn_cast<LoadInst>(U)) { if (SimplifyBitCastLoad(LI, FromTy, ToTy, Ptr)) { LI->dropAllReferences(); deadInsts.insert(LI); } } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) { if (SimplifyBitCastStore(SI, FromTy, ToTy, Ptr)) { SI->dropAllReferences(); deadInsts.insert(SI); } } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(U)) { if (SimplifyBitCastGEP(GEP, FromTy, ToTy, Ptr)) if (Instruction *I = dyn_cast<Instruction>(GEP)) { I->dropAllReferences(); deadInsts.insert(I); } } else if (dyn_cast<CallInst>(U)) { // Skip function call. } else if (dyn_cast<BitCastInst>(U)) { // Skip bitcast. } else if (dyn_cast<AddrSpaceCastInst>(U)) { // Skip addrspacecast. } else { DXASSERT(0, "not supported yet"); } } // We created a GEP instruction but didn't end up consuming it, so delete it.
if (GEPCreated && Ptr->use_empty()) { if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) GEP->eraseFromParent(); else cast<Constant>(Ptr)->destroyConstant(); } } typedef float(__cdecl *FloatUnaryEvalFuncType)(float); typedef double(__cdecl *DoubleUnaryEvalFuncType)(double); typedef APInt(__cdecl *IntBinaryEvalFuncType)(const APInt &, const APInt &); typedef float(__cdecl *FloatBinaryEvalFuncType)(float, float); typedef double(__cdecl *DoubleBinaryEvalFuncType)(double, double); typedef APInt(__cdecl *IntTernaryEvalFuncType)(const APInt &, const APInt &, const APInt &); typedef float(__cdecl *FloatTernaryEvalFuncType)(float, float, float); typedef double(__cdecl *DoubleTernaryEvalFuncType)(double, double, double); Value *EvalUnaryIntrinsic(ConstantFP *fpV, FloatUnaryEvalFuncType floatEvalFunc, DoubleUnaryEvalFuncType doubleEvalFunc) { llvm::Type *Ty = fpV->getType(); Value *Result = nullptr; if (Ty->isDoubleTy()) { double dV = fpV->getValueAPF().convertToDouble(); Value *dResult = ConstantFP::get(Ty, doubleEvalFunc(dV)); Result = dResult; } else { DXASSERT_NOMSG(Ty->isFloatTy()); float fV = fpV->getValueAPF().convertToFloat(); Value *dResult = ConstantFP::get(Ty, floatEvalFunc(fV)); Result = dResult; } return Result; } Value *EvalBinaryIntrinsic(Constant *cV0, Constant *cV1, FloatBinaryEvalFuncType floatEvalFunc, DoubleBinaryEvalFuncType doubleEvalFunc, IntBinaryEvalFuncType intEvalFunc) { llvm::Type *Ty = cV0->getType(); Value *Result = nullptr; if (Ty->isDoubleTy()) { ConstantFP *fpV0 = cast<ConstantFP>(cV0); ConstantFP *fpV1 = cast<ConstantFP>(cV1); double dV0 = fpV0->getValueAPF().convertToDouble(); double dV1 = fpV1->getValueAPF().convertToDouble(); Value *dResult = ConstantFP::get(Ty, doubleEvalFunc(dV0, dV1)); Result = dResult; } else if (Ty->isFloatTy()) { ConstantFP *fpV0 = cast<ConstantFP>(cV0); ConstantFP *fpV1 = cast<ConstantFP>(cV1); float fV0 = fpV0->getValueAPF().convertToFloat(); float fV1 = fpV1->getValueAPF().convertToFloat(); Value *dResult = ConstantFP::get(Ty, floatEvalFunc(fV0, fV1)); Result = dResult; } else { DXASSERT_NOMSG(Ty->isIntegerTy()); DXASSERT_NOMSG(intEvalFunc); ConstantInt *ciV0 = cast<ConstantInt>(cV0); ConstantInt *ciV1 = cast<ConstantInt>(cV1); const APInt &iV0 = ciV0->getValue(); const APInt &iV1 = ciV1->getValue(); Value *dResult = ConstantInt::get(Ty, intEvalFunc(iV0, iV1)); Result = dResult; } return Result; } Value *EvalTernaryIntrinsic(Constant *cV0, Constant *cV1, Constant *cV2, FloatTernaryEvalFuncType floatEvalFunc, DoubleTernaryEvalFuncType doubleEvalFunc, IntTernaryEvalFuncType intEvalFunc) { llvm::Type *Ty = cV0->getType(); Value *Result = nullptr; if (Ty->isDoubleTy()) { ConstantFP *fpV0 = cast<ConstantFP>(cV0); ConstantFP *fpV1 = cast<ConstantFP>(cV1); ConstantFP *fpV2 = cast<ConstantFP>(cV2); double dV0 = fpV0->getValueAPF().convertToDouble(); double dV1 = fpV1->getValueAPF().convertToDouble(); double dV2 = fpV2->getValueAPF().convertToDouble(); Value *dResult = ConstantFP::get(Ty, doubleEvalFunc(dV0, dV1, dV2)); Result = dResult; } else if (Ty->isFloatTy()) { ConstantFP *fpV0 = cast<ConstantFP>(cV0); ConstantFP *fpV1 = cast<ConstantFP>(cV1); ConstantFP *fpV2 = cast<ConstantFP>(cV2); float fV0 = fpV0->getValueAPF().convertToFloat(); float fV1 = fpV1->getValueAPF().convertToFloat(); float fV2 = fpV2->getValueAPF().convertToFloat(); Value *dResult = ConstantFP::get(Ty, floatEvalFunc(fV0, fV1, fV2)); Result = dResult; } else { DXASSERT_NOMSG(Ty->isIntegerTy()); DXASSERT_NOMSG(intEvalFunc); ConstantInt *ciV0 = 
cast<ConstantInt>(cV0); ConstantInt *ciV1 = cast<ConstantInt>(cV1); ConstantInt *ciV2 = cast<ConstantInt>(cV2); const APInt &iV0 = ciV0->getValue(); const APInt &iV1 = ciV1->getValue(); const APInt &iV2 = ciV2->getValue(); Value *dResult = ConstantInt::get(Ty, intEvalFunc(iV0, iV1, iV2)); Result = dResult; } return Result; } Value *EvalUnaryIntrinsic(CallInst *CI, FloatUnaryEvalFuncType floatEvalFunc, DoubleUnaryEvalFuncType doubleEvalFunc) { Value *V = CI->getArgOperand(0); llvm::Type *Ty = CI->getType(); Value *Result = nullptr; if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Ty)) { Result = UndefValue::get(Ty); Constant *CV = cast<Constant>(V); IRBuilder<> Builder(CI); for (unsigned i = 0; i < VT->getNumElements(); i++) { ConstantFP *fpV = cast<ConstantFP>(CV->getAggregateElement(i)); Value *EltResult = EvalUnaryIntrinsic(fpV, floatEvalFunc, doubleEvalFunc); Result = Builder.CreateInsertElement(Result, EltResult, i); } } else { ConstantFP *fpV = cast<ConstantFP>(V); Result = EvalUnaryIntrinsic(fpV, floatEvalFunc, doubleEvalFunc); } CI->replaceAllUsesWith(Result); CI->eraseFromParent(); return Result; } Value *EvalBinaryIntrinsic(CallInst *CI, FloatBinaryEvalFuncType floatEvalFunc, DoubleBinaryEvalFuncType doubleEvalFunc, IntBinaryEvalFuncType intEvalFunc = nullptr) { Value *V0 = CI->getArgOperand(0); Value *V1 = CI->getArgOperand(1); llvm::Type *Ty = CI->getType(); Value *Result = nullptr; if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Ty)) { Result = UndefValue::get(Ty); Constant *CV0 = cast<Constant>(V0); Constant *CV1 = cast<Constant>(V1); IRBuilder<> Builder(CI); for (unsigned i = 0; i < VT->getNumElements(); i++) { Constant *cV0 = cast<Constant>(CV0->getAggregateElement(i)); Constant *cV1 = cast<Constant>(CV1->getAggregateElement(i)); Value *EltResult = EvalBinaryIntrinsic(cV0, cV1, floatEvalFunc, doubleEvalFunc, intEvalFunc); Result = Builder.CreateInsertElement(Result, EltResult, i); } } else { Constant *cV0 = cast<Constant>(V0); Constant *cV1 = cast<Constant>(V1); Result = EvalBinaryIntrinsic(cV0, cV1, floatEvalFunc, doubleEvalFunc, intEvalFunc); } CI->replaceAllUsesWith(Result); CI->eraseFromParent(); return Result; } Value *EvalTernaryIntrinsic(CallInst *CI, FloatTernaryEvalFuncType floatEvalFunc, DoubleTernaryEvalFuncType doubleEvalFunc, IntTernaryEvalFuncType intEvalFunc = nullptr) { Value *V0 = CI->getArgOperand(0); Value *V1 = CI->getArgOperand(1); Value *V2 = CI->getArgOperand(2); llvm::Type *Ty = CI->getType(); Value *Result = nullptr; if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(Ty)) { Result = UndefValue::get(Ty); Constant *CV0 = cast<Constant>(V0); Constant *CV1 = cast<Constant>(V1); Constant *CV2 = cast<Constant>(V2); IRBuilder<> Builder(CI); for (unsigned i = 0; i < VT->getNumElements(); i++) { Constant *cV0 = cast<Constant>(CV0->getAggregateElement(i)); Constant *cV1 = cast<Constant>(CV1->getAggregateElement(i)); Constant *cV2 = cast<Constant>(CV2->getAggregateElement(i)); Value *EltResult = EvalTernaryIntrinsic(cV0, cV1, cV2, floatEvalFunc, doubleEvalFunc, intEvalFunc); Result = Builder.CreateInsertElement(Result, EltResult, i); } } else { Constant *cV0 = cast<Constant>(V0); Constant *cV1 = cast<Constant>(V1); Constant *cV2 = cast<Constant>(V2); Result = EvalTernaryIntrinsic(cV0, cV1, cV2, floatEvalFunc, doubleEvalFunc, intEvalFunc); } CI->replaceAllUsesWith(Result); CI->eraseFromParent(); return Result; } void SimpleTransformForHLDXIRInst(Instruction *I, SmallInstSet
&deadInsts) { unsigned opcode = I->getOpcode(); switch (opcode) { case Instruction::BitCast: { BitCastOperator *BCI = cast<BitCastOperator>(I); SimplifyBitCast(BCI, deadInsts); } break; case Instruction::Load: { LoadInst *ldInst = cast<LoadInst>(I); DXASSERT(!HLMatrixType::isa(ldInst->getType()), "matrix load should use HL LdStMatrix"); Value *Ptr = ldInst->getPointerOperand(); if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(Ptr)) { if (BitCastOperator *BCO = dyn_cast<BitCastOperator>(CE)) { SimplifyBitCast(BCO, deadInsts); } } } break; case Instruction::Store: { StoreInst *stInst = cast<StoreInst>(I); Value *V = stInst->getValueOperand(); DXASSERT_LOCALVAR(V, !HLMatrixType::isa(V->getType()), "matrix store should use HL LdStMatrix"); Value *Ptr = stInst->getPointerOperand(); if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { if (BitCastOperator *BCO = dyn_cast<BitCastOperator>(CE)) { SimplifyBitCast(BCO, deadInsts); } } } break; } } } // namespace namespace CGHLSLMSHelper { Value *TryEvalIntrinsic(CallInst *CI, IntrinsicOp intriOp, hlsl::LangStd hlslVersion) { switch (intriOp) { case IntrinsicOp::IOP_tan: { return EvalUnaryIntrinsic(CI, tanf, tan); } break; case IntrinsicOp::IOP_tanh: { return EvalUnaryIntrinsic(CI, tanhf, tanh); } break; case IntrinsicOp::IOP_sin: { return EvalUnaryIntrinsic(CI, sinf, sin); } break; case IntrinsicOp::IOP_sinh: { return EvalUnaryIntrinsic(CI, sinhf, sinh); } break; case IntrinsicOp::IOP_cos: { return EvalUnaryIntrinsic(CI, cosf, cos); } break; case IntrinsicOp::IOP_cosh: { return EvalUnaryIntrinsic(CI, coshf, cosh); } break; case IntrinsicOp::IOP_asin: { return EvalUnaryIntrinsic(CI, asinf, asin); } break; case IntrinsicOp::IOP_acos: { return EvalUnaryIntrinsic(CI, acosf, acos); } break; case IntrinsicOp::IOP_atan: { return EvalUnaryIntrinsic(CI, atanf, atan); } break; case IntrinsicOp::IOP_atan2: { Value *atanV = Atan2(CI); CI->replaceAllUsesWith(atanV); CI->eraseFromParent(); return atanV; } break; case IntrinsicOp::IOP_sqrt: { return EvalUnaryIntrinsic(CI, sqrtf, sqrt); } break; case IntrinsicOp::IOP_rsqrt: { auto rsqrtF = [](float v) -> float { return 1.0 / sqrtf(v); }; auto rsqrtD = [](double v) -> double { return 1.0 / sqrt(v); }; return EvalUnaryIntrinsic(CI, rsqrtF, rsqrtD); } break; case IntrinsicOp::IOP_exp: { return EvalUnaryIntrinsic(CI, expf, exp); } break; case IntrinsicOp::IOP_exp2: { return EvalUnaryIntrinsic(CI, exp2f, exp2); } break; case IntrinsicOp::IOP_log: { return EvalUnaryIntrinsic(CI, logf, log); } break; case IntrinsicOp::IOP_log10: { return EvalUnaryIntrinsic(CI, log10f, log10); } break; case IntrinsicOp::IOP_log2: { return EvalUnaryIntrinsic(CI, log2f, log2); } break; case IntrinsicOp::IOP_pow: { return EvalBinaryIntrinsic(CI, powf, pow); } break; case IntrinsicOp::IOP_max: { auto maxF = [](float a, float b) -> float { return a > b ? a : b; }; auto maxD = [](double a, double b) -> double { return a > b ? a : b; }; auto imaxI = [](const APInt &a, const APInt &b) -> APInt { return a.sgt(b) ? a : b; }; return EvalBinaryIntrinsic(CI, maxF, maxD, imaxI); } break; case IntrinsicOp::IOP_min: { auto minF = [](float a, float b) -> float { return a < b ? a : b; }; auto minD = [](double a, double b) -> double { return a < b ? a : b; }; auto iminI = [](const APInt &a, const APInt &b) -> APInt { return a.slt(b) ? 
a : b; }; return EvalBinaryIntrinsic(CI, minF, minD, iminI); } break; case IntrinsicOp::IOP_umax: { DXASSERT_NOMSG( CI->getArgOperand(0)->getType()->getScalarType()->isIntegerTy()); auto umaxI = [](const APInt &a, const APInt &b) -> APInt { return a.ugt(b) ? a : b; }; return EvalBinaryIntrinsic(CI, nullptr, nullptr, umaxI); } break; case IntrinsicOp::IOP_umin: { DXASSERT_NOMSG( CI->getArgOperand(0)->getType()->getScalarType()->isIntegerTy()); auto uminI = [](const APInt &a, const APInt &b) -> APInt { return a.ult(b) ? a : b; }; return EvalBinaryIntrinsic(CI, nullptr, nullptr, uminI); } break; case IntrinsicOp::IOP_rcp: { auto rcpF = [](float v) -> float { return 1.0 / v; }; auto rcpD = [](double v) -> double { return 1.0 / v; }; return EvalUnaryIntrinsic(CI, rcpF, rcpD); } break; case IntrinsicOp::IOP_ceil: { return EvalUnaryIntrinsic(CI, ceilf, ceil); } break; case IntrinsicOp::IOP_floor: { return EvalUnaryIntrinsic(CI, floorf, floor); } break; case IntrinsicOp::IOP_round: { // The round intrinsic could exhibit different behavior for constant and // runtime evaluations. E.g., for round(0.5): constant evaluation results in // 1 (away-from-zero rounding), while runtime evaluation results in 0 // (nearest-even rounding). // // For back compat, DXC still preserves the above behavior for language // versions 2016 or below. However, for newer language versions, DXC now // always uses nearest-even for the round() intrinsic in all cases. if (hlslVersion <= hlsl::LangStd::v2016) { return EvalUnaryIntrinsic(CI, roundf, round); } else { auto roundingMode = fegetround(); fesetround(FE_TONEAREST); Value *result = EvalUnaryIntrinsic(CI, nearbyintf, nearbyint); fesetround(roundingMode); return result; } } break; case IntrinsicOp::IOP_trunc: { return EvalUnaryIntrinsic(CI, truncf, trunc); } break; case IntrinsicOp::IOP_frac: { auto fracF = [](float v) -> float { return v - floor(v); }; auto fracD = [](double v) -> double { return v - floor(v); }; return EvalUnaryIntrinsic(CI, fracF, fracD); } break; case IntrinsicOp::IOP_isnan: { Constant *cNan = IsNaN(CI); CI->replaceAllUsesWith(cNan); CI->eraseFromParent(); return cNan; } break; case IntrinsicOp::IOP_clamp: { auto clampF = [](float a, float b, float c) { return a < b ? b : a > c ? c : a; }; auto clampD = [](double a, double b, double c) { return a < b ? b : a > c ? c : a; }; auto clampI = [](const APInt &a, const APInt &b, const APInt &c) -> APInt { return a.slt(b) ? b : a.sgt(c) ? c : a; }; return EvalTernaryIntrinsic(CI, clampF, clampD, clampI); } break; default: return nullptr; } } // Do simple transforms to make the later lowering passes easier.
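// Illustrative IR sketch of one such transform: a pattern like
//   %p = bitcast %struct.T* %s to <1 x float>*
//   %v = load <1 x float>, <1 x float>* %p
// is rewritten by SimplifyBitCast into a GEP to the leading scalar, a scalar
// load, and an insertelement splat into the <1 x float>, so later lowering
// never sees the pointer bitcast.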
void SimpleTransformForHLDXIR(llvm::Module *pM) { SmallInstSet deadInsts; for (Function &F : pM->functions()) { for (BasicBlock &BB : F.getBasicBlockList()) { for (BasicBlock::iterator Iter = BB.begin(); Iter != BB.end();) { Instruction *I = (Iter++); if (deadInsts.count(I)) continue; // Skip dead instructions SimpleTransformForHLDXIRInst(I, deadInsts); } } } for (Instruction *I : deadInsts) I->dropAllReferences(); for (Instruction *I : deadInsts) I->eraseFromParent(); deadInsts.clear(); for (GlobalVariable &GV : pM->globals()) { if (dxilutil::IsStaticGlobal(&GV)) { for (User *U : GV.users()) { if (BitCastOperator *BCO = dyn_cast<BitCastOperator>(U)) { SimplifyBitCast(BCO, deadInsts); } } } } for (Instruction *I : deadInsts) I->dropAllReferences(); for (Instruction *I : deadInsts) I->eraseFromParent(); } } // namespace CGHLSLMSHelper namespace { unsigned RoundToAlign(unsigned num, unsigned mod) { // round num to next highest mod if (mod != 0) return mod * ((num + mod - 1) / mod); return num; } // Retrieve the last scalar or vector element type. // This has to be recursive for the nasty empty struct case. // returns true if found, false if we must backtrack. bool RetrieveLastElementType(Type *Ty, Type *&EltTy) { if (Ty->isStructTy()) { if (Ty->getStructNumElements() == 0) return false; for (unsigned i = Ty->getStructNumElements(); i > 0; --i) { if (RetrieveLastElementType(Ty->getStructElementType(i - 1), EltTy)) return true; } } else if (Ty->isArrayTy()) { if (RetrieveLastElementType(Ty->getArrayElementType(), EltTy)) return true; } else if ((Ty->isVectorTy() || Ty->isSingleValueType())) { EltTy = Ty->getScalarType(); return true; } return false; } // Here the size is CB size. // Offset still needs to be aligned based on type since this // is the legacy cbuffer global path. unsigned AlignCBufferOffset(unsigned offset, unsigned size, llvm::Type *Ty, bool bRowMajor, bool bMinPrecMode, bool &bCurRowIsMinPrec) { DXASSERT(!(offset & 1), "otherwise we have an invalid offset."); // resources, empty structure, or structures with only resources have // zero size, and need no alignment. if (size == 0) return offset; bool bNeedNewRow = Ty->isArrayTy(); // In min-precision mode, a new row is needed when // going into or out of min-precision component type. if (!bNeedNewRow) { bool bMinPrec = false; if (Ty->isStructTy()) { if (HLMatrixType mat = HLMatrixType::dyn_cast(Ty)) { bNeedNewRow |= !bRowMajor && mat.getNumColumns() > 1; bNeedNewRow |= bRowMajor && mat.getNumRows() > 1; bMinPrec = bMinPrecMode && mat.getElementType(false)->getScalarSizeInBits() < 32; } else { bNeedNewRow = true; if (bMinPrecMode) { // Need to get min-prec of last element of structure, // in case we pack something else into the end. Type *EltTy = nullptr; if (RetrieveLastElementType(Ty, EltTy)) bCurRowIsMinPrec = EltTy->getScalarSizeInBits() < 32; } } } else { DXASSERT_NOMSG(Ty->isVectorTy() || Ty->isSingleValueType()); // vector or scalar bMinPrec = bMinPrecMode && Ty->getScalarSizeInBits() < 32; } if (bMinPrecMode) { bNeedNewRow |= bCurRowIsMinPrec != bMinPrec; bCurRowIsMinPrec = bMinPrec; } } unsigned scalarSizeInBytes = Ty->getScalarSizeInBits() / 8; return AlignBufferOffsetInLegacy(offset, size, scalarSizeInBytes, bNeedNewRow); } unsigned AllocateDxilConstantBuffer( HLCBuffer &CB, std::unordered_map<Constant *, DxilFieldAnnotation> &constVarAnnotationMap, bool bMinPrecMode) { unsigned offset = 0; // Scan user allocated constants first. // Update offset. 
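  // For example (illustrative HLSL, names hypothetical): given
  //   float4 a : packoffset(c2);
  // the first loop below treats 'a' as user-allocated at bytes [32, 48), and
  // the second loop then places each unbound constant at an aligned offset at
  // or after byte 48.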
for (const std::unique_ptr<DxilResourceBase> &C : CB.GetConstants()) { if (C->GetLowerBound() == UINT_MAX) continue; unsigned size = C->GetRangeSize(); unsigned nextOffset = size + C->GetLowerBound(); if (offset < nextOffset) offset = nextOffset; } // Alloc after user allocated constants. bool bCurRowIsMinPrec = false; for (const std::unique_ptr<DxilResourceBase> &C : CB.GetConstants()) { if (C->GetLowerBound() != UINT_MAX) continue; unsigned size = C->GetRangeSize(); llvm::Type *Ty = C->GetHLSLType()->getPointerElementType(); auto fieldAnnotation = constVarAnnotationMap.at(C->GetGlobalSymbol()); bool bRowMajor = HLMatrixType::isa(Ty) ? fieldAnnotation.GetMatrixAnnotation().Orientation == MatrixOrientation::RowMajor : false; // Align offset. offset = AlignCBufferOffset(offset, size, Ty, bRowMajor, bMinPrecMode, bCurRowIsMinPrec); if (C->GetLowerBound() == UINT_MAX) { C->SetLowerBound(offset); } offset += size; } return offset; } void AllocateDxilConstantBuffers( HLModule &HLM, std::unordered_map<Constant *, DxilFieldAnnotation> &constVarAnnotationMap) { for (unsigned i = 0; i < HLM.GetCBuffers().size(); i++) { HLCBuffer &CB = *static_cast<HLCBuffer *>(&(HLM.GetCBuffer(i))); unsigned size = AllocateDxilConstantBuffer( CB, constVarAnnotationMap, HLM.GetHLOptions().bUseMinPrecision); CB.SetSize(size); } } } // namespace namespace { // Instruction pointer wrapper that cleans up the instruction if unused template <typename _InstructionT> struct CleanupIfUnused { _InstructionT *I = nullptr; CleanupIfUnused(Value *V) { if (V) I = cast<_InstructionT>(V); } ~CleanupIfUnused() { if (I && I->user_empty()) I->eraseFromParent(); I = nullptr; } operator _InstructionT *() { return I; } _InstructionT *operator->() { return I; } operator bool() { return I != nullptr; } }; // When replacing CBV use, it's possible for the resource itself to be used, // in which case we need to either use a cast from handle back to the resource // type, or if it's a pointer, use an alloca where that casted resource is // stored. // Later we will mutate the original resource type to a handle type, // and eliminate this cast. // This helper produces the cast and a store to new alloca in case either a // value or a pointer to the original resource type is needed, and cleans up any // unused instructions afterward. // Instructions are only produced if hlResTy is non-null, indicating // ConstantBuffer<> object type rather than legacy cbuffer type that has no // resource object. 
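// For example (illustrative HLSL, names hypothetical), given
//   ConstantBuffer<Foo> CB;
// a use that loads the resource value is replaced with Cast, while a use that
// needs a pointer to the resource object is replaced with Alloca, which holds
// the casted value through Store.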
struct HandleToResHelper { HandleToResHelper(IRBuilder<> &Builder, Type *hlResTy, Value *Handle, Function &F, HLModule &HLM) { if (hlResTy) { Cast = cast<CallInst>( HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToResCast, hlResTy, {Handle}, *HLM.GetModule())); IRBuilder<> AllocaBuilder(F.getEntryBlock().getFirstInsertionPt()); Alloca = AllocaBuilder.CreateAlloca(hlResTy); Store = Builder.CreateStore(Cast, Alloca); } } ~HandleToResHelper() { if (Alloca && Alloca->hasOneUse()) { Store->eraseFromParent(); Alloca->eraseFromParent(); if (Cast->user_empty()) Cast->eraseFromParent(); } } operator bool() { return Store != nullptr; } StoreInst *Store = nullptr; AllocaInst *Alloca = nullptr; CallInst *Cast = nullptr; }; void ReplaceUseInFunction(Value *V, Value *NewV, Function *F, IRBuilder<> &Builder, HandleToResHelper *pH2ResHelper = nullptr) { Type *VTy = V->getType()->getPointerElementType(); bool bIsResourceTy = dxilutil::IsHLSLResourceType(VTy); for (auto U = V->user_begin(); U != V->user_end();) { User *user = *(U++); if (Instruction *I = dyn_cast<Instruction>(user)) { if (I->getParent()->getParent() == F) { // replace use with GEP if in F if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) { if (BCI->getType() == NewV->getType()) { I->replaceAllUsesWith(NewV); I->eraseFromParent(); continue; } } // Only replace when type matches. if (V->getType() == NewV->getType()) { I->replaceUsesOfWith(V, NewV); continue; } if (pH2ResHelper && *pH2ResHelper && bIsResourceTy) { // This means CBV case, so use casted value or pointer instead. if (LoadInst *LI = dyn_cast<LoadInst>(I)) { Instruction *OrigPtrInst = dyn_cast<Instruction>(LI->getPointerOperand()); LI->replaceAllUsesWith(pH2ResHelper->Cast); LI->eraseFromParent(); // Clean up original pointer instruction (GEP) if unused. if (OrigPtrInst && OrigPtrInst->user_empty()) OrigPtrInst->eraseFromParent(); continue; } else { I->replaceUsesOfWith(V, pH2ResHelper->Alloca); continue; } } DXASSERT( false, "otherwise, attempting CB user replacement with mismatching type"); } } else { // For constant operator, create local clone which use GEP. // Only support GEP and bitcast. if (GEPOperator *GEPOp = dyn_cast<GEPOperator>(user)) { std::vector<Value *> idxList(GEPOp->idx_begin(), GEPOp->idx_end()); Value *NewGEP = Builder.CreateInBoundsGEP(NewV, idxList); ReplaceUseInFunction(GEPOp, NewGEP, F, Builder); } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(user)) { // Change the init val into NewV with Store. // FIXME: This store needs to be placed in a new init function for // this GV, rather than being placed in whichever function we happen // to be iterating at this point. Otherwise it may not actually be // initialized when used from another function. GV->setInitializer(nullptr); Builder.CreateStore(NewV, GV); } else { // Must be bitcast here. BitCastOperator *BC = cast<BitCastOperator>(user); Value *NewBC = Builder.CreateBitCast(NewV, BC->getType()); ReplaceUseInFunction(BC, NewBC, F, Builder); } } } } void MarkUsedFunctionForConst(Value *V, std::unordered_set<Function *> &usedFunc) { for (auto U = V->user_begin(); U != V->user_end();) { User *user = *(U++); if (Instruction *I = dyn_cast<Instruction>(user)) { Function *F = I->getParent()->getParent(); usedFunc.insert(F); } else { // For constant operator, create local clone which use GEP. // Only support GEP and bitcast. 
if (GEPOperator *GEPOp = dyn_cast<GEPOperator>(user)) { MarkUsedFunctionForConst(GEPOp, usedFunc); } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(user)) { MarkUsedFunctionForConst(GV, usedFunc); } else { // Must be bitcast here. BitCastOperator *BC = cast<BitCastOperator>(user); MarkUsedFunctionForConst(BC, usedFunc); } } } } bool CreateCBufferVariable(HLCBuffer &CB, HLModule &HLM, llvm::Type *HandleTy) { bool bUsed = false; // Build Struct for CBuffer. SmallVector<llvm::Type *, 4> Elements; for (const std::unique_ptr<DxilResourceBase> &C : CB.GetConstants()) { Value *GV = C->GetGlobalSymbol(); if (!GV->use_empty()) bUsed = true; // Global variable must be pointer type. llvm::Type *Ty = C->GetHLSLType()->getPointerElementType(); Elements.emplace_back(Ty); } // Don't create CBuffer variable for unused cbuffer. if (!bUsed) return false; llvm::Module &M = *HLM.GetModule(); bool isCBArray = CB.IsArray(); llvm::GlobalVariable *cbGV = nullptr; llvm::Type *cbTy = nullptr; unsigned cbIndexDepth = 0; if (!isCBArray) { if (CB.IsView()) { llvm::StructType *CBStructTy = llvm::StructType::create(CB.GetResultType(), CB.GetGlobalName()); cbGV = new llvm::GlobalVariable(M, CBStructTy, /*IsConstant*/ true, llvm::GlobalValue::ExternalLinkage, /*InitVal*/ nullptr, CB.GetGlobalName()); cbTy = cbGV->getType(); } else { llvm::StructType *CBStructTy = llvm::StructType::create(Elements, CB.GetGlobalName()); cbGV = new llvm::GlobalVariable(M, CBStructTy, /*IsConstant*/ true, llvm::GlobalValue::ExternalLinkage, /*InitVal*/ nullptr, CB.GetGlobalName()); cbTy = cbGV->getType(); } } else { // For array of ConstantBuffer, create array of struct instead of struct of // array. DXASSERT(CB.GetConstants().size() == 1, "ConstantBuffer should have 1 constant"); llvm::Type *CBEltTy = CB.GetConstants()[0] ->GetHLSLType() ->getPointerElementType() ->getArrayElementType(); cbIndexDepth = 1; while (CBEltTy->isArrayTy()) { CBEltTy = CBEltTy->getArrayElementType(); cbIndexDepth++; } // Add one level struct type to match normal case. 
llvm::StructType *CBStructTy = llvm::StructType::create({CB.GetResultType()}, CB.GetGlobalName()); llvm::ArrayType *CBArrayTy = llvm::ArrayType::get(CBStructTy, CB.GetRangeSize()); cbGV = new llvm::GlobalVariable(M, CBArrayTy, /*IsConstant*/ true, llvm::GlobalValue::ExternalLinkage, /*InitVal*/ nullptr, CB.GetGlobalName()); cbTy = llvm::PointerType::get(CBStructTy, cbGV->getType()->getPointerAddressSpace()); } CB.SetGlobalSymbol(cbGV); llvm::Type *opcodeTy = llvm::Type::getInt32Ty(M.getContext()); llvm::Type *idxTy = opcodeTy; Constant *zeroIdx = ConstantInt::get(opcodeTy, 0); Value *HandleArgs[] = {cbGV, zeroIdx}; llvm::FunctionType *SubscriptFuncTy = llvm::FunctionType::get(cbTy, {opcodeTy, HandleTy, idxTy}, false); Function *subscriptFunc = GetOrCreateHLFunction(M, SubscriptFuncTy, HLOpcodeGroup::HLSubscript, (unsigned)HLSubscriptOpcode::CBufferSubscript); Constant *opArg = ConstantInt::get(opcodeTy, (unsigned)HLSubscriptOpcode::CBufferSubscript); Value *args[] = {opArg, nullptr, zeroIdx}; llvm::LLVMContext &Context = M.getContext(); llvm::Type *i32Ty = llvm::Type::getInt32Ty(Context); Value *zero = ConstantInt::get(i32Ty, (uint64_t)0); std::vector<Value *> indexArray(CB.GetConstants().size()); std::vector<std::unordered_set<Function *>> constUsedFuncList( CB.GetConstants().size()); for (const std::unique_ptr<DxilResourceBase> &C : CB.GetConstants()) { Value *idx = ConstantInt::get(i32Ty, C->GetID()); indexArray[C->GetID()] = idx; Value *GV = C->GetGlobalSymbol(); MarkUsedFunctionForConst(GV, constUsedFuncList[C->GetID()]); } // If ConstantBuffer<> style CBV, get resource type used for translating // handles back to resource objects. Type *hlResTy = nullptr; if (CB.IsView()) { hlResTy = CB.GetConstants().front()->GetHLSLType()->getPointerElementType(); hlResTy = dxilutil::StripArrayTypes(hlResTy); } for (Function &F : M.functions()) { if (F.isDeclaration()) continue; if (GetHLOpcodeGroupByName(&F) != HLOpcodeGroup::NotHL) continue; IRBuilder<> Builder(F.getEntryBlock().getFirstInsertionPt()); // create HL subscript to make all the use of cbuffer start from it. HandleArgs[HLOperandIndex::kCreateHandleResourceOpIdx - 1] = cbGV; CleanupIfUnused<CallInst> OrigHandle = HLM.EmitHLOperationCall( Builder, HLOpcodeGroup::HLCreateHandle, 0, HandleTy, HandleArgs, M); DxilResourceProperties RP = resource_helper::loadPropsFromResourceBase(&CB); CleanupIfUnused<CallInst> Handle = CreateAnnotateHandle( HLM, OrigHandle, RP, cbGV->getType()->getElementType(), Builder); args[HLOperandIndex::kSubscriptObjectOpIdx] = Handle; CleanupIfUnused<Instruction> cbSubscript = Builder.CreateCall(subscriptFunc, {args}); // Replace constant var with GEP pGV for (const std::unique_ptr<DxilResourceBase> &C : CB.GetConstants()) { Value *GV = C->GetGlobalSymbol(); if (constUsedFuncList[C->GetID()].count(&F) == 0) continue; Value *idx = indexArray[C->GetID()]; if (!isCBArray) { CleanupIfUnused<Instruction> GEP = Builder.CreateInBoundsGEP(cbSubscript, {zero, idx}); // TODO: make sure the debug info is synced to GEP. 
// GEP->setDebugLoc(GV); HandleToResHelper H2ResHelper(Builder, hlResTy, Handle, F, HLM); ReplaceUseInFunction(GV, GEP, &F, Builder, &H2ResHelper); } else { for (auto U = GV->user_begin(); U != GV->user_end();) { User *user = *(U++); if (user->user_empty()) continue; Instruction *I = dyn_cast<Instruction>(user); if (I && I->getParent()->getParent() != &F) continue; IRBuilder<> *instBuilder = &Builder; std::unique_ptr<IRBuilder<>> B; if (I) { B = llvm::make_unique<IRBuilder<>>(I); instBuilder = B.get(); } GEPOperator *GEPOp = cast<GEPOperator>(user); std::vector<Value *> idxList; DXASSERT(GEPOp->getNumIndices() >= 1 + cbIndexDepth, "must indexing ConstantBuffer array"); idxList.reserve(GEPOp->getNumIndices() - (cbIndexDepth - 1)); gep_type_iterator GI = gep_type_begin(*GEPOp), E = gep_type_end(*GEPOp); idxList.push_back(GI.getOperand()); // change array index with 0 for struct index. idxList.push_back(zero); GI++; Value *arrayIdx = GI.getOperand(); GI++; for (unsigned curIndex = 1; GI != E && curIndex < cbIndexDepth; ++GI, ++curIndex) { arrayIdx = instBuilder->CreateMul( arrayIdx, Builder.getInt32(GI->getArrayNumElements())); arrayIdx = instBuilder->CreateAdd(arrayIdx, GI.getOperand()); } for (; GI != E; ++GI) { idxList.push_back(GI.getOperand()); } HandleArgs[HLOperandIndex::kCreateHandleIndexOpIdx - 1] = arrayIdx; CleanupIfUnused<CallInst> OrigHandle = HLM.EmitHLOperationCall( *instBuilder, HLOpcodeGroup::HLCreateHandle, 0, HandleTy, HandleArgs, M); DxilResourceProperties RP = resource_helper::loadPropsFromResourceBase(&CB); CleanupIfUnused<CallInst> Handle = CreateAnnotateHandle( HLM, OrigHandle, RP, cbGV->getType()->getElementType(), *instBuilder); args[HLOperandIndex::kSubscriptObjectOpIdx] = Handle; args[HLOperandIndex::kSubscriptIndexOpIdx] = arrayIdx; CleanupIfUnused<Instruction> cbSubscript = instBuilder->CreateCall(subscriptFunc, {args}); CleanupIfUnused<Instruction> NewGEP = instBuilder->CreateInBoundsGEP(cbSubscript, idxList); HandleToResHelper H2ResHelper(*instBuilder, hlResTy, Handle, F, HLM); ReplaceUseInFunction(GEPOp, NewGEP, &F, *instBuilder, &H2ResHelper); } } } if (!cbSubscript->user_empty()) { // merge GEP use for cbSubscript. dxilutil::MergeGepUse(cbSubscript); } } return true; } void ConstructCBufferAnnotation( HLCBuffer &CB, DxilTypeSystem &dxilTypeSys, std::unordered_map<Constant *, DxilFieldAnnotation> &AnnotationMap) { Value *GV = CB.GetGlobalSymbol(); llvm::StructType *CBStructTy = dyn_cast<llvm::StructType>(GV->getType()->getPointerElementType()); if (!CBStructTy) { // For Array of ConstantBuffer. llvm::ArrayType *CBArrayTy = cast<llvm::ArrayType>(GV->getType()->getPointerElementType()); CBStructTy = cast<llvm::StructType>(CBArrayTy->getArrayElementType()); } DxilStructAnnotation *CBAnnotation = dxilTypeSys.AddStructAnnotation(CBStructTy); CBAnnotation->SetCBufferSize(CB.GetSize()); // Set fieldAnnotation for each constant var. for (const std::unique_ptr<DxilResourceBase> &C : CB.GetConstants()) { Constant *GV = C->GetGlobalSymbol(); DxilFieldAnnotation &fieldAnnotation = CBAnnotation->GetFieldAnnotation(C->GetID()); fieldAnnotation = AnnotationMap[GV]; // This is after CBuffer allocation. 
fieldAnnotation.SetCBufferOffset(C->GetLowerBound()); fieldAnnotation.SetFieldName(C->GetGlobalName()); } } void ConstructCBuffer( HLModule &HLM, llvm::Type *CBufferType, std::unordered_map<Constant *, DxilFieldAnnotation> &AnnotationMap) { DxilTypeSystem &dxilTypeSys = HLM.GetTypeSystem(); llvm::Type *HandleTy = HLM.GetOP()->GetHandleType(); for (unsigned i = 0; i < HLM.GetCBuffers().size(); i++) { HLCBuffer &CB = *static_cast<HLCBuffer *>(&(HLM.GetCBuffer(i))); if (CB.GetConstants().size() == 0) { // FIXME: Can we avoid creating a fake variable here, since this empty // cbuffer presumably can't be used? // Create Fake variable for cbuffer which is empty. llvm::GlobalVariable *pGV = new llvm::GlobalVariable( *HLM.GetModule(), CBufferType, true, llvm::GlobalValue::ExternalLinkage, nullptr, CB.GetGlobalName()); CB.SetGlobalSymbol(pGV); } else { bool bCreated = CreateCBufferVariable(CB, HLM, HandleTy); if (bCreated) ConstructCBufferAnnotation(CB, dxilTypeSys, AnnotationMap); else { // FIXME: Can we avoid creating a fake variable here, since this empty // cbuffer presumably can't be used? // Create Fake variable for cbuffer which is unused. llvm::GlobalVariable *pGV = new llvm::GlobalVariable( *HLM.GetModule(), CBufferType, true, llvm::GlobalValue::ExternalLinkage, nullptr, CB.GetGlobalName()); CB.SetGlobalSymbol(pGV); } } // Clear the constants which useless now. CB.GetConstants().clear(); } } } // namespace namespace CGHLSLMSHelper { // Align cbuffer offset in legacy mode (16 bytes per row). unsigned AlignBufferOffsetInLegacy(unsigned offset, unsigned size, unsigned scalarSizeInBytes, bool bNeedNewRow) { if (unsigned remainder = (offset & 0xf)) { // Start from new row if (remainder + size > 16 || bNeedNewRow) { return offset + 16 - remainder; } // If not, naturally align data return RoundToAlign(offset, scalarSizeInBytes); } return offset; } // Translate RayQuery constructor. From: // %call = call %"RayQuery<flags>" @<constructor>(%"RayQuery<flags>" %ptr) // To: // i32 %handle = AllocateRayQuery(i32 <IntrinsicOp::IOP_AllocateRayQuery>, i32 // %flags) %gep = GEP %"RayQuery<flags>" %ptr, 0, 0 store i32* %gep, i32 // %handle ; and replace uses of %call with %ptr void TranslateRayQueryConstructor(HLModule &HLM) { llvm::Module &M = *HLM.GetModule(); SmallVector<Function *, 4> Constructors; for (auto &F : M.functions()) { // Match templated RayQuery constructor instantiation by prefix and // signature. It should be impossible to achieve the same signature from // HLSL. 
if (!F.getName().startswith("\01??0?$RayQuery@$")) continue; llvm::Type *Ty = F.getReturnType(); if (!Ty->isPointerTy() || !dxilutil::IsHLSLRayQueryType(Ty->getPointerElementType())) continue; if (F.arg_size() != 1 || Ty != F.arg_begin()->getType()) continue; Constructors.emplace_back(&F); } for (auto pConstructorFunc : Constructors) { llvm::IntegerType *i32Ty = llvm::Type::getInt32Ty(M.getContext()); llvm::ConstantInt *i32Zero = llvm::ConstantInt::get(i32Ty, (uint64_t)0, false); llvm::FunctionType *funcTy = llvm::FunctionType::get(i32Ty, {i32Ty, i32Ty}, false); unsigned opcode = (unsigned)IntrinsicOp::IOP_AllocateRayQuery; llvm::ConstantInt *opVal = llvm::ConstantInt::get(i32Ty, opcode, false); Function *opFunc = GetOrCreateHLFunction(M, funcTy, HLOpcodeGroup::HLIntrinsic, opcode); while (!pConstructorFunc->user_empty()) { Value *V = *pConstructorFunc->user_begin(); llvm::CallInst *CI = cast<CallInst>(V); // Must be call llvm::Value *pThis = CI->getArgOperand(0); llvm::StructType *pRQType = cast<llvm::StructType>(pThis->getType()->getPointerElementType()); DxilStructAnnotation *SA = HLM.GetTypeSystem().GetStructAnnotation(pRQType); DXASSERT(SA, "otherwise, could not find type annoation for RayQuery " "specialization"); DXASSERT(SA->GetNumTemplateArgs() == 1 && SA->GetTemplateArgAnnotation(0).IsIntegral(), "otherwise, RayQuery has changed, or lacks template args"); llvm::IRBuilder<> Builder(CI); llvm::Value *rayFlags = Builder.getInt32(SA->GetTemplateArgAnnotation(0).GetIntegral()); llvm::Value *Call = Builder.CreateCall(opFunc, {opVal, rayFlags}, pThis->getName()); llvm::Value *GEP = Builder.CreateInBoundsGEP(pThis, {i32Zero, i32Zero}); Builder.CreateStore(Call, GEP); CI->replaceAllUsesWith(pThis); CI->eraseFromParent(); } pConstructorFunc->eraseFromParent(); } } void TranslateInputNodeRecordArgToHandle( hlsl::HLModule &HLM, llvm::MapVector<llvm::Argument *, NodeInputRecordProps> &NodeParams) { llvm::Module &Module = *HLM.GetModule(); Type *HandleTy = HLM.GetOP()->GetNodeRecordHandleType(); for (auto it : NodeParams) { NodeInputRecordProps Props = it.second; Argument *NodeParam = it.first; if (NodeParam->user_empty()) continue; IRBuilder<> Builder( NodeParam->getParent()->getEntryBlock().getFirstInsertionPt()); Value *Handle = CreateNodeInputRecordHandle(NodeParam, HLM, HandleTy, Builder, Props.MetadataIdx); Handle = CreateAnnotateNodeRecordHandle(HLM, Handle, Builder, Props.RecordInfo); llvm::Type *RetTy = NodeParam->getType()->getPointerElementType(); Value *Res = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToNodeRecordCast, RetTy, {Handle}, Module); Builder.CreateStore(Res, NodeParam); } } void TranslateNodeOutputParamToHandle( hlsl::HLModule &HLM, llvm::MapVector<llvm::Argument *, NodeProps> &NodeParams) { Type *HandleTy = HLM.GetOP()->GetNodeHandleType(); for (auto it : NodeParams) { NodeProps Props = it.second; Argument *NodeParam = it.first; if (NodeParam->user_empty()) continue; IRBuilder<> Builder( NodeParam->getParent()->getEntryBlock().getFirstInsertionPt()); Value *Handle = CreateNodeOutputHandle(HLM, HandleTy, Builder, Props.MetadataIdx); Handle = CreateAnnotateNodeHandle(HLM, Handle, Builder, Props.Info); Value *Res = HLM.EmitHLOperationCall(Builder, HLOpcodeGroup::HLCast, (unsigned)HLCastOpcode::HandleToNodeOutputCast, NodeParam->getType()->getPointerElementType(), {Handle}, *HLM.GetModule()); Builder.CreateStore(Res, NodeParam); } } } // namespace CGHLSLMSHelper namespace { bool BuildImmInit(Function *Ctor) { GlobalVariable *GV = 
nullptr; SmallVector<Constant *, 4> ImmList; bool allConst = true; for (inst_iterator I = inst_begin(Ctor), E = inst_end(Ctor); I != E; ++I) { if (StoreInst *SI = dyn_cast<StoreInst>(&(*I))) { Value *V = SI->getValueOperand(); if (!isa<Constant>(V) || V->getType()->isPointerTy()) { allConst = false; break; } Value *Ptr = SI->getPointerOperand(); if (GEPOperator *GepOp = dyn_cast<GEPOperator>(Ptr)) { Ptr = GepOp->getPointerOperand(); if (GlobalVariable *pGV = dyn_cast<GlobalVariable>(Ptr)) { if (GV == nullptr) GV = pGV; else { DXASSERT(GV == pGV, "else pointer mismatch"); } } } // If initializing an array, make sure init value type matches array // element type if (GV) { llvm::Type *GVElemTy = GV->getType()->getElementType(); if (llvm::ArrayType *AT = dyn_cast<llvm::ArrayType>(GVElemTy)) { llvm::Type *ElTy = AT->getElementType(); if (V->getType() != ElTy) return false; } } ImmList.emplace_back(cast<Constant>(V)); } else { if (!isa<ReturnInst>(*I)) { allConst = false; break; } } } if (!allConst) return false; if (!GV) return false; llvm::Type *Ty = GV->getType()->getElementType(); llvm::ArrayType *AT = dyn_cast<llvm::ArrayType>(Ty); // TODO: support other types. if (!AT) return false; if (ImmList.size() != AT->getNumElements()) return false; Constant *Init = llvm::ConstantArray::get(AT, ImmList); GV->setInitializer(Init); return true; } void CallCtorFunctionsAtInsertPt(llvm::Module &M, llvm::SmallVector<llvm::Function *, 2> &Ctors, Instruction *InsertPt) { IRBuilder<> Builder(InsertPt); for (Function *Ctor : Ctors) { Builder.CreateCall(Ctor); } } void CollectFunctionCallers(Function *F, DenseSet<Function *> &Callers) { // worklist size max = call depth SmallVector<Function *, 8> worklist; worklist.push_back(F); // add callers while (worklist.size()) { Function *F = worklist.pop_back_val(); for (User *U : F->users()) { if (CallInst *CI = dyn_cast<CallInst>(U)) { Function *Caller = CI->getParent()->getParent(); if (Callers.insert(Caller).second == true) { // new caller worklist.push_back(Caller); } } } } } DenseSet<Function *> CollectExternalFunctionCallers(Module &M) { DenseSet<Function *> Callers; for (Function &F : M) { if (!F.isIntrinsic() && F.isDeclaration() && hlsl::GetHLOpcodeGroup(&F) == hlsl::HLOpcodeGroup::NotHL) { CollectFunctionCallers(&F, Callers); } } return Callers; } // If static initializers contain calls to external functions, this can // introduce inter-module init function ordering dependencies. Some // dependencies may even introduce contradictions. Creating and implementing an // intuitive standard approach to solve this is likely quite difficult. Better // to disallow the ambiguous and unlikely case for now. 
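// For example (illustrative HLSL, names hypothetical):
//   float FetchSeed();                  // external: declaration only
//   static float g_seed = FetchSeed(); // initializer calls it: disallowed
// CollectCtorFunctions below reports this case through
// ReportInitStaticGlobalWithExternalFunction instead of emitting the call.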
bool IsValidCtorFunction(Function *F, DenseSet<Function *> &Callers) {
  return Callers.count(F) == 0;
}

void ReportInitStaticGlobalWithExternalFunction(
    clang::CodeGen::CodeGenModule &CGM, StringRef name) {
  clang::DiagnosticsEngine &Diags = CGM.getDiags();
  unsigned DiagID = Diags.getCustomDiagID(
      clang::DiagnosticsEngine::Error, "Initializer for static global %0 makes "
                                       "disallowed call to external function.");
  std::string escaped;
  llvm::raw_string_ostream os(escaped);
  size_t end = name.find_first_of('@');
  if (end != StringRef::npos)
    name = name.substr(0, end);
  StringRef prefix = "\01??__E";
  if (name.startswith(prefix))
    name = name.substr(prefix.size());
  dxilutil::PrintEscapedString(name, os);
  Diags.Report(DiagID) << os.str();
}

} // namespace

namespace CGHLSLMSHelper {

void CollectCtorFunctions(llvm::Module &M, llvm::StringRef globalName,
                          llvm::SmallVector<llvm::Function *, 2> &Ctors,
                          clang::CodeGen::CodeGenModule &CGM) {
  // Add a global ctor call to the entry function.
  GlobalVariable *GV = M.getGlobalVariable(globalName);
  if (!GV)
    return;
  ConstantArray *CA = dyn_cast<ConstantArray>(GV->getInitializer());
  if (!CA)
    return;

  DenseSet<Function *> Callers = CollectExternalFunctionCallers(M);

  bool allEvaluated = true;
  for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
    if (isa<ConstantAggregateZero>(*i))
      continue;
    ConstantStruct *CS = cast<ConstantStruct>(*i);
    if (isa<ConstantPointerNull>(CS->getOperand(1)))
      continue;
    // Must have a function or null ptr.
    if (!isa<Function>(CS->getOperand(1)))
      continue;
    Function *Ctor = cast<Function>(CS->getOperand(1));
    DXASSERT(Ctor->getReturnType()->isVoidTy() && Ctor->arg_size() == 0,
             "function type must be void (void)");

    for (inst_iterator I = inst_begin(Ctor), E = inst_end(Ctor); I != E; ++I) {
      if (CallInst *CI = dyn_cast<CallInst>(&(*I))) {
        Function *F = CI->getCalledFunction();
        // Try to build an immediate initializer.
        // If that fails, add a global ctor call to the entry function.
        if (BuildImmInit(F) == false) {
          allEvaluated = false;
          if (IsValidCtorFunction(F, Callers)) {
            Ctors.emplace_back(F);
          } else {
            ReportInitStaticGlobalWithExternalFunction(CGM, F->getName());
          }
        }
      } else {
        DXASSERT(isa<ReturnInst>(&(*I)),
                 "else invalid Global constructor function");
      }
    }
  }

  // If all global constructors were replaced with immediate initializers,
  // just get rid of the GV.
  if (allEvaluated) {
    GV->eraseFromParent();
  }
}

void ProcessCtorFunctions(llvm::Module &M,
                          llvm::SmallVector<llvm::Function *, 2> &Ctors,
                          llvm::Function *Entry,
                          llvm::Function *PatchConstantFn) {
  if (PatchConstantFn) {
    // Static globals are independent between the entry function and the patch
    // constant function: updating a static global in the entry function does
    // not affect its value in the patch constant function. So call the ctors
    // for the patch constant function too.
    CallCtorFunctionsAtInsertPt(
        M, Ctors, PatchConstantFn->getEntryBlock().getFirstInsertionPt());
    IRBuilder<> B(PatchConstantFn->getEntryBlock().getFirstInsertionPt());
    // For static globals which have a constant initial value, copy it at the
    // beginning of the patch constant function to avoid using a value already
    // updated by the entry function.
for (GlobalVariable &GV : M.globals()) { if (GV.isConstant()) continue; if (!GV.hasInitializer()) continue; if (GV.getName() == "llvm.global_ctors") continue; Value *V = GV.getInitializer(); if (isa<UndefValue>(V)) continue; B.CreateStore(V, &GV); } } CallCtorFunctionsAtInsertPt(M, Ctors, Entry->getEntryBlock().getFirstInsertionPt()); } void FinishCBuffer(HLModule &HLM, llvm::Type *CBufferType, std::unordered_map<Constant *, DxilFieldAnnotation> &constVarAnnotationMap) { // Allocate constant buffers. AllocateDxilConstantBuffers(HLM, constVarAnnotationMap); // TODO: create temp variable for constant which has store use. // Create Global variable and type annotation for each CBuffer. ConstructCBuffer(HLM, CBufferType, constVarAnnotationMap); } void AddRegBindingsForResourceInConstantBuffer( HLModule &HLM, llvm::DenseMap<llvm::Constant *, llvm::SmallVector<std::pair<DXIL::ResourceClass, unsigned>, 1>> &constantRegBindingMap) { for (unsigned i = 0; i < HLM.GetCBuffers().size(); i++) { HLCBuffer &CB = *static_cast<HLCBuffer *>(&(HLM.GetCBuffer(i))); auto &Constants = CB.GetConstants(); for (unsigned j = 0; j < Constants.size(); j++) { const std::unique_ptr<DxilResourceBase> &C = Constants[j]; Constant *CGV = C->GetGlobalSymbol(); auto &regBindings = constantRegBindingMap[CGV]; if (regBindings.empty()) continue; unsigned Srv = UINT_MAX; unsigned Uav = UINT_MAX; unsigned Sampler = UINT_MAX; for (auto it : regBindings) { unsigned RegNum = it.second; switch (it.first) { case DXIL::ResourceClass::SRV: Srv = RegNum; break; case DXIL::ResourceClass::UAV: Uav = RegNum; break; case DXIL::ResourceClass::Sampler: Sampler = RegNum; break; default: DXASSERT(0, "invalid resource class"); break; } } HLM.AddRegBinding(CB.GetID(), j, Srv, Uav, Sampler); } } } // extension codegen. void ExtensionCodeGen(HLModule &HLM, clang::CodeGen::CodeGenModule &CGM) { auto &Diags = CGM.getDiags(); // Add root signature from a #define. Overrides root signature in function // attribute. { using Status = HLSLExtensionsCodegenHelper::CustomRootSignature::Status; HLSLExtensionsCodegenHelper::CustomRootSignature customRootSig; HLSLExtensionsCodegenHelper::CustomRootSignature::Status status = CGM.getCodeGenOpts().HLSLExtensionsCodegen->GetCustomRootSignature( &customRootSig); if (status == Status::FOUND) { DxilRootSignatureVersion rootSigVer; // set root signature version. 
if (CGM.getLangOpts().RootSigMinor == 0) { rootSigVer = hlsl::DxilRootSignatureVersion::Version_1_0; } else { DXASSERT(CGM.getLangOpts().RootSigMinor == 1, "else CGMSHLSLRuntime Constructor needs to be updated"); rootSigVer = hlsl::DxilRootSignatureVersion::Version_1_1; } RootSignatureHandle RootSigHandle; CompileRootSignature( customRootSig.RootSignature, Diags, clang::SourceLocation::getFromRawEncoding( customRootSig.EncodedSourceLocation), rootSigVer, DxilRootSignatureCompilationFlags::GlobalRootSignature, &RootSigHandle); if (!RootSigHandle.IsEmpty()) { RootSigHandle.EnsureSerializedAvailable(); HLM.SetSerializedRootSignature(RootSigHandle.GetSerializedBytes(), RootSigHandle.GetSerializedSize()); } } } } } // namespace CGHLSLMSHelper namespace { void ReportDisallowedTypeInExportParam(clang::CodeGen ::CodeGenModule &CGM, StringRef name) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Error, "Exported function %0 must not contain a " "resource in parameter or return type."); std::string escaped; llvm::raw_string_ostream os(escaped); dxilutil::PrintEscapedString(name, os); Diags.Report(DiagID) << os.str(); } } // namespace namespace CGHLSLMSHelper { void FinishClipPlane(HLModule &HLM, std::vector<Function *> &clipPlaneFuncList, std::unordered_map<Value *, DebugLoc> &debugInfoMap, clang::CodeGen::CodeGenModule &CGM) { bool bDebugInfo = CGM.getCodeGenOpts().getDebugInfo() == clang::CodeGenOptions::FullDebugInfo; Module &M = *HLM.GetModule(); for (Function *F : clipPlaneFuncList) { DxilFunctionProps &props = HLM.GetDxilFunctionProps(F); IRBuilder<> Builder(F->getEntryBlock().getFirstInsertionPt()); for (unsigned i = 0; i < DXIL::kNumClipPlanes; i++) { Value *clipPlane = props.ShaderProps.VS.clipPlanes[i]; if (!clipPlane) continue; if (bDebugInfo) { Builder.SetCurrentDebugLocation(debugInfoMap[clipPlane]); } llvm::Type *Ty = clipPlane->getType()->getPointerElementType(); // Constant *zeroInit = ConstantFP::get(Ty, 0); GlobalVariable *GV = new llvm::GlobalVariable( M, Ty, /*IsConstant*/ false, // constant false to store. llvm::GlobalValue::ExternalLinkage, /*InitVal*/ nullptr, Twine("SV_ClipPlane") + Twine(i)); Value *initVal = Builder.CreateLoad(clipPlane); Builder.CreateStore(initVal, GV); props.ShaderProps.VS.clipPlanes[i] = GV; } } } } // namespace CGHLSLMSHelper namespace { void LowerExportFunctions(HLModule &HLM, clang::CodeGen::CodeGenModule &CGM, dxilutil::ExportMap &exportMap, StringMap<EntryFunctionInfo> &entryFunctionMap) { bool bIsLib = HLM.GetShaderModel()->IsLib(); Module &M = *HLM.GetModule(); if (bIsLib && !exportMap.empty()) { for (auto &it : entryFunctionMap) { if (HLM.HasDxilFunctionProps(it.second.Func)) { const DxilFunctionProps &props = HLM.GetDxilFunctionProps(it.second.Func); if (props.IsHS()) exportMap.RegisterExportedFunction( props.ShaderProps.HS.patchConstantFunc); } } } if (bIsLib && !exportMap.empty()) { exportMap.BeginProcessing(); for (Function &f : M.functions()) { if (f.isDeclaration() || f.isIntrinsic() || GetHLOpcodeGroup(&f) != HLOpcodeGroup::NotHL) continue; exportMap.ProcessFunction(&f, true); } // TODO: add subobject export names here. 
if (!exportMap.EndProcessing()) { for (auto &name : exportMap.GetNameCollisions()) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID( clang::DiagnosticsEngine::Error, "Export name collides with another export: %0"); std::string escaped; llvm::raw_string_ostream os(escaped); dxilutil::PrintEscapedString(name, os); Diags.Report(DiagID) << os.str(); } for (auto &name : exportMap.GetUnusedExports()) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID(clang::DiagnosticsEngine::Error, "Could not find target for export: %0"); std::string escaped; llvm::raw_string_ostream os(escaped); dxilutil::PrintEscapedString(name, os); Diags.Report(DiagID) << os.str(); } } } for (auto &it : exportMap.GetFunctionRenames()) { Function *F = it.first; auto &renames = it.second; if (renames.empty()) continue; // Rename the original, if necessary, then clone the rest if (renames.find(F->getName()) == renames.end()) F->setName(*renames.begin()); for (auto &itName : renames) { if (F->getName() != itName) { Function *pClone = CloneFunction(F, itName, &M, HLM.GetTypeSystem(), HLM.GetTypeSystem()); // add DxilFunctionProps if entry if (HLM.HasDxilFunctionProps(F)) { DxilFunctionProps &props = HLM.GetDxilFunctionProps(F); auto newProps = llvm::make_unique<DxilFunctionProps>(props); HLM.AddDxilFunctionProps(pClone, newProps); } } } } } void CheckResourceParameters(HLModule &HLM, clang::CodeGen::CodeGenModule &CGM) { Module &M = *HLM.GetModule(); for (Function &f : M.functions()) { // Skip llvm intrinsics, non-external linkage, entry/patch constant func, // and HL intrinsics if (!f.isIntrinsic() && f.getLinkage() == GlobalValue::LinkageTypes::ExternalLinkage && !HLM.HasDxilFunctionProps(&f) && !HLM.IsPatchConstantShader(&f) && GetHLOpcodeGroup(&f) == HLOpcodeGroup::NotHL) { // Verify no resources in param/return types if (dxilutil::ContainsHLSLObjectType(f.getReturnType())) { ReportDisallowedTypeInExportParam(CGM, f.getName()); continue; } for (auto &Arg : f.args()) { if (dxilutil::ContainsHLSLObjectType(Arg.getType())) { ReportDisallowedTypeInExportParam(CGM, f.getName()); break; } } } } } } // namespace namespace CGHLSLMSHelper { void UpdateLinkage(HLModule &HLM, clang::CodeGen::CodeGenModule &CGM, dxilutil::ExportMap &exportMap, StringMap<EntryFunctionInfo> &entryFunctionMap, StringMap<PatchConstantInfo> &patchConstantFunctionMap) { bool bIsLib = HLM.GetShaderModel()->IsLib(); Module &M = *HLM.GetModule(); // Pin entry point and constant buffers, mark everything else internal. for (Function &f : M.functions()) { if (!bIsLib) { if (&f == HLM.GetEntryFunction() || IsPatchConstantFunction(&f, patchConstantFunctionMap) || f.isDeclaration()) { if (f.isDeclaration() && !f.isIntrinsic() && GetHLOpcodeGroup(&f) == HLOpcodeGroup::NotHL) { clang::DiagnosticsEngine &Diags = CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID( clang::DiagnosticsEngine::Error, "External function used in non-library profile: %0"); std::string escaped; llvm::raw_string_ostream os(escaped); dxilutil::PrintEscapedString(f.getName(), os); Diags.Report(DiagID) << os.str(); return; } f.setLinkage(GlobalValue::LinkageTypes::ExternalLinkage); } else { f.setLinkage(GlobalValue::LinkageTypes::InternalLinkage); } } // Skip no inline functions, or all functions if we're not in the // AlwaysInline mode. 
if (f.hasFnAttribute(llvm::Attribute::NoInline) || (CGM.getCodeGenOpts().getInlining() != clang::CodeGenOptions::OnlyAlwaysInlining && bIsLib)) continue; // Always inline for used functions. if (!f.user_empty() && !f.isDeclaration()) f.addFnAttr(llvm::Attribute::AlwaysInline); } LowerExportFunctions(HLM, CGM, exportMap, entryFunctionMap); if (CGM.getCodeGenOpts().ExportShadersOnly) { for (Function &f : M.functions()) { // Skip declarations, intrinsics, shaders, and non-external linkage if (f.isDeclaration() || f.isIntrinsic() || GetHLOpcodeGroup(&f) != HLOpcodeGroup::NotHL || HLM.HasDxilFunctionProps(&f) || HLM.IsPatchConstantShader(&f) || f.getLinkage() != GlobalValue::LinkageTypes::ExternalLinkage) continue; // Mark non-shader user functions as InternalLinkage f.setLinkage(GlobalValue::LinkageTypes::InternalLinkage); } } // Now iterate hull shaders and make sure their corresponding patch constant // functions are marked ExternalLinkage: for (Function &f : M.functions()) { if (f.isDeclaration() || f.isIntrinsic() || GetHLOpcodeGroup(&f) != HLOpcodeGroup::NotHL || f.getLinkage() != GlobalValue::LinkageTypes::ExternalLinkage || !HLM.HasDxilFunctionProps(&f)) continue; DxilFunctionProps &props = HLM.GetDxilFunctionProps(&f); if (!props.IsHS()) continue; Function *PCFunc = props.ShaderProps.HS.patchConstantFunc; if (PCFunc->getLinkage() != GlobalValue::LinkageTypes::ExternalLinkage) PCFunc->setLinkage(GlobalValue::LinkageTypes::ExternalLinkage); } // Disallow resource arguments in (non-entry) function exports // unless offline linking target. if (bIsLib && HLM.GetShaderModel()->GetMinor() != ShaderModel::kOfflineMinor) { CheckResourceParameters(HLM, CGM); } } void FinishEntries( HLModule &HLM, const EntryFunctionInfo &Entry, clang::CodeGen::CodeGenModule &CGM, StringMap<EntryFunctionInfo> &entryFunctionMap, std::unordered_map<Function *, const clang::HLSLPatchConstantFuncAttr *> &HSEntryPatchConstantFuncAttr, StringMap<PatchConstantInfo> &patchConstantFunctionMap, std::unordered_map<Function *, std::unique_ptr<DxilFunctionProps>> &patchConstantFunctionPropsMap) { bool bIsLib = HLM.GetShaderModel()->IsLib(); // Library don't have entry. if (!bIsLib) { SetEntryFunction(HLM, Entry.Func, CGM); // If at this point we haven't determined the entry function it's an error. if (HLM.GetEntryFunction() == nullptr) { assert(CGM.getDiags().hasErrorOccurred() && "else SetEntryFunction should have reported this condition"); return; } // In back-compat mode (with /Gec flag) create a static global for each // const global to allow writing to it. 
// TODO: Verfiy the behavior of static globals in hull shader if (CGM.getLangOpts().EnableDX9CompatMode && CGM.getLangOpts().HLSLVersion <= hlsl::LangStd::v2016) CreateWriteEnabledStaticGlobals(HLM.GetModule(), HLM.GetEntryFunction()); if (HLM.GetShaderModel()->IsHS()) { SetPatchConstantFunction(Entry, HSEntryPatchConstantFuncAttr, patchConstantFunctionMap, patchConstantFunctionPropsMap, HLM, CGM); } } else { for (auto &it : entryFunctionMap) { // skip clone if RT entry if (HLM.GetDxilFunctionProps(it.second.Func).IsRay()) continue; // TODO: change flattened function names to dx.entry.<name>: // std::string entryName = (Twine(dxilutil::EntryPrefix) + // it.getKey()).str(); CloneShaderEntry(it.second.Func, it.getKey(), HLM); auto AttrIter = HSEntryPatchConstantFuncAttr.find(it.second.Func); if (AttrIter != HSEntryPatchConstantFuncAttr.end()) { SetPatchConstantFunctionWithAttr( it.second, AttrIter->second, patchConstantFunctionMap, patchConstantFunctionPropsMap, HLM, CGM); } } } } } // namespace CGHLSLMSHelper namespace CGHLSLMSHelper { void FinishIntrinsics( HLModule &HLM, std::vector<std::pair<Function *, unsigned>> &intrinsicMap, DxilObjectProperties &objectProperties) { // Lower getResourceHeap before AddOpcodeParamForIntrinsics to skip automatic // lower for getResourceFromHeap. LowerGetResourceFromHeap(HLM, intrinsicMap); // Lower bitcast use of CBV into cbSubscript. LowerDynamicCBVUseToHandle(HLM, objectProperties); // translate opcode into parameter for intrinsic functions // Do this before CloneShaderEntry and TranslateRayQueryConstructor to avoid // update valToResPropertiesMap for cloned inst. AddOpcodeParamForIntrinsics(HLM, intrinsicMap, objectProperties); } // Add the dx.break temporary intrinsic and create Call Instructions // to it for each branch that requires the artificial conditional. void AddDxBreak(Module &M, const SmallVector<llvm::BranchInst *, 16> &DxBreaks) { if (DxBreaks.empty()) return; // Collect functions that make use of any wave operations // Only they will need the dx.break condition added SmallPtrSet<Function *, 16> WaveUsers; for (Function &F : M.functions()) { HLOpcodeGroup opgroup = hlsl::GetHLOpcodeGroup(&F); if (F.isDeclaration() && IsHLWaveSensitive(&F) && (opgroup == HLOpcodeGroup::HLIntrinsic || opgroup == HLOpcodeGroup::HLExtIntrinsic)) { for (User *U : F.users()) { CallInst *CI = cast<CallInst>(U); WaveUsers.insert(CI->getParent()->getParent()); } } } // If there are no wave users, not even the function declaration is needed if (WaveUsers.empty()) return; // Create the dx.break function FunctionType *FT = llvm::FunctionType::get(llvm::Type::getInt1Ty(M.getContext()), false); Function *func = cast<llvm::Function>(M.getOrInsertFunction(DXIL::kDxBreakFuncName, FT)); func->addFnAttr(Attribute::AttrKind::NoUnwind); // For all break branches recorded previously, if the function they are in // makes any use of a wave op, it may need to be artificially conditional. // Make it so now. The CleanupDxBreak pass will remove those that aren't // needed when more is known. 
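  // Sketch of the rewrite on one recorded branch (illustrative IR, assuming
  // the enclosing function uses a wave op):
  //   before:  br i1 true, label %loop.end, label %loop.continue
  //   after:   %cond = call i1 @dx.break()
  //            br i1 %cond, label %loop.end, label %loop.continue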
  for (llvm::BranchInst *BI : DxBreaks) {
    if (WaveUsers.count(BI->getParent()->getParent())) {
      CallInst *Call = CallInst::Create(FT, func, ArrayRef<Value *>(), "", BI);
      BI->setCondition(Call);
      if (!BI->getMetadata(DXIL::kDxBreakMDName)) {
        BI->setMetadata(DXIL::kDxBreakMDName,
                        llvm::MDNode::get(BI->getContext(), {}));
      }
    }
  }
}
} // namespace CGHLSLMSHelper

namespace CGHLSLMSHelper {

ScopeInfo::ScopeInfo(Function *F, clang::SourceLocation loc)
    : maxRetLevel(0), bAllReturnsInIf(true), sourceLoc(loc) {
  Scope FuncScope;
  FuncScope.kind = Scope::ScopeKind::FunctionScope;
  FuncScope.EndScopeBB = nullptr;
  FuncScope.bWholeScopeReturned = false;
  // Make it 0 to avoid a check when getting the parent.
  // All loops over scopes should check kind != FunctionScope.
  FuncScope.parentScopeIndex = 0;
  scopes.emplace_back(FuncScope);
  scopeStack.emplace_back(0);
}

// When all returns are inside ifs which are not nested, the flow is still
// structurized even when there is more than one return.
bool ScopeInfo::CanSkipStructurize() {
  return (bAllReturnsInIf && maxRetLevel < 2) || rets.size() < 2;
}

void ScopeInfo::AddScope(Scope::ScopeKind k, BasicBlock *endScopeBB) {
  Scope Scope;
  Scope.kind = k;
  Scope.bWholeScopeReturned = false;
  Scope.EndScopeBB = endScopeBB;
  Scope.parentScopeIndex = scopeStack.back();
  scopeStack.emplace_back(scopes.size());
  scopes.emplace_back(Scope);
}

void ScopeInfo::AddIf(BasicBlock *endIfBB) {
  AddScope(Scope::ScopeKind::IfScope, endIfBB);
}

void ScopeInfo::AddSwitch(BasicBlock *endSwitch) {
  AddScope(Scope::ScopeKind::SwitchScope, endSwitch);
}

void ScopeInfo::AddLoop(BasicBlock *loopContinue, BasicBlock *endLoop) {
  AddScope(Scope::ScopeKind::LoopScope, endLoop);
  scopes.back().loopContinueBB = loopContinue;
}

void ScopeInfo::AddRet(BasicBlock *bbWithRet) {
  Scope RetScope;
  RetScope.kind = Scope::ScopeKind::ReturnScope;
  RetScope.EndScopeBB = bbWithRet;
  RetScope.parentScopeIndex = scopeStack.back();
  // - 1 for the function scope which is at scopeStack[0].
  unsigned retLevel = scopeStack.size() - 1;
  // Save the max nested level for returns.
  maxRetLevel = std::max<unsigned>(maxRetLevel, retLevel);
  bool bGotLoopOrSwitch = false;
  for (auto it = scopeStack.rbegin(); it != scopeStack.rend(); it++) {
    unsigned idx = *it;
    Scope &S = scopes[idx];
    switch (S.kind) {
    default:
      break;
    case Scope::ScopeKind::LoopScope:
    case Scope::ScopeKind::SwitchScope:
      bGotLoopOrSwitch = true;
      // A return inside a loop or switch can just break.
      RetScope.parentScopeIndex = idx;
      break;
    }
    if (bGotLoopOrSwitch)
      break;
  }
  bAllReturnsInIf &= !bGotLoopOrSwitch;
  // A return finishes the current scope.
  RetScope.bWholeScopeReturned = true;
  // Save the return scope to rets.
  rets.emplace_back(scopes.size());
  scopes.emplace_back(RetScope);
  // No need to push RetScope onto the stack since it cannot nest other scopes.
}

Scope &ScopeInfo::EndScope(bool bScopeFinishedWithRet) {
  unsigned idx = scopeStack.pop_back_val();
  Scope &Scope = GetScope(idx);
  // If the whole stmt finished with a return and the end-scope BB is unused
  // (nothing branches to it), then the whole scope is returned.
  Scope.bWholeScopeReturned =
      bScopeFinishedWithRet && Scope.EndScopeBB->user_empty();
  return Scope;
}

Scope &ScopeInfo::GetScope(unsigned i) { return scopes[i]; }

void ScopeInfo::LegalizeWholeReturnedScope() {
  // Legalize scopes whose whole scope returned.
  // When a whole scope is returned, its endScopeBB will be deleted in codegen,
  // so update it here to the parent scope's end scope.
  // Since the scopes are in order, this automatically resolves to the final
  // target: A->B->C will just get A->C.
  for (auto &S : scopes) {
    if (S.bWholeScopeReturned && S.kind != Scope::ScopeKind::ReturnScope) {
      S.EndScopeBB = scopes[S.parentScopeIndex].EndScopeBB;
    }
  }
}

void Scope::dump() {
  auto &OS = llvm::dbgs();
  switch (kind) {
  case ScopeKind::IfScope:
    OS << "If\n";
    break;
  case ScopeKind::LoopScope:
    OS << "Loop\n";
    break;
  case ScopeKind::ReturnScope:
    OS << "Return\n";
    break;
  case ScopeKind::SwitchScope:
    OS << "Switch\n";
    break;
  case ScopeKind::FunctionScope:
    OS << "Function\n";
    break;
  }
  if (kind == ScopeKind::FunctionScope)
    return;
  OS << "parent:" << parentScopeIndex << "\n";
  if (bWholeScopeReturned)
    OS << "whole scope returned\n";
  OS << "endBB:";
  EndScopeBB->printAsOperand(OS);
  OS << "\n";
}

void ScopeInfo::dump() {
  auto &OS = llvm::dbgs();
  for (unsigned i = 0; i < scopes.size(); ++i) {
    Scope &scope = scopes[i];
    OS << "Scope:" << i << "\n";
    scope.dump();
  }
}
} // namespace CGHLSLMSHelper

namespace {
void updateEndScope(
    ScopeInfo &ScopeInfo,
    DenseMap<BasicBlock *, SmallVector<unsigned, 2>> &EndBBToScopeIndexMap,
    BasicBlock *oldEndScope, BasicBlock *newEndScope) {
  auto it = EndBBToScopeIndexMap.find(oldEndScope);
  DXASSERT(it != EndBBToScopeIndexMap.end(),
           "fail to find endScopeBB in EndBBToScopeIndexMap");
  SmallVector<unsigned, 2> &scopeList = it->second;
  // Update even when the endBB is not shared with another scope.
  // The endBB might be used by nested returns like the return 1 in
  //   if (b == 77)
  //     return 0;
  //   else if (b != 3)
  //     return 1;
  for (unsigned i : scopeList) {
    Scope &S = ScopeInfo.GetScope(i);
    // Don't update a return endBB, because that is the block that has the
    // return branch.
    if (S.kind != Scope::ScopeKind::ReturnScope)
      S.EndScopeBB = newEndScope;
  }
  EndBBToScopeIndexMap[newEndScope] = scopeList;
}

// Init the return value with undef to make sure it will not live through
// loops inside callers.
// Because returns are structurized, the flow is controlled by bIsReturned.
// The semantics are the same as with multiple returns, but without knowledge
// of bIsReturned, some paths of the structurized flow would leave the return
// value uninitialized. When the function is called inside a loop, the return
// value would then live across the loop after inlining.
void InitRetValue(BasicBlock *exitBB) {
  Value *RetValPtr = nullptr;
  if (ReturnInst *RI = dyn_cast<ReturnInst>(exitBB->getTerminator())) {
    if (Value *RetV = RI->getReturnValue()) {
      if (LoadInst *LI = dyn_cast<LoadInst>(RetV)) {
        RetValPtr = LI->getPointerOperand();
      }
    }
  }
  if (!RetValPtr)
    return;
  if (AllocaInst *RetVAlloc = dyn_cast<AllocaInst>(RetValPtr)) {
    IRBuilder<> B(RetVAlloc->getNextNode());
    Type *Ty = RetVAlloc->getAllocatedType();
    Value *Init = UndefValue::get(Ty);
    if (Ty->isAggregateType()) {
      // TODO: support aggregate types and out parameters.
      // Skipping here will cause undef on a phi which the incoming path
      // should never hit.
    } else {
      B.CreateStore(Init, RetVAlloc);
    }
  }
}

static void ChangePredBranch(BasicBlock *BB, BasicBlock *NewBB) {
  for (auto predIt = pred_begin(BB); predIt != pred_end(BB);) {
    BasicBlock *Pred = *(predIt++);
    TerminatorInst *TI = Pred->getTerminator();
    TI->replaceUsesOfWith(BB, NewBB);
  }
}

// For functions with multiple returns like
// float foo(float a, float b, float c) {
//   float r = c;
//   if (a > 0) {
//     if (b > 0) {
//       return -1;
//     }
//     ***
//   }
//   ...
//   return r;
// }
// transform into
// float foo(float a, float b, float c) {
//   bool bRet = false;
//   float retV;
//   float r = c;
//   if (a > 0) {
//     if (b > 0) {
//       bRet = true;
//       retV = -1;
//     }
//     if (!bRet) {
//       ***
//     }
//   }
//   if (!bRet) {
//     ...
//     retV = r;
//   }
//   return retV;
// }
void StructurizeMultiRetFunction(Function *F, clang::DiagnosticsEngine &Diags,
                                 ScopeInfo &ScopeInfo, bool bWaveEnabledStage,
                                 SmallVector<BranchInst *, 16> &DxBreaks) {
  if (ScopeInfo.CanSkipStructurize())
    return;
  // If there are cleanup blocks generated for lifetime markers, do
  // not structurize returns. The scope info recorded is no longer correct.
  if (ScopeInfo.HasCleanupBlocks()) {
    Diags.Report(
        ScopeInfo.GetSourceLocation(),
        clang::diag::warn_hlsl_structurize_exits_lifetime_markers_conflict)
        << F->getName();
    return;
  }

  // Get bbWithRets.
  auto &rets = ScopeInfo.GetRetScopes();

  IRBuilder<> B(F->getEntryBlock().begin());
  Scope &FunctionScope = ScopeInfo.GetScope(0);

  Type *boolTy = Type::getInt1Ty(F->getContext());
  Constant *cTrue = ConstantInt::get(boolTy, 1);
  Constant *cFalse = ConstantInt::get(boolTy, 0);
  // bool bIsReturned = false;
  AllocaInst *bIsReturned = B.CreateAlloca(boolTy, nullptr, "bReturned");
  B.CreateStore(cFalse, bIsReturned);

  Scope &RetScope = ScopeInfo.GetScope(rets[0]);
  BasicBlock *exitBB = RetScope.EndScopeBB->getTerminator()->getSuccessor(0);
  FunctionScope.EndScopeBB = exitBB;
  // Find the alloca for the return value and init it to avoid undef after the
  // code is guarded with bIsReturned.
  InitRetValue(exitBB);

  ScopeInfo.LegalizeWholeReturnedScope();
  // Map from endScopeBB to scope index.
  // When 2 scopes share the same endScopeBB, the endScopeBB needs updating
  // after structurizing.
  DenseMap<BasicBlock *, SmallVector<unsigned, 2>> EndBBToScopeIndexMap;
  auto &scopes = ScopeInfo.GetScopes();
  for (unsigned i = 0; i < scopes.size(); i++) {
    Scope &S = scopes[i];
    EndBBToScopeIndexMap[S.EndScopeBB].emplace_back(i);
  }

  DenseSet<unsigned> guardedSet;
  for (auto it = rets.begin(); it != rets.end(); it++) {
    unsigned scopeIndex = *it;
    Scope *pCurScope = &ScopeInfo.GetScope(scopeIndex);
    Scope *pRetParentScope = &ScopeInfo.GetScope(pCurScope->parentScopeIndex);
    // Skip returns not in nested control flow.
    if (pRetParentScope->kind == Scope::ScopeKind::FunctionScope)
      continue;

    do {
      BasicBlock *BB = pCurScope->EndScopeBB;
      // Exit when the scope is already processed.
      if (guardedSet.count(scopeIndex))
        break;
      guardedSet.insert(scopeIndex);

      Scope *pParentScope = &ScopeInfo.GetScope(pCurScope->parentScopeIndex);
      BasicBlock *EndBB = pParentScope->EndScopeBB;

      // When the whole scope returned, just branch to the end scope of the
      // parent.
      if (pCurScope->bWholeScopeReturned) {
        // For a return, just branch to the end scope of the parent.
        if (pCurScope->kind == Scope::ScopeKind::ReturnScope) {
          BasicBlock *retBB = pCurScope->EndScopeBB;
          TerminatorInst *retBr = retBB->getTerminator();
          IRBuilder<> B(retBr);
          // Set bReturned to true.
          B.CreateStore(cTrue, bIsReturned);
          if (bWaveEnabledStage &&
              pParentScope->kind == Scope::ScopeKind::LoopScope) {
            BranchInst *BI =
                B.CreateCondBr(cTrue, EndBB, pParentScope->loopContinueBB);
            DxBreaks.emplace_back(BI);
            retBr->eraseFromParent();
          } else {
            // Update the branch target.
            retBr->setSuccessor(0, EndBB);
          }
        }
        // For other scopes, do nothing: since the whole scope returned, flow
        // naturally reaches the parent scope.
      } else {
        // Only part of the scope returned.
        // Use bIsReturned to guard the part which has not returned.
        switch (pParentScope->kind) {
        case Scope::ScopeKind::ReturnScope:
          DXASSERT(0, "return scope must get whole scope returned.");
          break;
        case Scope::ScopeKind::FunctionScope:
        case Scope::ScopeKind::IfScope: {
          // Inside an if:
          // if (!bReturned) {
          //   rest of if or else.
          // }
          BasicBlock *CmpBB = BasicBlock::Create(BB->getContext(),
                                                 "bReturned.cmp.false", F, BB);
          // Make BB preds go to CmpBB. Do this instead of replaceAllUsesWith
          // because BB could have PHI nodes that reference it.
          ChangePredBranch(BB, CmpBB);
          // Update endScopeBB to CmpBB for scopes which have BB as end scope.
          updateEndScope(ScopeInfo, EndBBToScopeIndexMap, BB, CmpBB);
          IRBuilder<> B(CmpBB);
          Value *isReturned = B.CreateLoad(bIsReturned, "bReturned.load");
          Value *returned = B.CreateICmpNE(isReturned, cFalse, "bReturned.not");
          // If already returned, skip the rest of the scope.
          B.CreateCondBr(returned, EndBB, BB);
        } break;
        default: {
          // Inside a switch or loop:
          // if (bReturned) {
          //   br endOfScope.
          // }
          BasicBlock *CmpBB = BasicBlock::Create(BB->getContext(),
                                                 "bReturned.cmp.true", F, BB);
          BasicBlock *BreakBB =
              BasicBlock::Create(BB->getContext(), "bReturned.break", F, BB);
          ChangePredBranch(BB, CmpBB);
          // Update endScopeBB to CmpBB for scopes which have BB as end scope.
          updateEndScope(ScopeInfo, EndBBToScopeIndexMap, BB, CmpBB);
          IRBuilder<> B(CmpBB);
          Value *isReturned = B.CreateLoad(bIsReturned, "bReturned.load");
          isReturned = B.CreateICmpEQ(isReturned, cTrue, "bReturned.true");
          B.CreateCondBr(isReturned, BreakBB, BB);
          B.SetInsertPoint(BreakBB);
          if (bWaveEnabledStage &&
              pParentScope->kind == Scope::ScopeKind::LoopScope) {
            BranchInst *BI =
                B.CreateCondBr(cTrue, EndBB, pParentScope->loopContinueBB);
            DxBreaks.emplace_back(BI);
          } else {
            B.CreateBr(EndBB);
          }
        } break;
        }
      }
      scopeIndex = pCurScope->parentScopeIndex;
      pCurScope = &ScopeInfo.GetScope(scopeIndex);
      // Done when the function scope is reached.
    } while (pCurScope->kind != Scope::ScopeKind::FunctionScope);
  }
}
} // namespace

namespace CGHLSLMSHelper {
void StructurizeMultiRet(Module &M, clang::CodeGen::CodeGenModule &CGM,
                         DenseMap<Function *, ScopeInfo> &ScopeMap,
                         bool bWaveEnabledStage,
                         SmallVector<BranchInst *, 16> &DxBreaks) {
  if (CGM.getCodeGenOpts().HLSLExtensionsCodegen) {
    if (!CGM.getCodeGenOpts().HLSLExtensionsCodegen->IsOptionEnabled(
            hlsl::options::TOGGLE_STRUCTURIZE_RETURNS))
      return;
  } else {
    if (!CGM.getCodeGenOpts().HLSLOptimizationToggles.IsEnabled(
            hlsl::options::TOGGLE_STRUCTURIZE_RETURNS))
      return;
  }

  for (Function &F : M) {
    if (F.isDeclaration())
      continue;
    auto it = ScopeMap.find(&F);
    if (it == ScopeMap.end())
      continue;
    StructurizeMultiRetFunction(&F, CGM.getDiags(), it->second,
                                bWaveEnabledStage, DxBreaks);
  }
}

bool DxilObjectProperties::AddResource(llvm::Value *V,
                                       const hlsl::DxilResourceProperties &RP) {
  if (RP.isValid()) {
    DXASSERT(!GetResource(V).isValid() || GetResource(V) == RP,
             "otherwise, property conflict");
    resMap[V] = RP;
    return true;
  }
  return false;
}
bool DxilObjectProperties::IsResource(llvm::Value *V) {
  return resMap.count(V) != 0;
}
hlsl::DxilResourceProperties DxilObjectProperties::GetResource(llvm::Value *V) {
  auto it = resMap.find(V);
  if (it != resMap.end())
    return it->second;
  return DxilResourceProperties();
}
void DxilObjectProperties::updateGLC(llvm::Value *V) {
  auto it = resMap.find(V);
  if (it == resMap.end())
    return;
  // Toggle the globallycoherent bit.
  it->second.Basic.IsGloballyCoherent ^= 1;
}
} // namespace CGHLSLMSHelper
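// Usage sketch for DxilObjectProperties (illustrative only):
//   DxilObjectProperties props;
//   props.AddResource(V, RP); // stored only when RP.isValid()
//   props.GetResource(V);     // RP if recorded, default (invalid) otherwise
//   props.updateGLC(V);       // toggles Basic.IsGloballyCoherent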
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGHLSLMSHelper.h
#pragma once #include "dxc/DXIL/DxilCBuffer.h" #include "dxc/DXIL/DxilNodeProps.h" #include "dxc/Support/HLSLVersion.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Instructions.h" #include <memory> #include <vector> namespace clang { class HLSLPatchConstantFuncAttr; namespace CodeGen { class CodeGenModule; class CodeGenFunction; } // namespace CodeGen } // namespace clang namespace llvm { class Function; class Module; class Value; class DebugLoc; class Constant; class GlobalVariable; class CallInst; class Instruction; template <typename T, unsigned N> class SmallVector; } // namespace llvm namespace hlsl { class HLModule; struct DxilResourceProperties; struct DxilFunctionProps; class DxilFieldAnnotation; enum class IntrinsicOp; namespace dxilutil { class ExportMap; } } // namespace hlsl namespace CGHLSLMSHelper { struct EntryFunctionInfo { clang::SourceLocation SL = clang::SourceLocation(); llvm::Function *Func = nullptr; }; // Map to save patch constant functions struct PatchConstantInfo { clang::SourceLocation SL = clang::SourceLocation(); llvm::Function *Func = nullptr; std::uint32_t NumOverloads = 0; }; /// Use this class to represent an HLSL cbuffer in high-level DXIL. class HLCBuffer : public hlsl::DxilCBuffer { public: HLCBuffer(bool bIsView, bool bIsTBuf) : bIsView(bIsView), bIsTBuf(bIsTBuf), bIsArray(false), ResultTy(nullptr) { } virtual ~HLCBuffer() = default; void AddConst(std::unique_ptr<DxilResourceBase> &pItem) { pItem->SetID(constants.size()); constants.push_back(std::move(pItem)); } std::vector<std::unique_ptr<DxilResourceBase>> &GetConstants() { return constants; } bool IsView() { return bIsView; } bool IsTBuf() { return bIsTBuf; } bool IsArray() { return bIsArray; } void SetIsArray() { bIsArray = true; } llvm::Type *GetResultType() { return ResultTy; } void SetResultType(llvm::Type *Ty) { ResultTy = Ty; } private: std::vector<std::unique_ptr<DxilResourceBase>> constants; // constants inside const buffer bool bIsView; bool bIsTBuf; bool bIsArray; llvm::Type *ResultTy; }; // Scope to help transform multiple returns. struct Scope { enum class ScopeKind { IfScope, SwitchScope, LoopScope, ReturnScope, FunctionScope, }; ScopeKind kind; llvm::BasicBlock *EndScopeBB; // Save loopContinueBB to create dxBreak. llvm::BasicBlock *loopContinueBB; // For a case like // if () { // ... // return; // } else { // ... // return; // } // // both paths return. // When the whole scope is returned, go to the parent scope directly. // Anything after it is unreachable.
bool bWholeScopeReturned; unsigned parentScopeIndex; void dump(); }; class ScopeInfo { public: ScopeInfo() {} ScopeInfo(llvm::Function *F, clang::SourceLocation loc); void AddIf(llvm::BasicBlock *endIfBB); void AddSwitch(llvm::BasicBlock *endSwitchBB); void AddLoop(llvm::BasicBlock *loopContinue, llvm::BasicBlock *endLoopBB); void AddRet(llvm::BasicBlock *bbWithRet); void AddCleanupBB(llvm::BasicBlock *cleanupBB) { hasCleanupBlocks = true; } Scope &EndScope(bool bScopeFinishedWithRet); Scope &GetScope(unsigned i); const llvm::SmallVector<unsigned, 2> &GetRetScopes() { return rets; } void LegalizeWholeReturnedScope(); llvm::SmallVector<Scope, 16> &GetScopes() { return scopes; } bool CanSkipStructurize(); void dump(); bool HasCleanupBlocks() const { return hasCleanupBlocks; } clang::SourceLocation GetSourceLocation() const { return sourceLoc; } private: void AddScope(Scope::ScopeKind k, llvm::BasicBlock *endScopeBB); bool hasCleanupBlocks = false; llvm::SmallVector<unsigned, 2> rets; unsigned maxRetLevel; bool bAllReturnsInIf; llvm::SmallVector<unsigned, 8> scopeStack; // Save all scopes. llvm::SmallVector<Scope, 16> scopes; clang::SourceLocation sourceLoc; }; // Map from value to resource/wave matrix properties. // This only collects object variables (global/local/parameter), not object // fields inside structs; object fields inside structs are saved by // TypeAnnotation. struct DxilObjectProperties { bool AddResource(llvm::Value *V, const hlsl::DxilResourceProperties &RP); bool IsResource(llvm::Value *V); hlsl::DxilResourceProperties GetResource(llvm::Value *V); void updateGLC(llvm::Value *V); // MapVector for deterministic iteration order. llvm::MapVector<llvm::Value *, hlsl::DxilResourceProperties> resMap; }; void CopyAndAnnotateResourceArgument(llvm::Value *Src, llvm::Value *Dest, hlsl::DxilResourceProperties &RP, hlsl::HLModule &HLM, clang::CodeGen::CodeGenFunction &CGF); // Align cbuffer offset in legacy mode (16 bytes per row).
unsigned AlignBufferOffsetInLegacy(unsigned offset, unsigned size, unsigned scalarSizeInBytes, bool bNeedNewRow); void FinishEntries(hlsl::HLModule &HLM, const EntryFunctionInfo &Entry, clang::CodeGen::CodeGenModule &CGM, llvm::StringMap<EntryFunctionInfo> &entryFunctionMap, std::unordered_map<llvm::Function *, const clang::HLSLPatchConstantFuncAttr *> &HSEntryPatchConstantFuncAttr, llvm::StringMap<PatchConstantInfo> &patchConstantFunctionMap, std::unordered_map<llvm::Function *, std::unique_ptr<hlsl::DxilFunctionProps>> &patchConstantFunctionPropsMap); void FinishIntrinsics( hlsl::HLModule &HLM, std::vector<std::pair<llvm::Function *, unsigned>> &intrinsicMap, DxilObjectProperties &valToResPropertiesMap); void AddDxBreak(llvm::Module &M, const llvm::SmallVector<llvm::BranchInst *, 16> &DxBreaks); void ReplaceConstStaticGlobals( std::unordered_map<llvm::GlobalVariable *, std::vector<llvm::Constant *>> &staticConstGlobalInitListMap, std::unordered_map<llvm::GlobalVariable *, llvm::Function *> &staticConstGlobalCtorMap); void FinishClipPlane( hlsl::HLModule &HLM, std::vector<llvm::Function *> &clipPlaneFuncList, std::unordered_map<llvm::Value *, llvm::DebugLoc> &debugInfoMap, clang::CodeGen::CodeGenModule &CGM); void AddRegBindingsForResourceInConstantBuffer( hlsl::HLModule &HLM, llvm::DenseMap< llvm::Constant *, llvm::SmallVector<std::pair<hlsl::DXIL::ResourceClass, unsigned>, 1>> &constantRegBindingMap); void FinishCBuffer( hlsl::HLModule &HLM, llvm::Type *CBufferType, std::unordered_map<llvm::Constant *, hlsl::DxilFieldAnnotation> &AnnotationMap); void ProcessCtorFunctions(llvm::Module &M, llvm::SmallVector<llvm::Function *, 2> &Ctors, llvm::Function *Entry, llvm::Function *PatchConstantFn); void CollectCtorFunctions(llvm::Module &M, llvm::StringRef globalName, llvm::SmallVector<llvm::Function *, 2> &Ctors, clang::CodeGen::CodeGenModule &CGM); void TranslateRayQueryConstructor(hlsl::HLModule &HLM); void TranslateInputNodeRecordArgToHandle( hlsl::HLModule &HLM, llvm::MapVector<llvm::Argument *, hlsl::NodeInputRecordProps> &NodeParams); void TranslateNodeOutputParamToHandle( hlsl::HLModule &HLM, llvm::MapVector<llvm::Argument *, hlsl::NodeProps> &NodeParams); void UpdateLinkage( hlsl::HLModule &HLM, clang::CodeGen::CodeGenModule &CGM, hlsl::dxilutil::ExportMap &exportMap, llvm::StringMap<EntryFunctionInfo> &entryFunctionMap, llvm::StringMap<PatchConstantInfo> &patchConstantFunctionMap); void StructurizeMultiRet(llvm::Module &M, clang::CodeGen::CodeGenModule &CGM, llvm::DenseMap<llvm::Function *, ScopeInfo> &ScopeMap, bool bWaveEnabledStage, llvm::SmallVector<llvm::BranchInst *, 16> &DxBreaks); llvm::Value *TryEvalIntrinsic(llvm::CallInst *CI, hlsl::IntrinsicOp intriOp, hlsl::LangStd hlslVersion); void SimpleTransformForHLDXIR(llvm::Module *pM); void ExtensionCodeGen(hlsl::HLModule &HLM, clang::CodeGen::CodeGenModule &CGM); } // namespace CGHLSLMSHelper
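// Usage sketch (an assumption about the call sites, which live in the
// CodeGen implementation files): how ScopeInfo is expected to be driven
// while a function body is emitted. 'EndIfBB' and 'RetBB' are hypothetical
// basic blocks supplied by the caller.
inline void RecordIfWithReturnSketch(CGHLSLMSHelper::ScopeInfo &SI,
                                     llvm::BasicBlock *EndIfBB,
                                     llvm::BasicBlock *RetBB) {
  SI.AddIf(EndIfBB); // open an IfScope that ends at EndIfBB
  SI.AddRet(RetBB);  // record a return emitted inside that scope
  SI.EndScope(/*bScopeFinishedWithRet=*/true); // close it; it wholly returned
}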
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGBuilder.h
//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H #define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H #include "llvm/IR/IRBuilder.h" namespace clang { namespace CodeGen { class CodeGenFunction; /// \brief This is an IRBuilder insertion helper that forwards to /// CodeGenFunction::InsertHelper, which adds necessary metadata to /// instructions. template <bool PreserveNames> class CGBuilderInserter : protected llvm::IRBuilderDefaultInserter<PreserveNames> { public: CGBuilderInserter() = default; explicit CGBuilderInserter(CodeGenFunction *CGF) : CGF(CGF) {} protected: /// \brief This forwards to CodeGenFunction::InsertHelper. void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB, llvm::BasicBlock::iterator InsertPt) const; private: CodeGenFunction *CGF = nullptr; }; // Don't preserve names on values in an optimized build. #ifdef NDEBUG #define PreserveNames false #else #define PreserveNames true #endif typedef CGBuilderInserter<PreserveNames> CGBuilderInserterTy; typedef llvm::IRBuilder<PreserveNames, llvm::ConstantFolder, CGBuilderInserterTy> CGBuilderTy; #undef PreserveNames } // end namespace CodeGen } // end namespace clang #endif
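// Usage sketch (an assumption, mirroring how CodeGenFunction initializes its
// Builder member): constructing a CGBuilderTy with the CGF-aware inserter so
// that every created instruction is routed through
// CodeGenFunction::InsertHelper and picks up the expected metadata.
//
//   CGBuilderTy Builder(CGM.getModule().getContext(), llvm::ConstantFolder(),
//                       CGBuilderInserterTy(this)); // 'this': a CodeGenFunction*
//   Builder.SetInsertPoint(EntryBB);
//   llvm::Value *Sum = Builder.CreateAdd(L, R, "sum"); // via InsertHelper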
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenAction.cpp
//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "CoverageMappingGen.h" #include "clang/AST/ASTConsumer.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclGroup.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/TargetInfo.h" #include "clang/CodeGen/BackendUtil.h" #include "clang/CodeGen/CodeGenAction.h" #include "clang/CodeGen/ModuleBuilder.h" #include "clang/Frontend/CompilerInstance.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Frontend/TextDiagnosticPrinter.h" // HLSL Change #include "clang/Lex/Preprocessor.h" #include "llvm/ADT/SmallString.h" #include "llvm/Bitcode/ReaderWriter.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/IRReader/IRReader.h" #include "llvm/Linker/Linker.h" #include "llvm/Pass.h" #include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/Timer.h" #include <memory> using namespace clang; using namespace llvm; namespace clang { class BackendConsumer : public ASTConsumer { virtual void anchor(); DiagnosticsEngine &Diags; BackendAction Action; const CodeGenOptions &CodeGenOpts; const TargetOptions &TargetOpts; const LangOptions &LangOpts; raw_pwrite_stream *AsmOutStream; ASTContext *Context; Timer LLVMIRGeneration; std::unique_ptr<CodeGenerator> Gen; std::unique_ptr<llvm::Module> TheModule, LinkModule; public: BackendConsumer(BackendAction Action, DiagnosticsEngine &Diags, const HeaderSearchOptions &HeaderSearchOpts, const PreprocessorOptions &PPOpts, const CodeGenOptions &CodeGenOpts, const TargetOptions &TargetOpts, const LangOptions &LangOpts, bool TimePasses, const std::string &InFile, llvm::Module *LinkModule, raw_pwrite_stream *OS, LLVMContext &C, CoverageSourceInfo *CoverageInfo = nullptr) : Diags(Diags), Action(Action), CodeGenOpts(CodeGenOpts), TargetOpts(TargetOpts), LangOpts(LangOpts), AsmOutStream(OS), Context(nullptr), LLVMIRGeneration("LLVM IR Generation Time"), Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts, CodeGenOpts, C, CoverageInfo)), LinkModule(LinkModule) { llvm::TimePassesIsEnabled = TimePasses; } // HLSL Change Starts - avoid double free ~BackendConsumer() { if (TheModule.get() && Gen.get()) { Gen->ReleaseModule(); } } // HLSL Change Ends - avoid double free std::unique_ptr<llvm::Module> takeModule() { return std::move(TheModule); } llvm::Module *takeLinkModule() { return LinkModule.release(); } void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) override { Gen->HandleCXXStaticMemberVarInstantiation(VD); } void Initialize(ASTContext &Ctx) override { if (Context) { assert(Context == &Ctx); return; } Context = &Ctx; if (llvm::TimePassesIsEnabled) LLVMIRGeneration.startTimer(); Gen->Initialize(Ctx); TheModule.reset(Gen->GetModule()); if (llvm::TimePassesIsEnabled) LLVMIRGeneration.stopTimer(); } bool HandleTopLevelDecl(DeclGroupRef D) override { PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(), Context->getSourceManager(), "LLVM IR generation of declaration"); if (llvm::TimePassesIsEnabled) LLVMIRGeneration.startTimer(); Gen->HandleTopLevelDecl(D); if 
(llvm::TimePassesIsEnabled) LLVMIRGeneration.stopTimer(); return true; } void HandleInlineMethodDefinition(CXXMethodDecl *D) override { PrettyStackTraceDecl CrashInfo(D, SourceLocation(), Context->getSourceManager(), "LLVM IR generation of inline method"); if (llvm::TimePassesIsEnabled) LLVMIRGeneration.startTimer(); Gen->HandleInlineMethodDefinition(D); if (llvm::TimePassesIsEnabled) LLVMIRGeneration.stopTimer(); } void HandleTranslationUnit(ASTContext &C) override { { PrettyStackTraceString CrashInfo("Per-file LLVM IR generation"); if (llvm::TimePassesIsEnabled) LLVMIRGeneration.startTimer(); Gen->HandleTranslationUnit(C); if (llvm::TimePassesIsEnabled) LLVMIRGeneration.stopTimer(); } // Silently ignore if we weren't initialized for some reason. if (!TheModule) return; // Make sure IR generation is happy with the module. This is released by // the module provider. llvm::Module *M = Gen->ReleaseModule(); if (!M) { // The module has been released by IR gen on failures, do not double // free. TheModule.release(); return; } assert(TheModule.get() == M && "Unexpected module change during IR generation"); // Link LinkModule into this module if present, preserving its validity. if (LinkModule) { if (Linker::LinkModules(M, LinkModule.get(), [this](const DiagnosticInfo &DI) { linkerDiagnosticHandler(DI); })) return; } // Install an inline asm handler so that diagnostics get printed through // our diagnostics hooks. LLVMContext &Ctx = TheModule->getContext(); LLVMContext::InlineAsmDiagHandlerTy OldHandler = Ctx.getInlineAsmDiagnosticHandler(); void *OldContext = Ctx.getInlineAsmDiagnosticContext(); Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this); LLVMContext::DiagnosticHandlerTy OldDiagnosticHandler = Ctx.getDiagnosticHandler(); void *OldDiagnosticContext = Ctx.getDiagnosticContext(); Ctx.setDiagnosticHandler(DiagnosticHandler, this); EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts, C.getTargetInfo().getTargetDescription(), TheModule.get(), Action, AsmOutStream); Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext); Ctx.setDiagnosticHandler(OldDiagnosticHandler, OldDiagnosticContext); } void HandleTagDeclDefinition(TagDecl *D) override { PrettyStackTraceDecl CrashInfo(D, SourceLocation(), Context->getSourceManager(), "LLVM IR generation of declaration"); Gen->HandleTagDeclDefinition(D); } void HandleTagDeclRequiredDefinition(const TagDecl *D) override { Gen->HandleTagDeclRequiredDefinition(D); } void CompleteTentativeDefinition(VarDecl *D) override { Gen->CompleteTentativeDefinition(D); } void HandleVTable(CXXRecordDecl *RD) override { Gen->HandleVTable(RD); } void HandleLinkerOptionPragma(llvm::StringRef Opts) override { Gen->HandleLinkerOptionPragma(Opts); } void HandleDetectMismatch(llvm::StringRef Name, llvm::StringRef Value) override { Gen->HandleDetectMismatch(Name, Value); } void HandleDependentLibrary(llvm::StringRef Opts) override { Gen->HandleDependentLibrary(Opts); } static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context, unsigned LocCookie) { SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie); ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc); } void linkerDiagnosticHandler(const llvm::DiagnosticInfo &DI); static void DiagnosticHandler(const llvm::DiagnosticInfo &DI, void *Context) { ((BackendConsumer *)Context)->DiagnosticHandlerImpl(DI); } void InlineAsmDiagHandler2(const llvm::SMDiagnostic &, SourceLocation LocCookie); void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI); /// \brief Specialized 
handler for InlineAsm diagnostic. /// \return True if the diagnostic has been successfully reported, false /// otherwise. bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D); /// \brief Specialized handler for StackSize diagnostic. /// \return True if the diagnostic has been successfully reported, false /// otherwise. bool StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D); /// \brief Specialized handlers for optimization remarks. /// Note that these handlers only accept remarks and they always handle /// them. void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D, unsigned DiagID); void OptimizationRemarkHandler(const llvm::DiagnosticInfoOptimizationRemark &D); void OptimizationRemarkHandler( const llvm::DiagnosticInfoOptimizationRemarkMissed &D); void OptimizationRemarkHandler( const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D); void OptimizationFailureHandler( const llvm::DiagnosticInfoOptimizationFailure &D); bool DxilDiagHandler(const llvm::DiagnosticInfoDxil &D); }; void BackendConsumer::anchor() {} } /// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr /// buffer to be a valid FullSourceLoc. static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D, SourceManager &CSM) { // Get both the clang and llvm source managers. The location is relative to // a memory buffer that the LLVM Source Manager is handling, we need to add // a copy to the Clang source manager. const llvm::SourceMgr &LSM = *D.getSourceMgr(); // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr // already owns its one and clang::SourceManager wants to own its one. const MemoryBuffer *LBuf = LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc())); // Create the copy and transfer ownership to clang::SourceManager. // TODO: Avoid copying files into memory. std::unique_ptr<llvm::MemoryBuffer> CBuf = llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(), LBuf->getBufferIdentifier()); // FIXME: Keep a file ID map instead of creating new IDs for each location. FileID FID = CSM.createFileID(std::move(CBuf)); // Translate the offset into the file. unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart(); SourceLocation NewLoc = CSM.getLocForStartOfFile(FID).getLocWithOffset(Offset); return FullSourceLoc(NewLoc, CSM); } /// InlineAsmDiagHandler2 - This function is invoked when the backend hits an /// error parsing inline asm. The SMDiagnostic indicates the error relative to /// the temporary memory buffer that the inline asm parser has set up. void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D, SourceLocation LocCookie) { // There are a couple of different kinds of errors we could get here. First, // we re-format the SMDiagnostic in terms of a clang diagnostic. // Strip "error: " off the start of the message string. StringRef Message = D.getMessage(); if (Message.startswith("error: ")) Message = Message.substr(7); // If the SMDiagnostic has an inline asm source location, translate it. 
FullSourceLoc Loc; if (D.getLoc() != SMLoc()) Loc = ConvertBackendLocation(D, Context->getSourceManager()); unsigned DiagID; switch (D.getKind()) { case llvm::SourceMgr::DK_Error: DiagID = diag::err_fe_inline_asm; break; case llvm::SourceMgr::DK_Warning: DiagID = diag::warn_fe_inline_asm; break; case llvm::SourceMgr::DK_Note: DiagID = diag::note_fe_inline_asm; break; } // If this problem has clang-level source location information, report the // issue in the source with a note showing the instantiated // code. if (LocCookie.isValid()) { Diags.Report(LocCookie, DiagID).AddString(Message); if (D.getLoc().isValid()) { DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here); // Convert the SMDiagnostic ranges into SourceRange and attach them // to the diagnostic. for (unsigned i = 0, e = D.getRanges().size(); i != e; ++i) { std::pair<unsigned, unsigned> Range = D.getRanges()[i]; unsigned Column = D.getColumnNo(); B << SourceRange(Loc.getLocWithOffset(Range.first - Column), Loc.getLocWithOffset(Range.second - Column)); } } return; } // Otherwise, report the backend issue as occurring in the generated .s file. // If Loc is invalid, we still need to report the issue, it just gets no // location info. Diags.Report(Loc, DiagID).AddString(Message); } #define ComputeDiagID(Severity, GroupName, DiagID) \ do { \ switch (Severity) { \ case llvm::DS_Error: \ DiagID = diag::err_fe_##GroupName; \ break; \ case llvm::DS_Warning: \ DiagID = diag::warn_fe_##GroupName; \ break; \ case llvm::DS_Remark: \ llvm_unreachable("'remark' severity not expected"); \ break; \ case llvm::DS_Note: \ DiagID = diag::note_fe_##GroupName; \ break; \ } \ } while (false) #define ComputeDiagRemarkID(Severity, GroupName, DiagID) \ do { \ switch (Severity) { \ case llvm::DS_Error: \ DiagID = diag::err_fe_##GroupName; \ break; \ case llvm::DS_Warning: \ DiagID = diag::warn_fe_##GroupName; \ break; \ case llvm::DS_Remark: \ DiagID = diag::remark_fe_##GroupName; \ break; \ case llvm::DS_Note: \ DiagID = diag::note_fe_##GroupName; \ break; \ } \ } while (false) bool BackendConsumer::InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D) { unsigned DiagID; ComputeDiagID(D.getSeverity(), inline_asm, DiagID); std::string Message = D.getMsgStr().str(); // If this problem has clang-level source location information, report the // issue as being a problem in the source with a note showing the instantiated // code. SourceLocation LocCookie = SourceLocation::getFromRawEncoding(D.getLocCookie()); if (LocCookie.isValid()) Diags.Report(LocCookie, DiagID).AddString(Message); else { // Otherwise, report the backend diagnostic as occurring in the generated // .s file. // If Loc is invalid, we still need to report the diagnostic, it just gets // no location info. FullSourceLoc Loc; Diags.Report(Loc, DiagID).AddString(Message); } // We handled all the possible severities. return true; } bool BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) { if (D.getSeverity() != llvm::DS_Warning) // For now, the only support we have for StackSize diagnostic is warning. // We do not know how to format other severities. 
return false; if (const Decl *ND = Gen->GetDeclForMangledName(D.getFunction().getName())) { Diags.Report(ND->getASTContext().getFullLoc(ND->getLocation()), diag::warn_fe_frame_larger_than) << D.getStackSize() << Decl::castToDeclContext(ND); return true; } return false; } void BackendConsumer::EmitOptimizationMessage( const llvm::DiagnosticInfoOptimizationBase &D, unsigned DiagID) { // We only support warnings and remarks. assert(D.getSeverity() == llvm::DS_Remark || D.getSeverity() == llvm::DS_Warning); SourceManager &SourceMgr = Context->getSourceManager(); FileManager &FileMgr = SourceMgr.getFileManager(); StringRef Filename; unsigned Line, Column; SourceLocation DILoc; if (D.isLocationAvailable()) { D.getLocation(&Filename, &Line, &Column); const FileEntry *FE = FileMgr.getFile(Filename); if (FE && Line > 0) { // If -gcolumn-info was not used, Column will be 0. This upsets the // source manager, so pass 1 if Column is not set. DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1); } } // If a location isn't available, try to approximate it using the associated // function definition. We use the definition's right brace to differentiate // from diagnostics that genuinely relate to the function itself. FullSourceLoc Loc(DILoc, SourceMgr); if (Loc.isInvalid()) if (const Decl *FD = Gen->GetDeclForMangledName(D.getFunction().getName())) Loc = FD->getASTContext().getFullLoc(FD->getBodyRBrace()); Diags.Report(Loc, DiagID) << AddFlagValue(D.getPassName()) << D.getMsg().str(); if (DILoc.isInvalid() && D.isLocationAvailable()) // If we were not able to translate the file:line:col information // back to a SourceLocation, at least emit a note stating that // we could not translate this location. This can happen in the // case of #line directives. Diags.Report(Loc, diag::note_fe_backend_optimization_remark_invalid_loc) << Filename << Line << Column; } void BackendConsumer::OptimizationRemarkHandler( const llvm::DiagnosticInfoOptimizationRemark &D) { // Optimization remarks are active only if the -Rpass flag has a regular // expression that matches the name of the pass name in \p D. if (CodeGenOpts.OptimizationRemarkPattern && CodeGenOpts.OptimizationRemarkPattern->match(D.getPassName())) EmitOptimizationMessage(D, diag::remark_fe_backend_optimization_remark); } void BackendConsumer::OptimizationRemarkHandler( const llvm::DiagnosticInfoOptimizationRemarkMissed &D) { // Missed optimization remarks are active only if the -Rpass-missed // flag has a regular expression that matches the name of the pass // name in \p D. if (CodeGenOpts.OptimizationRemarkMissedPattern && CodeGenOpts.OptimizationRemarkMissedPattern->match(D.getPassName())) EmitOptimizationMessage(D, diag::remark_fe_backend_optimization_remark_missed); } void BackendConsumer::OptimizationRemarkHandler( const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D) { // Optimization analysis remarks are active only if the -Rpass-analysis // flag has a regular expression that matches the name of the pass // name in \p D. 
if (CodeGenOpts.OptimizationRemarkAnalysisPattern && CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())) EmitOptimizationMessage( D, diag::remark_fe_backend_optimization_remark_analysis); } void BackendConsumer::OptimizationFailureHandler( const llvm::DiagnosticInfoOptimizationFailure &D) { EmitOptimizationMessage(D, diag::warn_fe_backend_optimization_failure); } void BackendConsumer::linkerDiagnosticHandler(const DiagnosticInfo &DI) { if (DI.getSeverity() != DS_Error) return; std::string MsgStorage; { raw_string_ostream Stream(MsgStorage); DiagnosticPrinterRawOStream DP(Stream); DI.print(DP); } Diags.Report(diag::err_fe_cannot_link_module) << LinkModule->getModuleIdentifier() << MsgStorage; } // HLSL Change start - Dxil Diagnostic Info reporter bool BackendConsumer::DxilDiagHandler(const llvm::DiagnosticInfoDxil &D) { unsigned DiagID; ComputeDiagID(D.getSeverity(), inline_asm, DiagID); SourceManager &SourceMgr = Context->getSourceManager(); SourceLocation DILoc; std::string Message = D.getMsgStr().str(); // Convert Filename/Line/Column triplet into SourceLocation if (D.hasLocation()) { FileManager &FileMgr = SourceMgr.getFileManager(); StringRef Filename = D.getFileName(); unsigned Line = D.getLine(); unsigned Column = D.getColumn(); const FileEntry *FE = FileMgr.getFile(Filename); if (FE && Line > 0) { DILoc = SourceMgr.translateFileLineCol(FE, Line, Column ? Column : 1); } } FullSourceLoc Loc(DILoc, SourceMgr); // If no location information is available, add function name if (Loc.isInvalid()) { auto *DiagClient = Diags.getClient(); auto *func = D.getFunction(); if (DiagClient && func) DiagClient->setPrefix("Function: " + func->getName().str()); // Clang will de-duplicate this so that it only emits once. Diags.Report( Diags.getCustomDiagID(DiagnosticsEngine::Note, "Debug information is disabled which may impact " "diagnostic location accuracy. Re-run without " "-fdisable-loc-tracking to improve accuracy.\n")); } Diags.Report(Loc, DiagID).AddString(Message); return true; } // HLSL Change end - Dxil Diagnostic Info reporter /// \brief This function is invoked when the backend needs /// to report something to the user. void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) { unsigned DiagID = diag::err_fe_inline_asm; llvm::DiagnosticSeverity Severity = DI.getSeverity(); // Get the diagnostic ID based. switch (DI.getKind()) { case llvm::DK_InlineAsm: if (InlineAsmDiagHandler(cast<DiagnosticInfoInlineAsm>(DI))) return; ComputeDiagID(Severity, inline_asm, DiagID); break; case llvm::DK_StackSize: if (StackSizeDiagHandler(cast<DiagnosticInfoStackSize>(DI))) return; ComputeDiagID(Severity, backend_frame_larger_than, DiagID); break; case llvm::DK_OptimizationRemark: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<DiagnosticInfoOptimizationRemark>(DI)); return; case llvm::DK_OptimizationRemarkMissed: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler(cast<DiagnosticInfoOptimizationRemarkMissed>(DI)); return; case llvm::DK_OptimizationRemarkAnalysis: // Optimization remarks are always handled completely by this // handler. There is no generic way of emitting them. OptimizationRemarkHandler( cast<DiagnosticInfoOptimizationRemarkAnalysis>(DI)); return; case llvm::DK_OptimizationFailure: // Optimization failures are always handled completely by this // handler. 
OptimizationFailureHandler(cast<DiagnosticInfoOptimizationFailure>(DI)); return; // HLSL Change start - Dxil Diagnostic Info reporter case llvm::DK_DXIL: if (DxilDiagHandler(cast<DiagnosticInfoDxil>(DI))) return; ComputeDiagID(Severity, inline_asm, DiagID); break; // HLSL Change end - Dxil Diagnostic Info reporter default: // Plugin IDs are not bound to any value as they are set dynamically. ComputeDiagRemarkID(Severity, backend_plugin, DiagID); break; } std::string MsgStorage; { raw_string_ostream Stream(MsgStorage); DiagnosticPrinterRawOStream DP(Stream); DI.print(DP); } // Report the backend message using the usual diagnostic mechanism. FullSourceLoc Loc; Diags.Report(Loc, DiagID).AddString(MsgStorage); } #undef ComputeDiagID CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext) : Act(_Act), LinkModule(nullptr), VMContext(_VMContext ? _VMContext : new LLVMContext), OwnsVMContext(!_VMContext) {} CodeGenAction::~CodeGenAction() { TheModule.reset(); if (OwnsVMContext) delete VMContext; } bool CodeGenAction::hasIRSupport() const { return true; } void CodeGenAction::EndSourceFileAction() { // If the consumer creation failed, do nothing. if (!getCompilerInstance().hasASTConsumer()) return; // If we were given a link module, release consumer's ownership of it. if (LinkModule) BEConsumer->takeLinkModule(); // Steal the module from the consumer. TheModule = BEConsumer->takeModule(); } std::unique_ptr<llvm::Module> CodeGenAction::takeModule() { return std::move(TheModule); } llvm::LLVMContext *CodeGenAction::takeLLVMContext() { OwnsVMContext = false; return VMContext; } static raw_pwrite_stream * GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) { switch (Action) { case Backend_EmitAssembly: return CI.createDefaultOutputFile(false, InFile, "s"); case Backend_EmitLL: return CI.createDefaultOutputFile(false, InFile, "ll"); case Backend_EmitBC: return CI.createDefaultOutputFile(true, InFile, "bc"); case Backend_EmitPasses: return CI.createDefaultOutputFile(true, InFile, "passes.txt"); case Backend_EmitNothing: return nullptr; case Backend_EmitMCNull: return CI.createNullOutputFile(); case Backend_EmitObj: return CI.createDefaultOutputFile(true, InFile, "o"); } llvm_unreachable("Invalid action!"); } std::unique_ptr<ASTConsumer> CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) { BackendAction BA = static_cast<BackendAction>(Act); raw_pwrite_stream *OS = GetOutputStream(CI, InFile, BA); if (BA != Backend_EmitNothing && !OS) return nullptr; llvm::Module *LinkModuleToUse = LinkModule; // If we were not given a link module, and the user requested that one be // loaded from bitcode, do so now. const std::string &LinkBCFile = CI.getCodeGenOpts().LinkBitcodeFile; if (!LinkModuleToUse && !LinkBCFile.empty()) { auto BCBuf = CI.getFileManager().getBufferForFile(LinkBCFile); if (!BCBuf) { CI.getDiagnostics().Report(diag::err_cannot_open_file) << LinkBCFile << BCBuf.getError().message(); return nullptr; } ErrorOr<std::unique_ptr<llvm::Module>> ModuleOrErr = getLazyBitcodeModule(std::move(*BCBuf), *VMContext); if (std::error_code EC = ModuleOrErr.getError()) { CI.getDiagnostics().Report(diag::err_cannot_open_file) << LinkBCFile << EC.message(); return nullptr; } LinkModuleToUse = ModuleOrErr.get().release(); } CoverageSourceInfo *CoverageInfo = nullptr; // Add the preprocessor callback only when the coverage mapping is generated. 
if (CI.getCodeGenOpts().CoverageMapping) { CoverageInfo = new CoverageSourceInfo; CI.getPreprocessor().addPPCallbacks( std::unique_ptr<PPCallbacks>(CoverageInfo)); } std::unique_ptr<BackendConsumer> Result(new BackendConsumer( BA, CI.getDiagnostics(), CI.getHeaderSearchOpts(), CI.getPreprocessorOpts(), CI.getCodeGenOpts(), CI.getTargetOpts(), CI.getLangOpts(), CI.getFrontendOpts().ShowTimers, InFile, LinkModuleToUse, OS, *VMContext, CoverageInfo)); BEConsumer = Result.get(); return std::move(Result); } static void BitcodeInlineAsmDiagHandler(const llvm::SMDiagnostic &SM, void *Context, unsigned LocCookie) { SM.print(nullptr, llvm::errs()); } void CodeGenAction::ExecuteAction() { // If this is an IR file, we have to treat it specially. if (getCurrentFileKind() == IK_LLVM_IR) { BackendAction BA = static_cast<BackendAction>(Act); CompilerInstance &CI = getCompilerInstance(); raw_pwrite_stream *OS = GetOutputStream(CI, getCurrentFile(), BA); if (BA != Backend_EmitNothing && !OS) return; bool Invalid; SourceManager &SM = CI.getSourceManager(); FileID FID = SM.getMainFileID(); llvm::MemoryBuffer *MainFile = SM.getBuffer(FID, &Invalid); if (Invalid) return; llvm::SMDiagnostic Err; TheModule = parseIR(MainFile->getMemBufferRef(), Err, *VMContext); if (!TheModule) { // Translate from the diagnostic info to the SourceManager location if // available. // TODO: Unify this with ConvertBackendLocation() SourceLocation Loc; if (Err.getLineNo() > 0) { assert(Err.getColumnNo() >= 0); Loc = SM.translateFileLineCol(SM.getFileEntryForID(FID), Err.getLineNo(), Err.getColumnNo() + 1); } // Strip off a leading diagnostic code if there is one. StringRef Msg = Err.getMessage(); if (Msg.startswith("error: ")) Msg = Msg.substr(7); unsigned DiagID = CI.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error, "%0"); CI.getDiagnostics().Report(Loc, DiagID) << Msg; return; } const TargetOptions &TargetOpts = CI.getTargetOpts(); if (TheModule->getTargetTriple() != TargetOpts.Triple) { CI.getDiagnostics().Report(SourceLocation(), diag::warn_fe_override_module) << TargetOpts.Triple; TheModule->setTargetTriple(TargetOpts.Triple); } LLVMContext &Ctx = TheModule->getContext(); Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler); EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(), TargetOpts, CI.getLangOpts(), CI.getTarget().getTargetDescription(), TheModule.get(), BA, OS); return; } // Otherwise follow the normal AST path. 
this->ASTFrontendAction::ExecuteAction(); } // void EmitAssemblyAction::anchor() { } EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitAssembly, _VMContext) {} void EmitBCAction::anchor() { } EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitBC, _VMContext) {} void EmitLLVMAction::anchor() { } EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitLL, _VMContext) {} void EmitLLVMOnlyAction::anchor() { } EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitNothing, _VMContext) {} void EmitCodeGenOnlyAction::anchor() { } EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitMCNull, _VMContext) {} void EmitObjAction::anchor() { } EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitObj, _VMContext) {} // HLSL Change Starts void EmitOptDumpAction::anchor() { } EmitOptDumpAction::EmitOptDumpAction(llvm::LLVMContext *_VMContext) : CodeGenAction(Backend_EmitPasses, _VMContext) {} // HLSL Change Ends
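// Usage sketch (an assumption, not part of this file): driving one of the
// actions above through a CompilerInstance to obtain the generated module.
// CompilerInstance setup and error handling are elided.
//
//   clang::CompilerInstance CI;
//   // ... configure CI's invocation, diagnostics, target, and inputs ...
//   llvm::LLVMContext Ctx;
//   clang::EmitLLVMOnlyAction Act(&Ctx); // Backend_EmitNothing: IR only
//   if (CI.ExecuteAction(Act)) {
//     std::unique_ptr<llvm::Module> M = Act.takeModule();
//     // M now owns the LLVM IR produced for the input file.
//   }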
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGExprScalar.cpp
//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code to emit Expr nodes with scalar LLVM types as LLVM code. // //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" #include "CGCXXABI.h" #include "CGDebugInfo.h" #include "CGObjCRuntime.h" #include "CGHLSLRuntime.h" // HLSL Change #include "CodeGenModule.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/HlslTypes.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/TargetInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "dxc/DXIL/DxilUtil.h" // HLSL Change #include "llvm/IR/CFG.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include <cstdarg> using namespace clang; using namespace CodeGen; using llvm::Value; //===----------------------------------------------------------------------===// // Scalar Expression Emitter //===----------------------------------------------------------------------===// namespace { struct BinOpInfo { Value *LHS; Value *RHS; QualType Ty; // Computation Type. BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform bool FPContractable; const Expr *E; // Entire expr, for error unsupported. May not be binop. }; static bool MustVisitNullValue(const Expr *E) { // If a null pointer expression's type is the C++0x nullptr_t, then // it's not necessarily a simple constant and it must be evaluated // for its potential side effects. return E->getType()->isNullPtrType(); } class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, Value*> { CodeGenFunction &CGF; CGBuilderTy &Builder; bool IgnoreResultAssign; llvm::LLVMContext &VMContext; public: ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false) : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira), VMContext(cgf.getLLVMContext()) { } //===--------------------------------------------------------------------===// // Utilities //===--------------------------------------------------------------------===// bool TestAndClearIgnoreResultAssign() { bool I = IgnoreResultAssign; IgnoreResultAssign = false; return I; } llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); } LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); } LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) { return CGF.EmitCheckedLValue(E, TCK); } void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info); Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) { return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal(); } void EmitLValueAlignmentAssumption(const Expr *E, Value *V) { const AlignValueAttr *AVAttr = nullptr; if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) { const ValueDecl *VD = DRE->getDecl(); if (VD->getType()->isReferenceType()) { if (const auto *TTy = dyn_cast<TypedefType>(VD->getType().getNonReferenceType())) AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>(); } else { // Assumptions for function parameters are emitted at the start of the // function, so there is no need to repeat that here. 
if (isa<ParmVarDecl>(VD)) return; AVAttr = VD->getAttr<AlignValueAttr>(); } } if (!AVAttr) if (const auto *TTy = dyn_cast<TypedefType>(E->getType())) AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>(); if (!AVAttr) return; Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment()); llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue); CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue()); } /// EmitLoadOfLValue - Given an expression with complex type that represents a /// value l-value, this method emits the address of the l-value, then loads /// and returns the result. Value *EmitLoadOfLValue(const Expr *E) { Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load), E->getExprLoc()); EmitLValueAlignmentAssumption(E, V); return V; } /// EmitConversionToBool - Convert the specified expression value to a /// boolean (i1) truth value. This is equivalent to "Val != 0". Value *EmitConversionToBool(Value *Src, QualType DstTy); /// \brief Emit a check that a conversion to or from a floating-point type /// does not overflow. void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType, QualType DstType, llvm::Type *DstTy); /// EmitScalarConversion - Emit a conversion from the specified type to the /// specified destination type, both of which are LLVM scalar types. Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy); /// EmitComplexToScalarConversion - Emit a conversion from the specified /// complex type to the specified destination type, where the destination type /// is an LLVM scalar type. Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy); /// EmitNullValue - Emit a value that corresponds to null for the given type. Value *EmitNullValue(QualType Ty); /// EmitFloatToBoolConversion - Perform an FP to boolean conversion. Value *EmitFloatToBoolConversion(Value *V) { // Compare against 0.0 for fp scalars. llvm::Value *Zero = llvm::Constant::getNullValue(V->getType()); return Builder.CreateFCmpUNE(V, Zero, "tobool"); } /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion. Value *EmitPointerToBoolConversion(Value *V) { Value *Zero = llvm::ConstantPointerNull::get( cast<llvm::PointerType>(V->getType())); return Builder.CreateICmpNE(V, Zero, "tobool"); } Value *EmitIntToBoolConversion(Value *V) { // Because of the type rules of C, we often end up computing a // logical value, then zero extending it to int, then wanting it // as a logical value again. Optimize this common case. if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) { if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) { Value *Result = ZI->getOperand(0); // If there aren't any more uses, zap the instruction to save space. // Note that there can be more uses, for example if this // is the result of an assignment. 
if (ZI->use_empty()) ZI->eraseFromParent(); return Result; } } return Builder.CreateIsNotNull(V, "tobool"); } //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// Value *Visit(Expr *E) { ApplyDebugLocation DL(CGF, E); // HLSL Change Begins // generate matrix operations if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E)) { if (hlsl::IsHLSLMatType(E->getType()) || hlsl::IsHLSLMatType(BinOp->getLHS()->getType())) { if (BinOp->getOpcode() == BO_Assign) { LValue LHS = CGF.EmitLValue(BinOp->getLHS()); llvm::Value *RHS = CGF.EmitScalarExpr(BinOp->getRHS()); CGF.CGM.getHLSLRuntime().EmitHLSLMatrixStore(CGF, RHS, LHS.getAddress(), BinOp->getLHS()->getType()); return RHS; } else if (BinOp->getOpcode() != BO_Comma) { // comma handled by standard Visit below llvm::Value *LHS = CGF.EmitScalarExpr(BinOp->getLHS()); llvm::Value *RHS = CGF.EmitScalarExpr(BinOp->getRHS()); return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall( CGF, E, ConvertType(E->getType()), { LHS, RHS }); } } } else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) { // ++/-- operators are handled in EmitScalarPrePostIncDec if (hlsl::IsHLSLMatType(E->getType()) && !UnOp->isIncrementDecrementOp()) { llvm::Value *Operand = CGF.EmitScalarExpr(UnOp->getSubExpr()); return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall( CGF, E, Operand->getType(), { Operand }); } } // HLSL Change Ends return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E); } Value *VisitStmt(Stmt *S) { S->dump(CGF.getContext().getSourceManager()); llvm_unreachable("Stmt can't have complex result type!"); } Value *VisitExpr(Expr *S); Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); } Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) { return Visit(E->getReplacement()); } Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) { return Visit(GE->getResultExpr()); } // Leaves. 
Value *VisitIntegerLiteral(const IntegerLiteral *E) { return Builder.getInt(E->getValue()); } Value *VisitFloatingLiteral(const FloatingLiteral *E) { return llvm::ConstantFP::get(VMContext, E->getValue()); } Value *VisitCharacterLiteral(const CharacterLiteral *E) { return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); } // HLSL Change Start Value *VisitStringLiteral(const StringLiteral *E) { return CGF.CGM.GetConstantArrayFromStringLiteral(E); } // HLSL Change End Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) { return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); } Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); } Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) { return EmitNullValue(E->getType()); } Value *VisitGNUNullExpr(const GNUNullExpr *E) { return EmitNullValue(E->getType()); } Value *VisitOffsetOfExpr(OffsetOfExpr *E); Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E); Value *VisitAddrLabelExpr(const AddrLabelExpr *E) { llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel()); return Builder.CreateBitCast(V, ConvertType(E->getType())); } Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) { return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength()); } Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) { return CGF.EmitPseudoObjectRValue(E).getScalarVal(); } Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) { if (E->isGLValue()) return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc()); // Otherwise, assume the mapping is the scalar directly. return CGF.getOpaqueRValueMapping(E).getScalarVal(); } // l-values. Value *VisitDeclRefExpr(DeclRefExpr *E) { if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) { if (result.isReference()) return EmitLoadOfLValue(result.getReferenceLValue(CGF, E), E->getExprLoc()); return result.getValue(); } return EmitLoadOfLValue(E); } Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) { return CGF.EmitObjCSelectorExpr(E); } Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) { return CGF.EmitObjCProtocolExpr(E); } Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { return EmitLoadOfLValue(E); } Value *VisitObjCMessageExpr(ObjCMessageExpr *E) { if (E->getMethodDecl() && E->getMethodDecl()->getReturnType()->isReferenceType()) return EmitLoadOfLValue(E); return CGF.EmitObjCMessageExpr(E).getScalarVal(); } Value *VisitObjCIsaExpr(ObjCIsaExpr *E) { LValue LV = CGF.EmitObjCIsaExpr(E); Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal(); return V; } Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E); Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E); Value *VisitConvertVectorExpr(ConvertVectorExpr *E); Value *VisitMemberExpr(MemberExpr *E); Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); } Value *VisitExtMatrixElementExpr(Expr *E) { return EmitLoadOfLValue(E); } // HLSL Change Value *VisitHLSLVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); } // HLSL Change Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { return EmitLoadOfLValue(E); } Value *VisitInitListExpr(InitListExpr *E); Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { return EmitNullValue(E->getType()); } Value *VisitExplicitCastExpr(ExplicitCastExpr *E) { if (E->getType()->isVariablyModifiedType()) CGF.EmitVariablyModifiedType(E->getType()); if (CGDebugInfo *DI = CGF.getDebugInfo()) 
DI->EmitExplicitCastType(E->getType()); return VisitCastExpr(E); } Value *VisitCastExpr(CastExpr *E); Value *VisitCallExpr(const CallExpr *E) { if (E->getCallReturnType(CGF.getContext())->isReferenceType()) return EmitLoadOfLValue(E); // HLSL Change Starts if (CGF.getContext().getLangOpts().HLSL) { RValue RV = CGF.EmitCallExpr(E); Value *V = nullptr; if (RV.isScalar()) V = RV.getScalarVal(); else V = RV.getAggregateAddr(); EmitLValueAlignmentAssumption(E, V); return V; } // HLSL Change Ends Value *V = CGF.EmitCallExpr(E).getScalarVal(); EmitLValueAlignmentAssumption(E, V); return V; } Value *VisitStmtExpr(const StmtExpr *E); // Unary Operators. Value *VisitUnaryPostDec(const UnaryOperator *E) { LValue LV = EmitLValue(E->getSubExpr()); return EmitScalarPrePostIncDec(E, LV, false, false); } Value *VisitUnaryPostInc(const UnaryOperator *E) { LValue LV = EmitLValue(E->getSubExpr()); return EmitScalarPrePostIncDec(E, LV, true, false); } Value *VisitUnaryPreDec(const UnaryOperator *E) { LValue LV = EmitLValue(E->getSubExpr()); return EmitScalarPrePostIncDec(E, LV, false, true); } Value *VisitUnaryPreInc(const UnaryOperator *E) { LValue LV = EmitLValue(E->getSubExpr()); return EmitScalarPrePostIncDec(E, LV, true, true); } llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E, llvm::Value *InVal, bool IsInc); llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre); Value *VisitUnaryAddrOf(const UnaryOperator *E) { if (isa<MemberPointerType>(E->getType())) // never sugared return CGF.CGM.getMemberPointerConstant(E); return EmitLValue(E->getSubExpr()).getAddress(); } Value *VisitUnaryDeref(const UnaryOperator *E) { if (E->getType()->isVoidType()) return Visit(E->getSubExpr()); // the actual value should be unused return EmitLoadOfLValue(E); } Value *VisitUnaryPlus(const UnaryOperator *E) { // This differs from gcc, though, most likely due to a bug in gcc. 
TestAndClearIgnoreResultAssign(); return Visit(E->getSubExpr()); } Value *VisitUnaryMinus (const UnaryOperator *E); Value *VisitUnaryNot (const UnaryOperator *E); Value *VisitUnaryLNot (const UnaryOperator *E); Value *VisitUnaryReal (const UnaryOperator *E); Value *VisitUnaryImag (const UnaryOperator *E); Value *VisitUnaryExtension(const UnaryOperator *E) { return Visit(E->getSubExpr()); } // C++ Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) { return EmitLoadOfLValue(E); } Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { return Visit(DAE->getExpr()); } Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) { CodeGenFunction::CXXDefaultInitExprScope Scope(CGF); return Visit(DIE->getExpr()); } Value *VisitCXXThisExpr(CXXThisExpr *TE) { return CGF.LoadCXXThis(); } Value *VisitExprWithCleanups(ExprWithCleanups *E) { CGF.enterFullExpression(E); CodeGenFunction::RunCleanupsScope Scope(CGF); return Visit(E->getSubExpr()); } Value *VisitCXXNewExpr(const CXXNewExpr *E) { return CGF.EmitCXXNewExpr(E); } Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) { CGF.EmitCXXDeleteExpr(E); return nullptr; } Value *VisitTypeTraitExpr(const TypeTraitExpr *E) { return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); } Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) { return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue()); } Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) { return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue()); } Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) { // C++ [expr.pseudo]p1: // The result shall only be used as the operand for the function call // operator (), and the result of such a call has type void. The only // effect is the evaluation of the postfix-expression before the dot or // arrow. CGF.EmitScalarExpr(E->getBase()); return nullptr; } Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) { return EmitNullValue(E->getType()); } Value *VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); return nullptr; } Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) { return Builder.getInt1(E->getValue()); } // Binary Operators. Value *EmitMul(const BinOpInfo &Ops) { if (Ops.Ty->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); case LangOptions::SOB_Undefined: if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul"); LLVM_FALLTHROUGH; // HLSL Change case LangOptions::SOB_Trapping: return EmitOverflowCheckedBinOp(Ops); } } if (Ops.Ty->isUnsignedIntegerType() && CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) return EmitOverflowCheckedBinOp(Ops); if (Ops.LHS->getType()->isFPOrFPVectorTy()) return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul"); return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); } /// Create a binary op that checks for overflow. /// Currently only supports +, - and *. Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops); // Check for undefined division and modulus behaviors. void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops, llvm::Value *Zero,bool isDiv); // Common helper for getting how wide LHS of shift is. 
static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS); Value *EmitDiv(const BinOpInfo &Ops); Value *EmitRem(const BinOpInfo &Ops); Value *EmitAdd(const BinOpInfo &Ops); Value *EmitSub(const BinOpInfo &Ops); Value *EmitShl(const BinOpInfo &Ops); Value *EmitShr(const BinOpInfo &Ops); Value *EmitAnd(const BinOpInfo &Ops) { return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and"); } Value *EmitXor(const BinOpInfo &Ops) { return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor"); } Value *EmitOr (const BinOpInfo &Ops) { return Builder.CreateOr(Ops.LHS, Ops.RHS, "or"); } BinOpInfo EmitBinOps(const BinaryOperator *E); LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E, Value *(ScalarExprEmitter::*F)(const BinOpInfo &), Value *&Result); Value *EmitCompoundAssign(const CompoundAssignOperator *E, Value *(ScalarExprEmitter::*F)(const BinOpInfo &)); // Binary operators and binary compound assignment operators. #define HANDLEBINOP(OP) \ Value *VisitBin ## OP(const BinaryOperator *E) { \ return Emit ## OP(EmitBinOps(E)); \ } \ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \ } HANDLEBINOP(Mul) HANDLEBINOP(Div) HANDLEBINOP(Rem) HANDLEBINOP(Add) HANDLEBINOP(Sub) HANDLEBINOP(Shl) HANDLEBINOP(Shr) HANDLEBINOP(And) HANDLEBINOP(Xor) HANDLEBINOP(Or) #undef HANDLEBINOP // Comparisons. Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc, unsigned SICmpOpc, unsigned FCmpOpc); #define VISITCOMP(CODE, UI, SI, FP) \ Value *VisitBin##CODE(const BinaryOperator *E) { \ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \ llvm::FCmpInst::FP); } VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT) VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT) VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE) VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE) VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ) VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE) #undef VISITCOMP Value *VisitBinAssign (const BinaryOperator *E); Value *VisitBinLAnd (const BinaryOperator *E); Value *VisitBinLOr (const BinaryOperator *E); Value *VisitBinComma (const BinaryOperator *E); Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); } Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); } // Other Operators. Value *VisitBlockExpr(const BlockExpr *BE); Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *); Value *VisitChooseExpr(ChooseExpr *CE); Value *VisitVAArgExpr(VAArgExpr *VE); Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) { return CGF.EmitObjCStringLiteral(E); } Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) { return CGF.EmitObjCBoxedExpr(E); } Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) { return CGF.EmitObjCArrayLiteral(E); } Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) { return CGF.EmitObjCDictionaryLiteral(E); } Value *VisitAsTypeExpr(AsTypeExpr *CE); Value *VisitAtomicExpr(AtomicExpr *AE); }; } // end anonymous namespace. //===----------------------------------------------------------------------===// // Utilities //===----------------------------------------------------------------------===// /// EmitConversionToBool - Convert the specified expression value to a /// boolean (i1) truth value. This is equivalent to "Val != 0". 
Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) { assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs"); if (SrcType->isRealFloatingType()) return EmitFloatToBoolConversion(Src); if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType)) return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT); assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && "Unknown scalar type to convert"); if (isa<llvm::IntegerType>(Src->getType())) return EmitIntToBoolConversion(Src); assert(isa<llvm::PointerType>(Src->getType())); return EmitPointerToBoolConversion(Src); } void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType, QualType DstType, llvm::Type *DstTy) { CodeGenFunction::SanitizerScope SanScope(&CGF); using llvm::APFloat; using llvm::APSInt; llvm::Type *SrcTy = Src->getType(); llvm::Value *Check = nullptr; if (llvm::IntegerType *IntTy = dyn_cast<llvm::IntegerType>(SrcTy)) { // Integer to floating-point. This can fail for unsigned short -> __half // or unsigned __int128 -> float. assert(DstType->isFloatingType()); bool SrcIsUnsigned = OrigSrcType->isUnsignedIntegerOrEnumerationType(); APFloat LargestFloat = APFloat::getLargest(CGF.getContext().getFloatTypeSemantics(DstType)); APSInt LargestInt(IntTy->getBitWidth(), SrcIsUnsigned); bool IsExact; if (LargestFloat.convertToInteger(LargestInt, APFloat::rmTowardZero, &IsExact) != APFloat::opOK) // The range of representable values of this floating point type includes // all values of this integer type. Don't need an overflow check. return; llvm::Value *Max = llvm::ConstantInt::get(VMContext, LargestInt); if (SrcIsUnsigned) Check = Builder.CreateICmpULE(Src, Max); else { llvm::Value *Min = llvm::ConstantInt::get(VMContext, -LargestInt); llvm::Value *GE = Builder.CreateICmpSGE(Src, Min); llvm::Value *LE = Builder.CreateICmpSLE(Src, Max); Check = Builder.CreateAnd(GE, LE); } } else { const llvm::fltSemantics &SrcSema = CGF.getContext().getFloatTypeSemantics(OrigSrcType); if (isa<llvm::IntegerType>(DstTy)) { // Floating-point to integer. This has undefined behavior if the source is // +-Inf, NaN, or doesn't fit into the destination type (after truncation // to an integer). unsigned Width = CGF.getContext().getIntWidth(DstType); bool Unsigned = DstType->isUnsignedIntegerOrEnumerationType(); APSInt Min = APSInt::getMinValue(Width, Unsigned); APFloat MinSrc(SrcSema, APFloat::uninitialized); if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) & APFloat::opOverflow) // Don't need an overflow check for lower bound. Just check for // -Inf/NaN. MinSrc = APFloat::getInf(SrcSema, true); else // Find the largest value which is too small to represent (before // truncation toward zero). MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative); APSInt Max = APSInt::getMaxValue(Width, Unsigned); APFloat MaxSrc(SrcSema, APFloat::uninitialized); if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) & APFloat::opOverflow) // Don't need an overflow check for upper bound. Just check for // +Inf/NaN. MaxSrc = APFloat::getInf(SrcSema, false); else // Find the smallest value which is too large to represent (before // truncation toward zero). MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive); // If we're converting from __half, convert the range to float to match // the type of src. 
      if (OrigSrcType->isHalfType()) {
        const llvm::fltSemantics &Sema =
          CGF.getContext().getFloatTypeSemantics(SrcType);
        bool IsInexact;
        MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
        MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
      }

      llvm::Value *GE =
        Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
      Check = Builder.CreateAnd(GE, LE);
    } else {
      // FIXME: Maybe split this sanitizer out from float-cast-overflow.
      //
      // Floating-point to floating-point. This has undefined behavior if the
      // source is not in the range of representable values of the destination
      // type. The C and C++ standards are spectacularly unclear here. We
      // diagnose finite out-of-range conversions, but allow infinities and
      // NaNs to convert to the corresponding value in the smaller type.
      //
      // C11 Annex F gives all such conversions defined behavior for IEC 60559
      // conforming implementations. Unfortunately, LLVM's fptrunc instruction
      // does not.

      // Converting from a lower rank to a higher rank can never have
      // undefined behavior, since higher-rank types must have a superset
      // of values of lower-rank types.
      if (CGF.getContext().getFloatingTypeOrder(OrigSrcType, DstType) != 1)
        return;
      assert(!OrigSrcType->isHalfType() &&
             "should not check conversion from __half, it has the lowest rank");

      const llvm::fltSemantics &DstSema =
        CGF.getContext().getFloatTypeSemantics(DstType);
      APFloat MinBad = APFloat::getLargest(DstSema, false);
      APFloat MaxBad = APFloat::getInf(DstSema, false);

      bool IsInexact;
      MinBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);
      MaxBad.convert(SrcSema, APFloat::rmTowardZero, &IsInexact);

      Value *AbsSrc = CGF.EmitNounwindRuntimeCall(
        CGF.CGM.getIntrinsic(llvm::Intrinsic::fabs, Src->getType()), Src);
      llvm::Value *GE =
        Builder.CreateFCmpOGT(AbsSrc, llvm::ConstantFP::get(VMContext, MinBad));
      llvm::Value *LE =
        Builder.CreateFCmpOLT(AbsSrc, llvm::ConstantFP::get(VMContext, MaxBad));
      Check = Builder.CreateNot(Builder.CreateAnd(GE, LE));
    }
  }

  // FIXME: Provide a SourceLocation.
  llvm::Constant *StaticArgs[] = {
    CGF.EmitCheckTypeDescriptor(OrigSrcType),
    CGF.EmitCheckTypeDescriptor(DstType)
  };
  CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
                "float_cast_overflow", StaticArgs, OrigSrc);
}

/// EmitScalarConversion - Emit a conversion from the specified type to the
/// specified destination type, both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
                                               QualType DstType) {
  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  if (DstType->isVoidType()) return nullptr;

  llvm::Value *OrigSrc = Src;
  QualType OrigSrcType = SrcType;
  llvm::Type *SrcTy = Src->getType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstType->isBooleanType())
    return EmitConversionToBool(Src, SrcType);

  llvm::Type *DstTy = ConvertType(DstType);

  // Cast from half through float if half isn't a native type.
  if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Cast to FP using the intrinsic if the half type itself isn't supported.
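    // A sketch of the non-native-half lowering (illustrative IR only):
    //   half -> float:  %f = call float @llvm.convert.from.fp16.f32(i16 %h)
    //   half -> i32:    widen to float as above, then fptosi/fptoui below.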
    if (DstTy->isFloatingPointTy()) {
      if (!CGF.getContext().getLangOpts().HalfArgsAndReturns)
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
            Src);
    } else {
      // Cast to other types through float, using either the intrinsic or
      // FPExt, depending on whether the half type itself is supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
        Src = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            Src);
      } else {
        Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
      }
      SrcType = CGF.getContext().FloatTy;
      SrcTy = CGF.FloatTy;
    }
  }

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  // Handle pointer conversions next: pointers can only be converted to/from
  // other pointers and integers. Check for pointer types in terms of LLVM, as
  // some native types (like Obj-C id) may map to a pointer type.
  if (isa<llvm::PointerType>(DstTy)) {
    // The source value may be an integer, or a pointer.
    if (isa<llvm::PointerType>(SrcTy))
      return Builder.CreateBitCast(Src, DstTy, "conv");

    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.IntPtrTy;
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
    // Then, cast to pointer.
    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
  }

  if (isa<llvm::PointerType>(SrcTy)) {
    // Must be a ptr to int cast.
    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
    return Builder.CreatePtrToInt(Src, DstTy, "conv");
  }

  // A scalar can be splatted to an extended vector of the same element type
  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
    // Cast the scalar to element type
    QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  // HLSL Change Starts
  if (CGF.getContext().getLangOpts().HLSL) {
    // A scalar can be splatted to an extended vector of the same element type
    if (DstTy->isVectorTy() && !SrcTy->isVectorTy()) {
      // Cast the scalar to element type
      const ExtVectorType *DstVecType =
          hlsl::ConvertHLSLVecMatTypeToExtVectorType(CGF.getContext(), DstType);
      QualType EltTy = DstVecType->getElementType();
      llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);

      // Splat the element across to all elements
      unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
      return Builder.CreateVectorSplat(NumElements, Elt, "splat");
    }

    if (SrcTy->isVectorTy() && DstTy->isVectorTy()) {
      Value *Res = nullptr;
      const ExtVectorType *SrcVecType =
          hlsl::ConvertHLSLVecMatTypeToExtVectorType(CGF.getContext(), SrcType);
      const ExtVectorType *DstVecType =
          hlsl::ConvertHLSLVecMatTypeToExtVectorType(CGF.getContext(), DstType);
      const llvm::VectorType *SrcVecTy = cast<llvm::VectorType>(SrcTy);
      const llvm::VectorType *DstVecTy = cast<llvm::VectorType>(DstTy);
      if (SrcVecTy->getNumElements() != DstVecTy->getNumElements()) {
        if (SrcVecTy->getNumElements() == 1) {
          // Cast the scalar to element type
          const ExtVectorType *DstVecType =
              hlsl::ConvertHLSLVecMatTypeToExtVectorType(CGF.getContext(),
                                                         DstType);
          QualType EltTy = DstVecType->getElementType();
          llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
          // Splat the element across to all elements
          unsigned NumElements =
              cast<llvm::VectorType>(DstTy)->getNumElements();
          return Builder.CreateVectorSplat(NumElements, Elt, "splat");
        }
      }
      if (isa<llvm::IntegerType>(SrcVecTy->getElementType())) {
        bool InputSigned =
            SrcVecType->getElementType()->isSignedIntegerOrEnumerationType();
        if (isa<llvm::IntegerType>(DstVecTy->getElementType()))
          Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
        else if (InputSigned)
          Res = Builder.CreateSIToFP(Src, DstTy, "conv");
        else
          Res = Builder.CreateUIToFP(Src, DstTy, "conv");
      } else if (isa<llvm::IntegerType>(DstVecTy->getElementType())) {
        assert(SrcVecTy->getElementType()->isFloatingPointTy() &&
               "Unknown real conversion");
        if (DstVecType->getElementType()->isSignedIntegerOrEnumerationType())
          Res = Builder.CreateFPToSI(Src, DstTy, "conv");
        else
          Res = Builder.CreateFPToUI(Src, DstTy, "conv");
      } else {
        assert(SrcVecTy->getElementType()->isFloatingPointTy() &&
               DstVecTy->getElementType()->isFloatingPointTy() &&
               "Unknown real conversion");
        if (DstVecTy->getElementType()->getTypeID() <
            SrcVecTy->getElementType()->getTypeID())
          Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
        else
          Res = Builder.CreateFPExt(Src, DstTy, "conv");
      }
      return Res;
    }

    if (SrcTy->isVectorTy() && !DstTy->isVectorTy()) {
      return Builder.CreateExtractElement(Src, (uint64_t)0);
    }
  }
  // HLSL Change Ends

  // Allow bitcast from vector to integer/fp of the same size.
  if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy))
    return Builder.CreateBitCast(Src, DstTy, "conv");

  // Finally, we have the arithmetic types: real int/float.
  Value *Res = nullptr;
  llvm::Type *ResTy = DstTy;

  // An overflowing conversion has undefined behavior if either the source type
  // or the destination type is a floating-point type.
  if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
      (OrigSrcType->isFloatingType() || DstType->isFloatingType()))
    EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType,
                             DstTy);

  // Cast to half through float if half isn't a native type.
  if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
    // Make sure we cast in a single step if from another FP type.
    if (SrcTy->isFloatingPointTy()) {
      // Use the intrinsic if the half type itself isn't supported
      // (as opposed to operations on half, available with NativeHalfType).
      if (!CGF.getContext().getLangOpts().HalfArgsAndReturns)
        return Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
      // If the half type is supported, just use an fptrunc.
      return Builder.CreateFPTrunc(Src, DstTy);
    }
    DstTy = CGF.FloatTy;
  }

  // HLSL Change Begins.
  if (const BuiltinType *BT = SrcType->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Kind::LitInt ||
        BT->getKind() == BuiltinType::Kind::LitFloat) {
      Res = CGF.CGM.getHLSLRuntime().EmitHLSLLiteralCast(CGF, Src, SrcType,
                                                         DstType);
      if (Res)
        return Res;
    }
  }
  // HLSL Change Ends.
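  // The remaining scalar conversions reduce to a single cast instruction
  // chosen by source/destination kind and signedness, e.g. (illustrative):
  //   i32 -> float: sitofp    u32 -> float:  uitofp
  //   float -> i32: fptosi    float -> u32:  fptoui
  //   double -> float: fptrunc    float -> double: fpext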
  if (isa<llvm::IntegerType>(SrcTy)) {
    bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstTy)) {
    assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstTy->getTypeID() < SrcTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  if (DstTy != ResTy) {
    if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
      assert(ResTy->isIntegerTy(16) &&
             "Only half FP requires extra conversion");
      Res = Builder.CreateCall(
          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                               CGF.CGM.FloatTy),
          Res);
    } else {
      Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
    }
  }

  return Res;
}

/// EmitComplexToScalarConversion - Emit a conversion from the specified
/// complex type to the specified destination type, where the destination type
/// is an LLVM scalar type.
Value *ScalarExprEmitter::
EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
                              QualType SrcTy, QualType DstTy) {
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    //  Complex != 0  -> (Real != 0) | (Imag != 0)
    Src.first  = EmitScalarConversion(Src.first, SrcTy, DstTy);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type."
  return EmitScalarConversion(Src.first, SrcTy, DstTy);
}

Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
  return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
}

/// \brief Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  StringRef CheckName;
  SmallVector<llvm::Constant *, 4> StaticData;
  SmallVector<llvm::Value *, 2> DynamicData;

  BinaryOperatorKind Opcode = Info.Opcode;
  if (BinaryOperator::isCompoundAssignmentOp(Opcode))
    Opcode = BinaryOperator::getOpForCompoundAssignment(Opcode);

  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    CheckName = "negate_overflow";
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
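      // (e.g. `x << 33` on a 32-bit int, caught under -fsanitize=shift;
      // illustrative.)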
CheckName = "shift_out_of_bounds"; const BinaryOperator *BO = cast<BinaryOperator>(Info.E); StaticData.push_back( CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType())); StaticData.push_back( CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType())); } else if (Opcode == BO_Div || Opcode == BO_Rem) { // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1). CheckName = "divrem_overflow"; StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); } else { // Arithmetic overflow (+, -, *). switch (Opcode) { case BO_Add: CheckName = "add_overflow"; break; case BO_Sub: CheckName = "sub_overflow"; break; case BO_Mul: CheckName = "mul_overflow"; break; default: llvm_unreachable("unexpected opcode for bin op check"); } StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty)); } DynamicData.push_back(Info.LHS); DynamicData.push_back(Info.RHS); } CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData); } //===----------------------------------------------------------------------===// // Visitor Methods //===----------------------------------------------------------------------===// Value *ScalarExprEmitter::VisitExpr(Expr *E) { CGF.ErrorUnsupported(E, "scalar expression"); if (E->getType()->isVoidType()) return nullptr; return llvm::UndefValue::get(CGF.ConvertType(E->getType())); } Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) { // Vector Mask Case if (E->getNumSubExprs() == 2 || (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) { Value *LHS = CGF.EmitScalarExpr(E->getExpr(0)); Value *RHS = CGF.EmitScalarExpr(E->getExpr(1)); Value *Mask; llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType()); unsigned LHSElts = LTy->getNumElements(); if (E->getNumSubExprs() == 3) { Mask = CGF.EmitScalarExpr(E->getExpr(2)); // Shuffle LHS & RHS into one input vector. SmallVector<llvm::Constant*, 32> concat; for (unsigned i = 0; i != LHSElts; ++i) { concat.push_back(Builder.getInt32(2*i)); concat.push_back(Builder.getInt32(2*i+1)); } Value* CV = llvm::ConstantVector::get(concat); LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat"); LHSElts *= 2; } else { Mask = RHS; } llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType()); llvm::Constant* EltMask; EltMask = llvm::ConstantInt::get(MTy->getElementType(), llvm::NextPowerOf2(LHSElts-1)-1); // Mask off the high bits of each shuffle index. Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(), EltMask); Mask = Builder.CreateAnd(Mask, MaskBits, "mask"); // newv = undef // mask = mask & maskbits // for each elt // n = extract mask i // x = extract val n // newv = insert newv, x, i llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(), MTy->getNumElements()); Value* NewV = llvm::UndefValue::get(RTy); for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) { Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i); Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx"); Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt"); NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins"); } return NewV; } Value* V1 = CGF.EmitScalarExpr(E->getExpr(0)); Value* V2 = CGF.EmitScalarExpr(E->getExpr(1)); SmallVector<llvm::Constant*, 32> indices; for (unsigned i = 2; i < E->getNumSubExprs(); ++i) { llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2); // Check for -1 and output it as undef in the IR. 
    if (Idx.isSigned() && Idx.isAllOnesValue())
      indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
    else
      indices.push_back(Builder.getInt32(Idx.getZExtValue()));
  }

  Value *SV = llvm::ConstantVector::get(indices);
  return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}

Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
  QualType SrcType = E->getSrcExpr()->getType(),
           DstType = E->getType();

  Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());

  SrcType = CGF.getContext().getCanonicalType(SrcType);
  DstType = CGF.getContext().getCanonicalType(DstType);
  if (SrcType == DstType) return Src;

  assert(SrcType->isVectorType() &&
         "ConvertVector source type must be a vector");
  assert(DstType->isVectorType() &&
         "ConvertVector destination type must be a vector");

  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = ConvertType(DstType);

  // Ignore conversions like int -> uint.
  if (SrcTy == DstTy)
    return Src;

  QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
           DstEltType = DstType->getAs<VectorType>()->getElementType();

  assert(SrcTy->isVectorTy() &&
         "ConvertVector source IR type must be a vector");
  assert(DstTy->isVectorTy() &&
         "ConvertVector destination IR type must be a vector");

  llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
             *DstEltTy = DstTy->getVectorElementType();

  if (DstEltType->isBooleanType()) {
    assert((SrcEltTy->isFloatingPointTy() ||
            isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");

    llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
    if (SrcEltTy->isFloatingPointTy()) {
      return Builder.CreateFCmpUNE(Src, Zero, "tobool");
    } else {
      return Builder.CreateICmpNE(Src, Zero, "tobool");
    }
  }

  // We have the arithmetic types: real int/float.
  Value *Res = nullptr;

  if (isa<llvm::IntegerType>(SrcEltTy)) {
    bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
    if (isa<llvm::IntegerType>(DstEltTy))
      Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
    else if (InputSigned)
      Res = Builder.CreateSIToFP(Src, DstTy, "conv");
    else
      Res = Builder.CreateUIToFP(Src, DstTy, "conv");
  } else if (isa<llvm::IntegerType>(DstEltTy)) {
    assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
    if (DstEltType->isSignedIntegerOrEnumerationType())
      Res = Builder.CreateFPToSI(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPToUI(Src, DstTy, "conv");
  } else {
    assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
           "Unknown real conversion");
    if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
      Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
    else
      Res = Builder.CreateFPExt(Src, DstTy, "conv");
  }

  return Res;
}

Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  llvm::APSInt Value;
  if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
    if (E->isArrow())
      CGF.EmitScalarExpr(E->getBase());
    else
      EmitLValue(E->getBase());
    return Builder.getInt(Value);
  }

  return EmitLoadOfLValue(E);
}

Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue contexts. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  if (!E->getBase()->getType()->isVectorType())
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
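  // (Illustrative: for an ext-vector `v` of IR type <4 x float>, `v[i]`
  // lowers to `extractelement <4 x float> %v, %i`.)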
  Value *Base = Visit(E->getBase());
  Value *Idx  = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}

static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
                                  unsigned Off, llvm::Type *I32Ty) {
  int MV = SVI->getMaskValue(Idx);
  if (MV == -1)
    return llvm::UndefValue::get(I32Ty);
  return llvm::ConstantInt::get(I32Ty, Off+MV);
}

static llvm::Constant *getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
  if (C->getBitWidth() != 32) {
    assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
           "Index operand too large for shufflevector mask!");
    return llvm::ConstantInt::get(I32Ty, C->getZExtValue());
  }
  return C;
}

Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert(Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    // HLSL Change Begins
    if (hlsl::IsHLSLMatType(E->getType())) {
      // init function for matrix
      return CGF.CGM.getHLSLRuntime().EmitHLSLInitListExpr(CGF, E,
                                                           /*DestPtr*/ nullptr);
    }
    // HLSL Change Ends
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  } else {
    if (hlsl::IsHLSLVecType(E->getType())) {
      return CGF.CGM.getHLSLRuntime().EmitHLSLInitListExpr(CGF, E,
                                                           /*DestPtr*/ nullptr);
    }
  }

  unsigned ResElts = VType->getNumElements();
  // HLSL Note - Matrix swizzle members emit - Consider handling matrix
  // swizzles here.

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsUndefShuffle = false;
  llvm::Value *V = llvm::UndefValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    SmallVector<llvm::Constant*, 16> Args;

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
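    // (e.g. initializing a 4-element vector from `v.x`, where `v` is itself
    // a 4-element vector, folds the element access into one shufflevector;
    // illustrative only.)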
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (EI->getVectorOperandType()->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into undef -> shuffle (src, undef)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsUndefShuffle = true;
          } else if (VIsUndefShuffle) {
            // insert into undefshuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
            Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
            Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsUndefShuffle = false;
          }
          if (!Args.empty()) {
            llvm::Constant *Mask = llvm::ConstantVector::get(Args);
            V = Builder.CreateShuffleVector(LHS, RHS, Mask);
            ++CurIdx;
            continue;
          }
        }
      }
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsUndefShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = VVT->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with undef, merge
          // this shuffle directly into it.
          if (VIsUndefShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
                                      CGF.Int32Ty));
          } else {
            Args.push_back(Builder.getInt32(j));
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
        Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));

        if (VIsUndefShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
      llvm::Constant *Mask = llvm::ConstantVector::get(Args);
      Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
                                         Mask, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(Builder.getInt32(j));
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(Builder.getInt32(j+Offset));
      Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
    }

    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    llvm::Constant *Mask = llvm::ConstantVector::get(Args);
    V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers.
  for (/* Do not initialize i */; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}

static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
  const Expr *E = CE->getSubExpr();

  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E)) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->getValueKind() != VK_RValue)
      return false;
  }

  return true;
}

// VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
// have to handle a broader range of conversions than explicit casts, as they
// handle things like function to ptr-to-function decay etc.
Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
  Expr *E = CE->getSubExpr();
  QualType DestTy = CE->getType();
  CastKind Kind = CE->getCastKind();

  // HLSL Change Begins
  if ((hlsl::IsHLSLMatType(E->getType()) ||
       hlsl::IsHLSLMatType(CE->getType())) &&
      Kind != CastKind::CK_FlatConversion) {
    llvm::Value *V = CGF.EmitScalarExpr(E);
    llvm::Type *RetTy = CGF.ConvertType(DestTy);
    if (V->getType() == RetTy)
      return V;
    return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall(CGF, CE, RetTy,
                                                                { V });
  }
  // HLSL Change Ends

  if (!DestTy->isVoidType())
    TestAndClearIgnoreResultAssign();

  // Since almost all cast kinds apply to scalars, this switch doesn't have
  // a default case, so the compiler will warn on a missing case.  The cases
  // are in the same order as in the CastKind enum.
  switch (Kind) {
  case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
  case CK_BuiltinFnToFnPtr:
    llvm_unreachable("builtin functions are handled elsewhere");

  case CK_LValueBitCast:
  case CK_ObjCObjectLValueCast: {
    Value *V = EmitLValue(E).getAddress();
    V = Builder.CreateBitCast(V,
                          ConvertType(CGF.getContext().getPointerType(DestTy)));
    return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy),
                            CE->getExprLoc());
  }

  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    Value *Src = Visit(const_cast<Expr*>(E));
    llvm::Type *SrcTy = Src->getType();
    llvm::Type *DstTy = ConvertType(DestTy);
    if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
      llvm_unreachable("wrong cast for pointers in different address spaces"
                       "(must be an address space cast)!");
    }

    if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
      if (auto PT = DestTy->getAs<PointerType>())
        CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
                                      /*MayBeNull=*/true,
                                      CodeGenFunction::CFITCK_UnrelatedCast,
                                      CE->getLocStart());
    }

    return Builder.CreateBitCast(Src, DstTy);
  }
  case CK_AddressSpaceConversion: {
    Value *Src = Visit(const_cast<Expr*>(E));
    return Builder.CreateAddrSpaceCast(Src, ConvertType(DestTy));
  }
  case CK_AtomicToNonAtomic:
  case CK_NonAtomicToAtomic:
  case CK_NoOp:
  case CK_UserDefinedConversion:
    return Visit(const_cast<Expr*>(E));

  case CK_BaseToDerived: {
    const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");

    llvm::Value *V = Visit(E);

    llvm::Value *Derived =
      CGF.GetAddressOfDerivedClass(V, DerivedClassDecl,
                                   CE->path_begin(), CE->path_end(),
                                   ShouldNullCheckClassCastValue(CE));

    // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
    // performed and the object is not of the derived type.
    if (CGF.sanitizePerformTypeCheck())
      CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
                        Derived, DestTy->getPointeeType());

    if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
      CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
                                    /*MayBeNull=*/true,
                                    CodeGenFunction::CFITCK_DerivedCast,
                                    CE->getLocStart());

    return Derived;
  }
  case CK_UncheckedDerivedToBase:
  case CK_DerivedToBase: {
    const CXXRecordDecl *DerivedClassDecl =
      E->getType()->getPointeeCXXRecordDecl();
    assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");

    return CGF.GetAddressOfBaseClass(
        Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
        ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
  }
  case CK_Dynamic: {
    Value *V = Visit(const_cast<Expr*>(E));
    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
    return CGF.EmitDynamicCast(V, DCE);
  }

  case CK_ArrayToPointerDecay: {
    assert(E->getType()->isArrayType() &&
           "Array to pointer decay must have array source type!");

    Value *V = EmitLValue(E).getAddress();  // Bitfields can't be arrays.

    // Note that VLA pointers are always decayed, so we don't need to do
    // anything here.
    if (!E->getType()->isVariableArrayType()) {
      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
      llvm::Type *NewTy = ConvertType(E->getType());
      V = CGF.Builder.CreatePointerCast(
          V, NewTy->getPointerTo(V->getType()->getPointerAddressSpace()));

      assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) &&
             "Expected pointer to array");
      V = Builder.CreateStructGEP(NewTy, V, 0, "arraydecay");
    }

    // Make sure the array decay ends up being the right type.  This matters if
    // the array type was of an incomplete type.
    return CGF.Builder.CreatePointerCast(V, ConvertType(CE->getType()));
  }
  case CK_FunctionToPointerDecay:
    return EmitLValue(E).getAddress();

  case CK_NullToPointer:
    if (MustVisitNullValue(E))
      (void) Visit(E);

    return llvm::ConstantPointerNull::get(
                               cast<llvm::PointerType>(ConvertType(DestTy)));

  case CK_NullToMemberPointer: {
    if (MustVisitNullValue(E))
      (void) Visit(E);

    const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
  }

  case CK_ReinterpretMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer: {
    Value *Src = Visit(E);

    // Note that the AST doesn't distinguish between checked and
    // unchecked member pointer conversions, so we always have to
    // implement checked conversions here.  This is inefficient when
    // actual control flow may be required in order to perform the
    // check, which it is for data member pointers (but not member
    // function pointers on Itanium and ARM).
    return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
  }

  case CK_ARCProduceObject:
    return CGF.EmitARCRetainScalarExpr(E);
  case CK_ARCConsumeObject:
    return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
  case CK_ARCReclaimReturnedObject: {
    llvm::Value *value = Visit(E);
    value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
    return CGF.EmitObjCConsumeObject(E->getType(), value);
  }
  case CK_ARCExtendBlockObject:
    return CGF.EmitARCExtendBlockObject(E);

  case CK_CopyAndAutoreleaseBlockObject:
    return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());

  case CK_FloatingRealToComplex:
  case CK_FloatingComplexCast:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_FloatingComplexToIntegralComplex:
  case CK_ConstructorConversion:
  case CK_ToUnion:
    llvm_unreachable("scalar cast to non-scalar value");

  case CK_LValueToRValue:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
    assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
    return Visit(const_cast<Expr*>(E));

  case CK_IntegralToPointer: {
    Value *Src = Visit(const_cast<Expr*>(E));

    // First, convert to the correct width so that we control the kind of
    // extension.
    llvm::Type *MiddleTy = CGF.IntPtrTy;
    bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
    llvm::Value* IntResult =
      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");

    return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
  }
  case CK_PointerToIntegral:
    assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
    return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));

  case CK_ToVoid: {
    CGF.EmitIgnoredExpr(E);
    return nullptr;
  }
  case CK_VectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr*>(E));
    Elt = EmitScalarConversion(Elt, E->getType(),
                               DestTy->getAs<VectorType>()->getElementType());

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }

  case CK_IntegralCast:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingCast:
    return EmitScalarConversion(Visit(E), E->getType(), DestTy);

  case CK_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_PointerToBoolean:
    return EmitPointerToBoolConversion(Visit(E));
  case CK_FloatingToBoolean:
    return EmitFloatToBoolConversion(Visit(E));
  case CK_MemberPointerToBoolean: {
    llvm::Value *MemPtr = Visit(E);
    const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
    return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
  }

  case CK_FloatingComplexToReal:
  case CK_IntegralComplexToReal:
    return CGF.EmitComplexExpr(E, false, true).first;

  case CK_FloatingComplexToBoolean:
  case CK_IntegralComplexToBoolean: {
    CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);

    // TODO: kill this function off, inline appropriate case here
    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
  }

  case CK_ZeroToOCLEvent: {
    assert(DestTy->isEventT() && "CK_ZeroToOCLEvent cast on non-event type");
    return llvm::Constant::getNullValue(ConvertType(DestTy));
  }

  // HLSL Change Starts
  case CK_HLSLMatrixTruncationCast:
  case CK_HLSLVectorTruncationCast: {
    // must be vector
    llvm::Value *val = Visit(E);
    if (const ExtVectorType *extVecTy =
            hlsl::ConvertHLSLVecMatTypeToExtVectorType(CGF.getContext(),
                                                       CE->getType())) {
      uint32_t vecSize = extVecTy->getNumElements();
      SmallVector<llvm::Constant *, 4> Mask;
      for (unsigned i = 0; i != vecSize; ++i)
        Mask.push_back(Builder.getInt32(i));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      return Builder.CreateShuffleVector(
          val, llvm::UndefValue::get(val->getType()), MaskV);
    } else if (E->getType()->isScalarType()) {
      return Builder.CreateExtractElement(val, (uint64_t)0);
    }
  } break;
  case CK_HLSLCC_FloatingToIntegral:
  case CK_HLSLCC_FloatingCast: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy);
  }
  case CK_HLSLVectorToMatrixCast:
  case CK_HLSLMatrixToVectorCast: {
    llvm::Value *val = Visit(E);
    llvm::Type *RetTy = CGF.ConvertType(DestTy);
    val = CGF.CGM.getHLSLRuntime().EmitHLSLMatrixLoad(CGF, val, E->getType());
    return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall(CGF, CE, RetTy,
                                                                { val });
  }
  case CK_HLSLCC_IntegralCast:
  case CK_HLSLCC_IntegralToFloating: {
    return EmitScalarConversion(Visit(E), E->getType(), DestTy);
  }
  case CK_HLSLMatrixSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr *>(E));
    return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall(CGF, CE, DstTy,
                                                                { Elt });
  }
  case CK_HLSLVectorSplat: {
    llvm::Type *DstTy = ConvertType(DestTy);
    Value *Elt = Visit(const_cast<Expr *>(E));
    const ExtVectorType *extVecTy =
        hlsl::ConvertHLSLVecMatTypeToExtVectorType(CGF.getContext(),
                                                   CE->getType());
    Elt = EmitScalarConversion(Elt, E->getType(), extVecTy->getElementType());

    // Splat the element across to all elements
    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
    return Builder.CreateVectorSplat(NumElements, Elt, "splat");
  }
  case CK_HLSLVectorToScalarCast:
  case CK_HLSLMatrixToScalarCast: {
    return Builder.CreateExtractElement(Visit(E), (uint64_t)0);
  }
  case CK_FlatConversion: {
    llvm::Value *Src = Visit(E);

    // We should have an aggregate type (struct or array) on one side,
    // and a numeric type (scalar, vector or matrix) on the other.
    // If the aggregate type is the cast source, it should be a pointer.
    // Aggregate to aggregate casts are handled in CGExprAgg.cpp
    auto areCompoundAndNumeric = [](QualType lhs, QualType rhs) {
      return hlsl::IsHLSLAggregateType(lhs) &&
             (rhs->isBuiltinType() || hlsl::IsHLSLVecMatType(rhs));
    };
    assert(Src->getType()->isPointerTy()
               ? areCompoundAndNumeric(E->getType(), DestTy)
               : areCompoundAndNumeric(DestTy, E->getType()));
    (void)areCompoundAndNumeric;

    llvm::Value *DstPtr = CGF.CreateMemTemp(DestTy, "flatconv");
    CGF.CGM.getHLSLRuntime().EmitHLSLFlatConversion(CGF, Src, DstPtr, DestTy,
                                                    E->getType());

    // Return an rvalue.
    // Matrices must be loaded with the special function.
    if (hlsl::IsHLSLMatType(DestTy))
      return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixLoad(CGF, DstPtr, DestTy);

    // Structs/arrays are pointers to temporaries.
    if (hlsl::IsHLSLAggregateType(DestTy))
      return DstPtr;

    // Scalars/vectors are loaded regularly.
    llvm::Value *Result = Builder.CreateLoad(DstPtr);
    return Result = CGF.EmitFromMemory(Result, DestTy);
  }
  case CK_HLSLCC_IntegralToBoolean:
    return EmitIntToBoolConversion(Visit(E));
  case CK_HLSLCC_FloatingToBoolean:
    return EmitFloatToBoolConversion(Visit(E));
  // HLSL Change Ends
  }

  llvm_unreachable("unknown scalar cast");
}

Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
                                                !E->getType()->isVoidType());
  if (!RetAlloca)
    return nullptr;
  return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
                              E->getExprLoc());
}

//===----------------------------------------------------------------------===//
//                             Unary Operators
//===----------------------------------------------------------------------===//

static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
                                           llvm::Value *InVal,
                                           bool IsInc) {
  BinOpInfo BinOp;
  BinOp.LHS = InVal;
  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
  BinOp.Ty = E->getType();
  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
  BinOp.FPContractable = false;
  BinOp.E = E;
  return BinOp;
}

llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  llvm::Value *Amount =
      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
  StringRef Name = IsInc ? "inc" : "dec";
  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  case LangOptions::SOB_Defined:
    return Builder.CreateAdd(InVal, Amount, Name);
  case LangOptions::SOB_Undefined:
    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
      return Builder.CreateNSWAdd(InVal, Amount, Name);
    LLVM_FALLTHROUGH; // HLSL Change
  case LangOptions::SOB_Trapping:
    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
  }
  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
}

llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
                                           bool isInc, bool isPre) {
  QualType type = E->getSubExpr()->getType();
  llvm::PHINode *atomicPHI = nullptr;
  llvm::Value *value;
  llvm::Value *input;

  int amount = (isInc ? 1 : -1);

  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
    type = atomicTy->getValueType();
    if (isInc && type->isBooleanType()) {
      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
      if (isPre) {
        Builder.Insert(new llvm::StoreInst(True,
              LV.getAddress(), LV.isVolatileQualified(),
              LV.getAlignment().getQuantity(),
              llvm::SequentiallyConsistent));
        return Builder.getTrue();
      }
      // For atomic bool increment, we just store true and return it for
      // preincrement, do an atomic swap with true for postincrement
      return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
          LV.getAddress(), True, llvm::SequentiallyConsistent);
    }
    // Special case for atomic increment / decrement on integers, emit
    // atomicrmw instructions.
    // We skip this if we want to be doing overflow checking, and fall into
    // the slow path with the atomic cmpxchg loop.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
        llvm::AtomicRMWInst::Sub;
      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
        llvm::Instruction::Sub;
      llvm::Value *amt = CGF.EmitToMemory(
          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      llvm::Value *old = Builder.CreateAtomicRMW(aop,
          LV.getAddress(), amt, llvm::SequentiallyConsistent);
      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
    }
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    value = CGF.EmitToMemory(value, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(value->getType(), 2);
    atomicPHI->addIncoming(value, startBB);
    value = atomicPHI;
  } else {
    value = EmitLoadOfLValue(LV, E->getExprLoc());
    input = value;
  }

  // Special case of integer increment that we have to check first: bool++.
  // Due to promotion rules, we get:
  //   bool++ -> bool = bool + 1
  //          -> bool = (int)bool + 1
  //          -> bool = ((int)bool + 1 != 0)
  // An interesting aspect of this is that increment is always true.
  // Decrement does not have this property.
  if (isInc && type->isBooleanType()) {
    value = Builder.getTrue();

  // Most common case by far: integer increment.
  } else if (type->isIntegerType()) {
    // Note that signed integer inc/dec with width less than int can't
    // overflow because of promotion rules; we're just eliding a few steps
    // here.
    bool CanOverflow = value->getType()->getIntegerBitWidth() >=
                       CGF.IntTy->getIntegerBitWidth();
    if (CanOverflow && type->isSignedIntegerOrEnumerationType()) {
      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
    } else if (CanOverflow && type->isUnsignedIntegerType() &&
               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
      value =
          EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, value, isInc));
    } else {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    }

  // Next most common: pointer increment.
  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
    QualType type = ptr->getPointeeType();

    // VLA types don't have constant size.
    if (const VariableArrayType *vla
          = CGF.getContext().getAsVariableArrayType(type)) {
      llvm::Value *numElts = CGF.getVLASize(vla).first;
      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, numElts, "vla.inc");
      else
        value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");

    // Arithmetic on function pointers (!) is just +-1.
    } else if (type->isFunctionType()) {
      llvm::Value *amt = Builder.getInt32(amount);

      value = CGF.EmitCastToVoidPtr(value);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.funcptr");
      else
        value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
      value = Builder.CreateBitCast(value, input->getType());

    // For everything else, we can just do a simple increment.
    } else {
      llvm::Value *amt = Builder.getInt32(amount);
      if (CGF.getLangOpts().isSignedOverflowDefined())
        value = Builder.CreateGEP(value, amt, "incdec.ptr");
      else
        value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
    }

  // Vector increment/decrement.
  } else if (type->isVectorType() || hlsl::IsHLSLVecType(type)) { // HLSL Change
    if (type->hasIntegerRepresentation()) {
      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);

      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
    } else {
      value = Builder.CreateFAdd(
                  value,
                  llvm::ConstantFP::get(value->getType(), amount),
                  isInc ? "inc" : "dec");
    }

  // HLSL Change Begins
  // HLSL matrix
  } else if (hlsl::IsHLSLMatType(type)) {
    // generate operator call
    value = CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall(
        CGF, E, value->getType(), {value});
    // store updated value
    CGF.CGM.getHLSLRuntime().EmitHLSLMatrixStore(CGF, value, LV.getAddress(),
                                                 E->getType());
    // If this is a postinc, return the value read from memory, otherwise use
    // the updated value.
    return isPre ? value : input;
  // HLSL Change Ends

  // Floating point.
  } else if (type->isRealFloatingType()) {
    // Add the inc/dec to the real part.
    llvm::Value *amt;

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      // Another special case: half FP increment should be done via float
      if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
                                 CGF.CGM.FloatTy),
            input, "incdec.conv");
      } else {
        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
      }
    }

    if (value->getType()->isFloatTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<float>(amount)));
    else if (value->getType()->isDoubleTy())
      amt = llvm::ConstantFP::get(VMContext,
                                  llvm::APFloat(static_cast<double>(amount)));
    else {
      // Remaining types are either Half or LongDouble.  Convert from float.
      llvm::APFloat F(static_cast<float>(amount));
      bool ignored;
      // Don't use getFloatTypeSemantics because Half isn't
      // necessarily represented using the "half" LLVM type.
      F.convert(value->getType()->isHalfTy()
                    ? CGF.getTarget().getHalfFormat()
                    : CGF.getTarget().getLongDoubleFormat(),
                llvm::APFloat::rmTowardZero, &ignored);
      amt = llvm::ConstantFP::get(VMContext, F);
    }
    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");

    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
      if (!CGF.getContext().getLangOpts().HalfArgsAndReturns) {
        value = Builder.CreateCall(
            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
                                 CGF.CGM.FloatTy),
            value, "incdec.conv");
      } else {
        value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
      }
    }

  // Objective-C pointer types.
  } else {
    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
    value = CGF.EmitCastToVoidPtr(value);

    CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
    if (!isInc) size = -size;
    llvm::Value *sizeValue =
      llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());

    if (CGF.getLangOpts().isSignedOverflowDefined())
      value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
    else
      value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
    value = Builder.CreateBitCast(value, input->getType());
  }

  if (atomicPHI) {
    llvm::BasicBlock *opBB = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, opBB);
    Builder.CreateCondBr(success, contBB, opBB);
    Builder.SetInsertPoint(contBB);
    return isPre ? value : input;
  }

  // Store the updated result through the lvalue.
  if (LV.isBitField())
    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
  else
    CGF.EmitStoreThroughLValue(RValue::get(value), LV);

  // If this is a postinc, return the value read from memory, otherwise use the
  // updated value.
  return isPre ? value : input;
}


Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  // Emit unary minus with EmitSub so we handle overflow cases etc.
  BinOpInfo BinOp;
  BinOp.RHS = Visit(E->getSubExpr());

  if (BinOp.RHS->getType()->isFPOrFPVectorTy())
    BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
  else
    BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
  BinOp.Ty = E->getType();
  BinOp.Opcode = BO_Sub;
  BinOp.FPContractable = false;
  BinOp.E = E;
  return EmitSub(BinOp);
}

Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  Value *Op = Visit(E->getSubExpr());
  return Builder.CreateNot(Op, "neg");
}

Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
  // Perform vector logical not on comparison with zero vector.
  if (E->getType()->isExtVectorType() ||
      hlsl::IsHLSLVecType(E->getType())) { // HLSL Change
    Value *Oper = Visit(E->getSubExpr());
    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
    Value *Result;
    if (Oper->getType()->isFPOrFPVectorTy())
      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
    else
      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
  }

  // Compare operand to zero.
  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());

  // Invert value.
  // TODO: Could dynamically modify easy computations here.  For example, if
  // the operand is an icmp ne, turn into icmp eq.
  BoolVal = Builder.CreateNot(BoolVal, "lnot");

  // ZExt result to the expr type.
  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
}

Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
  // Try folding the offsetof to a constant.
  llvm::APSInt Value;
  if (E->EvaluateAsInt(Value, CGF.getContext()))
    return Builder.getInt(Value);

  // Loop over the components of the offsetof to compute the value.
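  // (Illustrative: offsetof(S, a[2].b) walks an Array node contributing
  // 2 * sizeof(a[0]) and then a Field node contributing the byte offset of
  // `b`, with the pieces summed below.)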
  unsigned n = E->getNumComponents();
  llvm::Type* ResultType = ConvertType(E->getType());
  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned i = 0; i != n; ++i) {
    OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
    llvm::Value *Offset = nullptr;
    switch (ON.getKind()) {
    case OffsetOfExpr::OffsetOfNode::Array: {
      // Compute the index
      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");

      // Save the element type
      CurrentType =
          CGF.getContext().getAsArrayType(CurrentType)->getElementType();

      // Compute the element size
      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());

      // Multiply out to compute the result
      Offset = Builder.CreateMul(Idx, ElemSize);
      break;
    }

    case OffsetOfExpr::OffsetOfNode::Field: {
      FieldDecl *MemberDecl = ON.getField();
      RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Compute the index of the field in its parent.
      unsigned i = 0;
      // FIXME: It would be nice if we didn't have to loop here!
      for (RecordDecl::field_iterator Field = RD->field_begin(),
                                      FieldEnd = RD->field_end();
           Field != FieldEnd; ++Field, ++i) {
        if (*Field == MemberDecl)
          break;
      }
      assert(i < RL.getFieldCount() && "offsetof field in wrong type");

      // Compute the offset to the field
      int64_t OffsetInt = RL.getFieldOffset(i) /
                          CGF.getContext().getCharWidth();
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);

      // Save the element type.
      CurrentType = MemberDecl->getType();
      break;
    }

    case OffsetOfExpr::OffsetOfNode::Identifier:
      llvm_unreachable("dependent __builtin_offsetof");

    case OffsetOfExpr::OffsetOfNode::Base: {
      if (ON.getBase()->isVirtual()) {
        CGF.ErrorUnsupported(E, "virtual base in offsetof");
        continue;
      }

      RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);

      // Save the element type.
      CurrentType = ON.getBase()->getType();

      // Compute the offset to the base.
      const RecordType *BaseRT = CurrentType->getAs<RecordType>();
      CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
      break;
    }
    }
    Result = Builder.CreateAdd(Result, Offset);
  }
  return Result;
}

/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type
/// of argument of the sizeof expression as an integer.
Value *
ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
                              const UnaryExprOrTypeTraitExpr *E) {
  QualType TypeToSize = E->getTypeOfArgument();
  if (E->getKind() == UETT_SizeOf) {
    if (const VariableArrayType *VAT =
          CGF.getContext().getAsVariableArrayType(TypeToSize)) {
      if (E->isArgumentType()) {
        // sizeof(type) - make sure to emit the VLA size.
        CGF.EmitVariablyModifiedType(TypeToSize);
      } else {
        // C99 6.5.3.4p2: If the argument is an expression of type
        // VLA, it is evaluated.
        CGF.EmitIgnoredExpr(E->getArgumentExpr());
      }

      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = CGF.getVLASize(VAT);

      llvm::Value *size = numElts;

      // Scale the number of non-VLA elements by the non-VLA element size.
      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);

      return size;
    }
  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
    auto Alignment =
        CGF.getContext()
            .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
                E->getTypeOfArgument()->getPointeeType()))
            .getQuantity();
    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
  }

  // If this isn't sizeof(vla), the result must be constant; use the constant
  // folding logic so we don't have to duplicate it here.
  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
}

Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  return Visit(Op);
}

Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue())
      return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
                                  E->getExprLoc()).getScalarVal();

    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
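  // (e.g. `__imag (x = 1)` still performs the store but evaluates to 0;
  // illustrative.)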
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else
    CGF.EmitScalarExpr(Op, true);
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}

//===----------------------------------------------------------------------===//
//                           Binary Operators
//===----------------------------------------------------------------------===//

BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
  TestAndClearIgnoreResultAssign();
  BinOpInfo Result;
  Result.LHS = Visit(E->getLHS());
  Result.RHS = Visit(E->getRHS());
  Result.Ty  = E->getType();
  Result.Opcode = E->getOpcode();
  Result.FPContractable = E->isFPContractable();
  Result.E = E;
  return Result;
}

LValue ScalarExprEmitter::EmitCompoundAssignLValue(
                                          const CompoundAssignOperator *E,
                        Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
                                                   Value *&Result) {
  // HLSL Change Begins
  if (hlsl::IsHLSLMatType(E->getType())) {
    const Expr *LHS = E->getLHS();
    const Expr *RHS = E->getRHS();
    LValue L = CGF.EmitLValue(LHS);
    Value *LVal = L.getAddress();
    Value *RVal = CGF.EmitScalarExpr(RHS);
    SmallVector<Value *, 4> paramList;
    Value *LdLVal =
        CGF.CGM.getHLSLRuntime().EmitHLSLMatrixLoad(CGF, LVal, LHS->getType());
    paramList.emplace_back(LdLVal);
    paramList.emplace_back(RVal);
    // Set return type to be non-reference type for Result.
    llvm::Type *RetType = LVal->getType()->getPointerElementType();
    Result = CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall(
        CGF, E, RetType, paramList);
    // Store result to LVal.
    CGF.CGM.getHLSLRuntime().EmitHLSLMatrixStore(CGF, Result, LVal,
                                                 LHS->getType());
    return L;
  }
  // HLSL Change Ends

  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  if (E->getComputationResultType()->isAnyComplexType())
    return CGF.EmitScalarCompoundAssignWithComplex(E, Result);

  // Emit the RHS first.  __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.
  OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = E->getComputationResultType();
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPContractable = false;
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
            LangOptions::SOB_Trapping) {
      llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        aop = llvm::AtomicRMWInst::Add;
        break;
      case BO_SubAssign:
        aop = llvm::AtomicRMWInst::Sub;
        break;
      case BO_AndAssign:
        aop = llvm::AtomicRMWInst::And;
        break;
      case BO_XorAssign:
        aop = llvm::AtomicRMWInst::Xor;
        break;
      case BO_OrAssign:
        aop = llvm::AtomicRMWInst::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (aop != llvm::AtomicRMWInst::BAD_BINOP) {
        llvm::Value *amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy),
            LHSTy);
        Builder.CreateAtomicRMW(aop, LHSLV.getAddress(), amt,
                                llvm::SequentiallyConsistent);
        return LHSLV;
      }
    }
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
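    // Shape of the compare-exchange loop emitted below (illustrative IR):
    //   atomic_op:
    //     %old = phi [ %initial, %entry ], [ %failed, %atomic_op ]
    //     %new = <op> %old, %rhs
    //     cmpxchg via EmitAtomicCompareExchange, then
    //     br i1 %success, label %atomic_cont, label %atomic_op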
llvm::BasicBlock *startBB = Builder.GetInsertBlock(); llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn); OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type); Builder.CreateBr(opBB); Builder.SetInsertPoint(opBB); atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2); atomicPHI->addIncoming(OpInfo.LHS, startBB); OpInfo.LHS = atomicPHI; } else OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc()); OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType()); // Expand the binary operator. Result = (this->*Func)(OpInfo); // Convert the result back to the LHS type. Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy); if (atomicPHI) { llvm::BasicBlock *opBB = Builder.GetInsertBlock(); llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn); auto Pair = CGF.EmitAtomicCompareExchange( LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc()); llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy); llvm::Value *success = Pair.second; atomicPHI->addIncoming(old, opBB); Builder.CreateCondBr(success, contBB, opBB); Builder.SetInsertPoint(contBB); return LHSLV; } // Store the result value into the LHS lvalue. Bit-fields are handled // specially because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after the // assignment...'. if (LHSLV.isBitField()) CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result); else CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV); return LHSLV; } Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { bool Ignore = TestAndClearIgnoreResultAssign(); Value *RHS; LValue LHS = EmitCompoundAssignLValue(E, Func, RHS); // If the result is clearly ignored, return now. if (Ignore) return nullptr; // The result of an assignment in C is the assigned r-value. if (!CGF.getLangOpts().CPlusPlus) return RHS; // If the lvalue is non-volatile, return the computed value of the assignment. if (!LHS.isVolatileQualified()) return RHS; // Otherwise, reload the value. 
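// NOTE (editorial, illustrative): in C++ the result of 'v += x' is the
// lvalue 'v' itself, so for 'volatile int v' the value must be re-read
// from memory here instead of reusing the value just stored.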
return EmitLoadOfLValue(LHS, E->getExprLoc()); } void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck( const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) { SmallVector<std::pair<llvm::Value *, SanitizerMask>, 2> Checks; if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero), SanitizerKind::IntegerDivideByZero)); } if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) && Ops.Ty->hasSignedIntegerRepresentation()) { llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType()); llvm::Value *IntMin = Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth())); llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL); llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin); llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne); llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or"); Checks.push_back( std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow)); } if (Checks.size() > 0) EmitBinOpCheck(Checks, Ops); } Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { { CodeGenFunction::SanitizerScope SanScope(&CGF); if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) && Ops.Ty->isIntegerType()) { llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true); } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) && Ops.Ty->isRealFloatingType()) { llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero); EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero), Ops); } } if (Ops.LHS->getType()->isFPOrFPVectorTy()) { llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); if (CGF.getLangOpts().OpenCL) { // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp llvm::Type *ValTy = Val->getType(); if (ValTy->isFloatTy() || (isa<llvm::VectorType>(ValTy) && cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy())) CGF.SetFPAccuracy(Val, 2.5); } return Val; } else if (Ops.Ty->hasUnsignedIntegerRepresentation()) return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); else return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); } Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { // Rem in C can't be a floating point type: C99 6.5.5p2. if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) { CodeGenFunction::SanitizerScope SanScope(&CGF); llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty)); if (Ops.Ty->isIntegerType()) EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false); } // HLSL Change Begins. if (CGF.getLangOpts().HLSL) { if (Ops.LHS->getType()->getScalarType()->isFloatingPointTy()) { return Builder.CreateFRem(Ops.LHS, Ops.RHS, "frem"); } } // HLSL Change Ends. if (Ops.Ty->hasUnsignedIntegerRepresentation()) return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); else return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); } Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { unsigned IID; unsigned OpID = 0; bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType(); switch (Ops.Opcode) { case BO_Add: case BO_AddAssign: OpID = 1; IID = isSigned ? llvm::Intrinsic::sadd_with_overflow : llvm::Intrinsic::uadd_with_overflow; break; case BO_Sub: case BO_SubAssign: OpID = 2; IID = isSigned ? 
llvm::Intrinsic::ssub_with_overflow : llvm::Intrinsic::usub_with_overflow; break; case BO_Mul: case BO_MulAssign: OpID = 3; IID = isSigned ? llvm::Intrinsic::smul_with_overflow : llvm::Intrinsic::umul_with_overflow; break; default: llvm_unreachable("Unsupported operation for overflow detection"); } OpID <<= 1; if (isSigned) OpID |= 1; llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty); llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy); Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS}); Value *result = Builder.CreateExtractValue(resultAndOverflow, 0); Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1); // Handle overflow with llvm.trap if no custom handler has been specified. const std::string *handlerName = &CGF.getLangOpts().OverflowHandler; if (handlerName->empty()) { // If the signed-integer-overflow sanitizer is enabled, emit a call to its // runtime. Otherwise, this is a -ftrapv check, so just emit a trap. if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) { CodeGenFunction::SanitizerScope SanScope(&CGF); llvm::Value *NotOverflow = Builder.CreateNot(overflow); SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow : SanitizerKind::UnsignedIntegerOverflow; EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops); } else CGF.EmitTrapCheck(Builder.CreateNot(overflow)); return result; } // Branch in case of overflow. llvm::BasicBlock *initialBB = Builder.GetInsertBlock(); llvm::Function::iterator insertPt = initialBB; llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn, std::next(insertPt)); llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn); Builder.CreateCondBr(overflow, overflowBB, continueBB); // If an overflow handler is set, then we want to call it and then use its // result, if it returns. Builder.SetInsertPoint(overflowBB); // Get the overflow handler. llvm::Type *Int8Ty = CGF.Int8Ty; llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty }; llvm::FunctionType *handlerTy = llvm::FunctionType::get(CGF.Int64Ty, argTypes, true); llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName); // Sign extend the args to 64-bit, so that we can use the same handler for // all types of overflow. llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty); llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty); // Call the handler with the two arguments, the operation, and the size of // the result. llvm::Value *handlerArgs[] = { lhs, rhs, Builder.getInt8(OpID), Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()) }; llvm::Value *handlerResult = CGF.EmitNounwindRuntimeCall(handler, handlerArgs); // Truncate the result back to the desired size. handlerResult = Builder.CreateTrunc(handlerResult, opTy); Builder.CreateBr(continueBB); Builder.SetInsertPoint(continueBB); llvm::PHINode *phi = Builder.CreatePHI(opTy, 2); phi->addIncoming(result, initialBB); phi->addIncoming(handlerResult, overflowBB); return phi; } /// Emit pointer + index arithmetic. static Value *emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op, bool isSubtraction) { // Must have binary (not unary) expr here. Unary pointer // increment/decrement doesn't use this path. const BinaryOperator *expr = cast<BinaryOperator>(op.E); Value *pointer = op.LHS; Expr *pointerOperand = expr->getLHS(); Value *index = op.RHS; Expr *indexOperand = expr->getRHS(); // In a subtraction, the LHS is always the pointer. 
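// NOTE (editorial, illustrative): addition is commutative, so both 'p + i'
// and 'i + p' reach this helper; the swap below canonicalizes the pointer
// into 'pointer'. Subtraction never needs the swap because only 'p - i' is
// well-formed.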
if (!isSubtraction && !pointer->getType()->isPointerTy()) { std::swap(pointer, index); std::swap(pointerOperand, indexOperand); } unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth(); if (width != CGF.PointerWidthInBits) { // Zero-extend or sign-extend the pointer value according to // whether the index is signed or not. bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType(); index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned, "idx.ext"); } // If this is subtraction, negate the index. if (isSubtraction) index = CGF.Builder.CreateNeg(index, "idx.neg"); if (CGF.SanOpts.has(SanitizerKind::ArrayBounds)) CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(), /*Accessed*/ false); const PointerType *pointerType = pointerOperand->getType()->getAs<PointerType>(); if (!pointerType) { QualType objectType = pointerOperand->getType() ->castAs<ObjCObjectPointerType>() ->getPointeeType(); llvm::Value *objectSize = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType)); index = CGF.Builder.CreateMul(index, objectSize); Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy); result = CGF.Builder.CreateGEP(result, index, "add.ptr"); return CGF.Builder.CreateBitCast(result, pointer->getType()); } QualType elementType = pointerType->getPointeeType(); if (const VariableArrayType *vla = CGF.getContext().getAsVariableArrayType(elementType)) { // The element count here is the total number of non-VLA elements. llvm::Value *numElements = CGF.getVLASize(vla).first; // Effectively, the multiply by the VLA size is part of the GEP. // GEP indexes are signed, and scaling an index isn't permitted to // signed-overflow, so we use the same semantics for our explicit // multiply. We suppress this if overflow is not undefined behavior. if (CGF.getLangOpts().isSignedOverflowDefined()) { index = CGF.Builder.CreateMul(index, numElements, "vla.index"); pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr"); } else { index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index"); pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr"); } return pointer; } // Explicitly handle GNU void* and function pointer arithmetic extensions. The // GNU void* casts amount to no-ops since our void* type is i8*, but this is // future proof. if (elementType->isVoidType() || elementType->isFunctionType()) { Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy); result = CGF.Builder.CreateGEP(result, index, "add.ptr"); return CGF.Builder.CreateBitCast(result, pointer->getType()); } if (CGF.getLangOpts().isSignedOverflowDefined()) return CGF.Builder.CreateGEP(pointer, index, "add.ptr"); return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr"); } // Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and // Addend. Use negMul and negAdd to negate the first operand of the Mul or // the add operand respectively. This allows fmuladd to represent a*b-c, or // c-a*b. Patterns in LLVM should catch the negated forms and translate them to // efficient operations. 
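// NOTE (editorial sketch, not in the original source): under
// -ffp-contract=on, buildFMulAdd rewrites a single-use fmul feeding an
// fadd/fsub into one fused call, e.g.
//   %mul = fmul float %a, %b
//   %add = fadd float %mul, %c
// becomes
//   %fma = call float @llvm.fmuladd.f32(float %a, float %b, float %c)
// after which the now-dead fmul is erased.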
static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd) { assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set."); Value *MulOp0 = MulOp->getOperand(0); Value *MulOp1 = MulOp->getOperand(1); if (negMul) { MulOp0 = Builder.CreateFSub( llvm::ConstantFP::getZeroValueForNegation(MulOp0->getType()), MulOp0, "neg"); } else if (negAdd) { Addend = Builder.CreateFSub( llvm::ConstantFP::getZeroValueForNegation(Addend->getType()), Addend, "neg"); } Value *FMulAdd = Builder.CreateCall( CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()), {MulOp0, MulOp1, Addend}); MulOp->eraseFromParent(); return FMulAdd; } // Check whether it would be legal to emit an fmuladd intrinsic call to // represent op and if so, build the fmuladd. // // Checks that (a) the operation is fusable, and (b) -ffp-contract=on. // Does NOT check the type of the operation - it's assumed that this function // will be called from contexts where it's known that the type is contractable. static Value* tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false) { assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign || op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) && "Only fadd/fsub can be the root of an fmuladd."); // Check whether this op is marked as fusable. if (!op.FPContractable) return nullptr; // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is // either disabled, or handled entirely by the LLVM backend). if (CGF.CGM.getCodeGenOpts().getFPContractMode() != CodeGenOptions::FPC_On) return nullptr; // We have a potentially fusable op. Look for a mul on one of the operands. if (llvm::BinaryOperator* LHSBinOp = dyn_cast<llvm::BinaryOperator>(op.LHS)) { if (LHSBinOp->getOpcode() == llvm::Instruction::FMul) { assert(LHSBinOp->getNumUses() == 0 && "Operations with multiple uses shouldn't be contracted."); return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, false, isSub); } } else if (llvm::BinaryOperator* RHSBinOp = dyn_cast<llvm::BinaryOperator>(op.RHS)) { if (RHSBinOp->getOpcode() == llvm::Instruction::FMul) { assert(RHSBinOp->getNumUses() == 0 && "Operations with multiple uses shouldn't be contracted."); return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub, false); } } return nullptr; } Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) { if (op.LHS->getType()->isPointerTy() || op.RHS->getType()->isPointerTy()) return emitPointerArithmetic(CGF, op, /*subtraction*/ false); if (op.Ty->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: return Builder.CreateAdd(op.LHS, op.RHS, "add"); case LangOptions::SOB_Undefined: if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) return Builder.CreateNSWAdd(op.LHS, op.RHS, "add"); LLVM_FALLTHROUGH; // HLSL Change case LangOptions::SOB_Trapping: return EmitOverflowCheckedBinOp(op); } } if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) return EmitOverflowCheckedBinOp(op); if (op.LHS->getType()->isFPOrFPVectorTy()) { // Try to form an fmuladd. if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder)) return FMulAdd; return Builder.CreateFAdd(op.LHS, op.RHS, "add"); } return Builder.CreateAdd(op.LHS, op.RHS, "add"); } Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) { // The LHS is always a pointer if either side is. 
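// NOTE (editorial, illustrative): 'p - i' is routed to
// emitPointerArithmetic below, while 'p - q' subtracts the raw addresses
// and divides by the element size with an exact sdiv, e.g. for 'int*' with
// 4-byte int: (ptrtoint p - ptrtoint q) / 4.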
if (!op.LHS->getType()->isPointerTy()) { if (op.Ty->isSignedIntegerOrEnumerationType()) { switch (CGF.getLangOpts().getSignedOverflowBehavior()) { case LangOptions::SOB_Defined: return Builder.CreateSub(op.LHS, op.RHS, "sub"); case LangOptions::SOB_Undefined: if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) return Builder.CreateNSWSub(op.LHS, op.RHS, "sub"); LLVM_FALLTHROUGH; // HLSL Change case LangOptions::SOB_Trapping: return EmitOverflowCheckedBinOp(op); } } if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) return EmitOverflowCheckedBinOp(op); if (op.LHS->getType()->isFPOrFPVectorTy()) { // Try to form an fmuladd. if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true)) return FMulAdd; return Builder.CreateFSub(op.LHS, op.RHS, "sub"); } return Builder.CreateSub(op.LHS, op.RHS, "sub"); } // If the RHS is not a pointer, then we have normal pointer // arithmetic. if (!op.RHS->getType()->isPointerTy()) return emitPointerArithmetic(CGF, op, /*subtraction*/ true); // Otherwise, this is a pointer subtraction. // Do the raw subtraction part. llvm::Value *LHS = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast"); llvm::Value *RHS = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast"); Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); // Okay, figure out the element size. const BinaryOperator *expr = cast<BinaryOperator>(op.E); QualType elementType = expr->getLHS()->getType()->getPointeeType(); llvm::Value *divisor = nullptr; // For a variable-length array, this is going to be non-constant. if (const VariableArrayType *vla = CGF.getContext().getAsVariableArrayType(elementType)) { llvm::Value *numElements; std::tie(numElements, elementType) = CGF.getVLASize(vla); divisor = numElements; // Scale the number of non-VLA elements by the non-VLA element size. CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType); if (!eltSize.isOne()) divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor); // For everything else, we can just compute it, safe in the // assumption that Sema won't let anything through that we can't // safely compute the size of. } else { CharUnits elementSize; // Handle GCC extension for pointer arithmetic on void* and // function pointer types. if (elementType->isVoidType() || elementType->isFunctionType()) elementSize = CharUnits::One(); else elementSize = CGF.getContext().getTypeSizeInChars(elementType); // Don't even emit the divide for element size of 1. if (elementSize.isOne()) return diffInChars; divisor = CGF.CGM.getSize(elementSize); } // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since // pointer difference in C is only defined in the case where both operands // are pointing to elements of an array. return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div"); } Value *ScalarExprEmitter::GetWidthMinusOneValue(Value* LHS,Value* RHS) { llvm::IntegerType *Ty; if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType())) Ty = cast<llvm::IntegerType>(VT->getElementType()); else Ty = cast<llvm::IntegerType>(LHS->getType()); return llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth() - 1); } Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { // LLVM requires the LHS and RHS to be the same type: promote or truncate the // RHS to the same size as the LHS.
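// NOTE (editorial, illustrative): e.g. shifting an i32 value by an i64
// amount truncates the amount to i32 ("sh_prom") because LLVM's shl
// requires matching operand types, whereas C promotes the two operands
// independently.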
Value *RHS = Ops.RHS; if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); bool SanitizeBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) && Ops.Ty->hasSignedIntegerRepresentation(); bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent); // OpenCL 6.3j: shift values are effectively % word size of LHS. if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL) // HLSL Change RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shl.mask"); else if ((SanitizeBase || SanitizeExponent) && isa<llvm::IntegerType>(Ops.LHS->getType())) { CodeGenFunction::SanitizerScope SanScope(&CGF); SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks; llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS); llvm::Value *ValidExponent = Builder.CreateICmpULE(RHS, WidthMinusOne); if (SanitizeExponent) { Checks.push_back( std::make_pair(ValidExponent, SanitizerKind::ShiftExponent)); } if (SanitizeBase) { // Check whether we are shifting any non-zero bits off the top of the // integer. We only emit this check if exponent is valid - otherwise // instructions below will have undefined behavior themselves. llvm::BasicBlock *Orig = Builder.GetInsertBlock(); llvm::BasicBlock *Cont = CGF.createBasicBlock("cont"); llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check"); Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont); CGF.EmitBlock(CheckShiftBase); llvm::Value *BitsShiftedOff = Builder.CreateLShr(Ops.LHS, Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros", /*NUW*/true, /*NSW*/true), "shl.check"); if (CGF.getLangOpts().CPlusPlus) { // In C99, we are not permitted to shift a 1 bit into the sign bit. // Under C++11's rules, shifting a 1 bit into the sign bit is // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't // define signed left shifts, so we use the C99 and C++11 rules there). llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1); BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One); } llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0); llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero); CGF.EmitBlock(Cont); llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2); BaseCheck->addIncoming(Builder.getTrue(), Orig); BaseCheck->addIncoming(ValidBase, CheckShiftBase); Checks.push_back(std::make_pair(BaseCheck, SanitizerKind::ShiftBase)); } assert(!Checks.empty()); EmitBinOpCheck(Checks, Ops); } return Builder.CreateShl(Ops.LHS, RHS, "shl"); } Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { // LLVM requires the LHS and RHS to be the same type: promote or truncate the // RHS to the same size as the LHS. Value *RHS = Ops.RHS; if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); // OpenCL 6.3j: shift values are effectively % word size of LHS. if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL) RHS = Builder.CreateAnd(RHS, GetWidthMinusOneValue(Ops.LHS, RHS), "shr.mask"); else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) && isa<llvm::IntegerType>(Ops.LHS->getType())) { CodeGenFunction::SanitizerScope SanScope(&CGF); llvm::Value *Valid = Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)); EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops); } if (Ops.Ty->hasUnsignedIntegerRepresentation()) return Builder.CreateLShr(Ops.LHS, RHS, "shr"); // HLSL Change Begin - check unsigned for vector. 
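// NOTE (editorial, illustrative): for an HLSL vector such as 'uint3' the
// signedness lives on the element type, so the check below consults
// GetHLSLVecElementType to pick the logical (lshr) rather than arithmetic
// (ashr) shift.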
if (hlsl::IsHLSLVecType(Ops.Ty)) { if (hlsl::GetHLSLVecElementType(Ops.Ty)->hasUnsignedIntegerRepresentation()) return Builder.CreateLShr(Ops.LHS, RHS, "shr"); } // HLSL Change End. return Builder.CreateAShr(Ops.LHS, RHS, "shr"); } enum IntrinsicType { VCMPEQ, VCMPGT }; #if 0 // HLSL Change - remove platform intrinsics // return corresponding comparison intrinsic for given vector type static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind) { llvm_unreachable("HLSL Does not support altivec vectors"); return llvm::Intrinsic::not_intrinsic; // HLSL Change - remove platform intrinsics switch (ElemKind) { default: llvm_unreachable("unexpected element type"); case BuiltinType::Char_U: case BuiltinType::UChar: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : llvm::Intrinsic::ppc_altivec_vcmpgtub_p; case BuiltinType::Char_S: case BuiltinType::SChar: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p : llvm::Intrinsic::ppc_altivec_vcmpgtsb_p; case BuiltinType::UShort: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : llvm::Intrinsic::ppc_altivec_vcmpgtuh_p; case BuiltinType::Short: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p : llvm::Intrinsic::ppc_altivec_vcmpgtsh_p; case BuiltinType::UInt: case BuiltinType::ULong: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : llvm::Intrinsic::ppc_altivec_vcmpgtuw_p; case BuiltinType::Int: case BuiltinType::Long: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p : llvm::Intrinsic::ppc_altivec_vcmpgtsw_p; case BuiltinType::Float: return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p : llvm::Intrinsic::ppc_altivec_vcmpgtfp_p; } } #endif // HLSL Change - remove platform intrinsics Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, unsigned SICmpOpc, unsigned FCmpOpc) { TestAndClearIgnoreResultAssign(); Value *Result; QualType LHSTy = E->getLHS()->getType(); QualType RHSTy = E->getRHS()->getType(); if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) { assert(E->getOpcode() == BO_EQ || E->getOpcode() == BO_NE); Value *LHS = CGF.EmitScalarExpr(E->getLHS()); Value *RHS = CGF.EmitScalarExpr(E->getRHS()); Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison( CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE); } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) { Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); // HLSL Change Begins if (hlsl::IsHLSLMatType(LHSTy) && hlsl::IsHLSLMatType(RHSTy)) { llvm::Type *RetTy = CGF.ConvertType(E->getType()); return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall(CGF, E, RetTy, {LHS, RHS}); } // HLSL Change Ends // If AltiVec, the comparison results in a numeric type, so we use // intrinsics comparing vectors and giving 0 or 1 as a result if (LHSTy->isVectorType() && !E->getType()->isVectorType()) { llvm_unreachable("HLSL Does not support altivec vectors"); #if 0 // HLSL Change - remove platform intrinsics // constants for mapping CR6 register bits to predicate result enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6; llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic; // in several cases vector arguments order will be reversed Value *FirstVecArg = LHS, *SecondVecArg = RHS; QualType ElTy = LHSTy->getAs<VectorType>()->getElementType(); const BuiltinType *BTy = ElTy->getAs<BuiltinType>(); BuiltinType::Kind ElementKind = BTy->getKind(); switch(E->getOpcode()) { default: llvm_unreachable("is not a comparison 
operation"); case BO_EQ: CR6 = CR6_LT; ID = GetIntrinsic(VCMPEQ, ElementKind); break; case BO_NE: CR6 = CR6_EQ; ID = GetIntrinsic(VCMPEQ, ElementKind); break; case BO_LT: CR6 = CR6_LT; ID = GetIntrinsic(VCMPGT, ElementKind); std::swap(FirstVecArg, SecondVecArg); break; case BO_GT: CR6 = CR6_LT; ID = GetIntrinsic(VCMPGT, ElementKind); break; case BO_LE: if (ElementKind == BuiltinType::Float) { CR6 = CR6_LT; ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; std::swap(FirstVecArg, SecondVecArg); } else { CR6 = CR6_EQ; ID = GetIntrinsic(VCMPGT, ElementKind); } break; case BO_GE: if (ElementKind == BuiltinType::Float) { CR6 = CR6_LT; ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p; } else { CR6 = CR6_EQ; ID = GetIntrinsic(VCMPGT, ElementKind); std::swap(FirstVecArg, SecondVecArg); } break; } Value *CR6Param = Builder.getInt32(CR6); llvm::Function *F = CGF.CGM.getIntrinsic(ID); Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg}); return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType()); #endif // HLSL Change - remove platform intrinsics } if (LHS->getType()->isFPOrFPVectorTy()) { Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc, LHS, RHS, "cmp"); } else if (LHSTy->hasSignedIntegerRepresentation()) { Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc, LHS, RHS, "cmp"); } else { // Unsigned integers and pointers. Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, LHS, RHS, "cmp"); } // If this is a vector comparison, sign extend the result to the appropriate // vector integer type and return it (don't convert to bool). if (LHSTy->isVectorType()) return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); // HLSL Change Starts if (const ExtVectorType *vecTy = hlsl::ConvertHLSLVecMatTypeToExtVectorType( CGF.getContext(), LHSTy)) { // Return Result directly when E is already a vector. if (hlsl::IsHLSLVecType(E->getType())) return Result; QualType bVecTy = CGF.getContext().getExtVectorType( E->getType(), vecTy->getNumElements()); return Builder.CreateSExt(Result, ConvertType(bVecTy), "sext"); } // HLSL Change Ends } else { // Complex Comparison: can only be an equality comparison. CodeGenFunction::ComplexPairTy LHS, RHS; QualType CETy; if (auto *CTy = LHSTy->getAs<ComplexType>()) { LHS = CGF.EmitComplexExpr(E->getLHS()); CETy = CTy->getElementType(); } else { LHS.first = Visit(E->getLHS()); LHS.second = llvm::Constant::getNullValue(LHS.first->getType()); CETy = LHSTy; } if (auto *CTy = RHSTy->getAs<ComplexType>()) { RHS = CGF.EmitComplexExpr(E->getRHS()); assert(CGF.getContext().hasSameUnqualifiedType(CETy, CTy->getElementType()) && "The element types must always match."); (void)CTy; } else { RHS.first = Visit(E->getRHS()); RHS.second = llvm::Constant::getNullValue(RHS.first->getType()); assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) && "The element types must always match."); } Value *ResultR, *ResultI; if (CETy->isRealFloatingType()) { ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, LHS.first, RHS.first, "cmp.r"); ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, LHS.second, RHS.second, "cmp.i"); } else { // Complex comparisons can only be equality comparisons. As such, signed // and unsigned opcodes are the same. 
ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, LHS.first, RHS.first, "cmp.r"); ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, LHS.second, RHS.second, "cmp.i"); } if (E->getOpcode() == BO_EQ) { Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); } else { assert(E->getOpcode() == BO_NE && "Complex comparison other than == or != ?"); Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); } } return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType()); } Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { bool Ignore = TestAndClearIgnoreResultAssign(); Value *RHS; LValue LHS; switch (E->getLHS()->getType().getObjCLifetime()) { case Qualifiers::OCL_Strong: std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore); break; case Qualifiers::OCL_Autoreleasing: std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E); break; case Qualifiers::OCL_Weak: RHS = Visit(E->getRHS()); LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore); break; // No reason to do any of these differently. case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: // __block variables need to have the rhs evaluated first, plus // this should improve codegen just a little. RHS = Visit(E->getRHS()); LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store); // Store the value into the LHS. Bit-fields are handled specially // because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after // the assignment...'. if (LHS.isBitField()) CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS); else CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS); } // If the result is clearly ignored, return now. if (Ignore) return nullptr; // The result of an assignment in C is the assigned r-value. if (!CGF.getLangOpts().CPlusPlus) return RHS; // If the lvalue is non-volatile, return the computed value of the assignment. if (!LHS.isVolatileQualified()) return RHS; // Otherwise, reload the value. return EmitLoadOfLValue(LHS, E->getExprLoc()); } Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { // Perform vector logical and on comparisons with zero vectors. if (E->getType()->isVectorType()) { CGF.incrementProfileCounter(E); Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); if (LHS->getType()->isFPOrFPVectorTy()) { LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); } else { LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); } Value *And = Builder.CreateAnd(LHS, RHS); return Builder.CreateSExt(And, ConvertType(E->getType()), "sext"); } llvm::Type *ResTy = ConvertType(E->getType()); // If we have 0 && RHS, see if we can elide RHS, if so, just return 0. // If we have 1 && X, just emit X without inserting the control flow. bool LHSCondVal; if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { if (LHSCondVal) { // If we have 1 && X, just emit X. CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); // ZExt result to int or bool. return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext"); } // 0 && RHS: If it is safe, just elide the RHS, and return 0/false. if (!CGF.ContainsLabel(E->getRHS())) { // HLSL Change Begins. 
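// NOTE (editorial, illustrative): pre-2021 HLSL '&&' does not short
// circuit, so even when the LHS folds to 0 the RHS is still emitted here
// for its side effects before the constant false result is returned.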
if (CGF.getLangOpts().HLSL && CGF.getLangOpts().HLSLVersion < hlsl::LangStd::v2021) { // HLSL does not short circuit by default. Visit(E->getRHS()); } // HLSL Change Ends. return llvm::Constant::getNullValue(ResTy); } } // HLSL Change Begins. if (CGF.getLangOpts().HLSL && CGF.getLangOpts().HLSLVersion < hlsl::LangStd::v2021) { // HLSL does not short circuit by default. Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); if (ResTy->isVectorTy()) { Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); if (LHS->getType()->isFPOrFPVectorTy()) { LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); } else { LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); } } Value *And = Builder.CreateAnd(LHS, RHS); return Builder.CreateSExt(And, ConvertType(E->getType()), "sext"); } // HLSL Change Ends. llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end"); llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs"); CodeGenFunction::ConditionalEvaluation eval(CGF); // Branch on the LHS first. If it is false, go to the failure (cont) block. CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock, CGF.getProfileCount(E->getRHS())); // Any edges into the ContBlock are now from an (indeterminate number of) // edges from this first condition. All of these values will be false. Start // setting up the PHI node in the Cont Block for this. llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2, "", ContBlock); for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); PI != PE; ++PI) PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI); eval.begin(CGF); CGF.EmitBlock(RHSBlock); CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); eval.end(CGF); // Reacquire the RHS block, as there may be subblocks inserted. RHSBlock = Builder.GetInsertBlock(); // Emit an unconditional branch from this block to ContBlock. { // There is no need to emit line number for unconditional branch. auto NL = ApplyDebugLocation::CreateEmpty(CGF); CGF.EmitBlock(ContBlock); } // Insert an entry into the phi node for the edge with the value of RHSCond. PN->addIncoming(RHSCond, RHSBlock); // ZExt result to int. return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext"); } Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { // Perform vector logical or on comparisons with zero vectors. if (E->getType()->isVectorType()) { CGF.incrementProfileCounter(E); Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); if (LHS->getType()->isFPOrFPVectorTy()) { LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); } else { LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); } Value *Or = Builder.CreateOr(LHS, RHS); return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext"); } llvm::Type *ResTy = ConvertType(E->getType()); // If we have 1 || RHS, see if we can elide RHS, if so, just return 1. // If we have 0 || X, just emit X without inserting the control flow. bool LHSCondVal; if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) { if (!LHSCondVal) { // If we have 0 || X, just emit X.
CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); // ZExt result to int or bool. return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext"); } // 1 || RHS: If it is safe, just elide the RHS, and return 1/true. if (!CGF.ContainsLabel(E->getRHS())) { // HLSL Change Begins. if (CGF.getLangOpts().HLSL && CGF.getLangOpts().HLSLVersion < hlsl::LangStd::v2021) { // HLSL does not short circuit by default. Visit(E->getRHS()); } // HLSL Change Ends. return llvm::ConstantInt::get(ResTy, 1); } } // HLSL Change Begins. if (CGF.getLangOpts().HLSL && CGF.getLangOpts().HLSLVersion < hlsl::LangStd::v2021) { // HLSL does not short circuit by default. Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); if (ResTy->isVectorTy()) { Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType()); if (LHS->getType()->isFPOrFPVectorTy()) { LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp"); RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp"); } else { LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp"); RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp"); } } Value *Or = Builder.CreateOr(LHS, RHS); return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext"); } // HLSL Change Ends. llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end"); llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs"); CodeGenFunction::ConditionalEvaluation eval(CGF); // Branch on the LHS first. If it is true, go to the success (cont) block. CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock, CGF.getCurrentProfileCount() - CGF.getProfileCount(E->getRHS())); // Any edges into the ContBlock are now from an (indeterminate number of) // edges from this first condition. All of these values will be true. Start // setting up the PHI node in the Cont Block for this. llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2, "", ContBlock); for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); PI != PE; ++PI) PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI); eval.begin(CGF); // Emit the RHS condition as a bool value. CGF.EmitBlock(RHSBlock); CGF.incrementProfileCounter(E); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); eval.end(CGF); // Reacquire the RHS block, as there may be subblocks inserted. RHSBlock = Builder.GetInsertBlock(); // Emit an unconditional branch from this block to ContBlock. Insert an entry // into the phi node for the edge with the value of RHSCond. CGF.EmitBlock(ContBlock); PN->addIncoming(RHSCond, RHSBlock); // ZExt result to int. return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext"); } Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) { CGF.EmitIgnoredExpr(E->getLHS()); CGF.EnsureInsertPoint(); return Visit(E->getRHS()); } //===----------------------------------------------------------------------===// // Other Operators //===----------------------------------------------------------------------===// /// isCheapEnoughToEvaluateUnconditionally - Return true if the specified /// expression is cheap enough and side-effect-free enough to evaluate /// unconditionally instead of conditionally. This is used to convert control /// flow into selects in some cases.  static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, CodeGenFunction &CGF) { // Anything that is an integer or floating point constant is fine.
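// NOTE (editorial, illustrative): this is what lets 'c ? 4 : 5' compile to
// a single 'select i1 %c, i32 4, i32 5' instead of branches, since both
// arms are side-effect-free constants.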
return E->IgnoreParens()->isEvaluatable(CGF.getContext()); // Even non-volatile automatic variables can't be evaluated unconditionally. // Referencing a thread_local may cause non-trivial initialization work to // occur. If we're inside a lambda and one of the variables is from the scope // outside the lambda, that function may have returned already. Reading its // locals is a bad idea. Also, these reads may introduce races that didn't // exist in the source-level program. } Value *ScalarExprEmitter:: VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { TestAndClearIgnoreResultAssign(); // Bind the common expression if necessary. CodeGenFunction::OpaqueValueMapping binding(CGF, E); Expr *condExpr = E->getCond(); Expr *lhsExpr = E->getTrueExpr(); Expr *rhsExpr = E->getFalseExpr(); // If the condition constant folds and can be elided, try to avoid emitting // the condition and the dead arm. bool CondExprBool; if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { Expr *live = lhsExpr, *dead = rhsExpr; if (!CondExprBool) std::swap(live, dead); // If the dead side doesn't have labels we need, just emit the Live part. if (!CGF.ContainsLabel(dead)) { if (CondExprBool) CGF.incrementProfileCounter(E); Value *Result = Visit(live); // If the live part is a throw expression, it acts like it has a void // type, so evaluating it returns a null Value*. However, a conditional // with non-void type must return a non-null Value*. if (!Result && !E->getType()->isVoidType()) Result = llvm::UndefValue::get(CGF.ConvertType(E->getType())); return Result; } } // OpenCL: If the condition is a vector, we can treat this condition like // the select function. if (CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) { CGF.incrementProfileCounter(E); llvm::Value *CondV = CGF.EmitScalarExpr(condExpr); llvm::Value *LHS = Visit(lhsExpr); llvm::Value *RHS = Visit(rhsExpr); llvm::Type *condType = ConvertType(condExpr->getType()); llvm::VectorType *vecTy = cast<llvm::VectorType>(condType); unsigned numElem = vecTy->getNumElements(); llvm::Type *elemType = vecTy->getElementType(); llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy); llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec); llvm::Value *tmp = Builder.CreateSExt(TestMSB, llvm::VectorType::get(elemType, numElem), "sext"); llvm::Value *tmp2 = Builder.CreateNot(tmp); // Cast float to int to perform ANDs if necessary.
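// NOTE (editorial, illustrative): the OpenCL vector ?: above selects per
// lane through sign-bit masks, roughly mask = sext(cond < 0) and
// result = (lhs & mask) | (rhs & ~mask), bitcasting float lanes to
// integers first so the bitwise operations are well-typed.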
llvm::Value *RHSTmp = RHS; llvm::Value *LHSTmp = LHS; bool wasCast = false; llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType()); if (rhsVTy->getElementType()->isFloatingPointTy()) { RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType()); LHSTmp = Builder.CreateBitCast(LHS, tmp->getType()); wasCast = true; } llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2); llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp); llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond"); if (wasCast) tmp5 = Builder.CreateBitCast(tmp5, RHS->getType()); return tmp5; } // HLSL Change Starts if (CGF.getLangOpts().HLSL && CGF.getLangOpts().HLSLVersion < hlsl::LangStd::v2021) { // HLSL does not short circuit by default before HLSL 2021 if (hlsl::IsHLSLVecType(E->getType()) || E->getType()->isArithmeticType()) { llvm::Value *CondV = CGF.EmitScalarExpr(condExpr); llvm::Value *LHS = Visit(lhsExpr); llvm::Value *RHS = Visit(rhsExpr); if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(CondV->getType())) { llvm::VectorType *ResultVT = cast<llvm::VectorType>(LHS->getType()); llvm::Value *result = llvm::UndefValue::get(ResultVT); for (unsigned i = 0; i < VT->getNumElements(); i++) { llvm::Value *EltCond = Builder.CreateExtractElement(CondV, i); llvm::Value *EltL = Builder.CreateExtractElement(LHS, i); llvm::Value *EltR = Builder.CreateExtractElement(RHS, i); llvm::Value *EltSelect = Builder.CreateSelect(EltCond, EltL, EltR); result = Builder.CreateInsertElement(result, EltSelect, i); } return result; } else { return Builder.CreateSelect(CondV, LHS, RHS); } } if (hlsl::IsHLSLMatType(E->getType())) { llvm::Value *Cond = CGF.EmitScalarExpr(condExpr); llvm::Value *LHS = Visit(lhsExpr); llvm::Value *RHS = Visit(rhsExpr); return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall( CGF, E, LHS->getType(), {Cond, LHS, RHS}); } } // HLSL Change Ends // If this is a really simple expression (like x ? 4 : 5), emit this as a // select instead of as control flow. We can only do this if it is cheap and // safe to evaluate the LHS and RHS unconditionally. if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) && isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) { CGF.incrementProfileCounter(E); llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr); llvm::Value *LHS = Visit(lhsExpr); llvm::Value *RHS = Visit(rhsExpr); if (!LHS) { // If the conditional has void type, make sure we return a null Value*. 
assert(!RHS && "LHS and RHS types must match"); return nullptr; } return Builder.CreateSelect(CondV, LHS, RHS, "cond"); } // HLSL Change Begins llvm::Instruction *ResultAlloca = nullptr; if (CGF.getLangOpts().HLSL && CGF.getLangOpts().HLSLVersion >= hlsl::LangStd::v2021 && hlsl::IsHLSLMatType(E->getType())) { llvm::Type *MatTy = CGF.ConvertTypeForMem(E->getType()); ResultAlloca = CGF.CreateTempAlloca(MatTy); ResultAlloca->moveBefore(hlsl::dxilutil::FindAllocaInsertionPt( Builder.GetInsertBlock()->getParent())); } // HLSL Change Ends llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); CodeGenFunction::ConditionalEvaluation eval(CGF); CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock, CGF.getProfileCount(lhsExpr)); CGF.EmitBlock(LHSBlock); CGF.incrementProfileCounter(E); eval.begin(CGF); Value *LHS = Visit(lhsExpr); // HLSL Change Begin - Handle matrix ternary if (ResultAlloca) CGF.CGM.getHLSLRuntime().EmitHLSLMatrixStore(CGF, LHS, ResultAlloca, E->getType()); // HLSL Change End eval.end(CGF); LHSBlock = Builder.GetInsertBlock(); Builder.CreateBr(ContBlock); CGF.EmitBlock(RHSBlock); eval.begin(CGF); Value *RHS = Visit(rhsExpr); // HLSL Change Begin - Handle matrix ternary if (ResultAlloca) CGF.CGM.getHLSLRuntime().EmitHLSLMatrixStore(CGF, RHS, ResultAlloca, E->getType()); // HLSL Change End eval.end(CGF); RHSBlock = Builder.GetInsertBlock(); CGF.EmitBlock(ContBlock); // HLSL Change Begin - Handle matrix ternary if (ResultAlloca) return CGF.CGM.getHLSLRuntime().EmitHLSLMatrixLoad(CGF, ResultAlloca, E->getType()); // HLSL Change End // If the LHS or RHS is a throw expression, it will be legitimately null. if (!LHS) return RHS; if (!RHS) return LHS; // Create a PHI node for the real part. llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond"); PN->addIncoming(LHS, LHSBlock); PN->addIncoming(RHS, RHSBlock); return PN; } Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) { return Visit(E->getChosenSubExpr()); } Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { QualType Ty = VE->getType(); if (Ty->isVariablyModifiedType()) CGF.EmitVariablyModifiedType(Ty); llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr()); llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType()); llvm::Type *ArgTy = ConvertType(VE->getType()); // If EmitVAArg fails, we fall back to the LLVM instruction. if (!ArgPtr) return Builder.CreateVAArg(ArgValue, ArgTy); // FIXME Volatility. llvm::Value *Val = Builder.CreateLoad(ArgPtr); // If EmitVAArg promoted the type, we must truncate it. if (ArgTy != Val->getType()) { if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy()) Val = Builder.CreateIntToPtr(Val, ArgTy); else Val = Builder.CreateTrunc(Val, ArgTy); } return Val; } Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) { return CGF.EmitBlockLiteral(block); } Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) { Value *Src = CGF.EmitScalarExpr(E->getSrcExpr()); llvm::Type *DstTy = ConvertType(E->getType()); // Going from vec4->vec3 or vec3->vec4 is a special case and requires // a shuffle vector instead of a bitcast. 
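// NOTE (editorial, illustrative): vec3 and vec4 of the same element type
// differ in bit width, so as_type between them cannot be a plain bitcast;
// the code below builds a shufflevector with mask <0, 1, 2> (plus an undef
// lane when widening to four elements).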
llvm::Type *SrcTy = Src->getType(); if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) { unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements(); unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements(); if ((numElementsDst == 3 && numElementsSrc == 4) || (numElementsDst == 4 && numElementsSrc == 3)) { // In the case of going from int4->float3, a bitcast is needed before // doing a shuffle. llvm::Type *srcElemTy = cast<llvm::VectorType>(SrcTy)->getElementType(); llvm::Type *dstElemTy = cast<llvm::VectorType>(DstTy)->getElementType(); if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy()) || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) { // Create a float type of the same size as the source or destination. llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy, numElementsSrc); Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast"); } llvm::Value *UnV = llvm::UndefValue::get(Src->getType()); SmallVector<llvm::Constant*, 3> Args; Args.push_back(Builder.getInt32(0)); Args.push_back(Builder.getInt32(1)); Args.push_back(Builder.getInt32(2)); if (numElementsDst == 4) Args.push_back(llvm::UndefValue::get(CGF.Int32Ty)); llvm::Constant *Mask = llvm::ConstantVector::get(Args); return Builder.CreateShuffleVector(Src, UnV, Mask, "astype"); } } return Builder.CreateBitCast(Src, DstTy, "astype"); } Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) { return CGF.EmitAtomicExpr(E).getScalarVal(); } //===----------------------------------------------------------------------===// // Entry Point into this File //===----------------------------------------------------------------------===// /// EmitScalarExpr - Emit the computation of the specified expression of scalar /// type, ignoring the result. Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { assert(E && hasScalarEvaluationKind(E->getType()) && "Invalid scalar expression to emit"); return ScalarExprEmitter(*this, IgnoreResultAssign) .Visit(const_cast<Expr *>(E)); } /// EmitScalarConversion - Emit a conversion from the specified type to the /// specified destination type, both of which are LLVM scalar types. Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy) { assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) && "Invalid scalar expression to emit"); return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy); } /// EmitComplexToScalarConversion - Emit a conversion from the specified complex /// type to the specified destination type, where the destination type is an /// LLVM scalar type. 
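// NOTE (editorial, illustrative): per C11 semantics, converting a complex
// value to a real type discards the imaginary part, except conversion to
// _Bool, which yields true when either part is nonzero.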
Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy) { assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) && "Invalid complex -> scalar conversion"); return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy, DstTy); } llvm::Value *CodeGenFunction:: EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre) { return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre); } LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) { llvm::Value *V; // object->isa or (*object).isa // Generate code as for: *(Class*)object // build Class* type llvm::Type *ClassPtrTy = ConvertType(E->getType()); Expr *BaseExpr = E->getBase(); if (BaseExpr->isRValue()) { V = CreateMemTemp(E->getType(), "resval"); llvm::Value *Src = EmitScalarExpr(BaseExpr); Builder.CreateStore(Src, V); V = ScalarExprEmitter(*this).EmitLoadOfLValue( MakeNaturalAlignAddrLValue(V, E->getType()), E->getExprLoc()); } else { if (E->isArrow()) V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr); else V = EmitLValue(BaseExpr).getAddress(); } // build Class* type ClassPtrTy = ClassPtrTy->getPointerTo(); V = Builder.CreateBitCast(V, ClassPtrTy); return MakeNaturalAlignAddrLValue(V, E->getType()); } LValue CodeGenFunction::EmitCompoundAssignmentLValue( const CompoundAssignOperator *E) { ScalarExprEmitter Scalar(*this); Value *Result = nullptr; switch (E->getOpcode()) { #define COMPOUND_OP(Op) \ case BO_##Op##Assign: \ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \ Result) COMPOUND_OP(Mul); COMPOUND_OP(Div); COMPOUND_OP(Rem); COMPOUND_OP(Add); COMPOUND_OP(Sub); COMPOUND_OP(Shl); COMPOUND_OP(Shr); COMPOUND_OP(And); COMPOUND_OP(Xor); COMPOUND_OP(Or); #undef COMPOUND_OP case BO_PtrMemD: case BO_PtrMemI: case BO_Mul: case BO_Div: case BO_Rem: case BO_Add: case BO_Sub: case BO_Shl: case BO_Shr: case BO_LT: case BO_GT: case BO_LE: case BO_GE: case BO_EQ: case BO_NE: case BO_And: case BO_Xor: case BO_Or: case BO_LAnd: case BO_LOr: case BO_Assign: case BO_Comma: llvm_unreachable("Not valid compound assignment operators"); } llvm_unreachable("Unhandled compound assignment operator"); }
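// NOTE (editorial, illustrative): the COMPOUND_OP macro above expands, for
// example, to
//   case BO_AddAssign:
//     return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::EmitAdd,
//                                            Result);
// so each compound assignment reuses the matching binary emitter.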
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGBlocks.cpp
//===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code to emit blocks. // //===----------------------------------------------------------------------===// #include "CGBlocks.h" #include "CGDebugInfo.h" #include "CGObjCRuntime.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "clang/AST/DeclObjC.h" #include "llvm/ADT/SmallSet.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Module.h" #include <algorithm> #include <cstdio> using namespace clang; using namespace CodeGen; CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name) : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false), HasCXXObject(false), UsesStret(false), HasCapturedVariableLayout(false), StructureType(nullptr), Block(block), DominatingIP(nullptr) { // Skip asm prefix, if any. 'name' is usually taken directly from // the mangled name of the enclosing function. if (!name.empty() && name[0] == '\01') name = name.substr(1); } // Anchor the vtable to this translation unit. CodeGenModule::ByrefHelpers::~ByrefHelpers() {} /// Build the given block as a global block. static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM, const CGBlockInfo &blockInfo, llvm::Constant *blockFn); /// Build the helper function to copy a block. static llvm::Constant *buildCopyHelper(CodeGenModule &CGM, const CGBlockInfo &blockInfo) { return CodeGenFunction(CGM).GenerateCopyHelperFunction(blockInfo); } /// Build the helper function to dispose of a block. static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM, const CGBlockInfo &blockInfo) { return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo); } /// buildBlockDescriptor - Build the block descriptor meta-data for a block. /// buildBlockDescriptor is accessed from the 5th field of the Block_literal /// meta-data and contains stationary information about the block literal. /// Its definition will have 4 (or optionally 6) words. /// \code /// struct Block_descriptor { /// unsigned long reserved; /// unsigned long size; // size of Block_literal metadata in bytes. /// void *copy_func_helper_decl; // optional copy helper. /// void *destroy_func_decl; // optional destructor helper. /// void *block_method_encoding_address; // @encode for block literal signature. /// void *block_layout_info; // encoding of captured block variables. /// }; /// \endcode static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM, const CGBlockInfo &blockInfo) { ASTContext &C = CGM.getContext(); llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy); llvm::Type *i8p = NULL; if (CGM.getLangOpts().OpenCL) i8p = llvm::Type::getInt8PtrTy( CGM.getLLVMContext(), C.getTargetAddressSpace(LangAS::opencl_constant)); else i8p = CGM.getTypes().ConvertType(C.VoidPtrTy); SmallVector<llvm::Constant*, 6> elements; // reserved elements.push_back(llvm::ConstantInt::get(ulong, 0)); // Size // FIXME: What is the right way to say this doesn't fit? We should give // a user diagnostic in that case. Better fix would be to change the // API to size_t. elements.push_back(llvm::ConstantInt::get(ulong, blockInfo.BlockSize.getQuantity())); // Optional copy/dispose helpers.
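// NOTE (editorial, illustrative): the copy/dispose entries below are
// emitted only when some capture needs nontrivial copying or destruction
// (NeedsCopyDispose), which is why the descriptor is 4 words in the simple
// case and 6 when the helpers are present.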
if (blockInfo.NeedsCopyDispose) { // copy_func_helper_decl elements.push_back(buildCopyHelper(CGM, blockInfo)); // destroy_func_decl elements.push_back(buildDisposeHelper(CGM, blockInfo)); } // Signature. Mandatory ObjC-style method descriptor @encode sequence. std::string typeAtEncoding = CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr()); elements.push_back(llvm::ConstantExpr::getBitCast( CGM.GetAddrOfConstantCString(typeAtEncoding), i8p)); // GC layout. if (C.getLangOpts().ObjC1) { if (CGM.getLangOpts().getGC() != LangOptions::NonGC) elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo)); else elements.push_back(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo)); } else elements.push_back(llvm::Constant::getNullValue(i8p)); llvm::Constant *init = llvm::ConstantStruct::getAnon(elements); llvm::GlobalVariable *global = new llvm::GlobalVariable(CGM.getModule(), init->getType(), true, llvm::GlobalValue::InternalLinkage, init, "__block_descriptor_tmp"); return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType()); } /* Purely notional variadic template describing the layout of a block. template <class _ResultType, class... _ParamTypes, class... _CaptureTypes> struct Block_literal { /// Initialized to one of: /// extern void *_NSConcreteStackBlock[]; /// extern void *_NSConcreteGlobalBlock[]; /// /// In theory, we could start one off malloc'ed by setting /// BLOCK_NEEDS_FREE, giving it a refcount of 1, and using /// this isa: /// extern void *_NSConcreteMallocBlock[]; struct objc_class *isa; /// These are the flags (with corresponding bit number) that the /// compiler is actually supposed to know about. /// 25. BLOCK_HAS_COPY_DISPOSE - indicates that the block /// descriptor provides copy and dispose helper functions /// 26. BLOCK_HAS_CXX_OBJ - indicates that there's a captured /// object with a nontrivial destructor or copy constructor /// 28. BLOCK_IS_GLOBAL - indicates that the block is allocated /// as global memory /// 29. BLOCK_USE_STRET - indicates that the block function /// uses stret, which objc_msgSend needs to know about /// 30. BLOCK_HAS_SIGNATURE - indicates that the block has an /// @encoded signature string /// And we're not supposed to manipulate these: /// 24. BLOCK_NEEDS_FREE - indicates that the block has been moved /// to malloc'ed memory /// 27. BLOCK_IS_GC - indicates that the block has been moved /// to GC-allocated memory /// Additionally, the bottom 16 bits are a reference count which /// should be zero on the stack. int flags; /// Reserved; should be zero-initialized. int reserved; /// Function pointer generated from block literal. _ResultType (*invoke)(Block_literal *, _ParamTypes...); /// Block description metadata generated from block literal. struct Block_descriptor *block_descriptor; /// Captured values follow. _CaptureTypes captures...; }; */ /// The number of fields in a block header. const unsigned BlockHeaderSize = 5; namespace { /// A chunk of data that we actually have to capture in the block. struct BlockLayoutChunk { CharUnits Alignment; CharUnits Size; Qualifiers::ObjCLifetime Lifetime; const BlockDecl::Capture *Capture; // null for 'this' llvm::Type *Type; BlockLayoutChunk(CharUnits align, CharUnits size, Qualifiers::ObjCLifetime lifetime, const BlockDecl::Capture *capture, llvm::Type *type) : Alignment(align), Size(size), Lifetime(lifetime), Capture(capture), Type(type) {} /// Tell the block info that this chunk has the given field index.
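// NOTE (editorial, illustrative): once the chunks are sorted and laid out,
// setIndex records each capture's final struct field index in CGBlockInfo
// (the 'this' capture is tracked separately via CXXThisIndex).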
void setIndex(CGBlockInfo &info, unsigned index) { if (!Capture) info.CXXThisIndex = index; else info.Captures[Capture->getVariable()] = CGBlockInfo::Capture::makeIndex(index); } }; /// Order by 1) all __strong together 2) next, all byref together 3) next, /// all __weak together. Preserve descending alignment in all situations. bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) { CharUnits LeftValue, RightValue; bool LeftByref = left.Capture ? left.Capture->isByRef() : false; bool RightByref = right.Capture ? right.Capture->isByRef() : false; if (left.Lifetime == Qualifiers::OCL_Strong && left.Alignment >= right.Alignment) LeftValue = CharUnits::fromQuantity(64); else if (LeftByref && left.Alignment >= right.Alignment) LeftValue = CharUnits::fromQuantity(32); else if (left.Lifetime == Qualifiers::OCL_Weak && left.Alignment >= right.Alignment) LeftValue = CharUnits::fromQuantity(16); else LeftValue = left.Alignment; if (right.Lifetime == Qualifiers::OCL_Strong && right.Alignment >= left.Alignment) RightValue = CharUnits::fromQuantity(64); else if (RightByref && right.Alignment >= left.Alignment) RightValue = CharUnits::fromQuantity(32); else if (right.Lifetime == Qualifiers::OCL_Weak && right.Alignment >= left.Alignment) RightValue = CharUnits::fromQuantity(16); else RightValue = right.Alignment; return LeftValue > RightValue; } } /// Determines if the given type is safe for constant capture in C++. static bool isSafeForCXXConstantCapture(QualType type) { const RecordType *recordType = type->getBaseElementTypeUnsafe()->getAs<RecordType>(); // Only records can be unsafe. if (!recordType) return true; const auto *record = cast<CXXRecordDecl>(recordType->getDecl()); // Maintain semantics for classes with non-trivial dtors or copy ctors. if (!record->hasTrivialDestructor()) return false; if (record->hasNonTrivialCopyConstructor()) return false; // Otherwise, we just have to make sure there aren't any mutable // fields that might have changed since initialization. return !record->hasMutableFields(); } /// It is illegal to modify a const object after initialization. /// Therefore, if a const object has a constant initializer, we don't /// actually need to keep storage for it in the block; we'll just /// rematerialize it at the start of the block function. This is /// acceptable because we make no promises about address stability of /// captured variables. static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM, CodeGenFunction *CGF, const VarDecl *var) { QualType type = var->getType(); // We can only do this if the variable is const. if (!type.isConstQualified()) return nullptr; // Furthermore, in C++ we have to worry about mutable fields: // C++ [dcl.type.cv]p4: // Except that any class member declared mutable can be // modified, any attempt to modify a const object during its // lifetime results in undefined behavior. if (CGM.getLangOpts().CPlusPlus && !isSafeForCXXConstantCapture(type)) return nullptr; // If the variable doesn't have any initializer (shouldn't this be // invalid?), it's not clear what we should do. Maybe capture as // zero? const Expr *init = var->getInit(); if (!init) return nullptr; return CGM.EmitConstantInit(*var, CGF); } /// Get the low bit of a nonzero character count. This is the /// alignment of the nth byte if the 0th byte is universally aligned.
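/// Computed as v & -v; for example, getLowBit(12) == 4 and
/// getLowBit(8) == 8.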
static CharUnits getLowBit(CharUnits v) { return CharUnits::fromQuantity(v.getQuantity() & (~v.getQuantity() + 1)); } static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info, SmallVectorImpl<llvm::Type*> &elementTypes) { ASTContext &C = CGM.getContext(); // The header is basically a 'struct { void *; int; int; void *; void *; }'. CharUnits ptrSize, ptrAlign, intSize, intAlign; std::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy); std::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy); // Are there crazy embedded platforms where this isn't true? assert(intSize <= ptrSize && "layout assumptions horribly violated"); CharUnits headerSize = ptrSize; if (2 * intSize < ptrAlign) headerSize += ptrSize; else headerSize += 2 * intSize; headerSize += 2 * ptrSize; info.BlockAlign = ptrAlign; info.BlockSize = headerSize; assert(elementTypes.empty()); llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy); llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy); elementTypes.push_back(i8p); elementTypes.push_back(intTy); elementTypes.push_back(intTy); elementTypes.push_back(i8p); elementTypes.push_back(CGM.getBlockDescriptorType()); assert(elementTypes.size() == BlockHeaderSize); } /// Compute the layout of the given block. Attempts to lay the block /// out with minimal space requirements. static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF, CGBlockInfo &info) { ASTContext &C = CGM.getContext(); const BlockDecl *block = info.getBlockDecl(); SmallVector<llvm::Type*, 8> elementTypes; initializeForBlockHeader(CGM, info, elementTypes); if (!block->hasCaptures()) { info.StructureType = llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true); info.CanBeGlobal = true; return; } else if (C.getLangOpts().ObjC1 && CGM.getLangOpts().getGC() == LangOptions::NonGC) info.HasCapturedVariableLayout = true; // Collect the layout chunks. SmallVector<BlockLayoutChunk, 16> layout; layout.reserve(block->capturesCXXThis() + (block->capture_end() - block->capture_begin())); CharUnits maxFieldAlign; // First, 'this'. if (block->capturesCXXThis()) { assert(CGF && CGF->CurFuncDecl && isa<CXXMethodDecl>(CGF->CurFuncDecl) && "Can't capture 'this' outside a method"); QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType(C); llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType); std::pair<CharUnits,CharUnits> tinfo = CGM.getContext().getTypeInfoInChars(thisType); maxFieldAlign = std::max(maxFieldAlign, tinfo.second); layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first, Qualifiers::OCL_None, nullptr, llvmType)); } // Next, all the block captures. for (const auto &CI : block->captures()) { const VarDecl *variable = CI.getVariable(); if (CI.isByRef()) { // We have to copy/dispose of the __block reference. info.NeedsCopyDispose = true; // Just use void* instead of a pointer to the byref type. QualType byRefPtrTy = C.VoidPtrTy; llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy); std::pair<CharUnits,CharUnits> tinfo = CGM.getContext().getTypeInfoInChars(byRefPtrTy); maxFieldAlign = std::max(maxFieldAlign, tinfo.second); layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first, Qualifiers::OCL_None, &CI, llvmType)); continue; } // Otherwise, build a layout chunk with the size and alignment of // the declaration. 
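// First, though, try the constant fast path: a const-qualified capture
// with a constant initializer needs no storage in the block at all and
// is simply rematerialized at the top of the block function.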
if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) { info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant); continue; } // If we have a lifetime qualifier, honor it for capture purposes. // That includes *not* copying it if it's __unsafe_unretained. Qualifiers::ObjCLifetime lifetime = variable->getType().getObjCLifetime(); if (lifetime) { switch (lifetime) { case Qualifiers::OCL_None: llvm_unreachable("impossible"); case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: break; case Qualifiers::OCL_Strong: case Qualifiers::OCL_Weak: info.NeedsCopyDispose = true; } // Block pointers require copy/dispose. So do Objective-C pointers. } else if (variable->getType()->isObjCRetainableType()) { info.NeedsCopyDispose = true; // used for mrr below. lifetime = Qualifiers::OCL_Strong; // So do types that require non-trivial copy construction. } else if (CI.hasCopyExpr()) { info.NeedsCopyDispose = true; info.HasCXXObject = true; // And so do types with destructors. } else if (CGM.getLangOpts().CPlusPlus) { if (const CXXRecordDecl *record = variable->getType()->getAsCXXRecordDecl()) { if (!record->hasTrivialDestructor()) { info.HasCXXObject = true; info.NeedsCopyDispose = true; } } } QualType VT = variable->getType(); CharUnits size = C.getTypeSizeInChars(VT); CharUnits align = C.getDeclAlign(variable); maxFieldAlign = std::max(maxFieldAlign, align); llvm::Type *llvmType = CGM.getTypes().ConvertTypeForMem(VT); layout.push_back(BlockLayoutChunk(align, size, lifetime, &CI, llvmType)); } // If that was everything, we're done here. if (layout.empty()) { info.StructureType = llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true); info.CanBeGlobal = true; return; } // Sort the layout by alignment. We have to use a stable sort here // to get reproducible results. There should probably be an // llvm::array_pod_stable_sort. std::stable_sort(layout.begin(), layout.end()); // Needed for blocks layout info. info.BlockHeaderForcedGapOffset = info.BlockSize; info.BlockHeaderForcedGapSize = CharUnits::Zero(); CharUnits &blockSize = info.BlockSize; info.BlockAlign = std::max(maxFieldAlign, info.BlockAlign); // Assuming that the first byte in the header is maximally aligned, // get the alignment of the first byte following the header. CharUnits endAlign = getLowBit(blockSize); // If the end of the header isn't satisfactorily aligned for the // maximum thing, look for things that are okay with the header-end // alignment, and keep appending them until we get something that's // aligned right. This algorithm is only guaranteed optimal if // that condition is satisfied at some point; otherwise we can get // things like: // header // next byte has alignment 4 // something_with_size_5; // next byte has alignment 1 // something_with_alignment_8; // which has 7 bytes of padding, as opposed to the naive solution // which might have less (?). if (endAlign < maxFieldAlign) { SmallVectorImpl<BlockLayoutChunk>::iterator li = layout.begin() + 1, le = layout.end(); // Look for something that the header end is already // satisfactorily aligned for. for (; li != le && endAlign < li->Alignment; ++li) ; // If we found something that's naturally aligned for the end of // the header, keep adding things... 
if (li != le) { SmallVectorImpl<BlockLayoutChunk>::iterator first = li; for (; li != le; ++li) { assert(endAlign >= li->Alignment); li->setIndex(info, elementTypes.size()); elementTypes.push_back(li->Type); blockSize += li->Size; endAlign = getLowBit(blockSize); // ...until we get to the alignment of the maximum field. if (endAlign >= maxFieldAlign) { if (li == first) { // No user field was appended. So, a gap was added. // Save total gap size for use in block layout bit map. info.BlockHeaderForcedGapSize = li->Size; } break; } } // Don't re-append everything we just appended. layout.erase(first, li); } } assert(endAlign == getLowBit(blockSize)); // At this point, we just have to add padding if the end align still // isn't aligned right. if (endAlign < maxFieldAlign) { CharUnits newBlockSize = blockSize.RoundUpToAlignment(maxFieldAlign); CharUnits padding = newBlockSize - blockSize; elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty, padding.getQuantity())); blockSize = newBlockSize; endAlign = getLowBit(blockSize); // might be > maxFieldAlign } assert(endAlign >= maxFieldAlign); assert(endAlign == getLowBit(blockSize)); // Slam everything else on now. This works because they have // strictly decreasing alignment and we expect that size is always a // multiple of alignment. for (SmallVectorImpl<BlockLayoutChunk>::iterator li = layout.begin(), le = layout.end(); li != le; ++li) { if (endAlign < li->Alignment) { // The size may not be a multiple of the alignment. This can only // happen with an over-aligned variable. We will be adding a padding // field to make the size a multiple of the alignment. CharUnits padding = li->Alignment - endAlign; elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty, padding.getQuantity())); blockSize += padding; endAlign = getLowBit(blockSize); } assert(endAlign >= li->Alignment); li->setIndex(info, elementTypes.size()); elementTypes.push_back(li->Type); blockSize += li->Size; endAlign = getLowBit(blockSize); } info.StructureType = llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true); } /// Enter the scope of a block. This should be run at the entrance to /// a full-expression so that the block's cleanups are pushed at the /// right place in the stack. static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) { assert(CGF.HaveInsertPoint()); // Allocate the block info and place it at the head of the list. CGBlockInfo &blockInfo = *new CGBlockInfo(block, CGF.CurFn->getName()); blockInfo.NextBlockInfo = CGF.FirstBlockInfo; CGF.FirstBlockInfo = &blockInfo; // Compute information about the layout, etc., of this block, // pushing cleanups as necessary. computeBlockInfo(CGF.CGM, &CGF, blockInfo); // Nothing else to do if it can be global. if (blockInfo.CanBeGlobal) return; // Make the allocation for the block. blockInfo.Address = CGF.CreateTempAlloca(blockInfo.StructureType, "block"); blockInfo.Address->setAlignment(blockInfo.BlockAlign.getQuantity()); // If there are cleanups to emit, enter them (but inactive). if (!blockInfo.NeedsCopyDispose) return; // Walk through the captures (in order) and find the ones not // captured by constant. for (const auto &CI : block->captures()) { // Ignore __block captures; there's nothing special in the // on-stack block that we need to do for them. if (CI.isByRef()) continue; // Ignore variables that are constant-captured. const VarDecl *variable = CI.getVariable(); CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); if (capture.isConstant()) continue; // Ignore objects that aren't destructed.
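// For example, a __strong object pointer or a C++ object with a
// nontrivial destructor needs a cleanup here; a captured int does not.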
QualType::DestructionKind dtorKind = variable->getType().isDestructedType(); if (dtorKind == QualType::DK_none) continue; CodeGenFunction::Destroyer *destroyer; // Block captures count as local values and have imprecise semantics. // They also can't be arrays, so we don't need to worry about that. if (dtorKind == QualType::DK_objc_strong_lifetime) { destroyer = CodeGenFunction::destroyARCStrongImprecise; } else { destroyer = CGF.getDestroyer(dtorKind); } // GEP down to the address. llvm::Value *addr = CGF.Builder.CreateStructGEP( blockInfo.StructureType, blockInfo.Address, capture.getIndex()); // We can use that GEP as the dominating IP. if (!blockInfo.DominatingIP) blockInfo.DominatingIP = cast<llvm::Instruction>(addr); CleanupKind cleanupKind = InactiveNormalCleanup; bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind); if (useArrayEHCleanup) cleanupKind = InactiveNormalAndEHCleanup; CGF.pushDestroy(cleanupKind, addr, variable->getType(), destroyer, useArrayEHCleanup); // Remember where that cleanup was. capture.setCleanup(CGF.EHStack.stable_begin()); } } /// Enter a full-expression with a non-trivial number of objects to /// clean up. This is in this file because, at the moment, the only /// kind of cleanup object is a BlockDecl*. void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) { assert(E->getNumObjects() != 0); ArrayRef<ExprWithCleanups::CleanupObject> cleanups = E->getObjects(); for (ArrayRef<ExprWithCleanups::CleanupObject>::iterator i = cleanups.begin(), e = cleanups.end(); i != e; ++i) { enterBlockScope(*this, *i); } } /// Find the layout for the given block in a linked list and remove it. static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head, const BlockDecl *block) { while (true) { assert(head && *head); CGBlockInfo *cur = *head; // If this is the block we're looking for, splice it out of the list. if (cur->getBlockDecl() == block) { *head = cur->NextBlockInfo; return cur; } head = &cur->NextBlockInfo; } } /// Destroy a chain of block layouts. void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) { assert(head && "destroying an empty chain"); do { CGBlockInfo *cur = head; head = cur->NextBlockInfo; delete cur; } while (head != nullptr); } /// Emit a block literal expression in the current function. llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) { // If the block has no captures, we won't have a pre-computed // layout for it. if (!blockExpr->getBlockDecl()->hasCaptures()) { CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName()); computeBlockInfo(CGM, this, blockInfo); blockInfo.BlockExpression = blockExpr; return EmitBlockLiteral(blockInfo); } // Find the block info for this block and take ownership of it. std::unique_ptr<CGBlockInfo> blockInfo; blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo, blockExpr->getBlockDecl())); blockInfo->BlockExpression = blockExpr; return EmitBlockLiteral(*blockInfo); } llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { // Using the computed layout, generate the actual block function. bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda(); llvm::Constant *blockFn = CodeGenFunction(CGM, true).GenerateBlockFunction(CurGD, blockInfo, LocalDeclMap, isLambdaConv); blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy); // If there is nothing to capture, we can emit this as a global block. if (blockInfo.CanBeGlobal) return buildGlobalBlock(CGM, blockInfo, blockFn); // Otherwise, we have to emit this as a local block.
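// The literal starts life on the stack: its isa is _NSConcreteStackBlock,
// and each capture is stored into the alloca field by field below. The
// runtime moves it to the heap only if the program calls _Block_copy.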
llvm::Constant *isa = CGM.getNSConcreteStackBlock(); isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy); // Build the block descriptor. llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo); llvm::Type *blockTy = blockInfo.StructureType; llvm::AllocaInst *blockAddr = blockInfo.Address; assert(blockAddr && "block has no address!"); // Compute the initial on-stack block flags. BlockFlags flags = BLOCK_HAS_SIGNATURE; if (blockInfo.HasCapturedVariableLayout) flags |= BLOCK_HAS_EXTENDED_LAYOUT; if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE; if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ; if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET; // Initialize the block literal. Builder.CreateStore( isa, Builder.CreateStructGEP(blockTy, blockAddr, 0, "block.isa")); Builder.CreateStore( llvm::ConstantInt::get(IntTy, flags.getBitMask()), Builder.CreateStructGEP(blockTy, blockAddr, 1, "block.flags")); Builder.CreateStore( llvm::ConstantInt::get(IntTy, 0), Builder.CreateStructGEP(blockTy, blockAddr, 2, "block.reserved")); Builder.CreateStore( blockFn, Builder.CreateStructGEP(blockTy, blockAddr, 3, "block.invoke")); Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockTy, blockAddr, 4, "block.descriptor")); // Finally, capture all the values into the block. const BlockDecl *blockDecl = blockInfo.getBlockDecl(); // First, 'this'. if (blockDecl->capturesCXXThis()) { llvm::Value *addr = Builder.CreateStructGEP( blockTy, blockAddr, blockInfo.CXXThisIndex, "block.captured-this.addr"); Builder.CreateStore(LoadCXXThis(), addr); } // Next, captured variables. for (const auto &CI : blockDecl->captures()) { const VarDecl *variable = CI.getVariable(); const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); // Ignore constant captures. if (capture.isConstant()) continue; QualType type = variable->getType(); CharUnits align = getContext().getDeclAlign(variable); // This will be a [[type]]*, except that a byref entry will just be // an i8**. llvm::Value *blockField = Builder.CreateStructGEP( blockTy, blockAddr, capture.getIndex(), "block.captured"); // Compute the address of the thing we're going to move into the // block literal. llvm::Value *src; if (BlockInfo && CI.isNested()) { // We need to use the capture from the enclosing block. const CGBlockInfo::Capture &enclosingCapture = BlockInfo->getCapture(variable); // This is a [[type]]*, except that a byref entry will just be an i8**. src = Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(), enclosingCapture.getIndex(), "block.capture.addr"); } else if (blockDecl->isConversionFromLambda()) { // The lambda capture in a lambda's conversion-to-block-pointer is // special; we'll simply emit it directly. src = nullptr; } else { // Just look it up in the locals map, which will give us back a // [[type]]*. If that doesn't work, do the more elaborate DRE // emission. src = LocalDeclMap.lookup(variable); if (!src) { DeclRefExpr declRef( const_cast<VarDecl *>(variable), /*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type, VK_LValue, SourceLocation()); src = EmitDeclRefLValue(&declRef).getAddress(); } } // For byrefs, we just write the pointer to the byref struct into // the block field. There's no need to chase the forwarding // pointer at this point, since we're building something that will // live a shorter life than the stack byref anyway. if (CI.isByRef()) { // Get a void* that points to the byref struct.
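// If the capture is itself nested, 'src' points at the enclosing block's
// capture field and the load below yields the void*; otherwise 'src' is
// the byref alloca itself and only needs the bitcast.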
if (CI.isNested()) src = Builder.CreateAlignedLoad(src, align.getQuantity(), "byref.capture"); else src = Builder.CreateBitCast(src, VoidPtrTy); // Write that void* into the capture field. Builder.CreateAlignedStore(src, blockField, align.getQuantity()); // If we have a copy constructor, evaluate that into the block field. } else if (const Expr *copyExpr = CI.getCopyExpr()) { if (blockDecl->isConversionFromLambda()) { // If we have a lambda conversion, emit the expression // directly into the block instead. AggValueSlot Slot = AggValueSlot::forAddr(blockField, align, Qualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased); EmitAggExpr(copyExpr, Slot); } else { EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr); } // If it's a reference variable, copy the reference into the block field. } else if (type->isReferenceType()) { llvm::Value *ref = Builder.CreateAlignedLoad(src, align.getQuantity(), "ref.val"); Builder.CreateAlignedStore(ref, blockField, align.getQuantity()); // If this is an ARC __strong block-pointer variable, don't do a // block copy. // // TODO: this can be generalized into the normal initialization logic: // we should never need to do a block-copy when initializing a local // variable, because the local variable's lifetime should be strictly // contained within the stack block's. } else if (type.getObjCLifetime() == Qualifiers::OCL_Strong && type->isBlockPointerType()) { // Load the block and do a simple retain. LValue srcLV = MakeAddrLValue(src, type, align); llvm::Value *value = EmitLoadOfScalar(srcLV, SourceLocation()); value = EmitARCRetainNonBlock(value); // Do a primitive store to the block field. LValue destLV = MakeAddrLValue(blockField, type, align); EmitStoreOfScalar(value, destLV, /*init*/ true); // Otherwise, fake up a POD copy into the block field. } else { // Fake up a new variable so that EmitScalarInit doesn't think // we're referring to the variable in its own initializer. ImplicitParamDecl blockFieldPseudoVar(getContext(), /*DC*/ nullptr, SourceLocation(), /*name*/ nullptr, type); // We use one of these or the other depending on whether the // reference is nested. DeclRefExpr declRef(const_cast<VarDecl *>(variable), /*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type, VK_LValue, SourceLocation()); ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue, &declRef, VK_RValue); // FIXME: Pass a specific location for the expr init so that the store is // attributed to a reasonable location - otherwise it may be attributed to // locations of subexpressions in the initialization. EmitExprAsInit(&l2r, &blockFieldPseudoVar, MakeAddrLValue(blockField, type, align), /*captured by init*/ false); } // Activate the cleanup if layout pushed one. if (!CI.isByRef()) { EHScopeStack::stable_iterator cleanup = capture.getCleanup(); if (cleanup.isValid()) ActivateCleanupBlock(cleanup, blockInfo.DominatingIP); } } // Cast to the converted block-pointer type, which happens (somewhat // unfortunately) to be a pointer to function type. 
llvm::Value *result = Builder.CreateBitCast(blockAddr, ConvertType(blockInfo.getBlockExpr()->getType())); return result; } llvm::Type *CodeGenModule::getBlockDescriptorType() { if (BlockDescriptorType) return BlockDescriptorType; llvm::Type *UnsignedLongTy = getTypes().ConvertType(getContext().UnsignedLongTy); // struct __block_descriptor { // unsigned long reserved; // unsigned long block_size; // // // later, the following will be added // // struct { // void (*copyHelper)(); // void (*disposeHelper)(); // } helpers; // !!! optional // // const char *signature; // the block signature // const char *layout; // reserved // }; BlockDescriptorType = llvm::StructType::create("struct.__block_descriptor", UnsignedLongTy, UnsignedLongTy, nullptr); // Now form a pointer to that. BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType); return BlockDescriptorType; } llvm::Type *CodeGenModule::getGenericBlockLiteralType() { if (GenericBlockLiteralType) return GenericBlockLiteralType; llvm::Type *BlockDescPtrTy = getBlockDescriptorType(); // struct __block_literal_generic { // void *__isa; // int __flags; // int __reserved; // void (*__invoke)(void *); // struct __block_descriptor *__descriptor; // }; GenericBlockLiteralType = llvm::StructType::create("struct.__block_literal_generic", VoidPtrTy, IntTy, IntTy, VoidPtrTy, BlockDescPtrTy, nullptr); return GenericBlockLiteralType; } RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue) { const BlockPointerType *BPT = E->getCallee()->getType()->getAs<BlockPointerType>(); llvm::Value *Callee = EmitScalarExpr(E->getCallee()); // Get a pointer to the generic block literal. llvm::Type *BlockLiteralTy = llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType()); // Bitcast the callee to a block literal. llvm::Value *BlockLiteral = Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal"); // Get the function pointer from the literal. llvm::Value *FuncPtr = Builder.CreateStructGEP( CGM.getGenericBlockLiteralType(), BlockLiteral, 3); BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy); // Add the block literal. CallArgList Args; Args.add(RValue::get(BlockLiteral), getContext().VoidPtrTy); QualType FnType = BPT->getPointeeType(); // And the rest of the arguments. EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arg_begin(), E->arg_end()); // Load the function. llvm::Value *Func = Builder.CreateLoad(FuncPtr); const FunctionType *FuncTy = FnType->castAs<FunctionType>(); const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeBlockFunctionCall(Args, FuncTy); // Cast the function pointer to the right type. llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo); llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy); Func = Builder.CreateBitCast(Func, BlockFTyPtr); // And call the block. return EmitCall(FnInfo, Func, ReturnValue, Args); } llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable, bool isByRef) { assert(BlockInfo && "evaluating block ref without block information?"); const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable); // Handle constant captures. if (capture.isConstant()) return LocalDeclMap[variable]; llvm::Value *addr = Builder.CreateStructGEP(BlockInfo->StructureType, LoadBlockStruct(), capture.getIndex(), "block.capture.addr"); if (isByRef) { // addr should be a void** right now. Load, then cast the result // to byref*.
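// Conceptually (a sketch, not the emitted IR):
//   byref = (struct byref *) *(void **) capture_field;
//   byref = byref->forwarding;  // may now point at the heap copy
//   value = &byref->x;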
addr = Builder.CreateLoad(addr); auto *byrefType = BuildByRefType(variable); llvm::PointerType *byrefPointerType = llvm::PointerType::get(byrefType, 0); addr = Builder.CreateBitCast(addr, byrefPointerType, "byref.addr"); // Follow the forwarding pointer. addr = Builder.CreateStructGEP(byrefType, addr, 1, "byref.forwarding"); addr = Builder.CreateLoad(addr, "byref.addr.forwarded"); // Cast back to byref* and GEP over to the actual object. addr = Builder.CreateBitCast(addr, byrefPointerType); addr = Builder.CreateStructGEP(byrefType, addr, getByRefValueLLVMField(variable).second, variable->getNameAsString()); } if (variable->getType()->isReferenceType()) addr = Builder.CreateLoad(addr, "ref.tmp"); return addr; } llvm::Constant * CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr, const char *name) { CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name); blockInfo.BlockExpression = blockExpr; // Compute information about the layout, etc., of this block. computeBlockInfo(*this, nullptr, blockInfo); // Using that metadata, generate the actual block function. llvm::Constant *blockFn; { llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap; blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(), blockInfo, LocalDeclMap, false); } blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy); return buildGlobalBlock(*this, blockInfo, blockFn); } static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM, const CGBlockInfo &blockInfo, llvm::Constant *blockFn) { assert(blockInfo.CanBeGlobal); // Generate the constants for the block literal initializer. llvm::Constant *fields[BlockHeaderSize]; // isa fields[0] = CGM.getNSConcreteGlobalBlock(); // __flags BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE; if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET; fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask()); // Reserved fields[2] = llvm::Constant::getNullValue(CGM.IntTy); // Function fields[3] = blockFn; // Descriptor fields[4] = buildBlockDescriptor(CGM, blockInfo); llvm::Constant *init = llvm::ConstantStruct::getAnon(fields); llvm::GlobalVariable *literal = new llvm::GlobalVariable(CGM.getModule(), init->getType(), /*constant*/ true, llvm::GlobalVariable::InternalLinkage, init, "__block_literal_global"); literal->setAlignment(blockInfo.BlockAlign.getQuantity()); // Return a constant of the appropriately-casted type. llvm::Type *requiredType = CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType()); return llvm::ConstantExpr::getBitCast(literal, requiredType); } llvm::Function * CodeGenFunction::GenerateBlockFunction(GlobalDecl GD, const CGBlockInfo &blockInfo, const DeclMapTy &ldm, bool IsLambdaConversionToBlock) { const BlockDecl *blockDecl = blockInfo.getBlockDecl(); CurGD = GD; CurEHLocation = blockInfo.getBlockExpr()->getLocEnd(); BlockInfo = &blockInfo; // Arrange for local static and local extern declarations to appear // to be local to this function as well, in case they're directly // referenced in a block. for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) { const auto *var = dyn_cast<VarDecl>(i->first); if (var && !var->hasLocalStorage()) LocalDeclMap[var] = i->second; } // Begin building the function declaration. // Build the argument list. FunctionArgList args; // The first argument is the block pointer. Just take it as a void* // and cast it later. 
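// Note that the implicit parameter below is spelled '.block_descriptor'
// even though it is the whole block literal, not just the descriptor.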
QualType selfTy = getContext().VoidPtrTy; IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor"); ImplicitParamDecl selfDecl(getContext(), const_cast<BlockDecl*>(blockDecl), SourceLocation(), II, selfTy); args.push_back(&selfDecl); // Now add the rest of the parameters. args.append(blockDecl->param_begin(), blockDecl->param_end()); // Create the function declaration. const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType(); const CGFunctionInfo &fnInfo = CGM.getTypes().arrangeFreeFunctionDeclaration( fnType->getReturnType(), args, fnType->getExtInfo(), fnType->isVariadic()); if (CGM.ReturnSlotInterferesWithArgs(fnInfo)) blockInfo.UsesStret = true; llvm::FunctionType *fnLLVMType = CGM.getTypes().GetFunctionType(fnInfo); StringRef name = CGM.getBlockMangledName(GD, blockDecl); llvm::Function *fn = llvm::Function::Create( fnLLVMType, llvm::GlobalValue::InternalLinkage, name, &CGM.getModule()); CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo); // Begin generating the function. StartFunction(blockDecl, fnType->getReturnType(), fn, fnInfo, args, blockDecl->getLocation(), blockInfo.getBlockExpr()->getBody()->getLocStart()); // Okay. Undo some of what StartFunction did. // Pull the 'self' reference out of the local decl map. llvm::Value *blockAddr = LocalDeclMap[&selfDecl]; LocalDeclMap.erase(&selfDecl); BlockPointer = Builder.CreateBitCast(blockAddr, blockInfo.StructureType->getPointerTo(), "block"); // At -O0 we generate an explicit alloca for the BlockPointer, so the RA // won't delete the dbg.declare intrinsics for captured variables. llvm::Value *BlockPointerDbgLoc = BlockPointer; if (CGM.getCodeGenOpts().OptimizationLevel == 0) { // Allocate a stack slot for it, so we can point the debugger to it llvm::AllocaInst *Alloca = CreateTempAlloca(BlockPointer->getType(), "block.addr"); unsigned Align = getContext().getDeclAlign(&selfDecl).getQuantity(); Alloca->setAlignment(Align); // Set the DebugLocation to empty, so the store is recognized as a // frame setup instruction by llvm::DwarfDebug::beginFunction(). auto NL = ApplyDebugLocation::CreateEmpty(*this); Builder.CreateAlignedStore(BlockPointer, Alloca, Align); BlockPointerDbgLoc = Alloca; } // If we have a C++ 'this' reference, go ahead and force it into // existence now. if (blockDecl->capturesCXXThis()) { llvm::Value *addr = Builder.CreateStructGEP(blockInfo.StructureType, BlockPointer, blockInfo.CXXThisIndex, "block.captured-this"); CXXThisValue = Builder.CreateLoad(addr, "this"); } // Also force all the constant captures. for (const auto &CI : blockDecl->captures()) { const VarDecl *variable = CI.getVariable(); const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); if (!capture.isConstant()) continue; unsigned align = getContext().getDeclAlign(variable).getQuantity(); llvm::AllocaInst *alloca = CreateMemTemp(variable->getType(), "block.captured-const"); alloca->setAlignment(align); Builder.CreateAlignedStore(capture.getConstant(), alloca, align); LocalDeclMap[variable] = alloca; } // Save a spot to insert the debug information for all the DeclRefExprs. llvm::BasicBlock *entry = Builder.GetInsertBlock(); llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint(); --entry_ptr; if (IsLambdaConversionToBlock) EmitLambdaBlockInvokeBody(); else { PGO.assignRegionCounters(blockDecl, fn); incrementProfileCounter(blockDecl->getBody()); EmitStmt(blockDecl->getBody()); } // Remember where we were... llvm::BasicBlock *resume = Builder.GetInsertBlock(); // Go back to the entry. 
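// entry_ptr was backed up one slot before the body was emitted so that
// it would survive instruction insertion; advancing it restores the
// saved insertion point.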
++entry_ptr; Builder.SetInsertPoint(entry, entry_ptr); // Emit debug information for all the DeclRefExprs. // FIXME: also for 'this' if (CGDebugInfo *DI = getDebugInfo()) { for (const auto &CI : blockDecl->captures()) { const VarDecl *variable = CI.getVariable(); DI->EmitLocation(Builder, variable->getLocation()); if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) { const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); if (capture.isConstant()) { DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable], Builder); continue; } DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointerDbgLoc, Builder, blockInfo, entry_ptr == entry->end() ? nullptr : entry_ptr); } } // Recover location if it was changed in the above loop. DI->EmitLocation(Builder, cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc()); } // And resume where we left off. if (resume == nullptr) Builder.ClearInsertionPoint(); else Builder.SetInsertPoint(resume); FinishFunction(cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc()); return fn; } /* notes.push_back(HelperInfo()); HelperInfo &note = notes.back(); note.index = capture.getIndex(); note.RequiresCopying = (ci->hasCopyExpr() || BlockRequiresCopying(type)); note.cxxbar_import = ci->getCopyExpr(); if (ci->isByRef()) { note.flag = BLOCK_FIELD_IS_BYREF; if (type.isObjCGCWeak()) note.flag |= BLOCK_FIELD_IS_WEAK; } else if (type->isBlockPointerType()) { note.flag = BLOCK_FIELD_IS_BLOCK; } else { note.flag = BLOCK_FIELD_IS_OBJECT; } */ /// Generate the copy-helper function for a block closure object: /// static void block_copy_helper(block_t *dst, block_t *src); /// The runtime will have previously initialized 'dst' by doing a /// bit-copy of 'src'. /// /// Note that this copies an entire block closure object to the heap; /// it should not be confused with a 'byref copy helper', which moves /// the contents of an individual __block variable to the heap. llvm::Constant * CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) { ASTContext &C = getContext(); FunctionArgList args; ImplicitParamDecl dstDecl(getContext(), nullptr, SourceLocation(), nullptr, C.VoidPtrTy); args.push_back(&dstDecl); ImplicitParamDecl srcDecl(getContext(), nullptr, SourceLocation(), nullptr, C.VoidPtrTy); args.push_back(&srcDecl); const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration( C.VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false); // FIXME: it would be nice if these were mergeable with things with // identical semantics. llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); llvm::Function *Fn = llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_block_", &CGM.getModule()); IdentifierInfo *II = &CGM.getContext().Idents.get("__copy_helper_block_"); FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, C.VoidTy, nullptr, SC_Static, false, false); auto NL = ApplyDebugLocation::CreateEmpty(*this); StartFunction(FD, C.VoidTy, Fn, FI, args); // Create a scope with an artificial location for the body of this function. 
auto AL = ApplyDebugLocation::CreateArtificial(*this); llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo(); llvm::Value *src = GetAddrOfLocalVar(&srcDecl); src = Builder.CreateLoad(src); src = Builder.CreateBitCast(src, structPtrTy, "block.source"); llvm::Value *dst = GetAddrOfLocalVar(&dstDecl); dst = Builder.CreateLoad(dst); dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest"); const BlockDecl *blockDecl = blockInfo.getBlockDecl(); for (const auto &CI : blockDecl->captures()) { const VarDecl *variable = CI.getVariable(); QualType type = variable->getType(); const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); if (capture.isConstant()) continue; const Expr *copyExpr = CI.getCopyExpr(); BlockFieldFlags flags; bool useARCWeakCopy = false; bool useARCStrongCopy = false; if (copyExpr) { assert(!CI.isByRef()); // don't bother computing flags } else if (CI.isByRef()) { flags = BLOCK_FIELD_IS_BYREF; if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK; } else if (type->isObjCRetainableType()) { flags = BLOCK_FIELD_IS_OBJECT; bool isBlockPointer = type->isBlockPointerType(); if (isBlockPointer) flags = BLOCK_FIELD_IS_BLOCK; // Special rules for ARC captures: if (getLangOpts().ObjCAutoRefCount) { Qualifiers qs = type.getQualifiers(); // We need to register __weak direct captures with the runtime. if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) { useARCWeakCopy = true; // We need to retain the copied value for __strong direct captures. } else if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) { // If it's a block pointer, we have to copy the block and // assign that to the destination pointer, so we might as // well use _Block_object_assign. Otherwise we can avoid that. if (!isBlockPointer) useARCStrongCopy = true; // Otherwise the memcpy is fine. } else { continue; } // Non-ARC captures of retainable pointers are strong and // therefore require a call to _Block_object_assign. } else { // fall through } } else { continue; } unsigned index = capture.getIndex(); llvm::Value *srcField = Builder.CreateStructGEP(blockInfo.StructureType, src, index); llvm::Value *dstField = Builder.CreateStructGEP(blockInfo.StructureType, dst, index); // If there's an explicit copy expression, we do that. if (copyExpr) { EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr); } else if (useARCWeakCopy) { EmitARCCopyWeak(dstField, srcField); } else { llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src"); if (useARCStrongCopy) { // At -O0, store null into the destination field (so that the // storeStrong doesn't over-release) and then call storeStrong. // This is a workaround to not having an initStrong call. if (CGM.getCodeGenOpts().OptimizationLevel == 0) { auto *ty = cast<llvm::PointerType>(srcValue->getType()); llvm::Value *null = llvm::ConstantPointerNull::get(ty); Builder.CreateStore(null, dstField); EmitARCStoreStrongCall(dstField, srcValue, true); // With optimization enabled, take advantage of the fact that // the blocks runtime guarantees a memcpy of the block data, and // just emit a retain of the src field. } else { EmitARCRetainNonBlock(srcValue); // We don't need this anymore, so kill it. It's not quite // worth the annoyance to avoid creating it in the first place. 
cast<llvm::Instruction>(dstField)->eraseFromParent(); } } else { srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy); llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy); llvm::Value *args[] = { dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask()) }; bool copyCanThrow = false; if (CI.isByRef() && variable->getType()->getAsCXXRecordDecl()) { const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(variable); if (copyExpr) { copyCanThrow = true; // FIXME: reuse the noexcept logic } } if (copyCanThrow) { EmitRuntimeCallOrInvoke(CGM.getBlockObjectAssign(), args); } else { EmitNounwindRuntimeCall(CGM.getBlockObjectAssign(), args); } } } } FinishFunction(); return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); } /// Generate the destroy-helper function for a block closure object: /// static void block_destroy_helper(block_t *theBlock); /// /// Note that this destroys a heap-allocated block closure object; /// it should not be confused with a 'byref destroy helper', which /// destroys the heap-allocated contents of an individual __block /// variable. llvm::Constant * CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) { ASTContext &C = getContext(); FunctionArgList args; ImplicitParamDecl srcDecl(getContext(), nullptr, SourceLocation(), nullptr, C.VoidPtrTy); args.push_back(&srcDecl); const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration( C.VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false); // FIXME: We'd like to make these mergeable by content, with // internal linkage. llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); llvm::Function *Fn = llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, "__destroy_helper_block_", &CGM.getModule()); IdentifierInfo *II = &CGM.getContext().Idents.get("__destroy_helper_block_"); FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, C.VoidTy, nullptr, SC_Static, false, false); // Create a scope with an artificial location for the body of this function. auto NL = ApplyDebugLocation::CreateEmpty(*this); StartFunction(FD, C.VoidTy, Fn, FI, args); auto AL = ApplyDebugLocation::CreateArtificial(*this); llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo(); llvm::Value *src = GetAddrOfLocalVar(&srcDecl); src = Builder.CreateLoad(src); src = Builder.CreateBitCast(src, structPtrTy, "block"); const BlockDecl *blockDecl = blockInfo.getBlockDecl(); CodeGenFunction::RunCleanupsScope cleanups(*this); for (const auto &CI : blockDecl->captures()) { const VarDecl *variable = CI.getVariable(); QualType type = variable->getType(); const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); if (capture.isConstant()) continue; BlockFieldFlags flags; const CXXDestructorDecl *dtor = nullptr; bool useARCWeakDestroy = false; bool useARCStrongDestroy = false; if (CI.isByRef()) { flags = BLOCK_FIELD_IS_BYREF; if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK; } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) { if (record->hasTrivialDestructor()) continue; dtor = record->getDestructor(); } else if (type->isObjCRetainableType()) { flags = BLOCK_FIELD_IS_OBJECT; if (type->isBlockPointerType()) flags = BLOCK_FIELD_IS_BLOCK; // Special rules for ARC captures. if (getLangOpts().ObjCAutoRefCount) { Qualifiers qs = type.getQualifiers(); // Don't generate special dispose logic for a captured object // unless it's __strong or __weak.
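// __unsafe_unretained and __autoreleasing captures are just bits as far
// as the runtime is concerned; there is nothing for dispose to release.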
if (!qs.hasStrongOrWeakObjCLifetime()) continue; // Support __weak direct captures. if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) useARCWeakDestroy = true; // Tools really want us to use objc_storeStrong here. else useARCStrongDestroy = true; } } else { continue; } unsigned index = capture.getIndex(); llvm::Value *srcField = Builder.CreateStructGEP(blockInfo.StructureType, src, index); // If the variable has a C++ destructor, push a cleanup to run it. if (dtor) { PushDestructorCleanup(dtor, srcField); // If this is a __weak capture, emit the release directly. } else if (useARCWeakDestroy) { EmitARCDestroyWeak(srcField); // Destroy strong objects with a call if requested. } else if (useARCStrongDestroy) { EmitARCDestroyStrong(srcField, ARCImpreciseLifetime); // Otherwise we call _Block_object_dispose. It wouldn't be too // hard to just emit this as a cleanup if we wanted to make sure // that things were done in reverse. } else { llvm::Value *value = Builder.CreateLoad(srcField); value = Builder.CreateBitCast(value, VoidPtrTy); BuildBlockRelease(value, flags); } } cleanups.ForceCleanup(); FinishFunction(); return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); } namespace { /// Emits the copy/dispose helper functions for a __block object of id type. class ObjectByrefHelpers : public CodeGenModule::ByrefHelpers { BlockFieldFlags Flags; public: ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags) : ByrefHelpers(alignment), Flags(flags) {} void emitCopy(CodeGenFunction &CGF, llvm::Value *destField, llvm::Value *srcField) override { destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy); srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy); llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField); unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask(); llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags); llvm::Value *fn = CGF.CGM.getBlockObjectAssign(); llvm::Value *args[] = { destField, srcValue, flagsVal }; CGF.EmitNounwindRuntimeCall(fn, args); } void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override { field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0)); llvm::Value *value = CGF.Builder.CreateLoad(field); CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER); } void profileImpl(llvm::FoldingSetNodeID &id) const override { id.AddInteger(Flags.getBitMask()); } }; /// Emits the copy/dispose helpers for an ARC __block __weak variable. class ARCWeakByrefHelpers : public CodeGenModule::ByrefHelpers { public: ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {} void emitCopy(CodeGenFunction &CGF, llvm::Value *destField, llvm::Value *srcField) override { CGF.EmitARCMoveWeak(destField, srcField); } void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override { CGF.EmitARCDestroyWeak(field); } void profileImpl(llvm::FoldingSetNodeID &id) const override { // 0 is distinguishable from all pointers and byref flags id.AddInteger(0); } }; /// Emits the copy/dispose helpers for an ARC __block __strong variable /// that's not of block-pointer type. class ARCStrongByrefHelpers : public CodeGenModule::ByrefHelpers { public: ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {} void emitCopy(CodeGenFunction &CGF, llvm::Value *destField, llvm::Value *srcField) override { // Do a "move" by copying the value and then zeroing out the old // variable.
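// Nulling out the source keeps the reference count balanced: the +1
// simply travels from the old (stack) copy to the new (heap) copy.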
llvm::LoadInst *value = CGF.Builder.CreateLoad(srcField); value->setAlignment(Alignment.getQuantity()); llvm::Value *null = llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType())); if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { llvm::StoreInst *store = CGF.Builder.CreateStore(null, destField); store->setAlignment(Alignment.getQuantity()); CGF.EmitARCStoreStrongCall(destField, value, /*ignored*/ true); CGF.EmitARCStoreStrongCall(srcField, null, /*ignored*/ true); return; } llvm::StoreInst *store = CGF.Builder.CreateStore(value, destField); store->setAlignment(Alignment.getQuantity()); store = CGF.Builder.CreateStore(null, srcField); store->setAlignment(Alignment.getQuantity()); } void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override { CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime); } void profileImpl(llvm::FoldingSetNodeID &id) const override { // 1 is distinguishable from all pointers and byref flags id.AddInteger(1); } }; /// Emits the copy/dispose helpers for an ARC __block __strong /// variable that's of block-pointer type. class ARCStrongBlockByrefHelpers : public CodeGenModule::ByrefHelpers { public: ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {} void emitCopy(CodeGenFunction &CGF, llvm::Value *destField, llvm::Value *srcField) override { // Do the copy with objc_retainBlock; that's all that // _Block_object_assign would do anyway, and we'd have to pass the // right arguments to make sure it doesn't get no-op'ed. llvm::LoadInst *oldValue = CGF.Builder.CreateLoad(srcField); oldValue->setAlignment(Alignment.getQuantity()); llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true); llvm::StoreInst *store = CGF.Builder.CreateStore(copy, destField); store->setAlignment(Alignment.getQuantity()); } void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override { CGF.EmitARCDestroyStrong(field, ARCImpreciseLifetime); } void profileImpl(llvm::FoldingSetNodeID &id) const override { // 2 is distinguishable from all pointers and byref flags id.AddInteger(2); } }; /// Emits the copy/dispose helpers for a __block variable with a /// nontrivial copy constructor or destructor. 
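/// The copy side runs the copy expression synthesized by Sema; the
/// dispose side just pushes a destructor cleanup for the variable.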
class CXXByrefHelpers : public CodeGenModule::ByrefHelpers { QualType VarType; const Expr *CopyExpr; public: CXXByrefHelpers(CharUnits alignment, QualType type, const Expr *copyExpr) : ByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {} bool needsCopy() const override { return CopyExpr != nullptr; } void emitCopy(CodeGenFunction &CGF, llvm::Value *destField, llvm::Value *srcField) override { if (!CopyExpr) return; CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr); } void emitDispose(CodeGenFunction &CGF, llvm::Value *field) override { EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin(); CGF.PushDestructorCleanup(VarType, field); CGF.PopCleanupBlocks(cleanupDepth); } void profileImpl(llvm::FoldingSetNodeID &id) const override { id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr()); } }; } // end anonymous namespace static llvm::Constant * generateByrefCopyHelper(CodeGenFunction &CGF, llvm::StructType &byrefType, unsigned valueFieldIndex, CodeGenModule::ByrefHelpers &byrefInfo) { ASTContext &Context = CGF.getContext(); QualType R = Context.VoidTy; FunctionArgList args; ImplicitParamDecl dst(CGF.getContext(), nullptr, SourceLocation(), nullptr, Context.VoidPtrTy); args.push_back(&dst); ImplicitParamDecl src(CGF.getContext(), nullptr, SourceLocation(), nullptr, Context.VoidPtrTy); args.push_back(&src); const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration( R, args, FunctionType::ExtInfo(), /*variadic=*/false); CodeGenTypes &Types = CGF.CGM.getTypes(); llvm::FunctionType *LTy = Types.GetFunctionType(FI); // FIXME: We'd like to make these mergeable by content, with // internal linkage. llvm::Function *Fn = llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, "__Block_byref_object_copy_", &CGF.CGM.getModule()); IdentifierInfo *II = &Context.Idents.get("__Block_byref_object_copy_"); FunctionDecl *FD = FunctionDecl::Create(Context, Context.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, R, nullptr, SC_Static, false, false); CGF.StartFunction(FD, R, Fn, FI, args); if (byrefInfo.needsCopy()) { llvm::Type *byrefPtrType = byrefType.getPointerTo(0); // dst->x llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst); destField = CGF.Builder.CreateLoad(destField); destField = CGF.Builder.CreateBitCast(destField, byrefPtrType); destField = CGF.Builder.CreateStructGEP(&byrefType, destField, valueFieldIndex, "x"); // src->x llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src); srcField = CGF.Builder.CreateLoad(srcField); srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType); srcField = CGF.Builder.CreateStructGEP(&byrefType, srcField, valueFieldIndex, "x"); byrefInfo.emitCopy(CGF, destField, srcField); } CGF.FinishFunction(); return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy); } /// Build the copy helper for a __block variable. static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM, llvm::StructType &byrefType, unsigned byrefValueIndex, CodeGenModule::ByrefHelpers &info) { CodeGenFunction CGF(CGM); return generateByrefCopyHelper(CGF, byrefType, byrefValueIndex, info); } /// Generate code for a __block variable's dispose helper.
static llvm::Constant * generateByrefDisposeHelper(CodeGenFunction &CGF, llvm::StructType &byrefType, unsigned byrefValueIndex, CodeGenModule::ByrefHelpers &byrefInfo) { ASTContext &Context = CGF.getContext(); QualType R = Context.VoidTy; FunctionArgList args; ImplicitParamDecl src(CGF.getContext(), nullptr, SourceLocation(), nullptr, Context.VoidPtrTy); args.push_back(&src); const CGFunctionInfo &FI = CGF.CGM.getTypes().arrangeFreeFunctionDeclaration( R, args, FunctionType::ExtInfo(), /*variadic=*/false); CodeGenTypes &Types = CGF.CGM.getTypes(); llvm::FunctionType *LTy = Types.GetFunctionType(FI); // FIXME: We'd like to make these mergeable by content, with // internal linkage. llvm::Function *Fn = llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, "__Block_byref_object_dispose_", &CGF.CGM.getModule()); IdentifierInfo *II = &Context.Idents.get("__Block_byref_object_dispose_"); FunctionDecl *FD = FunctionDecl::Create(Context, Context.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II, R, nullptr, SC_Static, false, false); CGF.StartFunction(FD, R, Fn, FI, args); if (byrefInfo.needsDispose()) { llvm::Value *V = CGF.GetAddrOfLocalVar(&src); V = CGF.Builder.CreateLoad(V); V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0)); V = CGF.Builder.CreateStructGEP(&byrefType, V, byrefValueIndex, "x"); byrefInfo.emitDispose(CGF, V); } CGF.FinishFunction(); return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy); } /// Build the dispose helper for a __block variable. static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM, llvm::StructType &byrefType, unsigned byrefValueIndex, CodeGenModule::ByrefHelpers &info) { CodeGenFunction CGF(CGM); return generateByrefDisposeHelper(CGF, byrefType, byrefValueIndex, info); } /// Lazily build the copy and dispose helpers for a __block variable /// with the given information. template <class T> static T *buildByrefHelpers(CodeGenModule &CGM, llvm::StructType &byrefTy, unsigned byrefValueIndex, T &byrefInfo) { // Increase the field's alignment to be at least pointer alignment, // since the layout of the byref struct will guarantee at least that. byrefInfo.Alignment = std::max(byrefInfo.Alignment, CharUnits::fromQuantity(CGM.PointerAlignInBytes)); llvm::FoldingSetNodeID id; byrefInfo.Profile(id); void *insertPos; CodeGenModule::ByrefHelpers *node = CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos); if (node) return static_cast<T*>(node); byrefInfo.CopyHelper = buildByrefCopyHelper(CGM, byrefTy, byrefValueIndex, byrefInfo); byrefInfo.DisposeHelper = buildByrefDisposeHelper(CGM, byrefTy, byrefValueIndex, byrefInfo); T *copy = new (CGM.getContext()) T(byrefInfo); CGM.ByrefHelpersCache.InsertNode(copy, insertPos); return copy; } /// Build the copy and dispose helpers for the given __block variable /// emission. Places the helpers in the global cache. Returns null /// if no helpers are required.
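/// The helpers are uniqued in CGM.ByrefHelpersCache (see the profileImpl
/// overrides above), so equivalent __block variables can share a single
/// copy/dispose pair.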
CodeGenModule::ByrefHelpers * CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType, const AutoVarEmission &emission) { const VarDecl &var = *emission.Variable; QualType type = var.getType(); unsigned byrefValueIndex = getByRefValueLLVMField(&var).second; if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) { const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var); if (!copyExpr && record->hasTrivialDestructor()) return nullptr; CXXByrefHelpers byrefInfo(emission.Alignment, type, copyExpr); return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } // Otherwise, if we don't have a retainable type, there's nothing to do; // the runtime's default bit-copy of the byref structure is sufficient. if (!type->isObjCRetainableType()) return nullptr; Qualifiers qs = type.getQualifiers(); // If we have lifetime, that dominates. if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) { assert(getLangOpts().ObjCAutoRefCount); switch (lifetime) { case Qualifiers::OCL_None: llvm_unreachable("impossible"); // These are just bits as far as the runtime is concerned. case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: return nullptr; // Tell the runtime that this is ARC __weak, called by the // byref routines. case Qualifiers::OCL_Weak: { ARCWeakByrefHelpers byrefInfo(emission.Alignment); return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } // ARC __strong __block variables need to be retained. case Qualifiers::OCL_Strong: // Block pointers need to be copied, and there's no direct // transfer possible. if (type->isBlockPointerType()) { ARCStrongBlockByrefHelpers byrefInfo(emission.Alignment); return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); // Otherwise, we transfer ownership of the retain from the stack // to the heap.
} else { ARCStrongByrefHelpers byrefInfo(emission.Alignment); return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } } llvm_unreachable("fell out of lifetime switch!"); } BlockFieldFlags flags; if (type->isBlockPointerType()) { flags |= BLOCK_FIELD_IS_BLOCK; } else if (CGM.getContext().isObjCNSObjectType(type) || type->isObjCObjectPointerType()) { flags |= BLOCK_FIELD_IS_OBJECT; } else { return nullptr; } if (type.isObjCGCWeak()) flags |= BLOCK_FIELD_IS_WEAK; ObjectByrefHelpers byrefInfo(emission.Alignment, flags); return ::buildByrefHelpers(CGM, byrefType, byrefValueIndex, byrefInfo); } std::pair<llvm::Type *, unsigned> CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const { assert(ByRefValueInfo.count(VD) && "Did not find value!"); return ByRefValueInfo.find(VD)->second; } llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr, const VarDecl *V) { auto P = getByRefValueLLVMField(V); llvm::Value *Loc = Builder.CreateStructGEP(P.first, BaseAddr, 1, "forwarding"); Loc = Builder.CreateLoad(Loc); Loc = Builder.CreateStructGEP(P.first, Loc, P.second, V->getNameAsString()); return Loc; } /// BuildByRefType - This routine changes a __block variable declared as T x /// into: /// /// struct { /// void *__isa; /// void *__forwarding; /// int32_t __flags; /// int32_t __size; /// void *__copy_helper; // only if needed /// void *__destroy_helper; // only if needed /// void *__byref_variable_layout;// only if needed /// char padding[X]; // only if needed /// T x; /// } x /// llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) { std::pair<llvm::Type *, unsigned> &Info = ByRefValueInfo[D]; if (Info.first) return Info.first; QualType Ty = D->getType(); SmallVector<llvm::Type *, 8> types; llvm::StructType *ByRefType = llvm::StructType::create(getLLVMContext(), "struct.__block_byref_" + D->getNameAsString()); // void *__isa; types.push_back(Int8PtrTy); // void *__forwarding; types.push_back(llvm::PointerType::getUnqual(ByRefType)); // int32_t __flags; types.push_back(Int32Ty); // int32_t __size; types.push_back(Int32Ty); // Note that this must match *exactly* the logic in buildByrefHelpers. bool HasCopyAndDispose = getContext().BlockRequiresCopying(Ty, D); if (HasCopyAndDispose) { /// void *__copy_helper; types.push_back(Int8PtrTy); /// void *__destroy_helper; types.push_back(Int8PtrTy); } bool HasByrefExtendedLayout = false; Qualifiers::ObjCLifetime Lifetime; if (getContext().getByrefLifetime(Ty, Lifetime, HasByrefExtendedLayout) && HasByrefExtendedLayout) /// void *__byref_variable_layout; types.push_back(Int8PtrTy); bool Packed = false; CharUnits Align = getContext().getDeclAlign(D); if (Align > getContext().toCharUnitsFromBits(getTarget().getPointerAlign(0))) { // We have to insert padding. // The struct above has 2 32-bit integers. unsigned CurrentOffsetInBytes = 4 * 2; // And either 2, 3, 4 or 5 pointers. unsigned noPointers = 2; if (HasCopyAndDispose) noPointers += 2; if (HasByrefExtendedLayout) noPointers += 1; CurrentOffsetInBytes += noPointers * CGM.getDataLayout().getTypeAllocSize(Int8PtrTy); // Align the offset. unsigned AlignedOffsetInBytes = llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity()); unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes; if (NumPaddingBytes > 0) { llvm::Type *Ty = Int8Ty; // FIXME: We need a sema error for alignment larger than the minimum of // the maximal stack alignment and the alignment of malloc on the system. 
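// (Editorial worked example, assuming a 64-bit target) For a __block
// variable that needs copy/dispose helpers, has no extended layout, and is
// declared with 32-byte alignment:
//   CurrentOffsetInBytes = 4*2 + 4*8       = 40   (two i32s, four pointers)
//   AlignedOffsetInBytes = RoundUp(40, 32) = 64
//   NumPaddingBytes      = 64 - 40         = 24   -> pad with [24 x i8]
// so the object field below lands at byte offset 64, as required.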
if (NumPaddingBytes > 1) Ty = llvm::ArrayType::get(Ty, NumPaddingBytes); types.push_back(Ty); // We want a packed struct. Packed = true; } } // T x; types.push_back(ConvertTypeForMem(Ty)); ByRefType->setBody(types, Packed); Info.first = ByRefType; Info.second = types.size() - 1; return Info.first; } /// Initialize the structural components of a __block variable, i.e. /// everything but the actual object. void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) { // Find the address of the local. llvm::Value *addr = emission.Address; // That's an alloca of the byref structure type. llvm::StructType *byrefType = cast<llvm::StructType>( cast<llvm::PointerType>(addr->getType())->getElementType()); // Build the byref helpers if necessary. This is null if we don't need any. CodeGenModule::ByrefHelpers *helpers = buildByrefHelpers(*byrefType, emission); const VarDecl &D = *emission.Variable; QualType type = D.getType(); bool HasByrefExtendedLayout; Qualifiers::ObjCLifetime ByrefLifetime; bool ByRefHasLifetime = getContext().getByrefLifetime(type, ByrefLifetime, HasByrefExtendedLayout); llvm::Value *V; // Initialize the 'isa', which is just 0 or 1. int isa = 0; if (type.isObjCGCWeak()) isa = 1; V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa"); Builder.CreateStore(V, Builder.CreateStructGEP(nullptr, addr, 0, "byref.isa")); // Store the address of the variable into its own forwarding pointer. Builder.CreateStore( addr, Builder.CreateStructGEP(nullptr, addr, 1, "byref.forwarding")); // Blocks ABI: // c) the flags field is set to either 0 if no helper functions are // needed or BLOCK_BYREF_HAS_COPY_DISPOSE if they are, BlockFlags flags; if (helpers) flags |= BLOCK_BYREF_HAS_COPY_DISPOSE; if (ByRefHasLifetime) { if (HasByrefExtendedLayout) flags |= BLOCK_BYREF_LAYOUT_EXTENDED; else switch (ByrefLifetime) { case Qualifiers::OCL_Strong: flags |= BLOCK_BYREF_LAYOUT_STRONG; break; case Qualifiers::OCL_Weak: flags |= BLOCK_BYREF_LAYOUT_WEAK; break; case Qualifiers::OCL_ExplicitNone: flags |= BLOCK_BYREF_LAYOUT_UNRETAINED; break; case Qualifiers::OCL_None: if (!type->isObjCObjectPointerType() && !type->isBlockPointerType()) flags |= BLOCK_BYREF_LAYOUT_NON_OBJECT; break; default: break; } if (CGM.getLangOpts().ObjCGCBitmapPrint) { printf("\n Inline flag for BYREF variable layout (%d):", flags.getBitMask()); if (flags & BLOCK_BYREF_HAS_COPY_DISPOSE) printf(" BLOCK_BYREF_HAS_COPY_DISPOSE"); if (flags & BLOCK_BYREF_LAYOUT_MASK) { BlockFlags ThisFlag(flags.getBitMask() & BLOCK_BYREF_LAYOUT_MASK); if (ThisFlag == BLOCK_BYREF_LAYOUT_EXTENDED) printf(" BLOCK_BYREF_LAYOUT_EXTENDED"); if (ThisFlag == BLOCK_BYREF_LAYOUT_STRONG) printf(" BLOCK_BYREF_LAYOUT_STRONG"); if (ThisFlag == BLOCK_BYREF_LAYOUT_WEAK) printf(" BLOCK_BYREF_LAYOUT_WEAK"); if (ThisFlag == BLOCK_BYREF_LAYOUT_UNRETAINED) printf(" BLOCK_BYREF_LAYOUT_UNRETAINED"); if (ThisFlag == BLOCK_BYREF_LAYOUT_NON_OBJECT) printf(" BLOCK_BYREF_LAYOUT_NON_OBJECT"); } printf("\n"); } } Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()), Builder.CreateStructGEP(nullptr, addr, 2, "byref.flags")); CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType); V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity()); Builder.CreateStore(V, Builder.CreateStructGEP(nullptr, addr, 3, "byref.size")); if (helpers) { llvm::Value *copy_helper = Builder.CreateStructGEP(nullptr, addr, 4); Builder.CreateStore(helpers->CopyHelper, copy_helper); llvm::Value *destroy_helper = Builder.CreateStructGEP(nullptr, addr, 5); 
Builder.CreateStore(helpers->DisposeHelper, destroy_helper); } if (ByRefHasLifetime && HasByrefExtendedLayout) { llvm::Constant* ByrefLayoutInfo = CGM.getObjCRuntime().BuildByrefLayout(CGM, type); llvm::Value *ByrefInfoAddr = Builder.CreateStructGEP(nullptr, addr, helpers ? 6 : 4, "byref.layout"); // cast destination to pointer to source type. llvm::Type *DesTy = ByrefLayoutInfo->getType(); DesTy = DesTy->getPointerTo(); llvm::Value *BC = Builder.CreatePointerCast(ByrefInfoAddr, DesTy); Builder.CreateStore(ByrefLayoutInfo, BC); } } void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) { llvm::Value *F = CGM.getBlockObjectDispose(); llvm::Value *args[] = { Builder.CreateBitCast(V, Int8PtrTy), llvm::ConstantInt::get(Int32Ty, flags.getBitMask()) }; EmitNounwindRuntimeCall(F, args); // FIXME: throwing destructors? } namespace { struct CallBlockRelease : EHScopeStack::Cleanup { llvm::Value *Addr; CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {} void Emit(CodeGenFunction &CGF, Flags flags) override { // Should we be passing FIELD_IS_WEAK here? CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF); } }; } /// Enter a cleanup to destroy a __block variable. Note that this /// cleanup should be a no-op if the variable hasn't left the stack /// yet; if a cleanup is required for the variable itself, that needs /// to be done externally. void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) { // We don't enter this cleanup if we're in pure-GC mode. if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) return; EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address); } /// Adjust the declaration of something from the blocks API. static void configureBlocksRuntimeObject(CodeGenModule &CGM, llvm::Constant *C) { if (!CGM.getLangOpts().BlocksRuntimeOptional) return; auto *GV = cast<llvm::GlobalValue>(C->stripPointerCasts()); if (GV->isDeclaration() && GV->hasExternalLinkage()) GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage); } llvm::Constant *CodeGenModule::getBlockObjectDispose() { if (BlockObjectDispose) return BlockObjectDispose; llvm::Type *args[] = { Int8PtrTy, Int32Ty }; llvm::FunctionType *fty = llvm::FunctionType::get(VoidTy, args, false); BlockObjectDispose = CreateRuntimeFunction(fty, "_Block_object_dispose"); configureBlocksRuntimeObject(*this, BlockObjectDispose); return BlockObjectDispose; } llvm::Constant *CodeGenModule::getBlockObjectAssign() { if (BlockObjectAssign) return BlockObjectAssign; llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty }; llvm::FunctionType *fty = llvm::FunctionType::get(VoidTy, args, false); BlockObjectAssign = CreateRuntimeFunction(fty, "_Block_object_assign"); configureBlocksRuntimeObject(*this, BlockObjectAssign); return BlockObjectAssign; } llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() { if (NSConcreteGlobalBlock) return NSConcreteGlobalBlock; NSConcreteGlobalBlock = GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock", Int8PtrTy->getPointerTo(), nullptr); configureBlocksRuntimeObject(*this, NSConcreteGlobalBlock); return NSConcreteGlobalBlock; } llvm::Constant *CodeGenModule::getNSConcreteStackBlock() { if (NSConcreteStackBlock) return NSConcreteStackBlock; NSConcreteStackBlock = GetOrCreateLLVMGlobal("_NSConcreteStackBlock", Int8PtrTy->getPointerTo(), nullptr); configureBlocksRuntimeObject(*this, NSConcreteStackBlock); return NSConcreteStackBlock; }
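// (Editorial example) Tying BuildByRefType and emitByrefStructureInit
// together: for `__block int x;` with no copy/dispose helpers, the code
// above materializes roughly the following, written here as a C sketch
// (the struct tag and initial values are illustrative, not emitted names):
//
//   struct __block_byref_x {
//     void *__isa;                           // 0 (1 only for GC __weak)
//     struct __block_byref_x *__forwarding;  // initially points at itself
//     int32_t __flags;                       // 0; BLOCK_BYREF_HAS_COPY_DISPOSE
//                                            // when helpers are present
//     int32_t __size;                        // sizeof(struct __block_byref_x)
//     int x;                                 // the variable itself
//   };
//
//   struct __block_byref_x x_byref =
//       { 0, &x_byref, 0, sizeof(struct __block_byref_x) };
//
// Every later access to `x` goes through x_byref.__forwarding, so reads and
// writes keep working after the runtime moves the structure to the heap.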
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGObjCMac.cpp
//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This provides Objective-C code generation targeting the Apple runtime. // //===----------------------------------------------------------------------===// #include "CGObjCRuntime.h" #include "CGBlocks.h" #include "CGCleanup.h" #include "CGRecordLayout.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtObjC.h" #include "clang/Basic/LangOptions.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallString.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Support/raw_ostream.h" #include <cstdio> using namespace clang; using namespace CodeGen; // HLSL Change Starts // No ObjC codegen support, so simply skip all of this compilation. // Here are enough stubs to link the current targets. #if 0 // HLSL Change Ends namespace { // FIXME: We should find a nicer way to make the labels for metadata, string // concatenation is lame. class ObjCCommonTypesHelper { protected: llvm::LLVMContext &VMContext; private: // The types of these functions don't really matter because we // should always bitcast before calling them. /// id objc_msgSend (id, SEL, ...) /// /// The default messenger, used for sends whose ABI is unchanged from /// the all-integer/pointer case. llvm::Constant *getMessageSendFn() const { // Add the non-lazy-bind attribute, since objc_msgSend is likely to // be called a lot. llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend", llvm::AttributeSet::get(CGM.getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::NonLazyBind)); } /// void objc_msgSend_stret (id, SEL, ...) /// /// The messenger used when the return value is an aggregate returned /// by indirect reference in the first argument, and therefore the /// self and selector parameters are shifted over by one. llvm::Constant *getMessageSendStretFn() const { llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, params, true), "objc_msgSend_stret"); } /// [double | long double] objc_msgSend_fpret(id self, SEL op, ...) /// /// The messenger used when the return value is returned on the x87 /// floating-point stack; without a special entrypoint, the nil case /// would be unbalanced. llvm::Constant *getMessageSendFpretFn() const { llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.DoubleTy, params, true), "objc_msgSend_fpret"); } /// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...) /// /// The messenger used when the return value is returned in two values on the /// x87 floating point stack; without a special entrypoint, the nil case /// would be unbalanced. Only used on 64-bit X86. 
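  // (Editorial aside) As the comment above notes, none of these prototypes
  // is the "real" type of any particular send: CodeGen bitcasts the chosen
  // messenger to the exact function type of the message before calling it.
  // A hand-written equivalent, assuming the usual <objc/message.h>
  // declarations, would be:
  //
  //   double (*send)(id, SEL) = (double (*)(id, SEL))objc_msgSend_fpret;
  //   double v = send(receiver, sel_registerName("doubleValue"));
  //
  // with _stret/_fpret/_fp2ret selected purely by the return-type ABI.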
llvm::Constant *getMessageSendFp2retFn() const { llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext); llvm::Type *resultType = llvm::StructType::get(longDoubleType, longDoubleType, nullptr); return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType, params, true), "objc_msgSend_fp2ret"); } /// id objc_msgSendSuper(struct objc_super *super, SEL op, ...) /// /// The messenger used for super calls, which have different dispatch /// semantics. The class passed is the superclass of the current /// class. llvm::Constant *getMessageSendSuperFn() const { llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper"); } /// id objc_msgSendSuper2(struct objc_super *super, SEL op, ...) /// /// A slightly different messenger used for super calls. The class /// passed is the current class. llvm::Constant *getMessageSendSuperFn2() const { llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper2"); } /// void objc_msgSendSuper_stret(void *stretAddr, struct objc_super *super, /// SEL op, ...) /// /// The messenger used for super calls which return an aggregate indirectly. llvm::Constant *getMessageSendSuperStretFn() const { llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, true), "objc_msgSendSuper_stret"); } /// void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super, /// SEL op, ...) /// /// objc_msgSendSuper_stret with the super2 semantics. llvm::Constant *getMessageSendSuperStretFn2() const { llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, true), "objc_msgSendSuper2_stret"); } llvm::Constant *getMessageSendSuperFpretFn() const { // There is no objc_msgSendSuper_fpret? How can that work? return getMessageSendSuperFn(); } llvm::Constant *getMessageSendSuperFpretFn2() const { // There is no objc_msgSendSuper_fpret? How can that work? return getMessageSendSuperFn2(); } protected: CodeGen::CodeGenModule &CGM; public: llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy; llvm::Type *Int8PtrTy, *Int8PtrPtrTy; llvm::Type *IvarOffsetVarTy; /// ObjectPtrTy - LLVM type for object handles (typeof(id)) llvm::Type *ObjectPtrTy; /// PtrObjectPtrTy - LLVM type for id * llvm::Type *PtrObjectPtrTy; /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL)) llvm::Type *SelectorPtrTy; private: /// ProtocolPtrTy - LLVM type for external protocol handles /// (typeof(Protocol)) llvm::Type *ExternalProtocolPtrTy; public: llvm::Type *getExternalProtocolPtrTy() { if (!ExternalProtocolPtrTy) { // FIXME: It would be nice to unify this with the opaque type, so that the // IR comes out a bit cleaner. CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType()); ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T); } return ExternalProtocolPtrTy; } // SuperCTy - clang type for struct objc_super. QualType SuperCTy; // SuperPtrCTy - clang type for struct objc_super *. QualType SuperPtrCTy; /// SuperTy - LLVM type for struct objc_super. llvm::StructType *SuperTy; /// SuperPtrTy - LLVM type for struct objc_super *. 
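  // (Editorial note) For reference, the runtime-side definition these
  // types mirror is:
  //
  //   struct objc_super { id receiver; Class super_class; };
  //
  // objc_msgSendSuper begins the method search at super_class rather than
  // at the receiver's dynamic class.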
llvm::Type *SuperPtrTy; /// PropertyTy - LLVM type for struct objc_property (struct _prop_t /// in GCC parlance). llvm::StructType *PropertyTy; /// PropertyListTy - LLVM type for struct objc_property_list /// (_prop_list_t in GCC parlance). llvm::StructType *PropertyListTy; /// PropertyListPtrTy - LLVM type for struct objc_property_list*. llvm::Type *PropertyListPtrTy; // MethodTy - LLVM type for struct objc_method. llvm::StructType *MethodTy; /// CacheTy - LLVM type for struct objc_cache. llvm::Type *CacheTy; /// CachePtrTy - LLVM type for struct objc_cache *. llvm::Type *CachePtrTy; llvm::Constant *getGetPropertyFn() { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // id objc_getProperty (id, SEL, ptrdiff_t, bool) SmallVector<CanQualType,4> Params; CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType()); CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType()); Params.push_back(IdType); Params.push_back(SelType); Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified()); Params.push_back(Ctx.BoolTy); llvm::FunctionType *FTy = Types.GetFunctionType(Types.arrangeLLVMFunctionInfo( IdType, false, false, Params, FunctionType::ExtInfo(), RequiredArgs::All)); return CGM.CreateRuntimeFunction(FTy, "objc_getProperty"); } llvm::Constant *getSetPropertyFn() { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool) SmallVector<CanQualType,6> Params; CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType()); CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType()); Params.push_back(IdType); Params.push_back(SelType); Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified()); Params.push_back(IdType); Params.push_back(Ctx.BoolTy); Params.push_back(Ctx.BoolTy); llvm::FunctionType *FTy = Types.GetFunctionType(Types.arrangeLLVMFunctionInfo( Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(), RequiredArgs::All)); return CGM.CreateRuntimeFunction(FTy, "objc_setProperty"); } llvm::Constant *getOptimizedSetPropertyFn(bool atomic, bool copy) { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // void objc_setProperty_atomic(id self, SEL _cmd, // id newValue, ptrdiff_t offset); // void objc_setProperty_nonatomic(id self, SEL _cmd, // id newValue, ptrdiff_t offset); // void objc_setProperty_atomic_copy(id self, SEL _cmd, // id newValue, ptrdiff_t offset); // void objc_setProperty_nonatomic_copy(id self, SEL _cmd, // id newValue, ptrdiff_t offset); SmallVector<CanQualType,4> Params; CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType()); CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType()); Params.push_back(IdType); Params.push_back(SelType); Params.push_back(IdType); Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified()); llvm::FunctionType *FTy = Types.GetFunctionType(Types.arrangeLLVMFunctionInfo( Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(), RequiredArgs::All)); const char *name; if (atomic && copy) name = "objc_setProperty_atomic_copy"; else if (atomic && !copy) name = "objc_setProperty_atomic"; else if (!atomic && copy) name = "objc_setProperty_nonatomic_copy"; else name = "objc_setProperty_nonatomic"; return CGM.CreateRuntimeFunction(FTy, name); } llvm::Constant *getCopyStructFn() { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // void objc_copyStruct (void *, const 
void *, size_t, bool, bool) SmallVector<CanQualType,5> Params; Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.LongTy); Params.push_back(Ctx.BoolTy); Params.push_back(Ctx.BoolTy); llvm::FunctionType *FTy = Types.GetFunctionType(Types.arrangeLLVMFunctionInfo( Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(), RequiredArgs::All)); return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct"); } /// This routine declares and returns address of: /// void objc_copyCppObjectAtomic( /// void *dest, const void *src, /// void (*copyHelper) (void *dest, const void *source)); llvm::Constant *getCppAtomicObjectFunction() { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); /// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper); SmallVector<CanQualType,3> Params; Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.VoidPtrTy); Params.push_back(Ctx.VoidPtrTy); llvm::FunctionType *FTy = Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(), RequiredArgs::All)); return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic"); } llvm::Constant *getEnumerationMutationFn() { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // void objc_enumerationMutation (id) SmallVector<CanQualType,1> Params; Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType())); llvm::FunctionType *FTy = Types.GetFunctionType(Types.arrangeLLVMFunctionInfo( Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(), RequiredArgs::All)); return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation"); } /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function. llvm::Constant *getGcReadWeakFn() { // id objc_read_weak (id *) llvm::Type *args[] = { ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_read_weak"); } /// GcAssignWeakFn -- LLVM objc_assign_weak function. llvm::Constant *getGcAssignWeakFn() { // id objc_assign_weak (id, id *) llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak"); } /// GcAssignGlobalFn -- LLVM objc_assign_global function. llvm::Constant *getGcAssignGlobalFn() { // id objc_assign_global(id, id *) llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_global"); } /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function. llvm::Constant *getGcAssignThreadLocalFn() { // id objc_assign_threadlocal(id src, id * dest) llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal"); } /// GcAssignIvarFn -- LLVM objc_assign_ivar function. llvm::Constant *getGcAssignIvarFn() { // id objc_assign_ivar(id, id *, ptrdiff_t) llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(), CGM.PtrDiffTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar"); } /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function. 
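  // (Editorial aside) Under -fobjc-gc, plain object-pointer stores are
  // replaced by the barrier calls declared above; illustratively:
  //
  //   g = v;           // global of object type -> objc_assign_global(v, &g)
  //   obj->ivar = v;   //                       -> objc_assign_ivar(v, obj, offset)
  //   __weak id w = v; //                       -> objc_assign_weak(v, &w)
  //
  // This is a sketch only; the real lowering is driven by the
  // EmitObjC*Assign entry points implemented later in this file.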
llvm::Constant *GcMemmoveCollectableFn() { // void *objc_memmove_collectable(void *dst, const void *src, size_t size) llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable"); } /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function. llvm::Constant *getGcAssignStrongCastFn() { // id objc_assign_strongCast(id, id *) llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() }; llvm::FunctionType *FTy = llvm::FunctionType::get(ObjectPtrTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast"); } /// ExceptionThrowFn - LLVM objc_exception_throw function. llvm::Constant *getExceptionThrowFn() { // void objc_exception_throw(id) llvm::Type *args[] = { ObjectPtrTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw"); } /// ExceptionRethrowFn - LLVM objc_exception_rethrow function. llvm::Constant *getExceptionRethrowFn() { // void objc_exception_rethrow(void) llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false); return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow"); } /// SyncEnterFn - LLVM objc_sync_enter function. llvm::Constant *getSyncEnterFn() { // int objc_sync_enter (id) llvm::Type *args[] = { ObjectPtrTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.IntTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter"); } /// SyncExitFn - LLVM objc_sync_exit function. llvm::Constant *getSyncExitFn() { // int objc_sync_exit (id) llvm::Type *args[] = { ObjectPtrTy }; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.IntTy, args, false); return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit"); } llvm::Constant *getSendFn(bool IsSuper) const { return IsSuper ? getMessageSendSuperFn() : getMessageSendFn(); } llvm::Constant *getSendFn2(bool IsSuper) const { return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn(); } llvm::Constant *getSendStretFn(bool IsSuper) const { return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn(); } llvm::Constant *getSendStretFn2(bool IsSuper) const { return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn(); } llvm::Constant *getSendFpretFn(bool IsSuper) const { return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn(); } llvm::Constant *getSendFpretFn2(bool IsSuper) const { return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn(); } llvm::Constant *getSendFp2retFn(bool IsSuper) const { return IsSuper ? getMessageSendSuperFn() : getMessageSendFp2retFn(); } llvm::Constant *getSendFp2RetFn2(bool IsSuper) const { return IsSuper ? getMessageSendSuperFn2() : getMessageSendFp2retFn(); } ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm); }; /// ObjCTypesHelper - Helper class that encapsulates lazy /// construction of various types used during ObjC generation. class ObjCTypesHelper : public ObjCCommonTypesHelper { public: /// SymtabTy - LLVM type for struct objc_symtab. llvm::StructType *SymtabTy; /// SymtabPtrTy - LLVM type for struct objc_symtab *. llvm::Type *SymtabPtrTy; /// ModuleTy - LLVM type for struct objc_module. llvm::StructType *ModuleTy; /// ProtocolTy - LLVM type for struct objc_protocol. llvm::StructType *ProtocolTy; /// ProtocolPtrTy - LLVM type for struct objc_protocol *. llvm::Type *ProtocolPtrTy; /// ProtocolExtensionTy - LLVM type for struct /// objc_protocol_extension.
llvm::StructType *ProtocolExtensionTy; /// ProtocolExtensionTy - LLVM type for struct /// objc_protocol_extension *. llvm::Type *ProtocolExtensionPtrTy; /// MethodDescriptionTy - LLVM type for struct /// objc_method_description. llvm::StructType *MethodDescriptionTy; /// MethodDescriptionListTy - LLVM type for struct /// objc_method_description_list. llvm::StructType *MethodDescriptionListTy; /// MethodDescriptionListPtrTy - LLVM type for struct /// objc_method_description_list *. llvm::Type *MethodDescriptionListPtrTy; /// ProtocolListTy - LLVM type for struct objc_property_list. llvm::StructType *ProtocolListTy; /// ProtocolListPtrTy - LLVM type for struct objc_property_list*. llvm::Type *ProtocolListPtrTy; /// CategoryTy - LLVM type for struct objc_category. llvm::StructType *CategoryTy; /// ClassTy - LLVM type for struct objc_class. llvm::StructType *ClassTy; /// ClassPtrTy - LLVM type for struct objc_class *. llvm::Type *ClassPtrTy; /// ClassExtensionTy - LLVM type for struct objc_class_ext. llvm::StructType *ClassExtensionTy; /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *. llvm::Type *ClassExtensionPtrTy; // IvarTy - LLVM type for struct objc_ivar. llvm::StructType *IvarTy; /// IvarListTy - LLVM type for struct objc_ivar_list. llvm::Type *IvarListTy; /// IvarListPtrTy - LLVM type for struct objc_ivar_list *. llvm::Type *IvarListPtrTy; /// MethodListTy - LLVM type for struct objc_method_list. llvm::Type *MethodListTy; /// MethodListPtrTy - LLVM type for struct objc_method_list *. llvm::Type *MethodListPtrTy; /// ExceptionDataTy - LLVM type for struct _objc_exception_data. llvm::Type *ExceptionDataTy; /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function. llvm::Constant *getExceptionTryEnterFn() { llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, false), "objc_exception_try_enter"); } /// ExceptionTryExitFn - LLVM objc_exception_try_exit function. llvm::Constant *getExceptionTryExitFn() { llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.VoidTy, params, false), "objc_exception_try_exit"); } /// ExceptionExtractFn - LLVM objc_exception_extract function. llvm::Constant *getExceptionExtractFn() { llvm::Type *params[] = { ExceptionDataTy->getPointerTo() }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, false), "objc_exception_extract"); } /// ExceptionMatchFn - LLVM objc_exception_match function. llvm::Constant *getExceptionMatchFn() { llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy }; return CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGM.Int32Ty, params, false), "objc_exception_match"); } /// SetJmpFn - LLVM _setjmp function. llvm::Constant *getSetJmpFn() { // This is specifically the prototype for x86. 
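    // (Editorial note) Together with the objc_exception_* entry points
    // above, _setjmp implements the fragile-ABI @try; schematically:
    //
    //   objc_exception_try_enter(&data);
    //   if (_setjmp(data.buf) == 0) { /* @try body */ }
    //   else { id e = objc_exception_extract(&data); /* test @catch */ }
    //
    // This sketches the control flow only (the field name `buf` is
    // illustrative), not the emitted IR.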
llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, params, false), "_setjmp", llvm::AttributeSet::get(CGM.getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::NonLazyBind)); } public: ObjCTypesHelper(CodeGen::CodeGenModule &cgm); }; /// ObjCNonFragileABITypesHelper - will have all types needed by objective-c's /// modern abi class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper { public: // MethodListnfABITy - LLVM for struct _method_list_t llvm::StructType *MethodListnfABITy; // MethodListnfABIPtrTy - LLVM for struct _method_list_t* llvm::Type *MethodListnfABIPtrTy; // ProtocolnfABITy = LLVM for struct _protocol_t llvm::StructType *ProtocolnfABITy; // ProtocolnfABIPtrTy = LLVM for struct _protocol_t* llvm::Type *ProtocolnfABIPtrTy; // ProtocolListnfABITy - LLVM for struct _objc_protocol_list llvm::StructType *ProtocolListnfABITy; // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list* llvm::Type *ProtocolListnfABIPtrTy; // ClassnfABITy - LLVM for struct _class_t llvm::StructType *ClassnfABITy; // ClassnfABIPtrTy - LLVM for struct _class_t* llvm::Type *ClassnfABIPtrTy; // IvarnfABITy - LLVM for struct _ivar_t llvm::StructType *IvarnfABITy; // IvarListnfABITy - LLVM for struct _ivar_list_t llvm::StructType *IvarListnfABITy; // IvarListnfABIPtrTy = LLVM for struct _ivar_list_t* llvm::Type *IvarListnfABIPtrTy; // ClassRonfABITy - LLVM for struct _class_ro_t llvm::StructType *ClassRonfABITy; // ImpnfABITy - LLVM for id (*)(id, SEL, ...) llvm::Type *ImpnfABITy; // CategorynfABITy - LLVM for struct _category_t llvm::StructType *CategorynfABITy; // New types for nonfragile abi messaging. // MessageRefTy - LLVM for: // struct _message_ref_t { // IMP messenger; // SEL name; // }; llvm::StructType *MessageRefTy; // MessageRefCTy - clang type for struct _message_ref_t QualType MessageRefCTy; // MessageRefPtrTy - LLVM for struct _message_ref_t* llvm::Type *MessageRefPtrTy; // MessageRefCPtrTy - clang type for struct _message_ref_t* QualType MessageRefCPtrTy; // MessengerTy - Type of the messenger (shown as IMP above) llvm::FunctionType *MessengerTy; // SuperMessageRefTy - LLVM for: // struct _super_message_ref_t { // SUPER_IMP messenger; // SEL name; // }; llvm::StructType *SuperMessageRefTy; // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t* llvm::Type *SuperMessageRefPtrTy; llvm::Constant *getMessageSendFixupFn() { // id objc_msgSend_fixup(id, struct message_ref_t*, ...) llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend_fixup"); } llvm::Constant *getMessageSendFpretFixupFn() { // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...) llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend_fpret_fixup"); } llvm::Constant *getMessageSendStretFixupFn() { // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...) llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSend_stret_fixup"); } llvm::Constant *getMessageSendSuper2FixupFn() { // id objc_msgSendSuper2_fixup (struct objc_super *, // struct _super_message_ref_t*, ...) 
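    // (Editorial aside) In the "fixup" scheme each call site owns a
    // writable message_ref_t { IMP messenger; SEL name; }, and the send is
    // lowered roughly to
    //   ref.messenger(receiver, &ref, args...);
    // so the runtime can repoint `messenger` at a faster entry after the
    // first dispatch. Sketch only; see EmitVTableMessageSend later on.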
llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper2_fixup"); } llvm::Constant *getMessageSendSuper2StretFixupFn() { // id objc_msgSendSuper2_stret_fixup(struct objc_super *, // struct _super_message_ref_t*, ...) llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy, params, true), "objc_msgSendSuper2_stret_fixup"); } llvm::Constant *getObjCEndCatchFn() { return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, false), "objc_end_catch"); } llvm::Constant *getObjCBeginCatchFn() { llvm::Type *params[] = { Int8PtrTy }; return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy, params, false), "objc_begin_catch"); } llvm::StructType *EHTypeTy; llvm::Type *EHTypePtrTy; ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm); }; class CGObjCCommonMac : public CodeGen::CGObjCRuntime { public: // FIXME - accessibility class GC_IVAR { public: unsigned ivar_bytepos; unsigned ivar_size; GC_IVAR(unsigned bytepos = 0, unsigned size = 0) : ivar_bytepos(bytepos), ivar_size(size) {} // Allow sorting based on byte pos. bool operator<(const GC_IVAR &b) const { return ivar_bytepos < b.ivar_bytepos; } }; class SKIP_SCAN { public: unsigned skip; unsigned scan; SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0) : skip(_skip), scan(_scan) {} }; /// opcode for captured block variables layout 'instructions'. /// In the following descriptions, 'I' is the value of the immediate field. /// (field following the opcode). /// enum BLOCK_LAYOUT_OPCODE { /// An operator which affects how the following layout should be /// interpreted. /// I == 0: Halt interpretation and treat everything else as /// a non-pointer. Note that this instruction is equal /// to '\0'. /// I != 0: Currently unused. BLOCK_LAYOUT_OPERATOR = 0, /// The next I+1 bytes do not contain a value of object pointer type. /// Note that this can leave the stream unaligned, meaning that /// subsequent word-size instructions do not begin at a multiple of /// the pointer size. BLOCK_LAYOUT_NON_OBJECT_BYTES = 1, /// The next I+1 words do not contain a value of object pointer type. /// This is simply an optimized version of BLOCK_LAYOUT_BYTES for /// when the required skip quantity is a multiple of the pointer size. BLOCK_LAYOUT_NON_OBJECT_WORDS = 2, /// The next I+1 words are __strong pointers to Objective-C /// objects or blocks. BLOCK_LAYOUT_STRONG = 3, /// The next I+1 words are pointers to __block variables. BLOCK_LAYOUT_BYREF = 4, /// The next I+1 words are __weak pointers to Objective-C /// objects or blocks. BLOCK_LAYOUT_WEAK = 5, /// The next I+1 words are __unsafe_unretained pointers to /// Objective-C objects or blocks. BLOCK_LAYOUT_UNRETAINED = 6 /// The next I+1 words are block or object pointers with some /// as-yet-unspecified ownership semantics. If we add more /// flavors of ownership semantics, values will be taken from /// this range. /// /// This is included so that older tools can at least continue /// processing the layout past such things. //BLOCK_LAYOUT_OWNERSHIP_UNKNOWN = 7..10, /// All other opcodes are reserved. Halt interpretation and /// treat everything else as opaque. 
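    // (Editorial worked example) In the inline encoding each byte carries an
    // opcode in its high nibble and the immediate I in its low nibble, and
    // per the descriptions above covers I+1 words. A block capturing one
    // __strong object, then two non-pointer words, then one __weak object,
    // would encode as:
    //   0x30  BLOCK_LAYOUT_STRONG,           I=0 -> 1 word
    //   0x21  BLOCK_LAYOUT_NON_OBJECT_WORDS, I=1 -> 2 words
    //   0x50  BLOCK_LAYOUT_WEAK,             I=0 -> 1 word
    //   0x00  BLOCK_LAYOUT_OPERATOR,         I=0 -> halt
    // (Illustrative; InlineLayoutInstruction below performs the real packing.)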
}; class RUN_SKIP { public: enum BLOCK_LAYOUT_OPCODE opcode; CharUnits block_var_bytepos; CharUnits block_var_size; RUN_SKIP(enum BLOCK_LAYOUT_OPCODE Opcode = BLOCK_LAYOUT_OPERATOR, CharUnits BytePos = CharUnits::Zero(), CharUnits Size = CharUnits::Zero()) : opcode(Opcode), block_var_bytepos(BytePos), block_var_size(Size) {} // Allow sorting based on byte pos. bool operator<(const RUN_SKIP &b) const { return block_var_bytepos < b.block_var_bytepos; } }; protected: llvm::LLVMContext &VMContext; // FIXME! May not be needing this after all. unsigned ObjCABI; // gc ivar layout bitmap calculation helper caches. SmallVector<GC_IVAR, 16> SkipIvars; SmallVector<GC_IVAR, 16> IvarsInfo; // arc/mrr layout of captured block literal variables. SmallVector<RUN_SKIP, 16> RunSkipBlockVars; /// LazySymbols - Symbols to generate a lazy reference for. See /// DefinedSymbols and FinishModule(). llvm::SetVector<IdentifierInfo*> LazySymbols; /// DefinedSymbols - External symbols which are defined by this /// module. The symbols in this list and LazySymbols are used to add /// special linker symbols which ensure that Objective-C modules are /// linked properly. llvm::SetVector<IdentifierInfo*> DefinedSymbols; /// ClassNames - uniqued class names. llvm::StringMap<llvm::GlobalVariable*> ClassNames; /// MethodVarNames - uniqued method variable names. llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames; /// DefinedCategoryNames - list of category names in form Class_Category. llvm::SetVector<std::string> DefinedCategoryNames; /// MethodVarTypes - uniqued method type signatures. We have to use /// a StringMap here because have no other unique reference. llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes; /// MethodDefinitions - map of methods which have been defined in /// this translation unit. llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions; /// PropertyNames - uniqued method variable names. llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames; /// ClassReferences - uniqued class references. llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences; /// SelectorReferences - uniqued selector references. llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences; /// Protocols - Protocols for which an objc_protocol structure has /// been emitted. Forward declarations are handled by creating an /// empty structure whose initializer is filled in when/if defined. llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols; /// DefinedProtocols - Protocols which have actually been /// defined. We should not need this, see FIXME in GenerateProtocol. llvm::DenseSet<IdentifierInfo*> DefinedProtocols; /// DefinedClasses - List of defined classes. SmallVector<llvm::GlobalValue*, 16> DefinedClasses; /// ImplementedClasses - List of @implemented classes. SmallVector<const ObjCInterfaceDecl*, 16> ImplementedClasses; /// DefinedNonLazyClasses - List of defined "non-lazy" classes. SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyClasses; /// DefinedCategories - List of defined categories. SmallVector<llvm::GlobalValue*, 16> DefinedCategories; /// DefinedNonLazyCategories - List of defined "non-lazy" categories. SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories; /// GetNameForMethod - Return a name for the given method. /// \param[out] NameOut - The return value. 
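  // (Editorial example) The names produced here follow the classic
  // Objective-C scheme: for -(void)setName:(id)n in
  // @implementation Person (Extras), the emitted function is named
  //   "\01-[Person(Extras) setName:]"
  // where the leading \01 tells LLVM's mangler to emit the name verbatim,
  // without the target's usual global prefix.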
void GetNameForMethod(const ObjCMethodDecl *OMD, const ObjCContainerDecl *CD, SmallVectorImpl<char> &NameOut); /// GetMethodVarName - Return a unique constant for the given /// selector's name. The return value has type char *. llvm::Constant *GetMethodVarName(Selector Sel); llvm::Constant *GetMethodVarName(IdentifierInfo *Ident); /// GetMethodVarType - Return a unique constant for the given /// method's type encoding string. The return value has type char *. // FIXME: This is a horrible name. llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D, bool Extended = false); llvm::Constant *GetMethodVarType(const FieldDecl *D); /// GetPropertyName - Return a unique constant for the given /// name. The return value has type char *. llvm::Constant *GetPropertyName(IdentifierInfo *Ident); // FIXME: This can be dropped once string functions are unified. llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD, const Decl *Container); /// GetClassName - Return a unique constant for the given selector's /// runtime name (which may change via use of objc_runtime_name attribute on /// class or protocol definition. The return value has type char *. llvm::Constant *GetClassName(StringRef RuntimeName); llvm::Function *GetMethodDefinition(const ObjCMethodDecl *MD); /// BuildIvarLayout - Builds ivar layout bitmap for the class /// implementation for the __strong or __weak case. /// llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI, bool ForStrongLayout); llvm::Constant *BuildIvarLayoutBitmap(std::string &BitMap); void BuildAggrIvarRecordLayout(const RecordType *RT, unsigned int BytePos, bool ForStrongLayout, bool &HasUnion); void BuildAggrIvarLayout(const ObjCImplementationDecl *OI, const llvm::StructLayout *Layout, const RecordDecl *RD, ArrayRef<const FieldDecl*> RecFields, unsigned int BytePos, bool ForStrongLayout, bool &HasUnion); Qualifiers::ObjCLifetime getBlockCaptureLifetime(QualType QT, bool ByrefLayout); void UpdateRunSkipBlockVars(bool IsByref, Qualifiers::ObjCLifetime LifeTime, CharUnits FieldOffset, CharUnits FieldSize); void BuildRCBlockVarRecordLayout(const RecordType *RT, CharUnits BytePos, bool &HasUnion, bool ByrefLayout=false); void BuildRCRecordLayout(const llvm::StructLayout *RecLayout, const RecordDecl *RD, ArrayRef<const FieldDecl*> RecFields, CharUnits BytePos, bool &HasUnion, bool ByrefLayout); uint64_t InlineLayoutInstruction(SmallVectorImpl<unsigned char> &Layout); llvm::Constant *getBitmapBlockLayout(bool ComputeByrefLayout); /// GetIvarLayoutName - Returns a unique constant for the given /// ivar layout bitmap. llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident, const ObjCCommonTypesHelper &ObjCTypes); /// EmitPropertyList - Emit the given property list. The return /// value has type PropertyListPtrTy. llvm::Constant *EmitPropertyList(Twine Name, const Decl *Container, const ObjCContainerDecl *OCD, const ObjCCommonTypesHelper &ObjCTypes); /// EmitProtocolMethodTypes - Generate the array of extended method type /// strings. The return value has type Int8PtrPtrTy. llvm::Constant *EmitProtocolMethodTypes(Twine Name, ArrayRef<llvm::Constant*> MethodTypes, const ObjCCommonTypesHelper &ObjCTypes); /// PushProtocolProperties - Push protocol's property on the input stack. 
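  // (Editorial worked example for the BuildIvarLayout family above) The GC
  // ivar layout is a nul-terminated string of skip/scan bytes counted in
  // pointer-sized words: high nibble = words to skip, low nibble = words to
  // scan. Assuming 64-bit words, an interface laid out as
  //
  //   @interface T : NSObject { void *raw; id a; id b; }
  //
  // would get the __strong layout bytes { 0x12, 0x00 }: skip 1 word, scan 2.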
void PushProtocolProperties( llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet, SmallVectorImpl<llvm::Constant*> &Properties, const Decl *Container, const ObjCProtocolDecl *Proto, const ObjCCommonTypesHelper &ObjCTypes); /// GetProtocolRef - Return a reference to the internal protocol /// description, creating an empty one if it has not been /// defined. The return value has type ProtocolPtrTy. llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD); /// CreateMetadataVar - Create a global variable with internal /// linkage for use by the Objective-C runtime. /// /// This is a convenience wrapper which not only creates the /// variable, but also sets the section and alignment and adds the /// global to the "llvm.used" list. /// /// \param Name - The variable name. /// \param Init - The variable initializer; this is also used to /// define the type of the variable. /// \param Section - The section the variable should go into, or empty. /// \param Align - The alignment for the variable, or 0. /// \param AddToUsed - Whether the variable should be added to /// "llvm.used". llvm::GlobalVariable *CreateMetadataVar(Twine Name, llvm::Constant *Init, StringRef Section, unsigned Align, bool AddToUsed); CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, llvm::Value *Sel, llvm::Value *Arg0, QualType Arg0Ty, bool IsSuper, const CallArgList &CallArgs, const ObjCMethodDecl *OMD, const ObjCCommonTypesHelper &ObjCTypes); /// EmitImageInfo - Emit the image info marker used to encode some module /// level information. void EmitImageInfo(); public: CGObjCCommonMac(CodeGen::CodeGenModule &cgm) : CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { } llvm::Constant *GenerateConstantString(const StringLiteral *SL) override; llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD, const ObjCContainerDecl *CD=nullptr) override; void GenerateProtocol(const ObjCProtocolDecl *PD) override; /// GetOrEmitProtocol - Get the protocol object for the given /// declaration, emitting it if necessary. The return value has type /// ProtocolPtrTy. virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0; /// GetOrEmitProtocolRef - Get a forward reference to the protocol /// object for the given declaration, emitting it if needed. These /// forward references will be filled in with empty bodies if no /// definition is seen. The return value has type ProtocolPtrTy. virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0; llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM, const CGBlockInfo &blockInfo) override; llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM, const CGBlockInfo &blockInfo) override; llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM, QualType T) override; }; class CGObjCMac : public CGObjCCommonMac { private: ObjCTypesHelper ObjCTypes; /// EmitModuleInfo - Another marker encoding module level /// information. void EmitModuleInfo(); /// EmitModuleSymols - Emit module symbols, the list of defined /// classes and categories. The result has type SymtabPtrTy. llvm::Constant *EmitModuleSymbols(); /// FinishModule - Write out global data structures at the end of /// processing a translation unit. void FinishModule(); /// EmitClassExtension - Generate the class extension structure used /// to store the weak ivar layout and properties. The return value /// has type ClassExtensionPtrTy. 
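  // (Editorial sketch) Metadata structures like this one are emitted through
  // CreateMetadataVar above, which wraps the usual LLVM pattern; roughly,
  // with `Init`, `Section`, `Align`, and `AddToUsed` as documented there:
  //
  //   auto *GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
  //                                       /*isConstant=*/false,
  //                                       llvm::GlobalValue::InternalLinkage,
  //                                       Init, Name);
  //   if (!Section.empty()) GV->setSection(Section);
  //   if (Align) GV->setAlignment(Align);
  //   if (AddToUsed) CGM.addCompilerUsedGlobal(GV);  // keep it past DCE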
llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID); /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy, /// for the given class. llvm::Value *EmitClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID); llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF, IdentifierInfo *II); llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override; /// EmitSuperClassRef - Emits reference to class's main metadata class. llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID); /// EmitIvarList - Emit the ivar list for the given /// implementation. If ForClass is true the list of class ivars /// (i.e. metaclass ivars) is emitted, otherwise the list of /// interface ivars will be emitted. The return value has type /// IvarListPtrTy. llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID, bool ForClass); /// EmitMetaClass - Emit a forward reference to the class structure /// for the metaclass of the given interface. The return value has /// type ClassPtrTy. llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID); /// EmitMetaClass - Emit a class structure for the metaclass of the /// given implementation. The return value has type ClassPtrTy. llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID, llvm::Constant *Protocols, ArrayRef<llvm::Constant*> Methods); llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD); llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD); /// EmitMethodList - Emit the method list for the given /// implementation. The return value has type MethodListPtrTy. llvm::Constant *EmitMethodList(Twine Name, const char *Section, ArrayRef<llvm::Constant*> Methods); /// EmitMethodDescList - Emit a method description list for a list of /// method declarations. /// - TypeName: The name for the type containing the methods. /// - IsProtocol: True iff these methods are for a protocol. /// - ClassMethds: True iff these are class methods. /// - Required: When true, only "required" methods are /// listed. Similarly, when false only "optional" methods are /// listed. For classes this should always be true. /// - begin, end: The method list to output. /// /// The return value has type MethodDescriptionListPtrTy. llvm::Constant *EmitMethodDescList(Twine Name, const char *Section, ArrayRef<llvm::Constant*> Methods); /// GetOrEmitProtocol - Get the protocol object for the given /// declaration, emitting it if necessary. The return value has type /// ProtocolPtrTy. llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override; /// GetOrEmitProtocolRef - Get a forward reference to the protocol /// object for the given declaration, emitting it if needed. These /// forward references will be filled in with empty bodies if no /// definition is seen. The return value has type ProtocolPtrTy. llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) override; /// EmitProtocolExtension - Generate the protocol extension /// structure used to store optional instance and class methods, and /// protocol properties. The return value has type /// ProtocolExtensionPtrTy. llvm::Constant * EmitProtocolExtension(const ObjCProtocolDecl *PD, ArrayRef<llvm::Constant*> OptInstanceMethods, ArrayRef<llvm::Constant*> OptClassMethods, ArrayRef<llvm::Constant*> MethodTypesExt); /// EmitProtocolList - Generate the list of referenced /// protocols. The return value has type ProtocolListPtrTy. 
llvm::Constant *EmitProtocolList(Twine Name, ObjCProtocolDecl::protocol_iterator begin, ObjCProtocolDecl::protocol_iterator end); /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, /// for the given selector. llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel, bool lval=false); public: CGObjCMac(CodeGen::CodeGenModule &cgm); llvm::Function *ModuleInitFunction() override; CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, llvm::Value *Receiver, const CallArgList &CallArgs, const ObjCInterfaceDecl *Class, const ObjCMethodDecl *Method) override; CodeGen::RValue GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, const ObjCInterfaceDecl *Class, bool isCategoryImpl, llvm::Value *Receiver, bool IsClassMessage, const CallArgList &CallArgs, const ObjCMethodDecl *Method) override; llvm::Value *GetClass(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID) override; llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel, bool lval = false) override; /// The NeXT/Apple runtimes do not support typed selectors; just emit an /// untyped one. llvm::Value *GetSelector(CodeGenFunction &CGF, const ObjCMethodDecl *Method) override; llvm::Constant *GetEHType(QualType T) override; void GenerateCategory(const ObjCCategoryImplDecl *CMD) override; void GenerateClass(const ObjCImplementationDecl *ClassDecl) override; void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override {} llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF, const ObjCProtocolDecl *PD) override; llvm::Constant *GetPropertyGetFunction() override; llvm::Constant *GetPropertySetFunction() override; llvm::Constant *GetOptimizedPropertySetFunction(bool atomic, bool copy) override; llvm::Constant *GetGetStructFunction() override; llvm::Constant *GetSetStructFunction() override; llvm::Constant *GetCppAtomicObjectGetFunction() override; llvm::Constant *GetCppAtomicObjectSetFunction() override; llvm::Constant *EnumerationMutationFunction() override; void EmitTryStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtTryStmt &S) override; void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtSynchronizedStmt &S) override; void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S); void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S, bool ClearInsertionPoint=true) override; llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, llvm::Value *AddrWeakObj) override; void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst) override; void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest, bool threadlocal = false) override; void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest, llvm::Value *ivarOffset) override; void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest) override; void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *dest, llvm::Value *src, llvm::Value *size) override; LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) override; llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) override; /// GetClassGlobal - Return the global variable for the Objective-C /// class of the given name. 
llvm::GlobalVariable *GetClassGlobal(const std::string &Name, bool Weak = false) override { llvm_unreachable("CGObjCMac::GetClassGlobal"); } }; class CGObjCNonFragileABIMac : public CGObjCCommonMac { private: ObjCNonFragileABITypesHelper ObjCTypes; llvm::GlobalVariable* ObjCEmptyCacheVar; llvm::GlobalVariable* ObjCEmptyVtableVar; /// SuperClassReferences - uniqued super class references. llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences; /// MetaClassReferences - uniqued meta class references. llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences; /// EHTypeReferences - uniqued class ehtype references. llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences; /// VTableDispatchMethods - List of methods for which we generate /// vtable-based message dispatch. llvm::DenseSet<Selector> VTableDispatchMethods; /// DefinedMetaClasses - List of defined meta-classes. std::vector<llvm::GlobalValue*> DefinedMetaClasses; /// isVTableDispatchedSelector - Returns true if SEL is a /// vtable-based selector. bool isVTableDispatchedSelector(Selector Sel); /// FinishNonFragileABIModule - Write out global data structures at the end of /// processing a translation unit. void FinishNonFragileABIModule(); /// AddModuleClassList - Add the given list of class pointers to the /// module with the provided symbol and section names. void AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container, const char *SymbolName, const char *SectionName); llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags, unsigned InstanceStart, unsigned InstanceSize, const ObjCImplementationDecl *ID); llvm::GlobalVariable * BuildClassMetaData(const std::string &ClassName, llvm::Constant *IsAGV, llvm::Constant *SuperClassGV, llvm::Constant *ClassRoGV, bool HiddenVisibility, bool Weak); llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD); llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD); /// EmitMethodList - Emit the method list for the given /// implementation. The return value has type MethodListnfABITy. llvm::Constant *EmitMethodList(Twine Name, const char *Section, ArrayRef<llvm::Constant*> Methods); /// EmitIvarList - Emit the ivar list for the given /// implementation. If ForClass is true the list of class ivars /// (i.e. metaclass ivars) is emitted, otherwise the list of /// interface ivars will be emitted. The return value has type /// IvarListnfABIPtrTy. llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID); llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID, const ObjCIvarDecl *Ivar, unsigned long int offset); /// GetOrEmitProtocol - Get the protocol object for the given /// declaration, emitting it if necessary. The return value has type /// ProtocolPtrTy. llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD) override; /// GetOrEmitProtocolRef - Get a forward reference to the protocol /// object for the given declaration, emitting it if needed. These /// forward references will be filled in with empty bodies if no /// definition is seen. The return value has type ProtocolPtrTy. llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) override; /// EmitProtocolList - Generate the list of referenced /// protocols. The return value has type ProtocolListPtrTy. 
llvm::Constant *EmitProtocolList(Twine Name, ObjCProtocolDecl::protocol_iterator begin, ObjCProtocolDecl::protocol_iterator end); CodeGen::RValue EmitVTableMessageSend(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, llvm::Value *Receiver, QualType Arg0Ty, bool IsSuper, const CallArgList &CallArgs, const ObjCMethodDecl *Method); /// GetClassGlobal - Return the global variable for the Objective-C /// class of the given name. llvm::GlobalVariable *GetClassGlobal(const std::string &Name, bool Weak = false) override; /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy, /// for the given class reference. llvm::Value *EmitClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID); llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF, IdentifierInfo *II, bool Weak, const ObjCInterfaceDecl *ID); llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override; /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy, /// for the given super class reference. llvm::Value *EmitSuperClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID); /// EmitMetaClassRef - Return a Value * of the address of _class_t /// meta-data llvm::Value *EmitMetaClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID, bool Weak); /// ObjCIvarOffsetVariable - Returns the ivar offset variable for /// the given ivar. /// llvm::GlobalVariable * ObjCIvarOffsetVariable( const ObjCInterfaceDecl *ID, const ObjCIvarDecl *Ivar); /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy, /// for the given selector. llvm::Value *EmitSelector(CodeGenFunction &CGF, Selector Sel, bool lval=false); /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C /// interface. The return value has type EHTypePtrTy. llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID, bool ForDefinition); const char *getMetaclassSymbolPrefix() const { return "OBJC_METACLASS_$_"; } const char *getClassSymbolPrefix() const { return "OBJC_CLASS_$_"; } void GetClassSizeInfo(const ObjCImplementationDecl *OID, uint32_t &InstanceStart, uint32_t &InstanceSize); // Shamelessly stolen from Analysis/CFRefCount.cpp Selector GetNullarySelector(const char* name) const { IdentifierInfo* II = &CGM.getContext().Idents.get(name); return CGM.getContext().Selectors.getSelector(0, &II); } Selector GetUnarySelector(const char* name) const { IdentifierInfo* II = &CGM.getContext().Idents.get(name); return CGM.getContext().Selectors.getSelector(1, &II); } /// ImplementationIsNonLazy - Check whether the given category or /// class implementation is "non-lazy". bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const; bool IsIvarOffsetKnownIdempotent(const CodeGen::CodeGenFunction &CGF, const ObjCIvarDecl *IV) { // Annotate the load as an invariant load iff inside an instance method // and ivar belongs to instance method's class and one of its super class. // This check is needed because the ivar offset is a lazily // initialised value that may depend on objc_msgSend to perform a fixup on // the first message dispatch. // // An additional opportunity to mark the load as invariant arises when the // base of the ivar access is a parameter to an Objective C method. // However, because the parameters are not available in the current // interface, we cannot perform this check. 
if (const ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(CGF.CurFuncDecl)) if (MD->isInstanceMethod()) if (const ObjCInterfaceDecl *ID = MD->getClassInterface()) return IV->getContainingInterface()->isSuperClassOf(ID); return false; } public: CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm); // FIXME. All stubs for now! llvm::Function *ModuleInitFunction() override; CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, llvm::Value *Receiver, const CallArgList &CallArgs, const ObjCInterfaceDecl *Class, const ObjCMethodDecl *Method) override; CodeGen::RValue GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, const ObjCInterfaceDecl *Class, bool isCategoryImpl, llvm::Value *Receiver, bool IsClassMessage, const CallArgList &CallArgs, const ObjCMethodDecl *Method) override; llvm::Value *GetClass(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID) override; llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel, bool lvalue = false) override { return EmitSelector(CGF, Sel, lvalue); } /// The NeXT/Apple runtimes do not support typed selectors; just emit an /// untyped one. llvm::Value *GetSelector(CodeGenFunction &CGF, const ObjCMethodDecl *Method) override { return EmitSelector(CGF, Method->getSelector()); } void GenerateCategory(const ObjCCategoryImplDecl *CMD) override; void GenerateClass(const ObjCImplementationDecl *ClassDecl) override; void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override {} llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF, const ObjCProtocolDecl *PD) override; llvm::Constant *GetEHType(QualType T) override; llvm::Constant *GetPropertyGetFunction() override { return ObjCTypes.getGetPropertyFn(); } llvm::Constant *GetPropertySetFunction() override { return ObjCTypes.getSetPropertyFn(); } llvm::Constant *GetOptimizedPropertySetFunction(bool atomic, bool copy) override { return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy); } llvm::Constant *GetSetStructFunction() override { return ObjCTypes.getCopyStructFn(); } llvm::Constant *GetGetStructFunction() override { return ObjCTypes.getCopyStructFn(); } llvm::Constant *GetCppAtomicObjectSetFunction() override { return ObjCTypes.getCppAtomicObjectFunction(); } llvm::Constant *GetCppAtomicObjectGetFunction() override { return ObjCTypes.getCppAtomicObjectFunction(); } llvm::Constant *EnumerationMutationFunction() override { return ObjCTypes.getEnumerationMutationFn(); } void EmitTryStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtTryStmt &S) override; void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtSynchronizedStmt &S) override; void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S, bool ClearInsertionPoint=true) override; llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, llvm::Value *AddrWeakObj) override; void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst) override; void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest, bool threadlocal = false) override; void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest, llvm::Value *ivarOffset) override; void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest) override; void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *dest, llvm::Value *src, llvm::Value *size) override; LValue 
EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) override; llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) override; }; /// A helper class for performing the null-initialization of a return /// value. struct NullReturnState { llvm::BasicBlock *NullBB; NullReturnState() : NullBB(nullptr) {} /// Perform a null-check of the given receiver. void init(CodeGenFunction &CGF, llvm::Value *receiver) { // Make blocks for the null-receiver and call edges. NullBB = CGF.createBasicBlock("msgSend.null-receiver"); llvm::BasicBlock *callBB = CGF.createBasicBlock("msgSend.call"); // Check for a null receiver and, if there is one, jump to the // null-receiver block. There's no point in trying to avoid it: // we're always going to put *something* there, because otherwise // we shouldn't have done this null-check in the first place. llvm::Value *isNull = CGF.Builder.CreateIsNull(receiver); CGF.Builder.CreateCondBr(isNull, NullBB, callBB); // Otherwise, start performing the call. CGF.EmitBlock(callBB); } /// Complete the null-return operation. It is valid to call this /// regardless of whether 'init' has been called. RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType, const CallArgList &CallArgs, const ObjCMethodDecl *Method) { // If we never had to do a null-check, just use the raw result. if (!NullBB) return result; // The continuation block. This will be left null if we don't have an // IP, which can happen if the method we're calling is marked noreturn. llvm::BasicBlock *contBB = nullptr; // Finish the call path. llvm::BasicBlock *callBB = CGF.Builder.GetInsertBlock(); if (callBB) { contBB = CGF.createBasicBlock("msgSend.cont"); CGF.Builder.CreateBr(contBB); } // Okay, start emitting the null-receiver block. CGF.EmitBlock(NullBB); // Release any consumed arguments we've got. if (Method) { CallArgList::const_iterator I = CallArgs.begin(); for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(), e = Method->param_end(); i != e; ++i, ++I) { const ParmVarDecl *ParamDecl = (*i); if (ParamDecl->hasAttr<NSConsumedAttr>()) { RValue RV = I->RV; assert(RV.isScalar() && "NullReturnState::complete - arg not on object"); CGF.EmitARCRelease(RV.getScalarVal(), ARCImpreciseLifetime); } } } // The phi code below assumes that we haven't needed any control flow yet. assert(CGF.Builder.GetInsertBlock() == NullBB); // If we've got a void return, just jump to the continuation block. if (result.isScalar() && resultType->isVoidType()) { // No jumps required if the message-send was noreturn. if (contBB) CGF.EmitBlock(contBB); return result; } // If we've got a scalar return, build a phi. if (result.isScalar()) { // Derive the null-initialization value. llvm::Constant *null = CGF.CGM.EmitNullConstant(resultType); // If no join is necessary, just flow out. if (!contBB) return RValue::get(null); // Otherwise, build a phi. CGF.EmitBlock(contBB); llvm::PHINode *phi = CGF.Builder.CreatePHI(null->getType(), 2); phi->addIncoming(result.getScalarVal(), callBB); phi->addIncoming(null, NullBB); return RValue::get(phi); } // If we've got an aggregate return, null the buffer out. // FIXME: maybe we should be doing things differently for all the // cases where the ABI has us returning (1) non-agg values in // memory or (2) agg values in registers. 
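// ------------------------------------------------------------------------
// [Editorial sketch - not part of the original file; kept out of the build
// with #if 0.] The branch/phi structure NullReturnState builds is the IR
// form of the usual "messaging nil yields zero" rule: scalars get 0,
// aggregates get a zero-filled buffer, complex values get (0, 0). A minimal
// source-level model of that rule, with hypothetical names:
#if 0
#include <cassert>

struct Widget { int a; double b; };          // stands in for an aggregate

struct Receiver {
  int scalarMessage() const { return 42; }
  Widget aggregateMessage() const { return Widget{1, 2.0}; }
};

// A null receiver short-circuits to a zero value of the result type
// instead of calling through -- the 'phi' merges the two paths.
int sendScalar(const Receiver *self) {
  return self ? self->scalarMessage() : 0;
}

Widget sendAggregate(const Receiver *self) {
  return self ? self->aggregateMessage() : Widget{};  // zero-filled
}

int main() {
  Receiver r;
  assert(sendScalar(nullptr) == 0 && sendScalar(&r) == 42);
  assert(sendAggregate(nullptr).a == 0);
  return 0;
}
#endif
// ------------------------------------------------------------------------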
if (result.isAggregate()) { assert(result.isAggregate() && "null init of non-aggregate result?"); CGF.EmitNullInitialization(result.getAggregateAddr(), resultType); if (contBB) CGF.EmitBlock(contBB); return result; } // Complex types. CGF.EmitBlock(contBB); CodeGenFunction::ComplexPairTy callResult = result.getComplexVal(); // Find the scalar type and its zero value. llvm::Type *scalarTy = callResult.first->getType(); llvm::Constant *scalarZero = llvm::Constant::getNullValue(scalarTy); // Build phis for both coordinates. llvm::PHINode *real = CGF.Builder.CreatePHI(scalarTy, 2); real->addIncoming(callResult.first, callBB); real->addIncoming(scalarZero, NullBB); llvm::PHINode *imag = CGF.Builder.CreatePHI(scalarTy, 2); imag->addIncoming(callResult.second, callBB); imag->addIncoming(scalarZero, NullBB); return RValue::getComplex(real, imag); } }; } // end anonymous namespace /* *** Helper Functions *** */ /// getConstantGEP() - Help routine to construct simple GEPs. static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext, llvm::GlobalVariable *C, unsigned idx0, unsigned idx1) { llvm::Value *Idxs[] = { llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0), llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1) }; return llvm::ConstantExpr::getGetElementPtr(C->getValueType(), C, Idxs); } /// hasObjCExceptionAttribute - Return true if this class or any super /// class has the __objc_exception__ attribute. static bool hasObjCExceptionAttribute(ASTContext &Context, const ObjCInterfaceDecl *OID) { if (OID->hasAttr<ObjCExceptionAttr>()) return true; if (const ObjCInterfaceDecl *Super = OID->getSuperClass()) return hasObjCExceptionAttribute(Context, Super); return false; } /* *** CGObjCMac Public Interface *** */ CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm), ObjCTypes(cgm) { ObjCABI = 1; EmitImageInfo(); } /// GetClass - Return a reference to the class for the given interface /// decl. llvm::Value *CGObjCMac::GetClass(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID) { return EmitClassRef(CGF, ID); } /// GetSelector - Return the pointer to the unique'd string for this selector. llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, Selector Sel, bool lval) { return EmitSelector(CGF, Sel, lval); } llvm::Value *CGObjCMac::GetSelector(CodeGenFunction &CGF, const ObjCMethodDecl *Method) { return EmitSelector(CGF, Method->getSelector()); } llvm::Constant *CGObjCMac::GetEHType(QualType T) { if (T->isObjCIdType() || T->isObjCQualifiedIdType()) { return CGM.GetAddrOfRTTIDescriptor( CGM.getContext().getObjCIdRedefinitionType(), /*ForEH=*/true); } if (T->isObjCClassType() || T->isObjCQualifiedClassType()) { return CGM.GetAddrOfRTTIDescriptor( CGM.getContext().getObjCClassRedefinitionType(), /*ForEH=*/true); } if (T->isObjCObjectPointerType()) return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true); llvm_unreachable("asking for catch type for ObjC type in fragile runtime"); } /// Generate a constant CFString object. /* struct __builtin_CFString { const int *isa; // point to __CFConstantStringClassReference int flags; const char *str; long length; }; */ /// or Generate a constant NSString object. /* struct __builtin_NSString { const int *isa; // point to __NSConstantStringClassReference const char *str; unsigned int length; }; */ llvm::Constant *CGObjCCommonMac::GenerateConstantString( const StringLiteral *SL) { return (CGM.getLangOpts().NoConstantCFStrings == 0 ? 
CGM.GetAddrOfConstantCFString(SL) : CGM.GetAddrOfConstantString(SL)); } enum { kCFTaggedObjectID_Integer = (1 << 1) + 1 }; /// Generates a message send where the super is the receiver. This is /// a message send to self with special delivery semantics indicating /// which class's method should be called. CodeGen::RValue CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, const ObjCInterfaceDecl *Class, bool isCategoryImpl, llvm::Value *Receiver, bool IsClassMessage, const CodeGen::CallArgList &CallArgs, const ObjCMethodDecl *Method) { // Create and init a super structure; this is a (receiver, class) // pair we will pass to objc_msgSendSuper. llvm::Value *ObjCSuper = CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super"); llvm::Value *ReceiverAsObject = CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy); CGF.Builder.CreateStore( ReceiverAsObject, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0)); // If this is a class message the metaclass is passed as the target. llvm::Value *Target; if (IsClassMessage) { if (isCategoryImpl) { // Message sent to 'super' in a class method defined in a category // implementation requires an odd treatment. // If we are in a class method, we must retrieve the // _metaclass_ for the current class, pointed at by // the class's "isa" pointer. The following assumes that // isa" is the first ivar in a class (which it must be). Target = EmitClassRef(CGF, Class->getSuperClass()); Target = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, Target, 0); Target = CGF.Builder.CreateLoad(Target); } else { llvm::Constant *MetaClassPtr = EmitMetaClassRef(Class); llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, MetaClassPtr, 1); llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr); Target = Super; } } else if (isCategoryImpl) Target = EmitClassRef(CGF, Class->getSuperClass()); else { llvm::Value *ClassPtr = EmitSuperClassRef(Class); ClassPtr = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, ClassPtr, 1); Target = CGF.Builder.CreateLoad(ClassPtr); } // FIXME: We shouldn't need to do this cast, rectify the ASTContext and // ObjCTypes types. llvm::Type *ClassTy = CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType()); Target = CGF.Builder.CreateBitCast(Target, ClassTy); CGF.Builder.CreateStore( Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1)); return EmitMessageSend(CGF, Return, ResultType, EmitSelector(CGF, Sel), ObjCSuper, ObjCTypes.SuperPtrCTy, true, CallArgs, Method, ObjCTypes); } /// Generate code for a message send expression. 
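// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] The temporary filled in by
// GenerateMessageSendSuper above has the shape of the runtime's
// (receiver, class) pair: word 0 is the receiver, word 1 is the class where
// method lookup should *start* (the superclass for instance messages, the
// superclass's metaclass for class messages). The struct below is an
// illustrative mock, not the runtime's actual declaration.
#if 0
struct objc_super_sketch {
  void *receiver;     // 'self', cast to the generic object type
  void *super_class;  // lookup start point, not the receiver's own isa
};
#endif
// ------------------------------------------------------------------------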
CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, llvm::Value *Receiver, const CallArgList &CallArgs, const ObjCInterfaceDecl *Class, const ObjCMethodDecl *Method) { return EmitMessageSend(CGF, Return, ResultType, EmitSelector(CGF, Sel), Receiver, CGF.getContext().getObjCIdType(), false, CallArgs, Method, ObjCTypes); } CodeGen::RValue CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, llvm::Value *Sel, llvm::Value *Arg0, QualType Arg0Ty, bool IsSuper, const CallArgList &CallArgs, const ObjCMethodDecl *Method, const ObjCCommonTypesHelper &ObjCTypes) { CallArgList ActualArgs; if (!IsSuper) Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy); ActualArgs.add(RValue::get(Arg0), Arg0Ty); ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType()); ActualArgs.addFrom(CallArgs); // If we're calling a method, use the formal signature. MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs); if (Method) assert(CGM.getContext().getCanonicalType(Method->getReturnType()) == CGM.getContext().getCanonicalType(ResultType) && "Result type mismatch!"); NullReturnState nullReturn; llvm::Constant *Fn = nullptr; if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) { if (!IsSuper) nullReturn.init(CGF, Arg0); Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper) : ObjCTypes.getSendStretFn(IsSuper); } else if (CGM.ReturnTypeUsesFPRet(ResultType)) { Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper) : ObjCTypes.getSendFpretFn(IsSuper); } else if (CGM.ReturnTypeUsesFP2Ret(ResultType)) { Fn = (ObjCABI == 2) ? ObjCTypes.getSendFp2RetFn2(IsSuper) : ObjCTypes.getSendFp2retFn(IsSuper); } else { // arm64 uses objc_msgSend for stret methods and yet null receiver check // must be made for it. if (!IsSuper && CGM.ReturnTypeUsesSRet(MSI.CallInfo)) nullReturn.init(CGF, Arg0); Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper) : ObjCTypes.getSendFn(IsSuper); } bool requiresnullCheck = false; if (CGM.getLangOpts().ObjCAutoRefCount && Method) for (const auto *ParamDecl : Method->params()) { if (ParamDecl->hasAttr<NSConsumedAttr>()) { if (!nullReturn.NullBB) nullReturn.init(CGF, Arg0); requiresnullCheck = true; break; } } Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType); RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs); return nullReturn.complete(CGF, rvalue, ResultType, CallArgs, requiresnullCheck ? 
Method : nullptr); } static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) { if (FQT.isObjCGCStrong()) return Qualifiers::Strong; if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak) return Qualifiers::Weak; // check for __unsafe_unretained if (FQT.getObjCLifetime() == Qualifiers::OCL_ExplicitNone) return Qualifiers::GCNone; if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType()) return Qualifiers::Strong; if (const PointerType *PT = FQT->getAs<PointerType>()) return GetGCAttrTypeForType(Ctx, PT->getPointeeType()); return Qualifiers::GCNone; } llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM, const CGBlockInfo &blockInfo) { llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy); if (CGM.getLangOpts().getGC() == LangOptions::NonGC && !CGM.getLangOpts().ObjCAutoRefCount) return nullPtr; bool hasUnion = false; SkipIvars.clear(); IvarsInfo.clear(); unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0); unsigned ByteSizeInBits = CGM.getTarget().getCharWidth(); // __isa is the first field in block descriptor and must assume by runtime's // convention that it is GC'able. IvarsInfo.push_back(GC_IVAR(0, 1)); const BlockDecl *blockDecl = blockInfo.getBlockDecl(); // Calculate the basic layout of the block structure. const llvm::StructLayout *layout = CGM.getDataLayout().getStructLayout(blockInfo.StructureType); // Ignore the optional 'this' capture: C++ objects are not assumed // to be GC'ed. // Walk the captured variables. for (const auto &CI : blockDecl->captures()) { const VarDecl *variable = CI.getVariable(); QualType type = variable->getType(); const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); // Ignore constant captures. if (capture.isConstant()) continue; uint64_t fieldOffset = layout->getElementOffset(capture.getIndex()); // __block variables are passed by their descriptor address. if (CI.isByRef()) { IvarsInfo.push_back(GC_IVAR(fieldOffset, /*size in words*/ 1)); continue; } assert(!type->isArrayType() && "array variable should not be caught"); if (const RecordType *record = type->getAs<RecordType>()) { BuildAggrIvarRecordLayout(record, fieldOffset, true, hasUnion); continue; } Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type); unsigned fieldSize = CGM.getContext().getTypeSize(type); if (GCAttr == Qualifiers::Strong) IvarsInfo.push_back(GC_IVAR(fieldOffset, fieldSize / WordSizeInBits)); else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak) SkipIvars.push_back(GC_IVAR(fieldOffset, fieldSize / ByteSizeInBits)); } if (IvarsInfo.empty()) return nullPtr; // Sort on byte position; captures might not be allocated in order, // and unions can do funny things. llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end()); llvm::array_pod_sort(SkipIvars.begin(), SkipIvars.end()); std::string BitMap; llvm::Constant *C = BuildIvarLayoutBitmap(BitMap); if (CGM.getLangOpts().ObjCGCBitmapPrint) { printf("\n block variable layout for block: "); const unsigned char *s = (const unsigned char*)BitMap.c_str(); for (unsigned i = 0, e = BitMap.size(); i < e; i++) if (!(s[i] & 0xf0)) printf("0x0%x%s", s[i], s[i] != 0 ? ", " : ""); else printf("0x%x%s", s[i], s[i] != 0 ? ", " : ""); printf("\n"); } return C; } /// getBlockCaptureLifetime - This routine returns life time of the captured /// block variable for the purpose of block layout meta-data generation. FQT is /// the type of the variable captured in the block. 
Qualifiers::ObjCLifetime CGObjCCommonMac::getBlockCaptureLifetime(QualType FQT,
                                                                  bool ByrefLayout) {
  if (CGM.getLangOpts().ObjCAutoRefCount)
    return FQT.getObjCLifetime();

  // MRR.
  if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
    return ByrefLayout ? Qualifiers::OCL_ExplicitNone : Qualifiers::OCL_Strong;

  return Qualifiers::OCL_None;
}

void CGObjCCommonMac::UpdateRunSkipBlockVars(bool IsByref,
                                             Qualifiers::ObjCLifetime LifeTime,
                                             CharUnits FieldOffset,
                                             CharUnits FieldSize) {
  // __block variables are passed by their descriptor address.
  if (IsByref)
    RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_BYREF, FieldOffset,
                                        FieldSize));
  else if (LifeTime == Qualifiers::OCL_Strong)
    RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_STRONG, FieldOffset,
                                        FieldSize));
  else if (LifeTime == Qualifiers::OCL_Weak)
    RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_WEAK, FieldOffset,
                                        FieldSize));
  else if (LifeTime == Qualifiers::OCL_ExplicitNone)
    RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_UNRETAINED, FieldOffset,
                                        FieldSize));
  else
    RunSkipBlockVars.push_back(RUN_SKIP(BLOCK_LAYOUT_NON_OBJECT_BYTES,
                                        FieldOffset,
                                        FieldSize));
}

void CGObjCCommonMac::BuildRCRecordLayout(const llvm::StructLayout *RecLayout,
                                          const RecordDecl *RD,
                                          ArrayRef<const FieldDecl*> RecFields,
                                          CharUnits BytePos, bool &HasUnion,
                                          bool ByrefLayout) {
  bool IsUnion = (RD && RD->isUnion());
  CharUnits MaxUnionSize = CharUnits::Zero();
  const FieldDecl *MaxField = nullptr;
  const FieldDecl *LastFieldBitfieldOrUnnamed = nullptr;
  CharUnits MaxFieldOffset = CharUnits::Zero();
  CharUnits LastBitfieldOrUnnamedOffset = CharUnits::Zero();

  if (RecFields.empty())
    return;
  unsigned ByteSizeInBits = CGM.getTarget().getCharWidth();

  for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
    const FieldDecl *Field = RecFields[i];
    // Note that 'i' here is actually the field index inside RD of Field,
    // although this dependency is hidden.
    const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
    CharUnits FieldOffset =
      CGM.getContext().toCharUnitsFromBits(RL.getFieldOffset(i));

    // Skip over unnamed fields and bitfields.
    if (!Field->getIdentifier() || Field->isBitField()) {
      LastFieldBitfieldOrUnnamed = Field;
      LastBitfieldOrUnnamedOffset = FieldOffset;
      continue;
    }

    LastFieldBitfieldOrUnnamed = nullptr;
    QualType FQT = Field->getType();
    if (FQT->isRecordType() || FQT->isUnionType()) {
      if (FQT->isUnionType())
        HasUnion = true;

      BuildRCBlockVarRecordLayout(FQT->getAs<RecordType>(),
                                  BytePos + FieldOffset, HasUnion);
      continue;
    }

    if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
      const ConstantArrayType *CArray =
        dyn_cast_or_null<ConstantArrayType>(Array);
      // Check the cast before using the result (the original asserted this
      // only after already dereferencing CArray).
      assert(CArray && "only array with known element size is supported");
      uint64_t ElCount = CArray->getSize().getZExtValue();
      FQT = CArray->getElementType();
      while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
        const ConstantArrayType *CArray =
          dyn_cast_or_null<ConstantArrayType>(Array);
        ElCount *= CArray->getSize().getZExtValue();
        FQT = CArray->getElementType();
      }
      if (FQT->isRecordType() && ElCount) {
        int OldIndex = RunSkipBlockVars.size() - 1;
        const RecordType *RT = FQT->getAs<RecordType>();
        BuildRCBlockVarRecordLayout(RT, BytePos + FieldOffset, HasUnion);
        // Replicate layout information for each array element. Note that
        // one element is already done.
uint64_t ElIx = 1; for (int FirstIndex = RunSkipBlockVars.size() - 1 ;ElIx < ElCount; ElIx++) { CharUnits Size = CGM.getContext().getTypeSizeInChars(RT); for (int i = OldIndex+1; i <= FirstIndex; ++i) RunSkipBlockVars.push_back( RUN_SKIP(RunSkipBlockVars[i].opcode, RunSkipBlockVars[i].block_var_bytepos + Size*ElIx, RunSkipBlockVars[i].block_var_size)); } continue; } } CharUnits FieldSize = CGM.getContext().getTypeSizeInChars(Field->getType()); if (IsUnion) { CharUnits UnionIvarSize = FieldSize; if (UnionIvarSize > MaxUnionSize) { MaxUnionSize = UnionIvarSize; MaxField = Field; MaxFieldOffset = FieldOffset; } } else { UpdateRunSkipBlockVars(false, getBlockCaptureLifetime(FQT, ByrefLayout), BytePos + FieldOffset, FieldSize); } } if (LastFieldBitfieldOrUnnamed) { if (LastFieldBitfieldOrUnnamed->isBitField()) { // Last field was a bitfield. Must update the info. uint64_t BitFieldSize = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext()); unsigned UnsSize = (BitFieldSize / ByteSizeInBits) + ((BitFieldSize % ByteSizeInBits) != 0); CharUnits Size = CharUnits::fromQuantity(UnsSize); Size += LastBitfieldOrUnnamedOffset; UpdateRunSkipBlockVars(false, getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType(), ByrefLayout), BytePos + LastBitfieldOrUnnamedOffset, Size); } else { assert(!LastFieldBitfieldOrUnnamed->getIdentifier() &&"Expected unnamed"); // Last field was unnamed. Must update skip info. CharUnits FieldSize = CGM.getContext().getTypeSizeInChars(LastFieldBitfieldOrUnnamed->getType()); UpdateRunSkipBlockVars(false, getBlockCaptureLifetime(LastFieldBitfieldOrUnnamed->getType(), ByrefLayout), BytePos + LastBitfieldOrUnnamedOffset, FieldSize); } } if (MaxField) UpdateRunSkipBlockVars(false, getBlockCaptureLifetime(MaxField->getType(), ByrefLayout), BytePos + MaxFieldOffset, MaxUnionSize); } void CGObjCCommonMac::BuildRCBlockVarRecordLayout(const RecordType *RT, CharUnits BytePos, bool &HasUnion, bool ByrefLayout) { const RecordDecl *RD = RT->getDecl(); SmallVector<const FieldDecl*, 16> Fields(RD->fields()); llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0)); const llvm::StructLayout *RecLayout = CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty)); BuildRCRecordLayout(RecLayout, RD, Fields, BytePos, HasUnion, ByrefLayout); } /// InlineLayoutInstruction - This routine produce an inline instruction for the /// block variable layout if it can. If not, it returns 0. Rules are as follow: /// If ((uintptr_t) layout) < (1 << 12), the layout is inline. In the 64bit world, /// an inline layout of value 0x0000000000000xyz is interpreted as follows: /// x captured object pointers of BLOCK_LAYOUT_STRONG. Followed by /// y captured object of BLOCK_LAYOUT_BYREF. Followed by /// z captured object of BLOCK_LAYOUT_WEAK. If any of the above is missing, zero /// replaces it. For example, 0x00000x00 means x BLOCK_LAYOUT_STRONG and no /// BLOCK_LAYOUT_BYREF and no BLOCK_LAYOUT_WEAK objects are captured. 
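// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] Decoding the inline layout
// word described above: the low three nibbles hold the strong, byref, and
// weak word counts, in that order from high to low.
#if 0
#include <cassert>
#include <cstdint>

struct InlineCounts { unsigned strongWords, byrefWords, weakWords; };

InlineCounts decodeInlineLayout(uint64_t w) {
  return {unsigned((w >> 8) & 0xF), unsigned((w >> 4) & 0xF),
          unsigned(w & 0xF)};
}

int main() {
  // 0x230: two strong words, then three byref words, no weak words.
  InlineCounts c = decodeInlineLayout(0x230);
  assert(c.strongWords == 2 && c.byrefWords == 3 && c.weakWords == 0);
  return 0;
}
#endif
// ------------------------------------------------------------------------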
uint64_t CGObjCCommonMac::InlineLayoutInstruction( SmallVectorImpl<unsigned char> &Layout) { uint64_t Result = 0; if (Layout.size() <= 3) { unsigned size = Layout.size(); unsigned strong_word_count = 0, byref_word_count=0, weak_word_count=0; unsigned char inst; enum BLOCK_LAYOUT_OPCODE opcode ; switch (size) { case 3: inst = Layout[0]; opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_STRONG) strong_word_count = (inst & 0xF)+1; else return 0; inst = Layout[1]; opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_BYREF) byref_word_count = (inst & 0xF)+1; else return 0; inst = Layout[2]; opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_WEAK) weak_word_count = (inst & 0xF)+1; else return 0; break; case 2: inst = Layout[0]; opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_STRONG) { strong_word_count = (inst & 0xF)+1; inst = Layout[1]; opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_BYREF) byref_word_count = (inst & 0xF)+1; else if (opcode == BLOCK_LAYOUT_WEAK) weak_word_count = (inst & 0xF)+1; else return 0; } else if (opcode == BLOCK_LAYOUT_BYREF) { byref_word_count = (inst & 0xF)+1; inst = Layout[1]; opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_WEAK) weak_word_count = (inst & 0xF)+1; else return 0; } else return 0; break; case 1: inst = Layout[0]; opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_STRONG) strong_word_count = (inst & 0xF)+1; else if (opcode == BLOCK_LAYOUT_BYREF) byref_word_count = (inst & 0xF)+1; else if (opcode == BLOCK_LAYOUT_WEAK) weak_word_count = (inst & 0xF)+1; else return 0; break; default: return 0; } // Cannot inline when any of the word counts is 15. Because this is one less // than the actual work count (so 15 means 16 actual word counts), // and we can only display 0 thru 15 word counts. if (strong_word_count == 16 || byref_word_count == 16 || weak_word_count == 16) return 0; unsigned count = (strong_word_count != 0) + (byref_word_count != 0) + (weak_word_count != 0); if (size == count) { if (strong_word_count) Result = strong_word_count; Result <<= 4; if (byref_word_count) Result += byref_word_count; Result <<= 4; if (weak_word_count) Result += weak_word_count; } } return Result; } llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) { llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy); if (RunSkipBlockVars.empty()) return nullPtr; unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0); unsigned ByteSizeInBits = CGM.getTarget().getCharWidth(); unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits; // Sort on byte position; captures might not be allocated in order, // and unions can do funny things. 
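// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] The loop below run-length
// encodes each opcode into instruction bytes of the form
// (opcode << 4) | (count - 1), saturating at 16 words per byte since the
// low nibble can only express counts 1..16. A standalone model (the opcode
// values here are illustrative):
#if 0
#include <cassert>
#include <cstdint>
#include <vector>

enum OpcodeSketch { NON_OBJECT_WORDS = 2, STRONG = 3 };

std::vector<uint8_t> encodeRun(OpcodeSketch op, unsigned words) {
  std::vector<uint8_t> out;
  while (words >= 16) {                       // 0xF means "16 words follow"
    out.push_back(uint8_t((op << 4) | 0xF));
    words -= 16;
  }
  if (words > 0)                              // remainder, stored as count-1
    out.push_back(uint8_t((op << 4) | (words - 1)));
  return out;
}

int main() {
  std::vector<uint8_t> enc = encodeRun(STRONG, 18);  // 16 + 2
  assert(enc.size() == 2 && enc[0] == 0x3F && enc[1] == 0x31);
  return 0;
}
#endif
// ------------------------------------------------------------------------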
llvm::array_pod_sort(RunSkipBlockVars.begin(), RunSkipBlockVars.end()); SmallVector<unsigned char, 16> Layout; unsigned size = RunSkipBlockVars.size(); for (unsigned i = 0; i < size; i++) { enum BLOCK_LAYOUT_OPCODE opcode = RunSkipBlockVars[i].opcode; CharUnits start_byte_pos = RunSkipBlockVars[i].block_var_bytepos; CharUnits end_byte_pos = start_byte_pos; unsigned j = i+1; while (j < size) { if (opcode == RunSkipBlockVars[j].opcode) { end_byte_pos = RunSkipBlockVars[j++].block_var_bytepos; i++; } else break; } CharUnits size_in_bytes = end_byte_pos - start_byte_pos + RunSkipBlockVars[j-1].block_var_size; if (j < size) { CharUnits gap = RunSkipBlockVars[j].block_var_bytepos - RunSkipBlockVars[j-1].block_var_bytepos - RunSkipBlockVars[j-1].block_var_size; size_in_bytes += gap; } CharUnits residue_in_bytes = CharUnits::Zero(); if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES) { residue_in_bytes = size_in_bytes % WordSizeInBytes; size_in_bytes -= residue_in_bytes; opcode = BLOCK_LAYOUT_NON_OBJECT_WORDS; } unsigned size_in_words = size_in_bytes.getQuantity() / WordSizeInBytes; while (size_in_words >= 16) { // Note that value in imm. is one less that the actual // value. So, 0xf means 16 words follow! unsigned char inst = (opcode << 4) | 0xf; Layout.push_back(inst); size_in_words -= 16; } if (size_in_words > 0) { // Note that value in imm. is one less that the actual // value. So, we subtract 1 away! unsigned char inst = (opcode << 4) | (size_in_words-1); Layout.push_back(inst); } if (residue_in_bytes > CharUnits::Zero()) { unsigned char inst = (BLOCK_LAYOUT_NON_OBJECT_BYTES << 4) | (residue_in_bytes.getQuantity()-1); Layout.push_back(inst); } } int e = Layout.size()-1; while (e >= 0) { unsigned char inst = Layout[e--]; enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); if (opcode == BLOCK_LAYOUT_NON_OBJECT_BYTES || opcode == BLOCK_LAYOUT_NON_OBJECT_WORDS) Layout.pop_back(); else break; } uint64_t Result = InlineLayoutInstruction(Layout); if (Result != 0) { // Block variable layout instruction has been inlined. 
if (CGM.getLangOpts().ObjCGCBitmapPrint) { if (ComputeByrefLayout) printf("\n Inline instruction for BYREF variable layout: "); else printf("\n Inline instruction for block variable layout: "); printf("0x0%" PRIx64 "\n", Result); } if (WordSizeInBytes == 8) { const llvm::APInt Instruction(64, Result); return llvm::Constant::getIntegerValue(CGM.Int64Ty, Instruction); } else { const llvm::APInt Instruction(32, Result); return llvm::Constant::getIntegerValue(CGM.Int32Ty, Instruction); } } unsigned char inst = (BLOCK_LAYOUT_OPERATOR << 4) | 0; Layout.push_back(inst); std::string BitMap; for (unsigned i = 0, e = Layout.size(); i != e; i++) BitMap += Layout[i]; if (CGM.getLangOpts().ObjCGCBitmapPrint) { if (ComputeByrefLayout) printf("\n BYREF variable layout: "); else printf("\n block variable layout: "); for (unsigned i = 0, e = BitMap.size(); i != e; i++) { unsigned char inst = BitMap[i]; enum BLOCK_LAYOUT_OPCODE opcode = (enum BLOCK_LAYOUT_OPCODE) (inst >> 4); unsigned delta = 1; switch (opcode) { case BLOCK_LAYOUT_OPERATOR: printf("BL_OPERATOR:"); delta = 0; break; case BLOCK_LAYOUT_NON_OBJECT_BYTES: printf("BL_NON_OBJECT_BYTES:"); break; case BLOCK_LAYOUT_NON_OBJECT_WORDS: printf("BL_NON_OBJECT_WORD:"); break; case BLOCK_LAYOUT_STRONG: printf("BL_STRONG:"); break; case BLOCK_LAYOUT_BYREF: printf("BL_BYREF:"); break; case BLOCK_LAYOUT_WEAK: printf("BL_WEAK:"); break; case BLOCK_LAYOUT_UNRETAINED: printf("BL_UNRETAINED:"); break; } // Actual value of word count is one more that what is in the imm. // field of the instruction printf("%d", (inst & 0xf) + delta); if (i < e-1) printf(", "); else printf("\n"); } } llvm::GlobalVariable *Entry = CreateMetadataVar( "OBJC_CLASS_NAME_", llvm::ConstantDataArray::getString(VMContext, BitMap, false), "__TEXT,__objc_classname,cstring_literals", 1, true); return getConstantGEP(VMContext, Entry, 0, 0); } llvm::Constant *CGObjCCommonMac::BuildRCBlockLayout(CodeGenModule &CGM, const CGBlockInfo &blockInfo) { assert(CGM.getLangOpts().getGC() == LangOptions::NonGC); RunSkipBlockVars.clear(); bool hasUnion = false; unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0); unsigned ByteSizeInBits = CGM.getTarget().getCharWidth(); unsigned WordSizeInBytes = WordSizeInBits/ByteSizeInBits; const BlockDecl *blockDecl = blockInfo.getBlockDecl(); // Calculate the basic layout of the block structure. const llvm::StructLayout *layout = CGM.getDataLayout().getStructLayout(blockInfo.StructureType); // Ignore the optional 'this' capture: C++ objects are not assumed // to be GC'ed. if (blockInfo.BlockHeaderForcedGapSize != CharUnits::Zero()) UpdateRunSkipBlockVars(false, Qualifiers::OCL_None, blockInfo.BlockHeaderForcedGapOffset, blockInfo.BlockHeaderForcedGapSize); // Walk the captured variables. for (const auto &CI : blockDecl->captures()) { const VarDecl *variable = CI.getVariable(); QualType type = variable->getType(); const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable); // Ignore constant captures. 
if (capture.isConstant()) continue; CharUnits fieldOffset = CharUnits::fromQuantity(layout->getElementOffset(capture.getIndex())); assert(!type->isArrayType() && "array variable should not be caught"); if (!CI.isByRef()) if (const RecordType *record = type->getAs<RecordType>()) { BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion); continue; } CharUnits fieldSize; if (CI.isByRef()) fieldSize = CharUnits::fromQuantity(WordSizeInBytes); else fieldSize = CGM.getContext().getTypeSizeInChars(type); UpdateRunSkipBlockVars(CI.isByRef(), getBlockCaptureLifetime(type, false), fieldOffset, fieldSize); } return getBitmapBlockLayout(false); } llvm::Constant *CGObjCCommonMac::BuildByrefLayout(CodeGen::CodeGenModule &CGM, QualType T) { assert(CGM.getLangOpts().getGC() == LangOptions::NonGC); assert(!T->isArrayType() && "__block array variable should not be caught"); CharUnits fieldOffset; RunSkipBlockVars.clear(); bool hasUnion = false; if (const RecordType *record = T->getAs<RecordType>()) { BuildRCBlockVarRecordLayout(record, fieldOffset, hasUnion, true /*ByrefLayout */); llvm::Constant *Result = getBitmapBlockLayout(true); return Result; } llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy); return nullPtr; } llvm::Value *CGObjCMac::GenerateProtocolRef(CodeGenFunction &CGF, const ObjCProtocolDecl *PD) { // FIXME: I don't understand why gcc generates this, or where it is // resolved. Investigate. Its also wasteful to look this up over and over. LazySymbols.insert(&CGM.getContext().Idents.get("Protocol")); return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD), ObjCTypes.getExternalProtocolPtrTy()); } void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) { // FIXME: We shouldn't need this, the protocol decl should contain enough // information to tell us whether this was a declaration or a definition. DefinedProtocols.insert(PD->getIdentifier()); // If we have generated a forward reference to this protocol, emit // it now. Otherwise do nothing, the protocol objects are lazily // emitted. if (Protocols.count(PD->getIdentifier())) GetOrEmitProtocol(PD); } llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) { if (DefinedProtocols.count(PD->getIdentifier())) return GetOrEmitProtocol(PD); return GetOrEmitProtocolRef(PD); } /* // Objective-C 1.0 extensions struct _objc_protocol { struct _objc_protocol_extension *isa; char *protocol_name; struct _objc_protocol_list *protocol_list; struct _objc__method_prototype_list *instance_methods; struct _objc__method_prototype_list *class_methods }; See EmitProtocolExtension(). */ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) { llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()]; // Early exit if a defining object has already been generated. if (Entry && Entry->hasInitializer()) return Entry; // Use the protocol definition, if there is one. if (const ObjCProtocolDecl *Def = PD->getDefinition()) PD = Def; // FIXME: I don't understand why gcc generates this, or where it is // resolved. Investigate. Its also wasteful to look this up over and over. LazySymbols.insert(&CGM.getContext().Idents.get("Protocol")); // Construct method lists. 
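// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] The loops below split a
// protocol's methods four ways: (instance vs class) x (required vs
// optional). Reduced to its shape, with hypothetical names:
#if 0
#include <cassert>
#include <string>
#include <vector>

struct MethodSketch { std::string sel; bool isInstance; bool isOptional; };

struct MethodLists {
  std::vector<MethodSketch> instance, klass, optInstance, optKlass;
};

MethodLists partition(const std::vector<MethodSketch> &methods) {
  MethodLists L;
  for (const MethodSketch &M : methods) {
    std::vector<MethodSketch> &dest =
        M.isInstance ? (M.isOptional ? L.optInstance : L.instance)
                     : (M.isOptional ? L.optKlass : L.klass);
    dest.push_back(M);
  }
  return L;
}

int main() {
  MethodLists L =
      partition({{"copy", true, false}, {"alloc", false, true}});
  assert(L.instance.size() == 1 && L.optKlass.size() == 1);
  return 0;
}
#endif
// ------------------------------------------------------------------------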
std::vector<llvm::Constant*> InstanceMethods, ClassMethods; std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods; std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt; for (const auto *MD : PD->instance_methods()) { llvm::Constant *C = GetMethodDescriptionConstant(MD); if (!C) return GetOrEmitProtocolRef(PD); if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { OptInstanceMethods.push_back(C); OptMethodTypesExt.push_back(GetMethodVarType(MD, true)); } else { InstanceMethods.push_back(C); MethodTypesExt.push_back(GetMethodVarType(MD, true)); } } for (const auto *MD : PD->class_methods()) { llvm::Constant *C = GetMethodDescriptionConstant(MD); if (!C) return GetOrEmitProtocolRef(PD); if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { OptClassMethods.push_back(C); OptMethodTypesExt.push_back(GetMethodVarType(MD, true)); } else { ClassMethods.push_back(C); MethodTypesExt.push_back(GetMethodVarType(MD, true)); } } MethodTypesExt.insert(MethodTypesExt.end(), OptMethodTypesExt.begin(), OptMethodTypesExt.end()); llvm::Constant *Values[] = { EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods, MethodTypesExt), GetClassName(PD->getObjCRuntimeNameAsString()), EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(), PD->protocol_begin(), PD->protocol_end()), EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(), "__OBJC,__cat_inst_meth,regular,no_dead_strip", InstanceMethods), EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(), "__OBJC,__cat_cls_meth,regular,no_dead_strip", ClassMethods)}; llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy, Values); if (Entry) { // Already created, update the initializer. assert(Entry->hasPrivateLinkage()); Entry->setInitializer(Init); } else { Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false, llvm::GlobalValue::PrivateLinkage, Init, "OBJC_PROTOCOL_" + PD->getName()); Entry->setSection("__OBJC,__protocol,regular,no_dead_strip"); // FIXME: Is this necessary? Why only for protocol? Entry->setAlignment(4); Protocols[PD->getIdentifier()] = Entry; } CGM.addCompilerUsedGlobal(Entry); return Entry; } llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) { llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()]; if (!Entry) { // We use the initializer as a marker of whether this is a forward // reference or not. At module finalization we add the empty // contents for protocols which were referenced but never defined. Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false, llvm::GlobalValue::PrivateLinkage, nullptr, "OBJC_PROTOCOL_" + PD->getName()); Entry->setSection("__OBJC,__protocol,regular,no_dead_strip"); // FIXME: Is this necessary? Why only for protocol? 
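// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] GetOrEmitProtocolRef above
// relies on "null initializer == forward reference": a referenced-but-
// undefined protocol gets an empty global slot, and module finalization
// later fills any remaining nulls with empty bodies. The same pattern in
// miniature, with hypothetical names:
#if 0
#include <cassert>
#include <map>
#include <string>

struct ProtoSlot { bool hasInitializer = false; };

std::map<std::string, ProtoSlot> table;

ProtoSlot &getOrEmitRef(const std::string &name) {
  return table[name];            // default-constructed: a forward reference
}

ProtoSlot &getOrEmit(const std::string &name) {
  ProtoSlot &slot = table[name];
  slot.hasInitializer = true;    // now a real definition
  return slot;
}

int main() {
  getOrEmitRef("NSCopying");
  assert(!table["NSCopying"].hasInitializer);  // still just a forward ref
  getOrEmit("NSCopying");
  assert(table["NSCopying"].hasInitializer);
  return 0;
}
#endif
// ------------------------------------------------------------------------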
Entry->setAlignment(4); } return Entry; } /* struct _objc_protocol_extension { uint32_t size; struct objc_method_description_list *optional_instance_methods; struct objc_method_description_list *optional_class_methods; struct objc_property_list *instance_properties; const char ** extendedMethodTypes; }; */ llvm::Constant * CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD, ArrayRef<llvm::Constant*> OptInstanceMethods, ArrayRef<llvm::Constant*> OptClassMethods, ArrayRef<llvm::Constant*> MethodTypesExt) { uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy); llvm::Constant *Values[] = { llvm::ConstantInt::get(ObjCTypes.IntTy, Size), EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_OPT_" + PD->getName(), "__OBJC,__cat_inst_meth,regular,no_dead_strip", OptInstanceMethods), EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(), "__OBJC,__cat_cls_meth,regular,no_dead_strip", OptClassMethods), EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD, ObjCTypes), EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(), MethodTypesExt, ObjCTypes)}; // Return null if no extension bits are used. if (Values[1]->isNullValue() && Values[2]->isNullValue() && Values[3]->isNullValue() && Values[4]->isNullValue()) return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy); llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values); // No special section, but goes in llvm.used return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), Init, StringRef(), 0, true); } /* struct objc_protocol_list { struct objc_protocol_list *next; long count; Protocol *list[]; }; */ llvm::Constant * CGObjCMac::EmitProtocolList(Twine Name, ObjCProtocolDecl::protocol_iterator begin, ObjCProtocolDecl::protocol_iterator end) { SmallVector<llvm::Constant *, 16> ProtocolRefs; for (; begin != end; ++begin) ProtocolRefs.push_back(GetProtocolRef(*begin)); // Just return null for empty protocol lists if (ProtocolRefs.empty()) return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy); // This list is null terminated. ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy)); llvm::Constant *Values[3]; // This field is only used by the runtime. 
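// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] The three values filled in
// below follow the objc_protocol_list convention: 'next' is left null for
// the runtime, the array is null-terminated, and 'count' covers only the
// real entries (hence the "size() - 1"). A mock of that invariant:
#if 0
#include <cassert>
#include <vector>

struct ProtocolListSketch {
  void *next = nullptr;          // only used by the runtime
  long count = 0;
  std::vector<void *> list;      // count entries plus a trailing nullptr
};

ProtocolListSketch makeList(std::vector<void *> refs) {
  ProtocolListSketch L;
  L.count = static_cast<long>(refs.size());
  refs.push_back(nullptr);       // terminator, excluded from the count
  L.list = std::move(refs);
  return L;
}

int main() {
  int a, b;
  ProtocolListSketch L = makeList({&a, &b});
  assert(L.count == 2 && L.list.size() == 3 && L.list.back() == nullptr);
  return 0;
}
#endif
// ------------------------------------------------------------------------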
Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy); Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1); Values[2] = llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy, ProtocolRefs.size()), ProtocolRefs); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip", 4, false); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy); } void CGObjCCommonMac:: PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet, SmallVectorImpl<llvm::Constant *> &Properties, const Decl *Container, const ObjCProtocolDecl *Proto, const ObjCCommonTypesHelper &ObjCTypes) { for (const auto *P : Proto->protocols()) PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes); for (const auto *PD : Proto->properties()) { if (!PropertySet.insert(PD->getIdentifier()).second) continue; llvm::Constant *Prop[] = { GetPropertyName(PD->getIdentifier()), GetPropertyTypeString(PD, Container) }; Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop)); } } /* struct _objc_property { const char * const name; const char * const attributes; }; struct _objc_property_list { uint32_t entsize; // sizeof (struct _objc_property) uint32_t prop_count; struct _objc_property[prop_count]; }; */ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name, const Decl *Container, const ObjCContainerDecl *OCD, const ObjCCommonTypesHelper &ObjCTypes) { SmallVector<llvm::Constant *, 16> Properties; llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet; for (const auto *PD : OCD->properties()) { PropertySet.insert(PD->getIdentifier()); llvm::Constant *Prop[] = { GetPropertyName(PD->getIdentifier()), GetPropertyTypeString(PD, Container) }; Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop)); } if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) { for (const auto *P : OID->all_referenced_protocols()) PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes); } else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) { for (const auto *P : CD->protocols()) PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes); } // Return null for empty list. if (Properties.empty()) return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy); unsigned PropertySize = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.PropertyTy); llvm::Constant *Values[3]; Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize); Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size()); llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy, Properties.size()); Values[2] = llvm::ConstantArray::get(AT, Properties); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, (ObjCABI == 2) ? "__DATA, __objc_const" : "__OBJC,__property,regular,no_dead_strip", (ObjCABI == 2) ? 8 : 4, true); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy); } llvm::Constant * CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name, ArrayRef<llvm::Constant*> MethodTypes, const ObjCCommonTypesHelper &ObjCTypes) { // Return null for empty list. 
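// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] EmitPropertyList above
// writes an entsize/count header: 'entsize' records the size of one
// _objc_property entry so the runtime can walk tables whose element size
// may grow in later compilers. A mock of that header; the attribute string
// is merely plausible, not normative:
#if 0
#include <cassert>
#include <cstdint>
#include <vector>

struct PropertyEntrySketch { const char *name; const char *attributes; };

struct PropertyListSketch {
  uint32_t entsize;              // sizeof one entry
  uint32_t prop_count;
  std::vector<PropertyEntrySketch> props;
};

PropertyListSketch makePropertyList(std::vector<PropertyEntrySketch> props) {
  return {static_cast<uint32_t>(sizeof(PropertyEntrySketch)),
          static_cast<uint32_t>(props.size()), std::move(props)};
}

int main() {
  PropertyListSketch L = makePropertyList({{"title", "T@,&,N"}});
  assert(L.prop_count == 1 && L.entsize == sizeof(PropertyEntrySketch));
  return 0;
}
#endif
// ------------------------------------------------------------------------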
if (MethodTypes.empty()) return llvm::Constant::getNullValue(ObjCTypes.Int8PtrPtrTy); llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy, MethodTypes.size()); llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes); llvm::GlobalVariable *GV = CreateMetadataVar( Name, Init, (ObjCABI == 2) ? "__DATA, __objc_const" : StringRef(), (ObjCABI == 2) ? 8 : 4, true); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy); } /* struct objc_method_description_list { int count; struct objc_method_description list[]; }; */ llvm::Constant * CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) { llvm::Constant *Desc[] = { llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()), ObjCTypes.SelectorPtrTy), GetMethodVarType(MD) }; if (!Desc[1]) return nullptr; return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy, Desc); } llvm::Constant * CGObjCMac::EmitMethodDescList(Twine Name, const char *Section, ArrayRef<llvm::Constant*> Methods) { // Return null for empty list. if (Methods.empty()) return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy); llvm::Constant *Values[2]; Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size()); llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy, Methods.size()); Values[1] = llvm::ConstantArray::get(AT, Methods); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodDescriptionListPtrTy); } /* struct _objc_category { char *category_name; char *class_name; struct _objc_method_list *instance_methods; struct _objc_method_list *class_methods; struct _objc_protocol_list *protocols; uint32_t size; // <rdar://4585769> struct _objc_property_list *instance_properties; }; */ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategoryTy); // FIXME: This is poor design, the OCD should have a pointer to the category // decl. Additionally, note that Category can be null for the @implementation // w/o an @interface case. Sema should just create one for us as it does for // @implementation so everyone else can live life under a clear blue sky. const ObjCInterfaceDecl *Interface = OCD->getClassInterface(); const ObjCCategoryDecl *Category = Interface->FindCategoryDeclaration(OCD->getIdentifier()); SmallString<256> ExtName; llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_' << OCD->getName(); SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods; for (const auto *I : OCD->instance_methods()) // Instance methods should always be defined. InstanceMethods.push_back(GetMethodConstant(I)); for (const auto *I : OCD->class_methods()) // Class methods should always be defined. 
ClassMethods.push_back(GetMethodConstant(I)); llvm::Constant *Values[7]; Values[0] = GetClassName(OCD->getName()); Values[1] = GetClassName(Interface->getObjCRuntimeNameAsString()); LazySymbols.insert(Interface->getIdentifier()); Values[2] = EmitMethodList("OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(), "__OBJC,__cat_inst_meth,regular,no_dead_strip", InstanceMethods); Values[3] = EmitMethodList("OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(), "__OBJC,__cat_cls_meth,regular,no_dead_strip", ClassMethods); if (Category) { Values[4] = EmitProtocolList("OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(), Category->protocol_begin(), Category->protocol_end()); } else { Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy); } Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size); // If there is no category @interface then there can be no properties. if (Category) { Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(), OCD, Category, ObjCTypes); } else { Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy); } llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy, Values); llvm::GlobalVariable *GV = CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init, "__OBJC,__category,regular,no_dead_strip", 4, true); DefinedCategories.push_back(GV); DefinedCategoryNames.insert(ExtName.str()); // method definition entries must be clear for next implementation. MethodDefinitions.clear(); } enum FragileClassFlags { FragileABI_Class_Factory = 0x00001, FragileABI_Class_Meta = 0x00002, FragileABI_Class_HasCXXStructors = 0x02000, FragileABI_Class_Hidden = 0x20000 }; enum NonFragileClassFlags { /// Is a meta-class. NonFragileABI_Class_Meta = 0x00001, /// Is a root class. NonFragileABI_Class_Root = 0x00002, /// Has a C++ constructor and destructor. NonFragileABI_Class_HasCXXStructors = 0x00004, /// Has hidden visibility. NonFragileABI_Class_Hidden = 0x00010, /// Has the exception attribute. NonFragileABI_Class_Exception = 0x00020, /// (Obsolete) ARC-specific: this class has a .release_ivars method NonFragileABI_Class_HasIvarReleaser = 0x00040, /// Class implementation was compiled under ARC. NonFragileABI_Class_CompiledByARC = 0x00080, /// Class has non-trivial destructors, but zero-initialization is okay. NonFragileABI_Class_HasCXXDestructorOnly = 0x00100 }; /* struct _objc_class { Class isa; Class super_class; const char *name; long version; long info; long instance_size; struct _objc_ivar_list *ivars; struct _objc_method_list *methods; struct _objc_cache *cache; struct _objc_protocol_list *protocols; // Objective-C 1.0 extensions (<rdr://4585769>) const char *ivar_layout; struct _objc_class_ext *ext; }; See EmitClassExtension(); */ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) { DefinedSymbols.insert(ID->getIdentifier()); std::string ClassName = ID->getNameAsString(); // FIXME: Gross ObjCInterfaceDecl *Interface = const_cast<ObjCInterfaceDecl*>(ID->getClassInterface()); llvm::Constant *Protocols = EmitProtocolList("OBJC_CLASS_PROTOCOLS_" + ID->getName(), Interface->all_referenced_protocol_begin(), Interface->all_referenced_protocol_end()); unsigned Flags = FragileABI_Class_Factory; if (ID->hasNonZeroConstructors() || ID->hasDestructors()) Flags |= FragileABI_Class_HasCXXStructors; unsigned Size = CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity(); // FIXME: Set CXX-structors flag. 
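// ------------------------------------------------------------------------
// [Editorial sketch - excluded from the build.] The 'info' word of the
// fragile class is a bitmask built from the FragileClassFlags above:
// Factory is always set for a class implementation, and HasCXXStructors /
// Hidden are OR'ed in conditionally, as the surrounding code does.
#if 0
#include <cassert>

enum FragileClassFlagsSketch {
  Factory         = 0x00001,
  HasCXXStructors = 0x02000,
  Hidden          = 0x20000
};

int main() {
  unsigned flags = Factory;
  bool hasCXXStructors = true, hiddenVisibility = true;
  if (hasCXXStructors)  flags |= HasCXXStructors;
  if (hiddenVisibility) flags |= Hidden;
  assert(flags == 0x22001u);
  return 0;
}
#endif
// ------------------------------------------------------------------------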
if (ID->getClassInterface()->getVisibility() == HiddenVisibility) Flags |= FragileABI_Class_Hidden; SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods; for (const auto *I : ID->instance_methods()) // Instance methods should always be defined. InstanceMethods.push_back(GetMethodConstant(I)); for (const auto *I : ID->class_methods()) // Class methods should always be defined. ClassMethods.push_back(GetMethodConstant(I)); for (const auto *PID : ID->property_impls()) { if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) { ObjCPropertyDecl *PD = PID->getPropertyDecl(); if (ObjCMethodDecl *MD = PD->getGetterMethodDecl()) if (llvm::Constant *C = GetMethodConstant(MD)) InstanceMethods.push_back(C); if (ObjCMethodDecl *MD = PD->getSetterMethodDecl()) if (llvm::Constant *C = GetMethodConstant(MD)) InstanceMethods.push_back(C); } } llvm::Constant *Values[12]; Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods); if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) { // Record a reference to the super class. LazySymbols.insert(Super->getIdentifier()); Values[ 1] = llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()), ObjCTypes.ClassPtrTy); } else { Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy); } Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString()); // Version is always 0. Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0); Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags); Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size); Values[ 6] = EmitIvarList(ID, false); Values[7] = EmitMethodList("OBJC_INSTANCE_METHODS_" + ID->getName(), "__OBJC,__inst_meth,regular,no_dead_strip", InstanceMethods); // cache is always NULL. Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy); Values[ 9] = Protocols; Values[10] = BuildIvarLayout(ID, true); Values[11] = EmitClassExtension(ID); llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy, Values); std::string Name("OBJC_CLASS_"); Name += ClassName; const char *Section = "__OBJC,__class,regular,no_dead_strip"; // Check for a forward reference. llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true); if (GV) { assert(GV->getType()->getElementType() == ObjCTypes.ClassTy && "Forward metaclass reference has incorrect type."); GV->setInitializer(Init); GV->setSection(Section); GV->setAlignment(4); CGM.addCompilerUsedGlobal(GV); } else GV = CreateMetadataVar(Name, Init, Section, 4, true); DefinedClasses.push_back(GV); ImplementedClasses.push_back(Interface); // method definition entries must be clear for next implementation. MethodDefinitions.clear(); } llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID, llvm::Constant *Protocols, ArrayRef<llvm::Constant*> Methods) { unsigned Flags = FragileABI_Class_Meta; unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassTy); if (ID->getClassInterface()->getVisibility() == HiddenVisibility) Flags |= FragileABI_Class_Hidden; llvm::Constant *Values[12]; // The isa for the metaclass is the root of the hierarchy. const ObjCInterfaceDecl *Root = ID->getClassInterface(); while (const ObjCInterfaceDecl *Super = Root->getSuperClass()) Root = Super; Values[ 0] = llvm::ConstantExpr::getBitCast(GetClassName(Root->getObjCRuntimeNameAsString()), ObjCTypes.ClassPtrTy); // The super class for the metaclass is emitted as the name of the // super class. The runtime fixes this up to point to the // *metaclass* for the super class. 
if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) { Values[ 1] = llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()), ObjCTypes.ClassPtrTy); } else { Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy); } Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString()); // Version is always 0. Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0); Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags); Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size); Values[ 6] = EmitIvarList(ID, true); Values[7] = EmitMethodList("OBJC_CLASS_METHODS_" + ID->getNameAsString(), "__OBJC,__cls_meth,regular,no_dead_strip", Methods); // cache is always NULL. Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy); Values[ 9] = Protocols; // ivar_layout for metaclass is always NULL. Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy); // The class extension is always unused for metaclasses. Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy); llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy, Values); std::string Name("OBJC_METACLASS_"); Name += ID->getName(); // Check for a forward reference. llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true); if (GV) { assert(GV->getType()->getElementType() == ObjCTypes.ClassTy && "Forward metaclass reference has incorrect type."); GV->setInitializer(Init); } else { GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false, llvm::GlobalValue::PrivateLinkage, Init, Name); } GV->setSection("__OBJC,__meta_class,regular,no_dead_strip"); GV->setAlignment(4); CGM.addCompilerUsedGlobal(GV); return GV; } llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) { std::string Name = "OBJC_METACLASS_" + ID->getNameAsString(); // FIXME: Should we look these up somewhere other than the module. Its a bit // silly since we only generate these while processing an implementation, so // exactly one pointer would work if know when we entered/exitted an // implementation block. // Check for an existing forward reference. // Previously, metaclass with internal linkage may have been defined. // pass 'true' as 2nd argument so it is returned. 
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true); if (!GV) GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false, llvm::GlobalValue::PrivateLinkage, nullptr, Name); assert(GV->getType()->getElementType() == ObjCTypes.ClassTy && "Forward metaclass reference has incorrect type."); return GV; } llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) { std::string Name = "OBJC_CLASS_" + ID->getNameAsString(); llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true); if (!GV) GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false, llvm::GlobalValue::PrivateLinkage, nullptr, Name); assert(GV->getType()->getElementType() == ObjCTypes.ClassTy && "Forward class metadata reference has incorrect type."); return GV; } /* struct objc_class_ext { uint32_t size; const char *weak_ivar_layout; struct _objc_property_list *properties; }; */ llvm::Constant * CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) { uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy); llvm::Constant *Values[3]; Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size); Values[1] = BuildIvarLayout(ID, false); Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(), ID, ID->getClassInterface(), ObjCTypes); // Return null if no extension bits are used. if (Values[1]->isNullValue() && Values[2]->isNullValue()) return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy); llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values); return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init, "__OBJC,__class_ext,regular,no_dead_strip", 4, true); } /* struct objc_ivar { char *ivar_name; char *ivar_type; int ivar_offset; }; struct objc_ivar_list { int ivar_count; struct objc_ivar list[count]; }; */ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID, bool ForClass) { std::vector<llvm::Constant*> Ivars; // When emitting the root class GCC emits ivar entries for the // actual class structure. It is not clear if we need to follow this // behavior; for now lets try and get away with not doing it. If so, // the cleanest solution would be to make up an ObjCInterfaceDecl // for the class. if (ForClass) return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy); const ObjCInterfaceDecl *OID = ID->getClassInterface(); for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin(); IVD; IVD = IVD->getNextIvar()) { // Ignore unnamed bit-fields. if (!IVD->getDeclName()) continue; llvm::Constant *Ivar[] = { GetMethodVarName(IVD->getIdentifier()), GetMethodVarType(IVD), llvm::ConstantInt::get(ObjCTypes.IntTy, ComputeIvarBaseOffset(CGM, OID, IVD)) }; Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar)); } // Return null for empty list. 
if (Ivars.empty()) return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy); llvm::Constant *Values[2]; Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size()); llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy, Ivars.size()); Values[1] = llvm::ConstantArray::get(AT, Ivars); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); llvm::GlobalVariable *GV; if (ForClass) GV = CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init, "__OBJC,__class_vars,regular,no_dead_strip", 4, true); else GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init, "__OBJC,__instance_vars,regular,no_dead_strip", 4, true); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy); } /* struct objc_method { SEL method_name; char *method_types; void *method; }; struct objc_method_list { struct objc_method_list *obsolete; int count; struct objc_method methods_list[count]; }; */ /// GetMethodConstant - Return a struct objc_method constant for the /// given method if it has been defined. The result is null if the /// method has not been defined. The return value has type MethodPtrTy. llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) { llvm::Function *Fn = GetMethodDefinition(MD); if (!Fn) return nullptr; llvm::Constant *Method[] = { llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()), ObjCTypes.SelectorPtrTy), GetMethodVarType(MD), llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy) }; return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method); } llvm::Constant *CGObjCMac::EmitMethodList(Twine Name, const char *Section, ArrayRef<llvm::Constant*> Methods) { // Return null for empty list. if (Methods.empty()) return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy); llvm::Constant *Values[3]; Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy); Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size()); llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy, Methods.size()); Values[2] = llvm::ConstantArray::get(AT, Methods); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy); } llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD, const ObjCContainerDecl *CD) { SmallString<256> Name; GetNameForMethod(OMD, CD, Name); CodeGenTypes &Types = CGM.getTypes(); llvm::FunctionType *MethodTy = Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD)); llvm::Function *Method = llvm::Function::Create(MethodTy, llvm::GlobalValue::InternalLinkage, Name.str(), &CGM.getModule()); MethodDefinitions.insert(std::make_pair(OMD, Method)); return Method; } llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name, llvm::Constant *Init, StringRef Section, unsigned Align, bool AddToUsed) { llvm::Type *Ty = Init->getType(); llvm::GlobalVariable *GV = new llvm::GlobalVariable(CGM.getModule(), Ty, false, llvm::GlobalValue::PrivateLinkage, Init, Name); if (!Section.empty()) GV->setSection(Section); if (Align) GV->setAlignment(Align); if (AddToUsed) CGM.addCompilerUsedGlobal(GV); return GV; } llvm::Function *CGObjCMac::ModuleInitFunction() { // Abuse this interface function as a place to finalize. 
FinishModule(); return nullptr; } llvm::Constant *CGObjCMac::GetPropertyGetFunction() { return ObjCTypes.getGetPropertyFn(); } llvm::Constant *CGObjCMac::GetPropertySetFunction() { return ObjCTypes.getSetPropertyFn(); } llvm::Constant *CGObjCMac::GetOptimizedPropertySetFunction(bool atomic, bool copy) { return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy); } llvm::Constant *CGObjCMac::GetGetStructFunction() { return ObjCTypes.getCopyStructFn(); } llvm::Constant *CGObjCMac::GetSetStructFunction() { return ObjCTypes.getCopyStructFn(); } llvm::Constant *CGObjCMac::GetCppAtomicObjectGetFunction() { return ObjCTypes.getCppAtomicObjectFunction(); } llvm::Constant *CGObjCMac::GetCppAtomicObjectSetFunction() { return ObjCTypes.getCppAtomicObjectFunction(); } llvm::Constant *CGObjCMac::EnumerationMutationFunction() { return ObjCTypes.getEnumerationMutationFn(); } void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) { return EmitTryOrSynchronizedStmt(CGF, S); } void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF, const ObjCAtSynchronizedStmt &S) { return EmitTryOrSynchronizedStmt(CGF, S); } namespace { struct PerformFragileFinally : EHScopeStack::Cleanup { const Stmt &S; llvm::Value *SyncArgSlot; llvm::Value *CallTryExitVar; llvm::Value *ExceptionData; ObjCTypesHelper &ObjCTypes; PerformFragileFinally(const Stmt *S, llvm::Value *SyncArgSlot, llvm::Value *CallTryExitVar, llvm::Value *ExceptionData, ObjCTypesHelper *ObjCTypes) : S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar), ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {} void Emit(CodeGenFunction &CGF, Flags flags) override { // Check whether we need to call objc_exception_try_exit. // In optimized code, this branch will always be folded. llvm::BasicBlock *FinallyCallExit = CGF.createBasicBlock("finally.call_exit"); llvm::BasicBlock *FinallyNoCallExit = CGF.createBasicBlock("finally.no_call_exit"); CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar), FinallyCallExit, FinallyNoCallExit); CGF.EmitBlock(FinallyCallExit); CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData); CGF.EmitBlock(FinallyNoCallExit); if (isa<ObjCAtTryStmt>(S)) { if (const ObjCAtFinallyStmt* FinallyStmt = cast<ObjCAtTryStmt>(S).getFinallyStmt()) { // Don't try to do the @finally if this is an EH cleanup. if (flags.isForEHCleanup()) return; // Save the current cleanup destination in case there's // control flow inside the finally statement. llvm::Value *CurCleanupDest = CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot()); CGF.EmitStmt(FinallyStmt->getFinallyBody()); if (CGF.HaveInsertPoint()) { CGF.Builder.CreateStore(CurCleanupDest, CGF.getNormalCleanupDestSlot()); } else { // Currently, the end of the cleanup must always exist. CGF.EnsureInsertPoint(); } } } else { // Emit objc_sync_exit(expr); as finally's sole statement for // @synchronized. 
llvm::Value *SyncArg = CGF.Builder.CreateLoad(SyncArgSlot); CGF.EmitNounwindRuntimeCall(ObjCTypes.getSyncExitFn(), SyncArg); } } }; class FragileHazards { CodeGenFunction &CGF; SmallVector<llvm::Value*, 20> Locals; llvm::DenseSet<llvm::BasicBlock*> BlocksBeforeTry; llvm::InlineAsm *ReadHazard; llvm::InlineAsm *WriteHazard; llvm::FunctionType *GetAsmFnType(); void collectLocals(); void emitReadHazard(CGBuilderTy &Builder); public: FragileHazards(CodeGenFunction &CGF); void emitWriteHazard(); void emitHazardsInNewBlocks(); }; } /// Create the fragile-ABI read and write hazards based on the current /// state of the function, which is presumed to be immediately prior /// to a @try block. These hazards are used to maintain correct /// semantics in the face of optimization and the fragile ABI's /// cavalier use of setjmp/longjmp. FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) { collectLocals(); if (Locals.empty()) return; // Collect all the blocks in the function. for (llvm::Function::iterator I = CGF.CurFn->begin(), E = CGF.CurFn->end(); I != E; ++I) BlocksBeforeTry.insert(&*I); llvm::FunctionType *AsmFnTy = GetAsmFnType(); // Create a read hazard for the allocas. This inhibits dead-store // optimizations and forces the values to memory. This hazard is // inserted before any 'throwing' calls in the protected scope to // reflect the possibility that the variables might be read from the // catch block if the call throws. { std::string Constraint; for (unsigned I = 0, E = Locals.size(); I != E; ++I) { if (I) Constraint += ','; Constraint += "*m"; } ReadHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false); } // Create a write hazard for the allocas. This inhibits folding // loads across the hazard. This hazard is inserted at the // beginning of the catch path to reflect the possibility that the // variables might have been written within the protected scope. { std::string Constraint; for (unsigned I = 0, E = Locals.size(); I != E; ++I) { if (I) Constraint += ','; Constraint += "=*m"; } WriteHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false); } } /// Emit a write hazard at the current location. void FragileHazards::emitWriteHazard() { if (Locals.empty()) return; CGF.EmitNounwindRuntimeCall(WriteHazard, Locals); } void FragileHazards::emitReadHazard(CGBuilderTy &Builder) { assert(!Locals.empty()); llvm::CallInst *call = Builder.CreateCall(ReadHazard, Locals); call->setDoesNotThrow(); call->setCallingConv(CGF.getRuntimeCC()); } /// Emit read hazards in all the protected blocks, i.e. all the blocks /// which have been inserted since the beginning of the try. void FragileHazards::emitHazardsInNewBlocks() { if (Locals.empty()) return; CGBuilderTy Builder(CGF.getLLVMContext()); // Iterate through all blocks, skipping those prior to the try. for (llvm::Function::iterator FI = CGF.CurFn->begin(), FE = CGF.CurFn->end(); FI != FE; ++FI) { llvm::BasicBlock &BB = *FI; if (BlocksBeforeTry.count(&BB)) continue; // Walk through all the calls in the block. for (llvm::BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE; ++BI) { llvm::Instruction &I = *BI; // Ignore instructions that aren't non-intrinsic calls. // These are the only calls that can possibly call longjmp. if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I)) continue; if (isa<llvm::IntrinsicInst>(I)) continue; // Ignore call sites marked nounwind. This may be questionable, // since 'nounwind' doesn't necessarily mean 'does not call longjmp'. 
llvm::CallSite CS(&I); if (CS.doesNotThrow()) continue; // Insert a read hazard before the call. This will ensure that // any writes to the locals are performed before making the // call. If the call throws, then this is sufficient to // guarantee correctness as long as it doesn't also write to any // locals. Builder.SetInsertPoint(&BB, BI); emitReadHazard(Builder); } } } static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) { if (V) S.insert(V); } void FragileHazards::collectLocals() { // Compute a set of allocas to ignore. llvm::DenseSet<llvm::Value*> AllocasToIgnore; addIfPresent(AllocasToIgnore, CGF.ReturnValue); addIfPresent(AllocasToIgnore, CGF.NormalCleanupDest); // Collect all the allocas currently in the function. This is // probably way too aggressive. llvm::BasicBlock &Entry = CGF.CurFn->getEntryBlock(); for (llvm::BasicBlock::iterator I = Entry.begin(), E = Entry.end(); I != E; ++I) if (isa<llvm::AllocaInst>(*I) && !AllocasToIgnore.count(&*I)) Locals.push_back(&*I); } llvm::FunctionType *FragileHazards::GetAsmFnType() { SmallVector<llvm::Type *, 16> tys(Locals.size()); for (unsigned i = 0, e = Locals.size(); i != e; ++i) tys[i] = Locals[i]->getType(); return llvm::FunctionType::get(CGF.VoidTy, tys, false); } /* Objective-C setjmp-longjmp (sjlj) Exception Handling -- A catch buffer is a setjmp buffer plus: - a pointer to the exception that was caught - a pointer to the previous exception data buffer - two pointers of reserved storage Therefore catch buffers form a stack, with a pointer to the top of the stack kept in thread-local storage. objc_exception_try_enter pushes a catch buffer onto the EH stack. objc_exception_try_exit pops the given catch buffer, which is required to be the top of the EH stack. objc_exception_throw pops the top of the EH stack, writes the thrown exception into the appropriate field, and longjmps to the setjmp buffer. It crashes the process (with a printf and an abort()) if there are no catch buffers on the stack. objc_exception_extract just reads the exception pointer out of the catch buffer. There's no reason an implementation couldn't use a light-weight setjmp here --- something like __builtin_setjmp, but API-compatible with the heavyweight setjmp. This will be more important if we ever want to implement correct ObjC/C++ exception interactions for the fragile ABI. Note that for this use of setjmp/longjmp to be correct, we may need to mark some local variables volatile: if a non-volatile local variable is modified between the setjmp and the longjmp, it has indeterminate value. For the purposes of LLVM IR, it may be sufficient to make loads and stores within the @try (to variables declared outside the @try) volatile. This is necessary for optimized correctness, but is not currently being done; this is being tracked as rdar://problem/8160285 The basic framework for a @try-catch-finally is as follows: { objc_exception_data d; id _rethrow = null; bool _call_try_exit = true; objc_exception_try_enter(&d); if (!setjmp(d.jmp_buf)) { ... try body ... } else { // exception path id _caught = objc_exception_extract(&d); // enter new try scope for handlers if (!setjmp(d.jmp_buf)) { ... match exception and execute catch blocks ... // fell off end, rethrow. _rethrow = _caught; ... jump-through-finally to finally_rethrow ... } else { // exception in catch block _rethrow = objc_exception_extract(&d); _call_try_exit = false; ... jump-through-finally to finally_rethrow ... } } ... jump-through-finally to finally_end ... 
finally: if (_call_try_exit) objc_exception_try_exit(&d); ... finally block .... ... dispatch to finally destination ... finally_rethrow: objc_exception_throw(_rethrow); finally_end: } This framework differs slightly from the one gcc uses, in that gcc uses _rethrow to determine if objc_exception_try_exit should be called and if the object should be rethrown. This breaks in the face of throwing nil and introduces unnecessary branches. We specialize this framework for a few particular circumstances: - If there are no catch blocks, then we avoid emitting the second exception handling context. - If there is a catch-all catch block (i.e. @catch(...) or @catch(id e)) we avoid emitting the code to rethrow an uncaught exception. - FIXME: If there is no @finally block we can do a few more simplifications. Rethrows and Jumps-Through-Finally -- '@throw;' is supported by pushing the currently-caught exception onto ObjCEHStack while the @catch blocks are emitted. Branches through the @finally block are handled with an ordinary normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC exceptions are not compatible with C++ exceptions, and this is hardly the only place where this will go wrong. @synchronized(expr) { stmt; } is emitted as if it were: id synch_value = expr; objc_sync_enter(synch_value); @try { stmt; } @finally { objc_sync_exit(synch_value); } */ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S) { bool isTry = isa<ObjCAtTryStmt>(S); // A destination for the fall-through edges of the catch handlers to // jump to. CodeGenFunction::JumpDest FinallyEnd = CGF.getJumpDestInCurrentScope("finally.end"); // A destination for the rethrow edge of the catch handlers to jump // to. CodeGenFunction::JumpDest FinallyRethrow = CGF.getJumpDestInCurrentScope("finally.rethrow"); // For @synchronized, call objc_sync_enter(sync.expr). The // evaluation of the expression must occur before we enter the // @synchronized. We can't avoid a temp here because we need the // value to be preserved. If the backend ever does liveness // correctly after setjmp, this will be unnecessary. llvm::Value *SyncArgSlot = nullptr; if (!isTry) { llvm::Value *SyncArg = CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr()); SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy); CGF.EmitNounwindRuntimeCall(ObjCTypes.getSyncEnterFn(), SyncArg); SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(), "sync.arg"); CGF.Builder.CreateStore(SyncArg, SyncArgSlot); } // Allocate memory for the setjmp buffer. This needs to be kept // live throughout the try and catch blocks. llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy, "exceptiondata.ptr"); // Create the fragile hazards. Note that this will not capture any // of the allocas required for exception processing, but will // capture the current basic block (which extends all the way to the // setjmp call) as "before the @try". FragileHazards Hazards(CGF); // Create a flag indicating whether the cleanup needs to call // objc_exception_try_exit. This is true except when // - no catches match and we're branching through the cleanup // just to rethrow the exception, or // - a catch matched and we're falling out of the catch handler. // The setjmp-safety rule here is that we should always store to this // variable in a place that dominates the branch through the cleanup // without passing through any setjmps. 
llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "_call_try_exit"); // A slot containing the exception to rethrow. Only needed when we // have both a @catch and a @finally. llvm::Value *PropagatingExnVar = nullptr; // Push a normal cleanup to leave the try scope. CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalAndEHCleanup, &S, SyncArgSlot, CallTryExitVar, ExceptionData, &ObjCTypes); // Enter a try block: // - Call objc_exception_try_enter to push ExceptionData on top of // the EH stack. CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData); // - Call setjmp on the exception data buffer. llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0); llvm::Value *GEPIndexes[] = { Zero, Zero, Zero }; llvm::Value *SetJmpBuffer = CGF.Builder.CreateGEP( ObjCTypes.ExceptionDataTy, ExceptionData, GEPIndexes, "setjmp_buffer"); llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall( ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result"); SetJmpResult->setCanReturnTwice(); // If setjmp returned 0, enter the protected block; otherwise, // branch to the handler. llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try"); llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler"); llvm::Value *DidCatch = CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception"); CGF.Builder.CreateCondBr(DidCatch, TryHandler, TryBlock); // Emit the protected block. CGF.EmitBlock(TryBlock); CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar); CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody() : cast<ObjCAtSynchronizedStmt>(S).getSynchBody()); CGBuilderTy::InsertPoint TryFallthroughIP = CGF.Builder.saveAndClearIP(); // Emit the exception handler block. CGF.EmitBlock(TryHandler); // Don't optimize loads of the in-scope locals across this point. Hazards.emitWriteHazard(); // For a @synchronized (or a @try with no catches), just branch // through the cleanup to the rethrow block. if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) { // Tell the cleanup not to re-pop the exit. CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar); CGF.EmitBranchThroughCleanup(FinallyRethrow); // Otherwise, we have to match against the caught exceptions. } else { // Retrieve the exception object. We may emit multiple blocks but // nothing can cross this so the value is already in SSA form. llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(), ExceptionData, "caught"); // Push the exception to rethrow onto the EH value stack for the // benefit of any @throws in the handlers. CGF.ObjCEHValueStack.push_back(Caught); const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S); bool HasFinally = (AtTryStmt->getFinallyStmt() != nullptr); llvm::BasicBlock *CatchBlock = nullptr; llvm::BasicBlock *CatchHandler = nullptr; if (HasFinally) { // Save the currently-propagating exception before // objc_exception_try_enter clears the exception slot. PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(), "propagating_exception"); CGF.Builder.CreateStore(Caught, PropagatingExnVar); // Enter a new exception try block (in case a @catch block // throws an exception). 
CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData); llvm::CallInst *SetJmpResult = CGF.EmitNounwindRuntimeCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp.result"); SetJmpResult->setCanReturnTwice(); llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception"); CatchBlock = CGF.createBasicBlock("catch"); CatchHandler = CGF.createBasicBlock("catch_for_catch"); CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock); CGF.EmitBlock(CatchBlock); } CGF.Builder.CreateStore(CGF.Builder.getInt1(HasFinally), CallTryExitVar); // Handle catch list. As a special case we check if everything is // matched and avoid generating code for falling off the end if // so. bool AllMatched = false; for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) { const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I); const VarDecl *CatchParam = CatchStmt->getCatchParamDecl(); const ObjCObjectPointerType *OPT = nullptr; // catch(...) always matches. if (!CatchParam) { AllMatched = true; } else { OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>(); // catch(id e) always matches under this ABI, since only // ObjC exceptions end up here in the first place. // FIXME: For the time being we also match id<X>; this should // be rejected by Sema instead. if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType())) AllMatched = true; } // If this is a catch-all, we don't need to test anything. if (AllMatched) { CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF); if (CatchParam) { CGF.EmitAutoVarDecl(*CatchParam); assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?"); // These types work out because ConvertType(id) == i8*. CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam)); } CGF.EmitStmt(CatchStmt->getCatchBody()); // The scope of the catch variable ends right here. CatchVarCleanups.ForceCleanup(); CGF.EmitBranchThroughCleanup(FinallyEnd); break; } assert(OPT && "Unexpected non-object pointer type in @catch"); const ObjCObjectType *ObjTy = OPT->getObjectType(); // FIXME: @catch (Class c) ? ObjCInterfaceDecl *IDecl = ObjTy->getInterface(); assert(IDecl && "Catch parameter must have Objective-C type!"); // Check if the @catch block matches the exception object. llvm::Value *Class = EmitClassRef(CGF, IDecl); llvm::Value *matchArgs[] = { Class, Caught }; llvm::CallInst *Match = CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionMatchFn(), matchArgs, "match"); llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match"); llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next"); CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"), MatchedBlock, NextCatchBlock); // Emit the @catch block. CGF.EmitBlock(MatchedBlock); // Collect any cleanups for the catch variable. The scope lasts until // the end of the catch body. CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF); CGF.EmitAutoVarDecl(*CatchParam); assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?"); // Initialize the catch variable. llvm::Value *Tmp = CGF.Builder.CreateBitCast(Caught, CGF.ConvertType(CatchParam->getType())); CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam)); CGF.EmitStmt(CatchStmt->getCatchBody()); // We're done with the catch variable. CatchVarCleanups.ForceCleanup(); CGF.EmitBranchThroughCleanup(FinallyEnd); CGF.EmitBlock(NextCatchBlock); } CGF.ObjCEHValueStack.pop_back(); // If nothing wanted anything to do with the caught exception, // kill the extract call. 
if (Caught->use_empty()) Caught->eraseFromParent(); if (!AllMatched) CGF.EmitBranchThroughCleanup(FinallyRethrow); if (HasFinally) { // Emit the exception handler for the @catch blocks. CGF.EmitBlock(CatchHandler); // In theory we might now need a write hazard, but actually it's // unnecessary because there's no local-accessing code between // the try's write hazard and here. //Hazards.emitWriteHazard(); // Extract the new exception and save it to the // propagating-exception slot. assert(PropagatingExnVar); llvm::CallInst *NewCaught = CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(), ExceptionData, "caught"); CGF.Builder.CreateStore(NewCaught, PropagatingExnVar); // Don't pop the catch handler; the throw already did. CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar); CGF.EmitBranchThroughCleanup(FinallyRethrow); } } // Insert read hazards as required in the new blocks. Hazards.emitHazardsInNewBlocks(); // Pop the cleanup. CGF.Builder.restoreIP(TryFallthroughIP); if (CGF.HaveInsertPoint()) CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar); CGF.PopCleanupBlock(); CGF.EmitBlock(FinallyEnd.getBlock(), true); // Emit the rethrow block. CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP(); CGF.EmitBlock(FinallyRethrow.getBlock(), true); if (CGF.HaveInsertPoint()) { // If we have a propagating-exception variable, check it. llvm::Value *PropagatingExn; if (PropagatingExnVar) { PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar); // Otherwise, just look in the buffer for the exception to throw. } else { llvm::CallInst *Caught = CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionExtractFn(), ExceptionData); PropagatingExn = Caught; } CGF.EmitNounwindRuntimeCall(ObjCTypes.getExceptionThrowFn(), PropagatingExn); CGF.Builder.CreateUnreachable(); } CGF.Builder.restoreIP(SavedIP); } void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S, bool ClearInsertionPoint) { llvm::Value *ExceptionAsObject; if (const Expr *ThrowExpr = S.getThrowExpr()) { llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr); ExceptionAsObject = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy); } else { assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) && "Unexpected rethrow outside @catch block."); ExceptionAsObject = CGF.ObjCEHValueStack.back(); } CGF.EmitRuntimeCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject) ->setDoesNotReturn(); CGF.Builder.CreateUnreachable(); // Clear the insertion point to indicate we are in unreachable code. if (ClearInsertionPoint) CGF.Builder.ClearInsertionPoint(); } /// EmitObjCWeakRead - Code gen for loading value of a __weak /// object: objc_read_weak (id *src) /// llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, llvm::Value *AddrWeakObj) { llvm::Type* DestTy = cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType(); AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy); llvm::Value *read_weak = CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(), AddrWeakObj, "weakread"); read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy); return read_weak; } /// EmitObjCWeakAssign - Code gen for assigning to a __weak object. 
/// objc_assign_weak (id src, id *dst) /// void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst) { llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst }; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(), args, "weakassign"); return; } /// EmitObjCGlobalAssign - Code gen for assigning to a __strong object. /// objc_assign_global (id src, id *dst) /// void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst, bool threadlocal) { llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst }; if (!threadlocal) CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(), args, "globalassign"); else CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignThreadLocalFn(), args, "threadlocalassign"); return; } /// EmitObjCIvarAssign - Code gen for assigning to a __strong object. /// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset) /// void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst, llvm::Value *ivarOffset) { assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL"); llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst, ivarOffset }; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args); return; } /// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object. /// objc_assign_strongCast (id src, id *dst) /// void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst) { llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4) ? 
CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst }; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(), args, "weakassign"); return; } void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, llvm::Value *size) { SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy); DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy); llvm::Value *args[] = { DestPtr, SrcPtr, size }; CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args); } /// EmitObjCValueForIvar - Code Gen for ivar reference. /// LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) { const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface(); return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers, EmitIvarOffset(CGF, ID, Ivar)); } llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) { uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar); return llvm::ConstantInt::get( CGM.getTypes().ConvertType(CGM.getContext().LongTy), Offset); } /* *** Private Interface *** */ /// EmitImageInfo - Emit the image info marker used to encode some module /// level information. /// /// See: <rdr://4810609&4810587&4810587> /// struct IMAGE_INFO { /// unsigned version; /// unsigned flags; /// }; enum ImageInfoFlags { eImageInfo_FixAndContinue = (1 << 0), // This flag is no longer set by clang. eImageInfo_GarbageCollected = (1 << 1), eImageInfo_GCOnly = (1 << 2), eImageInfo_OptimizedByDyld = (1 << 3), // This flag is set by the dyld shared cache. // A flag indicating that the module has no instances of a @synthesize of a // superclass variable. <rdar://problem/6803242> eImageInfo_CorrectedSynthesize = (1 << 4), // This flag is no longer set by clang. eImageInfo_ImageIsSimulated = (1 << 5) }; void CGObjCCommonMac::EmitImageInfo() { unsigned version = 0; // Version is unused? const char *Section = (ObjCABI == 1) ? "__OBJC, __image_info,regular" : "__DATA, __objc_imageinfo, regular, no_dead_strip"; // Generate module-level named metadata to convey this information to the // linker and code-gen. llvm::Module &Mod = CGM.getModule(); // Add the ObjC ABI version to the module flags. Mod.addModuleFlag(llvm::Module::Error, "Objective-C Version", ObjCABI); Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Version", version); Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section", llvm::MDString::get(VMContext,Section)); if (CGM.getLangOpts().getGC() == LangOptions::NonGC) { // Non-GC overrides those files which specify GC. Mod.addModuleFlag(llvm::Module::Override, "Objective-C Garbage Collection", (uint32_t)0); } else { // Add the ObjC garbage collection value. Mod.addModuleFlag(llvm::Module::Error, "Objective-C Garbage Collection", eImageInfo_GarbageCollected); if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) { // Add the ObjC GC Only value. Mod.addModuleFlag(llvm::Module::Error, "Objective-C GC Only", eImageInfo_GCOnly); // Require that GC be specified and set to eImageInfo_GarbageCollected. 
llvm::Metadata *Ops[2] = { llvm::MDString::get(VMContext, "Objective-C Garbage Collection"), llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( llvm::Type::getInt32Ty(VMContext), eImageInfo_GarbageCollected))}; Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only", llvm::MDNode::get(VMContext, Ops)); } } // Indicate whether we're compiling this to run on a simulator. const llvm::Triple &Triple = CGM.getTarget().getTriple(); if (Triple.isiOS() && (Triple.getArch() == llvm::Triple::x86 || Triple.getArch() == llvm::Triple::x86_64)) Mod.addModuleFlag(llvm::Module::Error, "Objective-C Is Simulated", eImageInfo_ImageIsSimulated); } // struct objc_module { // unsigned long version; // unsigned long size; // const char *name; // Symtab symtab; // }; // FIXME: Get from somewhere static const int ModuleVersion = 7; void CGObjCMac::EmitModuleInfo() { uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ModuleTy); llvm::Constant *Values[] = { llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion), llvm::ConstantInt::get(ObjCTypes.LongTy, Size), // This used to be the filename, now it is unused. <rdr://4327263> GetClassName(StringRef("")), EmitModuleSymbols() }; CreateMetadataVar("OBJC_MODULES", llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values), "__OBJC,__module_info,regular,no_dead_strip", 4, true); } llvm::Constant *CGObjCMac::EmitModuleSymbols() { unsigned NumClasses = DefinedClasses.size(); unsigned NumCategories = DefinedCategories.size(); // Return null if no symbols were defined. if (!NumClasses && !NumCategories) return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy); llvm::Constant *Values[5]; Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0); Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy); Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses); Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories); // The runtime expects exactly the list of defined classes followed // by the list of defined categories, in a single array. SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories); for (unsigned i=0; i<NumClasses; i++) { const ObjCInterfaceDecl *ID = ImplementedClasses[i]; assert(ID); if (ObjCImplementationDecl *IMP = ID->getImplementation()) // We are implementing a weak imported interface. 
// Give it external linkage. if (ID->isWeakImported() && !IMP->isWeakImported()) DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage); Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i], ObjCTypes.Int8PtrTy); } for (unsigned i=0; i<NumCategories; i++) Symbols[NumClasses + i] = llvm::ConstantExpr::getBitCast(DefinedCategories[i], ObjCTypes.Int8PtrTy); Values[4] = llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy, Symbols.size()), Symbols); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); llvm::GlobalVariable *GV = CreateMetadataVar( "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip", 4, true); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy); } llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF, IdentifierInfo *II) { LazySymbols.insert(II); llvm::GlobalVariable *&Entry = ClassReferences[II]; if (!Entry) { llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(GetClassName(II->getName()), ObjCTypes.ClassPtrTy); Entry = CreateMetadataVar( "OBJC_CLASS_REFERENCES_", Casted, "__OBJC,__cls_refs,literal_pointers,no_dead_strip", 4, true); } return CGF.Builder.CreateLoad(Entry); } llvm::Value *CGObjCMac::EmitClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID) { return EmitClassRefFromId(CGF, ID->getIdentifier()); } llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) { IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool"); return EmitClassRefFromId(CGF, II); } llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel, bool lvalue) { llvm::GlobalVariable *&Entry = SelectorReferences[Sel]; if (!Entry) { llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel), ObjCTypes.SelectorPtrTy); Entry = CreateMetadataVar( "OBJC_SELECTOR_REFERENCES_", Casted, "__OBJC,__message_refs,literal_pointers,no_dead_strip", 4, true); Entry->setExternallyInitialized(true); } if (lvalue) return Entry; return CGF.Builder.CreateLoad(Entry); } llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) { llvm::GlobalVariable *&Entry = ClassNames[RuntimeName]; if (!Entry) Entry = CreateMetadataVar( "OBJC_CLASS_NAME_", llvm::ConstantDataArray::getString(VMContext, RuntimeName), ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals" : "__TEXT,__cstring,cstring_literals"), 1, true); return getConstantGEP(VMContext, Entry, 0, 0); } llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) { llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator I = MethodDefinitions.find(MD); if (I != MethodDefinitions.end()) return I->second; return nullptr; } /// GetIvarLayoutName - Returns a unique constant for the given /// ivar layout bitmap. llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident, const ObjCCommonTypesHelper &ObjCTypes) { return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy); } void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT, unsigned int BytePos, bool ForStrongLayout, bool &HasUnion) { const RecordDecl *RD = RT->getDecl(); // FIXME - Use iterator.
SmallVector<const FieldDecl*, 16> Fields(RD->fields()); llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0)); const llvm::StructLayout *RecLayout = CGM.getDataLayout().getStructLayout(cast<llvm::StructType>(Ty)); BuildAggrIvarLayout(nullptr, RecLayout, RD, Fields, BytePos, ForStrongLayout, HasUnion); } void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI, const llvm::StructLayout *Layout, const RecordDecl *RD, ArrayRef<const FieldDecl*> RecFields, unsigned int BytePos, bool ForStrongLayout, bool &HasUnion) { bool IsUnion = (RD && RD->isUnion()); uint64_t MaxUnionIvarSize = 0; uint64_t MaxSkippedUnionIvarSize = 0; const FieldDecl *MaxField = nullptr; const FieldDecl *MaxSkippedField = nullptr; const FieldDecl *LastFieldBitfieldOrUnnamed = nullptr; uint64_t MaxFieldOffset = 0; uint64_t MaxSkippedFieldOffset = 0; uint64_t LastBitfieldOrUnnamedOffset = 0; uint64_t FirstFieldDelta = 0; if (RecFields.empty()) return; unsigned WordSizeInBits = CGM.getTarget().getPointerWidth(0); unsigned ByteSizeInBits = CGM.getTarget().getCharWidth(); if (!RD && CGM.getLangOpts().ObjCAutoRefCount) { const FieldDecl *FirstField = RecFields[0]; FirstFieldDelta = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField)); } for (unsigned i = 0, e = RecFields.size(); i != e; ++i) { const FieldDecl *Field = RecFields[i]; uint64_t FieldOffset; if (RD) { // Note that 'i' here is actually the field index inside RD of Field, // although this dependency is hidden. const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); FieldOffset = (RL.getFieldOffset(i) / ByteSizeInBits) - FirstFieldDelta; } else FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)) - FirstFieldDelta; // Skip over unnamed or bitfields if (!Field->getIdentifier() || Field->isBitField()) { LastFieldBitfieldOrUnnamed = Field; LastBitfieldOrUnnamedOffset = FieldOffset; continue; } LastFieldBitfieldOrUnnamed = nullptr; QualType FQT = Field->getType(); if (FQT->isRecordType() || FQT->isUnionType()) { if (FQT->isUnionType()) HasUnion = true; BuildAggrIvarRecordLayout(FQT->getAs<RecordType>(), BytePos + FieldOffset, ForStrongLayout, HasUnion); continue; } if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) { const ConstantArrayType *CArray = dyn_cast_or_null<ConstantArrayType>(Array); uint64_t ElCount = CArray->getSize().getZExtValue(); assert(CArray && "only array with known element size is supported"); FQT = CArray->getElementType(); while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) { const ConstantArrayType *CArray = dyn_cast_or_null<ConstantArrayType>(Array); ElCount *= CArray->getSize().getZExtValue(); FQT = CArray->getElementType(); } if (FQT->isRecordType() && ElCount) { int OldIndex = IvarsInfo.size() - 1; int OldSkIndex = SkipIvars.size() -1; const RecordType *RT = FQT->getAs<RecordType>(); BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset, ForStrongLayout, HasUnion); // Replicate layout information for each array element. Note that // one element is already done. 
uint64_t ElIx = 1; for (int FirstIndex = IvarsInfo.size() - 1, FirstSkIndex = SkipIvars.size() - 1; ElIx < ElCount; ElIx++) { uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits; for (int i = OldIndex+1; i <= FirstIndex; ++i) IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx, IvarsInfo[i].ivar_size)); for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i) SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx, SkipIvars[i].ivar_size)); } continue; } } // At this point, we are done with records/unions and arrays thereof. // For other arrays we are down to the element type. Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT); unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType()); if ((ForStrongLayout && GCAttr == Qualifiers::Strong) || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) { if (IsUnion) { uint64_t UnionIvarSize = FieldSize / WordSizeInBits; if (UnionIvarSize > MaxUnionIvarSize) { MaxUnionIvarSize = UnionIvarSize; MaxField = Field; MaxFieldOffset = FieldOffset; } } else { IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset, FieldSize / WordSizeInBits)); } } else if ((ForStrongLayout && (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)) || (!ForStrongLayout && GCAttr != Qualifiers::Weak)) { if (IsUnion) { // FIXME: Why the asymmetry? We divide by word size in bits on the // other side. uint64_t UnionIvarSize = FieldSize / ByteSizeInBits; if (UnionIvarSize > MaxSkippedUnionIvarSize) { MaxSkippedUnionIvarSize = UnionIvarSize; MaxSkippedField = Field; MaxSkippedFieldOffset = FieldOffset; } } else { // FIXME: Why the asymmetry? We divide by byte size in bits here. SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset, FieldSize / ByteSizeInBits)); } } } if (LastFieldBitfieldOrUnnamed) { if (LastFieldBitfieldOrUnnamed->isBitField()) { // The last field was a bitfield; update the skip info. uint64_t BitFieldSize = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext()); GC_IVAR skivar; skivar.ivar_bytepos = BytePos + LastBitfieldOrUnnamedOffset; skivar.ivar_size = (BitFieldSize / ByteSizeInBits) + ((BitFieldSize % ByteSizeInBits) != 0); SkipIvars.push_back(skivar); } else { assert(!LastFieldBitfieldOrUnnamed->getIdentifier() && "Expected unnamed"); // The last field was unnamed; update the skip info. unsigned FieldSize = CGM.getContext().getTypeSize(LastFieldBitfieldOrUnnamed->getType()); SkipIvars.push_back(GC_IVAR(BytePos + LastBitfieldOrUnnamedOffset, FieldSize / ByteSizeInBits)); } } if (MaxField) IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset, MaxUnionIvarSize)); if (MaxSkippedField) SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset, MaxSkippedUnionIvarSize)); } /// BuildIvarLayoutBitmap - This routine is the workhorse for doing all /// the computations and returning the layout bitmap (for ivars or blocks) in /// the given BitMap string argument. The routine reads /// two containers, IvarsInfo and SkipIvars, which are assumed to have been /// filled in already by the caller.
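/// A worked example (illustrative skip/scan counts, not taken from a real
/// class): skipping 18 words and then scanning 3 words is emitted as the
/// bytes 0xf0 0x33 0x00 -- one full "skip 15" nibble pair, then a byte
/// packing "skip 3" in the left nibble and "scan 3" in the right, then the
/// terminating zero byte.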
llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) { unsigned int WordsToScan, WordsToSkip; llvm::Type *PtrTy = CGM.Int8PtrTy; // Build the string of skip/scan nibbles SmallVector<SKIP_SCAN, 32> SkipScanIvars; unsigned int WordSize = CGM.getTypes().getDataLayout().getTypeAllocSize(PtrTy); if (IvarsInfo[0].ivar_bytepos == 0) { WordsToSkip = 0; WordsToScan = IvarsInfo[0].ivar_size; } else { WordsToSkip = IvarsInfo[0].ivar_bytepos/WordSize; WordsToScan = IvarsInfo[0].ivar_size; } for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) { unsigned int TailPrevGCObjC = IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize; if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) { // consecutive 'scanned' object pointers. WordsToScan += IvarsInfo[i].ivar_size; } else { // Skip over 'gc'able object pointer which lay over each other. if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos) continue; // Must skip over 1 or more words. We save current skip/scan values // and start a new pair. SKIP_SCAN SkScan; SkScan.skip = WordsToSkip; SkScan.scan = WordsToScan; SkipScanIvars.push_back(SkScan); // Skip the hole. SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize; SkScan.scan = 0; SkipScanIvars.push_back(SkScan); WordsToSkip = 0; WordsToScan = IvarsInfo[i].ivar_size; } } if (WordsToScan > 0) { SKIP_SCAN SkScan; SkScan.skip = WordsToSkip; SkScan.scan = WordsToScan; SkipScanIvars.push_back(SkScan); } if (!SkipIvars.empty()) { unsigned int LastIndex = SkipIvars.size()-1; int LastByteSkipped = SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size; LastIndex = IvarsInfo.size()-1; int LastByteScanned = IvarsInfo[LastIndex].ivar_bytepos + IvarsInfo[LastIndex].ivar_size * WordSize; // Compute number of bytes to skip at the tail end of the last ivar scanned. if (LastByteSkipped > LastByteScanned) { unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize; SKIP_SCAN SkScan; SkScan.skip = TotalWords - (LastByteScanned/WordSize); SkScan.scan = 0; SkipScanIvars.push_back(SkScan); } } // Mini optimization of nibbles such that an 0xM0 followed by 0x0N is produced // as 0xMN. int SkipScan = SkipScanIvars.size()-1; for (int i = 0; i <= SkipScan; i++) { if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0 && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) { // 0xM0 followed by 0x0N detected. SkipScanIvars[i].scan = SkipScanIvars[i+1].scan; for (int j = i+1; j < SkipScan; j++) SkipScanIvars[j] = SkipScanIvars[j+1]; --SkipScan; } } // Generate the string. for (int i = 0; i <= SkipScan; i++) { unsigned char byte; unsigned int skip_small = SkipScanIvars[i].skip % 0xf; unsigned int scan_small = SkipScanIvars[i].scan % 0xf; unsigned int skip_big = SkipScanIvars[i].skip / 0xf; unsigned int scan_big = SkipScanIvars[i].scan / 0xf; // first skip big. for (unsigned int ix = 0; ix < skip_big; ix++) BitMap += (unsigned char)(0xf0); // next (skip small, scan) if (skip_small) { byte = skip_small << 4; if (scan_big > 0) { byte |= 0xf; --scan_big; } else if (scan_small) { byte |= scan_small; scan_small = 0; } BitMap += byte; } // next scan big for (unsigned int ix = 0; ix < scan_big; ix++) BitMap += (unsigned char)(0x0f); // last scan small if (scan_small) { byte = scan_small; BitMap += byte; } } // null terminate string. unsigned char zero = 0; BitMap += zero; llvm::GlobalVariable *Entry = CreateMetadataVar( "OBJC_CLASS_NAME_", llvm::ConstantDataArray::getString(VMContext, BitMap, false), ((ObjCABI == 2) ? 
"__TEXT,__objc_classname,cstring_literals" : "__TEXT,__cstring,cstring_literals"), 1, true); return getConstantGEP(VMContext, Entry, 0, 0); } /// BuildIvarLayout - Builds ivar layout bitmap for the class /// implementation for the __strong or __weak case. /// The layout map displays which words in ivar list must be skipped /// and which must be scanned by GC (see below). String is built of bytes. /// Each byte is divided up in two nibbles (4-bit each). Left nibble is count /// of words to skip and right nibble is count of words to scan. So, each /// nibble represents up to 15 workds to skip or scan. Skipping the rest is /// represented by a 0x00 byte which also ends the string. /// 1. when ForStrongLayout is true, following ivars are scanned: /// - id, Class /// - object * /// - __strong anything /// /// 2. When ForStrongLayout is false, following ivars are scanned: /// - __weak anything /// llvm::Constant *CGObjCCommonMac::BuildIvarLayout( const ObjCImplementationDecl *OMD, bool ForStrongLayout) { bool hasUnion = false; llvm::Type *PtrTy = CGM.Int8PtrTy; if (CGM.getLangOpts().getGC() == LangOptions::NonGC && !CGM.getLangOpts().ObjCAutoRefCount) return llvm::Constant::getNullValue(PtrTy); const ObjCInterfaceDecl *OI = OMD->getClassInterface(); SmallVector<const FieldDecl*, 32> RecFields; if (CGM.getLangOpts().ObjCAutoRefCount) { for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin(); IVD; IVD = IVD->getNextIvar()) RecFields.push_back(cast<FieldDecl>(IVD)); } else { SmallVector<const ObjCIvarDecl*, 32> Ivars; CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars); // FIXME: This is not ideal; we shouldn't have to do this copy. RecFields.append(Ivars.begin(), Ivars.end()); } if (RecFields.empty()) return llvm::Constant::getNullValue(PtrTy); SkipIvars.clear(); IvarsInfo.clear(); BuildAggrIvarLayout(OMD, nullptr, nullptr, RecFields, 0, ForStrongLayout, hasUnion); if (IvarsInfo.empty()) return llvm::Constant::getNullValue(PtrTy); // Sort on byte position in case we encounterred a union nested in // the ivar list. if (hasUnion && !IvarsInfo.empty()) std::sort(IvarsInfo.begin(), IvarsInfo.end()); if (hasUnion && !SkipIvars.empty()) std::sort(SkipIvars.begin(), SkipIvars.end()); std::string BitMap; llvm::Constant *C = BuildIvarLayoutBitmap(BitMap); if (CGM.getLangOpts().ObjCGCBitmapPrint) { printf("\n%s ivar layout for class '%s': ", ForStrongLayout ? "strong" : "weak", OMD->getClassInterface()->getName().str().c_str()); const unsigned char *s = (const unsigned char*)BitMap.c_str(); for (unsigned i = 0, e = BitMap.size(); i < e; i++) if (!(s[i] & 0xf0)) printf("0x0%x%s", s[i], s[i] != 0 ? ", " : ""); else printf("0x%x%s", s[i], s[i] != 0 ? ", " : ""); printf("\n"); } return C; } llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) { llvm::GlobalVariable *&Entry = MethodVarNames[Sel]; // FIXME: Avoid std::string in "Sel.getAsString()" if (!Entry) Entry = CreateMetadataVar( "OBJC_METH_VAR_NAME_", llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()), ((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals" : "__TEXT,__cstring,cstring_literals"), 1, true); return getConstantGEP(VMContext, Entry, 0, 0); } // FIXME: Merge into a single cstring creation function. 
llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) { return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID)); } llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) { std::string TypeStr; CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field); llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr]; if (!Entry) Entry = CreateMetadataVar( "OBJC_METH_VAR_TYPE_", llvm::ConstantDataArray::getString(VMContext, TypeStr), ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals" : "__TEXT,__cstring,cstring_literals"), 1, true); return getConstantGEP(VMContext, Entry, 0, 0); } llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D, bool Extended) { std::string TypeStr; if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended)) return nullptr; llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr]; if (!Entry) Entry = CreateMetadataVar( "OBJC_METH_VAR_TYPE_", llvm::ConstantDataArray::getString(VMContext, TypeStr), ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals" : "__TEXT,__cstring,cstring_literals"), 1, true); return getConstantGEP(VMContext, Entry, 0, 0); } // FIXME: Merge into a single cstring creation function. llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) { llvm::GlobalVariable *&Entry = PropertyNames[Ident]; if (!Entry) Entry = CreateMetadataVar( "OBJC_PROP_NAME_ATTR_", llvm::ConstantDataArray::getString(VMContext, Ident->getName()), "__TEXT,__cstring,cstring_literals", 1, true); return getConstantGEP(VMContext, Entry, 0, 0); } // FIXME: Merge into a single cstring creation function. // FIXME: This Decl should be more precise. llvm::Constant * CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD, const Decl *Container) { std::string TypeStr; CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr); return GetPropertyName(&CGM.getContext().Idents.get(TypeStr)); } void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D, const ObjCContainerDecl *CD, SmallVectorImpl<char> &Name) { llvm::raw_svector_ostream OS(Name); assert (CD && "Missing container decl in GetNameForMethod"); OS << '\01' << (D->isInstanceMethod() ? '-' : '+') << '[' << CD->getName(); if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext())) OS << '(' << *CID << ')'; OS << ' ' << D->getSelector().getAsString() << ']'; } void CGObjCMac::FinishModule() { EmitModuleInfo(); // Emit the dummy bodies for any protocols which were referenced but // never defined. for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator I = Protocols.begin(), e = Protocols.end(); I != e; ++I) { if (I->second->hasInitializer()) continue; llvm::Constant *Values[5]; Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy); Values[1] = GetClassName(I->first->getName()); Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy); Values[3] = Values[4] = llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy); I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy, Values)); CGM.addCompilerUsedGlobal(I->second); } // Add assembler directives to add lazy undefined symbol references // for classes which are referenced but not defined. This is // important for correct linker interaction. // // FIXME: It would be nice if we had an LLVM construct for this. 
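// For a module that defines class Foo and merely references class Bar, the
// loop below appends module inline asm along these lines (illustrative
// names):
//   .objc_class_name_Foo=0
//   .globl .objc_class_name_Foo
//   .lazy_reference .objc_class_name_Bar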
if (!LazySymbols.empty() || !DefinedSymbols.empty()) { SmallString<256> Asm; Asm += CGM.getModule().getModuleInlineAsm(); if (!Asm.empty() && Asm.back() != '\n') Asm += '\n'; llvm::raw_svector_ostream OS(Asm); for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(), e = DefinedSymbols.end(); I != e; ++I) OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n" << "\t.globl .objc_class_name_" << (*I)->getName() << "\n"; for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(), e = LazySymbols.end(); I != e; ++I) { OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n"; } for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) { OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n" << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n"; } CGM.getModule().setModuleInlineAsm(OS.str()); } } CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm), ObjCTypes(cgm) { ObjCEmptyCacheVar = ObjCEmptyVtableVar = nullptr; ObjCABI = 2; } /* *** */ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm) : VMContext(cgm.getLLVMContext()), CGM(cgm), ExternalProtocolPtrTy(nullptr) { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); ShortTy = Types.ConvertType(Ctx.ShortTy); IntTy = Types.ConvertType(Ctx.IntTy); LongTy = Types.ConvertType(Ctx.LongTy); LongLongTy = Types.ConvertType(Ctx.LongLongTy); Int8PtrTy = CGM.Int8PtrTy; Int8PtrPtrTy = CGM.Int8PtrPtrTy; // arm64 targets use "int" ivar offset variables. All others, // including OS X x86_64 and Windows x86_64, use "long" ivar offsets. if (CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64) IvarOffsetVarTy = IntTy; else IvarOffsetVarTy = LongTy; ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType()); PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy); SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType()); // I'm not sure I like this. The implicit coordination is a bit // gross. We should solve this in a reasonable fashion because this // is a pretty common task (match some runtime data structure with // an LLVM data structure). // FIXME: This is leaked. // FIXME: Merge with rewriter code? 
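// As a sketch of why this matters (illustrative): a source-level super send
// such as
//
//   @implementation Foo
//   - (id)init { return [super init]; }
//   @end
//
// is lowered to a call through objc_msgSendSuper, whose first argument is a
// pointer to the struct _objc_super built just below, so the clang RecordDecl
// created here must stay layout-compatible with the runtime's definition.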
// struct _objc_super { // id self; // Class cls; // } RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), &Ctx.Idents.get("_objc_super")); RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), nullptr, Ctx.getObjCIdType(), nullptr, nullptr, false, ICIS_NoInit)); RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), nullptr, Ctx.getObjCClassType(), nullptr, nullptr, false, ICIS_NoInit)); RD->completeDefinition(); SuperCTy = Ctx.getTagDeclType(RD); SuperPtrCTy = Ctx.getPointerType(SuperCTy); SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy)); SuperPtrTy = llvm::PointerType::getUnqual(SuperTy); // struct _prop_t { // char *name; // char *attributes; // } PropertyTy = llvm::StructType::create("struct._prop_t", Int8PtrTy, Int8PtrTy, nullptr); // struct _prop_list_t { // uint32_t entsize; // sizeof(struct _prop_t) // uint32_t count_of_properties; // struct _prop_t prop_list[count_of_properties]; // } PropertyListTy = llvm::StructType::create("struct._prop_list_t", IntTy, IntTy, llvm::ArrayType::get(PropertyTy, 0), nullptr); // struct _prop_list_t * PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy); // struct _objc_method { // SEL _cmd; // char *method_type; // char *_imp; // } MethodTy = llvm::StructType::create("struct._objc_method", SelectorPtrTy, Int8PtrTy, Int8PtrTy, nullptr); // struct _objc_cache * CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache"); CachePtrTy = llvm::PointerType::getUnqual(CacheTy); } ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm) : ObjCCommonTypesHelper(cgm) { // struct _objc_method_description { // SEL name; // char *types; // } MethodDescriptionTy = llvm::StructType::create("struct._objc_method_description", SelectorPtrTy, Int8PtrTy, nullptr); // struct _objc_method_description_list { // int count; // struct _objc_method_description[1]; // } MethodDescriptionListTy = llvm::StructType::create( "struct._objc_method_description_list", IntTy, llvm::ArrayType::get(MethodDescriptionTy, 0), nullptr); // struct _objc_method_description_list * MethodDescriptionListPtrTy = llvm::PointerType::getUnqual(MethodDescriptionListTy); // Protocol description structures // struct _objc_protocol_extension { // uint32_t size; // sizeof(struct _objc_protocol_extension) // struct _objc_method_description_list *optional_instance_methods; // struct _objc_method_description_list *optional_class_methods; // struct _objc_property_list *instance_properties; // const char ** extendedMethodTypes; // } ProtocolExtensionTy = llvm::StructType::create("struct._objc_protocol_extension", IntTy, MethodDescriptionListPtrTy, MethodDescriptionListPtrTy, PropertyListPtrTy, Int8PtrPtrTy, nullptr); // struct _objc_protocol_extension * ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy); // Handle recursive construction of Protocol and ProtocolList types ProtocolTy = llvm::StructType::create(VMContext, "struct._objc_protocol"); ProtocolListTy = llvm::StructType::create(VMContext, "struct._objc_protocol_list"); ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy), LongTy, llvm::ArrayType::get(ProtocolTy, 0), nullptr); // struct _objc_protocol { // struct _objc_protocol_extension *isa; // char *protocol_name; // struct _objc_protocol **_objc_protocol_list; // struct _objc_method_description_list *instance_methods; // struct _objc_method_description_list *class_methods; // } 
ProtocolTy->setBody(ProtocolExtensionPtrTy, Int8PtrTy, llvm::PointerType::getUnqual(ProtocolListTy), MethodDescriptionListPtrTy, MethodDescriptionListPtrTy, nullptr); // struct _objc_protocol_list * ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy); ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy); // Class description structures // struct _objc_ivar { // char *ivar_name; // char *ivar_type; // int ivar_offset; // } IvarTy = llvm::StructType::create("struct._objc_ivar", Int8PtrTy, Int8PtrTy, IntTy, nullptr); // struct _objc_ivar_list * IvarListTy = llvm::StructType::create(VMContext, "struct._objc_ivar_list"); IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy); // struct _objc_method_list * MethodListTy = llvm::StructType::create(VMContext, "struct._objc_method_list"); MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy); // struct _objc_class_extension * ClassExtensionTy = llvm::StructType::create("struct._objc_class_extension", IntTy, Int8PtrTy, PropertyListPtrTy, nullptr); ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy); ClassTy = llvm::StructType::create(VMContext, "struct._objc_class"); // struct _objc_class { // Class isa; // Class super_class; // char *name; // long version; // long info; // long instance_size; // struct _objc_ivar_list *ivars; // struct _objc_method_list *methods; // struct _objc_cache *cache; // struct _objc_protocol_list *protocols; // char *ivar_layout; // struct _objc_class_ext *ext; // }; ClassTy->setBody(llvm::PointerType::getUnqual(ClassTy), llvm::PointerType::getUnqual(ClassTy), Int8PtrTy, LongTy, LongTy, LongTy, IvarListPtrTy, MethodListPtrTy, CachePtrTy, ProtocolListPtrTy, Int8PtrTy, ClassExtensionPtrTy, nullptr); ClassPtrTy = llvm::PointerType::getUnqual(ClassTy); // struct _objc_category { // char *category_name; // char *class_name; // struct _objc_method_list *instance_method; // struct _objc_method_list *class_method; // uint32_t size; // sizeof(struct _objc_category) // struct _objc_property_list *instance_properties;// category's @property // } CategoryTy = llvm::StructType::create("struct._objc_category", Int8PtrTy, Int8PtrTy, MethodListPtrTy, MethodListPtrTy, ProtocolListPtrTy, IntTy, PropertyListPtrTy, nullptr); // Global metadata structures // struct _objc_symtab { // long sel_ref_cnt; // SEL *refs; // short cls_def_cnt; // short cat_def_cnt; // char *defs[cls_def_cnt + cat_def_cnt]; // } SymtabTy = llvm::StructType::create("struct._objc_symtab", LongTy, SelectorPtrTy, ShortTy, ShortTy, llvm::ArrayType::get(Int8PtrTy, 0), nullptr); SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy); // struct _objc_module { // long version; // long size; // sizeof(struct _objc_module) // char *name; // struct _objc_symtab* symtab; // } ModuleTy = llvm::StructType::create("struct._objc_module", LongTy, LongTy, Int8PtrTy, SymtabPtrTy, nullptr); // FIXME: This is the size of the setjmp buffer and should be target // specific. 18 is what's used on 32-bit X86. 
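// In C terms, the exception-data struct built below corresponds to the
// following sketch (illustrative, under the 32-bit X86 / 18-slot jmp_buf
// assumption noted above):
//
//   struct _objc_exception_data {
//     int buf[18];        // setjmp buffer
//     void *pointers[4];  // exception-stack bookkeeping
//   };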
uint64_t SetJmpBufferSize = 18; // Exceptions llvm::Type *StackPtrTy = llvm::ArrayType::get(CGM.Int8PtrTy, 4); ExceptionDataTy = llvm::StructType::create("struct._objc_exception_data", llvm::ArrayType::get(CGM.Int32Ty,SetJmpBufferSize), StackPtrTy, nullptr); } ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm) : ObjCCommonTypesHelper(cgm) { // struct _method_list_t { // uint32_t entsize; // sizeof(struct _objc_method) // uint32_t method_count; // struct _objc_method method_list[method_count]; // } MethodListnfABITy = llvm::StructType::create("struct.__method_list_t", IntTy, IntTy, llvm::ArrayType::get(MethodTy, 0), nullptr); // struct method_list_t * MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy); // struct _protocol_t { // id isa; // NULL // const char * const protocol_name; // const struct _protocol_list_t * protocol_list; // super protocols // const struct method_list_t * const instance_methods; // const struct method_list_t * const class_methods; // const struct method_list_t *optionalInstanceMethods; // const struct method_list_t *optionalClassMethods; // const struct _prop_list_t * properties; // const uint32_t size; // sizeof(struct _protocol_t) // const uint32_t flags; // = 0 // const char ** extendedMethodTypes; // const char *demangledName; // } // Holder for struct _protocol_list_t * ProtocolListnfABITy = llvm::StructType::create(VMContext, "struct._objc_protocol_list"); ProtocolnfABITy = llvm::StructType::create("struct._protocol_t", ObjectPtrTy, Int8PtrTy, llvm::PointerType::getUnqual(ProtocolListnfABITy), MethodListnfABIPtrTy, MethodListnfABIPtrTy, MethodListnfABIPtrTy, MethodListnfABIPtrTy, PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy, Int8PtrTy, nullptr); // struct _protocol_t* ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy); // struct _protocol_list_t { // long protocol_count; // Note, this is 32/64 bit // struct _protocol_t *[protocol_count]; // } ProtocolListnfABITy->setBody(LongTy, llvm::ArrayType::get(ProtocolnfABIPtrTy, 0), nullptr); // struct _objc_protocol_list* ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy); // struct _ivar_t { // unsigned [long] int *offset; // pointer to ivar offset location // char *name; // char *type; // uint32_t alignment; // uint32_t size; // } IvarnfABITy = llvm::StructType::create( "struct._ivar_t", llvm::PointerType::getUnqual(IvarOffsetVarTy), Int8PtrTy, Int8PtrTy, IntTy, IntTy, nullptr); // struct _ivar_list_t { // uint32 entsize; // sizeof(struct _ivar_t) // uint32 count; // struct _iver_t list[count]; // } IvarListnfABITy = llvm::StructType::create("struct._ivar_list_t", IntTy, IntTy, llvm::ArrayType::get(IvarnfABITy, 0), nullptr); IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy); // struct _class_ro_t { // uint32_t const flags; // uint32_t const instanceStart; // uint32_t const instanceSize; // uint32_t const reserved; // only when building for 64bit targets // const uint8_t * const ivarLayout; // const char *const name; // const struct _method_list_t * const baseMethods; // const struct _objc_protocol_list *const baseProtocols; // const struct _ivar_list_t *const ivars; // const uint8_t * const weakIvarLayout; // const struct _prop_list_t * const properties; // } // FIXME. Add 'reserved' field in 64bit abi mode! 
ClassRonfABITy = llvm::StructType::create("struct._class_ro_t", IntTy, IntTy, IntTy, Int8PtrTy, Int8PtrTy, MethodListnfABIPtrTy, ProtocolListnfABIPtrTy, IvarListnfABIPtrTy, Int8PtrTy, PropertyListPtrTy, nullptr); // ImpnfABITy - LLVM for id (*)(id, SEL, ...) llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy }; ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false) ->getPointerTo(); // struct _class_t { // struct _class_t *isa; // struct _class_t * const superclass; // void *cache; // IMP *vtable; // struct class_ro_t *ro; // } ClassnfABITy = llvm::StructType::create(VMContext, "struct._class_t"); ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy), llvm::PointerType::getUnqual(ClassnfABITy), CachePtrTy, llvm::PointerType::getUnqual(ImpnfABITy), llvm::PointerType::getUnqual(ClassRonfABITy), nullptr); // LLVM for struct _class_t * ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy); // struct _category_t { // const char * const name; // struct _class_t *const cls; // const struct _method_list_t * const instance_methods; // const struct _method_list_t * const class_methods; // const struct _protocol_list_t * const protocols; // const struct _prop_list_t * const properties; // } CategorynfABITy = llvm::StructType::create("struct._category_t", Int8PtrTy, ClassnfABIPtrTy, MethodListnfABIPtrTy, MethodListnfABIPtrTy, ProtocolListnfABIPtrTy, PropertyListPtrTy, nullptr); // New types for nonfragile abi messaging. CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); // MessageRefTy - LLVM for: // struct _message_ref_t { // IMP messenger; // SEL name; // }; // First the clang type for struct _message_ref_t RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), &Ctx.Idents.get("_message_ref_t")); RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), nullptr, Ctx.VoidPtrTy, nullptr, nullptr, false, ICIS_NoInit)); RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), nullptr, Ctx.getObjCSelType(), nullptr, nullptr, false, ICIS_NoInit)); RD->completeDefinition(); MessageRefCTy = Ctx.getTagDeclType(RD); MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy); MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy)); // MessageRefPtrTy - LLVM for struct _message_ref_t* MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy); // SuperMessageRefTy - LLVM for: // struct _super_message_ref_t { // SUPER_IMP messenger; // SEL name; // }; SuperMessageRefTy = llvm::StructType::create("struct._super_message_ref_t", ImpnfABITy, SelectorPtrTy, nullptr); // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t* SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy); // struct objc_typeinfo { // const void** vtable; // objc_ehtype_vtable + 2 // const char* name; // c++ typeinfo string // Class cls; // }; EHTypeTy = llvm::StructType::create("struct._objc_typeinfo", llvm::PointerType::getUnqual(Int8PtrTy), Int8PtrTy, ClassnfABIPtrTy, nullptr); EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy); } llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() { FinishNonFragileABIModule(); return nullptr; } void CGObjCNonFragileABIMac:: AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container, const char *SymbolName, const char *SectionName) { unsigned NumClasses = Container.size(); if (!NumClasses) return; SmallVector<llvm::Constant*, 8> Symbols(NumClasses); for (unsigned i=0; i<NumClasses; i++) Symbols[i] = 
llvm::ConstantExpr::getBitCast(Container[i], ObjCTypes.Int8PtrTy);
  llvm::Constant *Init =
    llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
                                                  Symbols.size()),
                             Symbols);

  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
                             llvm::GlobalValue::PrivateLinkage,
                             Init,
                             SymbolName);
  GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
  GV->setSection(SectionName);
  CGM.addCompilerUsedGlobal(GV);
}

void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
  // nonfragile abi has no module definition.

  // Build list of all implemented class addresses in array
  // L_OBJC_LABEL_CLASS_$.
  for (unsigned i=0, NumClasses=ImplementedClasses.size(); i<NumClasses; i++) {
    const ObjCInterfaceDecl *ID = ImplementedClasses[i];
    assert(ID);
    if (ObjCImplementationDecl *IMP = ID->getImplementation())
      // We are implementing a weak imported interface. Give it external
      // linkage.
      if (ID->isWeakImported() && !IMP->isWeakImported()) {
        DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
        DefinedMetaClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
      }
  }

  AddModuleClassList(DefinedClasses, "OBJC_LABEL_CLASS_$",
                     "__DATA, __objc_classlist, regular, no_dead_strip");

  AddModuleClassList(DefinedNonLazyClasses, "OBJC_LABEL_NONLAZY_CLASS_$",
                     "__DATA, __objc_nlclslist, regular, no_dead_strip");

  // Build list of all implemented category addresses in array
  // L_OBJC_LABEL_CATEGORY_$.
  AddModuleClassList(DefinedCategories, "OBJC_LABEL_CATEGORY_$",
                     "__DATA, __objc_catlist, regular, no_dead_strip");
  AddModuleClassList(DefinedNonLazyCategories, "OBJC_LABEL_NONLAZY_CATEGORY_$",
                     "__DATA, __objc_nlcatlist, regular, no_dead_strip");

  EmitImageInfo();
}

/// isVTableDispatchedSelector - Returns true if SEL is in the list of
/// VTableDispatchMethods; false otherwise. What this means is that
/// except for the 19 selectors in the list, we generate 32bit-style
/// message dispatch call for all the rest.
bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
  // At various points we've experimented with using vtable-based
  // dispatch for all methods.
  switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
  case CodeGenOptions::Legacy:
    return false;
  case CodeGenOptions::NonLegacy:
    return true;
  case CodeGenOptions::Mixed:
    break;
  }

  // If so, see whether this selector is in the white-list of things which must
  // use the new dispatch convention. We lazily build a dense set for this.
  if (VTableDispatchMethods.empty()) {
    VTableDispatchMethods.insert(GetNullarySelector("alloc"));
    VTableDispatchMethods.insert(GetNullarySelector("class"));
    VTableDispatchMethods.insert(GetNullarySelector("self"));
    VTableDispatchMethods.insert(GetNullarySelector("isFlipped"));
    VTableDispatchMethods.insert(GetNullarySelector("length"));
    VTableDispatchMethods.insert(GetNullarySelector("count"));

    // These are vtable-based if GC is disabled.
    // Optimistically use vtable dispatch for hybrid compiles.
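    // (For example, under -fobjc-dispatch-method=mixed with GC off, a send of
    // "retain" goes through the vtable stub selected here, while an unlisted
    // selector such as "description" falls back to plain objc_msgSend.
    // Illustrative only.)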
if (CGM.getLangOpts().getGC() != LangOptions::GCOnly) { VTableDispatchMethods.insert(GetNullarySelector("retain")); VTableDispatchMethods.insert(GetNullarySelector("release")); VTableDispatchMethods.insert(GetNullarySelector("autorelease")); } VTableDispatchMethods.insert(GetUnarySelector("allocWithZone")); VTableDispatchMethods.insert(GetUnarySelector("isKindOfClass")); VTableDispatchMethods.insert(GetUnarySelector("respondsToSelector")); VTableDispatchMethods.insert(GetUnarySelector("objectForKey")); VTableDispatchMethods.insert(GetUnarySelector("objectAtIndex")); VTableDispatchMethods.insert(GetUnarySelector("isEqualToString")); VTableDispatchMethods.insert(GetUnarySelector("isEqual")); // These are vtable-based if GC is enabled. // Optimistically use vtable dispatch for hybrid compiles. if (CGM.getLangOpts().getGC() != LangOptions::NonGC) { VTableDispatchMethods.insert(GetNullarySelector("hash")); VTableDispatchMethods.insert(GetUnarySelector("addObject")); // "countByEnumeratingWithState:objects:count" IdentifierInfo *KeyIdents[] = { &CGM.getContext().Idents.get("countByEnumeratingWithState"), &CGM.getContext().Idents.get("objects"), &CGM.getContext().Idents.get("count") }; VTableDispatchMethods.insert( CGM.getContext().Selectors.getSelector(3, KeyIdents)); } } return VTableDispatchMethods.count(Sel); } /// BuildClassRoTInitializer - generate meta-data for: /// struct _class_ro_t { /// uint32_t const flags; /// uint32_t const instanceStart; /// uint32_t const instanceSize; /// uint32_t const reserved; // only when building for 64bit targets /// const uint8_t * const ivarLayout; /// const char *const name; /// const struct _method_list_t * const baseMethods; /// const struct _protocol_list_t *const baseProtocols; /// const struct _ivar_list_t *const ivars; /// const uint8_t * const weakIvarLayout; /// const struct _prop_list_t * const properties; /// } /// llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer( unsigned flags, unsigned InstanceStart, unsigned InstanceSize, const ObjCImplementationDecl *ID) { std::string ClassName = ID->getObjCRuntimeNameAsString(); llvm::Constant *Values[10]; // 11 for 64bit targets! if (CGM.getLangOpts().ObjCAutoRefCount) flags |= NonFragileABI_Class_CompiledByARC; Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags); Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart); Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize); // FIXME. For 64bit targets add 0 here. Values[ 3] = (flags & NonFragileABI_Class_Meta) ? GetIvarLayoutName(nullptr, ObjCTypes) : BuildIvarLayout(ID, true); Values[ 4] = GetClassName(ID->getObjCRuntimeNameAsString()); // const struct _method_list_t * const baseMethods; std::vector<llvm::Constant*> Methods; std::string MethodListName("\01l_OBJC_$_"); if (flags & NonFragileABI_Class_Meta) { MethodListName += "CLASS_METHODS_"; MethodListName += ID->getObjCRuntimeNameAsString(); for (const auto *I : ID->class_methods()) // Class methods should always be defined. Methods.push_back(GetMethodConstant(I)); } else { MethodListName += "INSTANCE_METHODS_"; MethodListName += ID->getObjCRuntimeNameAsString(); for (const auto *I : ID->instance_methods()) // Instance methods should always be defined. 
      Methods.push_back(GetMethodConstant(I));

    for (const auto *PID : ID->property_impls()) {
      if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
        ObjCPropertyDecl *PD = PID->getPropertyDecl();

        if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
          if (llvm::Constant *C = GetMethodConstant(MD))
            Methods.push_back(C);
        if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
          if (llvm::Constant *C = GetMethodConstant(MD))
            Methods.push_back(C);
      }
    }
  }
  Values[ 5] = EmitMethodList(MethodListName, "__DATA, __objc_const", Methods);

  const ObjCInterfaceDecl *OID = ID->getClassInterface();
  assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
  Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
                                + OID->getObjCRuntimeNameAsString(),
                                OID->all_referenced_protocol_begin(),
                                OID->all_referenced_protocol_end());

  if (flags & NonFragileABI_Class_Meta) {
    Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
    Values[ 8] = GetIvarLayoutName(nullptr, ObjCTypes);
    Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
  } else {
    Values[ 7] = EmitIvarList(ID);
    Values[ 8] = BuildIvarLayout(ID, false);
    Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_"
                                  + ID->getObjCRuntimeNameAsString(),
                                  ID, ID->getClassInterface(), ObjCTypes);
  }
  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
                                                   Values);
  llvm::GlobalVariable *CLASS_RO_GV =
    new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
                             llvm::GlobalValue::PrivateLinkage,
                             Init,
                             (flags & NonFragileABI_Class_Meta) ?
                             std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
                             std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
  CLASS_RO_GV->setAlignment(
    CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
  CLASS_RO_GV->setSection("__DATA, __objc_const");
  return CLASS_RO_GV;
}

/// BuildClassMetaData - This routine defines the top-level meta-data
/// for the given ClassName for:
/// struct _class_t {
///   struct _class_t *isa;
///   struct _class_t * const superclass;
///   void *cache;
///   IMP *vtable;
///   struct class_ro_t *ro;
/// }
///
llvm::GlobalVariable *CGObjCNonFragileABIMac::BuildClassMetaData(
    const std::string &ClassName, llvm::Constant *IsAGV,
    llvm::Constant *SuperClassGV, llvm::Constant *ClassRoGV,
    bool HiddenVisibility, bool Weak) {
  llvm::Constant *Values[] = {
    IsAGV,
    SuperClassGV,
    ObjCEmptyCacheVar,  // &ObjCEmptyCacheVar
    ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
    ClassRoGV           // &CLASS_RO_GV
  };
  if (!Values[1])
    Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
  if (!Values[3])
    Values[3] = llvm::Constant::getNullValue(
                  llvm::PointerType::getUnqual(ObjCTypes.ImpnfABITy));
  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
                                                   Values);
  llvm::GlobalVariable *GV = GetClassGlobal(ClassName, Weak);
  GV->setInitializer(Init);
  GV->setSection("__DATA, __objc_data");
  GV->setAlignment(
    CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
  if (HiddenVisibility)
    GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
  return GV;
}

bool
CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
  return OD->getClassMethod(GetNullarySelector("load")) != nullptr;
}

void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
                                              uint32_t &InstanceStart,
                                              uint32_t &InstanceSize) {
  const ASTRecordLayout &RL =
    CGM.getContext().getASTObjCImplementationLayout(OID);

  // InstanceSize is really instance end.
  InstanceSize = RL.getDataSize().getQuantity();

  // If there are no fields, the start is the same as the end.
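  // (Worked example, illustrative: a class whose first ivar is laid out at
  // byte offset 8 within a 24-byte data size gets InstanceStart == 8 and
  // InstanceSize == 24; with no ivars, both collapse to the data size.)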
if (!RL.getFieldCount()) InstanceStart = InstanceSize; else InstanceStart = RL.getFieldOffset(0) / CGM.getContext().getCharWidth(); } void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) { std::string ClassName = ID->getObjCRuntimeNameAsString(); if (!ObjCEmptyCacheVar) { ObjCEmptyCacheVar = new llvm::GlobalVariable( CGM.getModule(), ObjCTypes.CacheTy, false, llvm::GlobalValue::ExternalLinkage, nullptr, "_objc_empty_cache"); // Make this entry NULL for any iOS device target, any iOS simulator target, // OS X with deployment target 10.9 or later. const llvm::Triple &Triple = CGM.getTarget().getTriple(); if (Triple.isiOS() || (Triple.isMacOSX() && !Triple.isMacOSXVersionLT(10, 9))) // This entry will be null. ObjCEmptyVtableVar = nullptr; else ObjCEmptyVtableVar = new llvm::GlobalVariable( CGM.getModule(), ObjCTypes.ImpnfABITy, false, llvm::GlobalValue::ExternalLinkage, nullptr, "_objc_empty_vtable"); } assert(ID->getClassInterface() && "CGObjCNonFragileABIMac::GenerateClass - class is 0"); // FIXME: Is this correct (that meta class size is never computed)? uint32_t InstanceStart = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassnfABITy); uint32_t InstanceSize = InstanceStart; uint32_t flags = NonFragileABI_Class_Meta; llvm::SmallString<64> ObjCMetaClassName(getMetaclassSymbolPrefix()); llvm::SmallString<64> ObjCClassName(getClassSymbolPrefix()); llvm::SmallString<64> TClassName; llvm::GlobalVariable *SuperClassGV, *IsAGV; // Build the flags for the metaclass. bool classIsHidden = ID->getClassInterface()->getVisibility() == HiddenVisibility; if (classIsHidden) flags |= NonFragileABI_Class_Hidden; // FIXME: why is this flag set on the metaclass? // ObjC metaclasses have no fields and don't really get constructed. if (ID->hasNonZeroConstructors() || ID->hasDestructors()) { flags |= NonFragileABI_Class_HasCXXStructors; if (!ID->hasNonZeroConstructors()) flags |= NonFragileABI_Class_HasCXXDestructorOnly; } if (!ID->getClassInterface()->getSuperClass()) { // class is root flags |= NonFragileABI_Class_Root; TClassName = ObjCClassName; TClassName += ClassName; SuperClassGV = GetClassGlobal(TClassName.str(), ID->getClassInterface()->isWeakImported()); TClassName = ObjCMetaClassName; TClassName += ClassName; IsAGV = GetClassGlobal(TClassName.str(), ID->getClassInterface()->isWeakImported()); } else { // Has a root. Current class is not a root. const ObjCInterfaceDecl *Root = ID->getClassInterface(); while (const ObjCInterfaceDecl *Super = Root->getSuperClass()) Root = Super; TClassName = ObjCMetaClassName ; TClassName += Root->getObjCRuntimeNameAsString(); IsAGV = GetClassGlobal(TClassName.str(), Root->isWeakImported()); // work on super class metadata symbol. 
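    // (Illustrative, assuming the usual "OBJC_METACLASS_$_" prefix: for
    // @interface Foo : Bar with Bar rooted at NSObject, the metaclass isa
    // comes from the root, OBJC_METACLASS_$_NSObject, while the superclass
    // slot filled in below is the direct superclass's metaclass,
    // OBJC_METACLASS_$_Bar.)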
TClassName = ObjCMetaClassName; TClassName += ID->getClassInterface()->getSuperClass()->getObjCRuntimeNameAsString(); SuperClassGV = GetClassGlobal( TClassName.str(), ID->getClassInterface()->getSuperClass()->isWeakImported()); } llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags, InstanceStart, InstanceSize,ID); TClassName = ObjCMetaClassName; TClassName += ClassName; llvm::GlobalVariable *MetaTClass = BuildClassMetaData( TClassName.str(), IsAGV, SuperClassGV, CLASS_RO_GV, classIsHidden, ID->getClassInterface()->isWeakImported()); DefinedMetaClasses.push_back(MetaTClass); // Metadata for the class flags = 0; if (classIsHidden) flags |= NonFragileABI_Class_Hidden; if (ID->hasNonZeroConstructors() || ID->hasDestructors()) { flags |= NonFragileABI_Class_HasCXXStructors; // Set a flag to enable a runtime optimization when a class has // fields that require destruction but which don't require // anything except zero-initialization during construction. This // is most notably true of __strong and __weak types, but you can // also imagine there being C++ types with non-trivial default // constructors that merely set all fields to null. if (!ID->hasNonZeroConstructors()) flags |= NonFragileABI_Class_HasCXXDestructorOnly; } if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface())) flags |= NonFragileABI_Class_Exception; if (!ID->getClassInterface()->getSuperClass()) { flags |= NonFragileABI_Class_Root; SuperClassGV = nullptr; } else { // Has a root. Current class is not a root. TClassName = ObjCClassName; TClassName += ID->getClassInterface()->getSuperClass()->getObjCRuntimeNameAsString(); SuperClassGV = GetClassGlobal( TClassName.str(), ID->getClassInterface()->getSuperClass()->isWeakImported()); } GetClassSizeInfo(ID, InstanceStart, InstanceSize); CLASS_RO_GV = BuildClassRoTInitializer(flags, InstanceStart, InstanceSize, ID); TClassName = ObjCClassName; TClassName += ClassName; llvm::GlobalVariable *ClassMD = BuildClassMetaData(TClassName.str(), MetaTClass, SuperClassGV, CLASS_RO_GV, classIsHidden, ID->getClassInterface()->isWeakImported()); DefinedClasses.push_back(ClassMD); ImplementedClasses.push_back(ID->getClassInterface()); // Determine if this class is also "non-lazy". if (ImplementationIsNonLazy(ID)) DefinedNonLazyClasses.push_back(ClassMD); // Force the definition of the EHType if necessary. if (flags & NonFragileABI_Class_Exception) GetInterfaceEHType(ID->getClassInterface(), true); // Make sure method definition entries are all clear for next implementation. MethodDefinitions.clear(); } /// GenerateProtocolRef - This routine is called to generate code for /// a protocol reference expression; as in: /// @code /// @protocol(Proto1); /// @endcode /// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1 /// which will hold address of the protocol meta-data. /// llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF, const ObjCProtocolDecl *PD) { // This routine is called for @protocol only. So, we must build definition // of protocol's meta-data (not a reference to it!) 
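// For instance (illustrative; protocol name assumed): the expression
// @protocol(MyProto) loads from a weak hidden global named
// "\01l_OBJC_PROTOCOL_REFERENCE_$_MyProto", which is initialized below to
// point at the full "\01l_OBJC_PROTOCOL_$_MyProto" definition emitted by
// GetOrEmitProtocol.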
// llvm::Constant *Init = llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD), ObjCTypes.getExternalProtocolPtrTy()); std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_"); ProtocolName += PD->getObjCRuntimeNameAsString(); llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName); if (PTGV) return CGF.Builder.CreateLoad(PTGV); PTGV = new llvm::GlobalVariable( CGM.getModule(), Init->getType(), false, llvm::GlobalValue::WeakAnyLinkage, Init, ProtocolName); PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip"); PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility); CGM.addCompilerUsedGlobal(PTGV); return CGF.Builder.CreateLoad(PTGV); } /// GenerateCategory - Build metadata for a category implementation. /// struct _category_t { /// const char * const name; /// struct _class_t *const cls; /// const struct _method_list_t * const instance_methods; /// const struct _method_list_t * const class_methods; /// const struct _protocol_list_t * const protocols; /// const struct _prop_list_t * const properties; /// } /// void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) { const ObjCInterfaceDecl *Interface = OCD->getClassInterface(); const char *Prefix = "\01l_OBJC_$_CATEGORY_"; llvm::SmallString<64> ExtCatName(Prefix); ExtCatName += Interface->getObjCRuntimeNameAsString(); ExtCatName += "_$_"; ExtCatName += OCD->getNameAsString(); llvm::SmallString<64> ExtClassName(getClassSymbolPrefix()); ExtClassName += Interface->getObjCRuntimeNameAsString(); llvm::Constant *Values[6]; Values[0] = GetClassName(OCD->getIdentifier()->getName()); // meta-class entry symbol llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName.str(), Interface->isWeakImported()); Values[1] = ClassGV; std::vector<llvm::Constant*> Methods; llvm::SmallString<64> MethodListName(Prefix); MethodListName += "INSTANCE_METHODS_"; MethodListName += Interface->getObjCRuntimeNameAsString(); MethodListName += "_$_"; MethodListName += OCD->getName(); for (const auto *I : OCD->instance_methods()) // Instance methods should always be defined. Methods.push_back(GetMethodConstant(I)); Values[2] = EmitMethodList(MethodListName.str(), "__DATA, __objc_const", Methods); MethodListName = Prefix; MethodListName += "CLASS_METHODS_"; MethodListName += Interface->getObjCRuntimeNameAsString(); MethodListName += "_$_"; MethodListName += OCD->getNameAsString(); Methods.clear(); for (const auto *I : OCD->class_methods()) // Class methods should always be defined. 
Methods.push_back(GetMethodConstant(I)); Values[3] = EmitMethodList(MethodListName.str(), "__DATA, __objc_const", Methods); const ObjCCategoryDecl *Category = Interface->FindCategoryDeclaration(OCD->getIdentifier()); if (Category) { SmallString<256> ExtName; llvm::raw_svector_ostream(ExtName) << Interface->getObjCRuntimeNameAsString() << "_$_" << OCD->getName(); Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_" + Interface->getObjCRuntimeNameAsString() + "_$_" + Category->getName(), Category->protocol_begin(), Category->protocol_end()); Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(), OCD, Category, ObjCTypes); } else { Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy); Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy); } llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy, Values); llvm::GlobalVariable *GCATV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy, false, llvm::GlobalValue::PrivateLinkage, Init, ExtCatName.str()); GCATV->setAlignment( CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy)); GCATV->setSection("__DATA, __objc_const"); CGM.addCompilerUsedGlobal(GCATV); DefinedCategories.push_back(GCATV); // Determine if this category is also "non-lazy". if (ImplementationIsNonLazy(OCD)) DefinedNonLazyCategories.push_back(GCATV); // method definition entries must be clear for next implementation. MethodDefinitions.clear(); } /// GetMethodConstant - Return a struct objc_method constant for the /// given method if it has been defined. The result is null if the /// method has not been defined. The return value has type MethodPtrTy. llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant( const ObjCMethodDecl *MD) { llvm::Function *Fn = GetMethodDefinition(MD); if (!Fn) return nullptr; llvm::Constant *Method[] = { llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()), ObjCTypes.SelectorPtrTy), GetMethodVarType(MD), llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy) }; return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method); } /// EmitMethodList - Build meta-data for method declarations /// struct _method_list_t { /// uint32_t entsize; // sizeof(struct _objc_method) /// uint32_t method_count; /// struct _objc_method method_list[method_count]; /// } /// llvm::Constant * CGObjCNonFragileABIMac::EmitMethodList(Twine Name, const char *Section, ArrayRef<llvm::Constant*> Methods) { // Return null for empty list. if (Methods.empty()) return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy); llvm::Constant *Values[3]; // sizeof(struct _objc_method) unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.MethodTy); Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size); // method_count Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size()); llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy, Methods.size()); Values[2] = llvm::ConstantArray::get(AT, Methods); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); llvm::GlobalVariable *GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false, llvm::GlobalValue::PrivateLinkage, Init, Name); GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType())); GV->setSection(Section); CGM.addCompilerUsedGlobal(GV); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy); } /// ObjCIvarOffsetVariable - Returns the ivar offset variable for /// the given ivar. 
llvm::GlobalVariable * CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID, const ObjCIvarDecl *Ivar) { const ObjCInterfaceDecl *Container = Ivar->getContainingInterface(); llvm::SmallString<64> Name("OBJC_IVAR_$_"); Name += Container->getObjCRuntimeNameAsString(); Name += "."; Name += Ivar->getName(); llvm::GlobalVariable *IvarOffsetGV = CGM.getModule().getGlobalVariable(Name); if (!IvarOffsetGV) IvarOffsetGV = new llvm::GlobalVariable( CGM.getModule(), ObjCTypes.IvarOffsetVarTy, false, llvm::GlobalValue::ExternalLinkage, nullptr, Name.str()); return IvarOffsetGV; } llvm::Constant * CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID, const ObjCIvarDecl *Ivar, unsigned long int Offset) { llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar); IvarOffsetGV->setInitializer( llvm::ConstantInt::get(ObjCTypes.IvarOffsetVarTy, Offset)); IvarOffsetGV->setAlignment( CGM.getDataLayout().getABITypeAlignment(ObjCTypes.IvarOffsetVarTy)); // FIXME: This matches gcc, but shouldn't the visibility be set on the use as // well (i.e., in ObjCIvarOffsetVariable). if (Ivar->getAccessControl() == ObjCIvarDecl::Private || Ivar->getAccessControl() == ObjCIvarDecl::Package || ID->getVisibility() == HiddenVisibility) IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility); else IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility); IvarOffsetGV->setSection("__DATA, __objc_ivar"); return IvarOffsetGV; } /// EmitIvarList - Emit the ivar list for the given /// implementation. The return value has type /// IvarListnfABIPtrTy. /// struct _ivar_t { /// unsigned [long] int *offset; // pointer to ivar offset location /// char *name; /// char *type; /// uint32_t alignment; /// uint32_t size; /// } /// struct _ivar_list_t { /// uint32 entsize; // sizeof(struct _ivar_t) /// uint32 count; /// struct _iver_t list[count]; /// } /// llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList( const ObjCImplementationDecl *ID) { std::vector<llvm::Constant*> Ivars; const ObjCInterfaceDecl *OID = ID->getClassInterface(); assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface"); // FIXME. Consolidate this with similar code in GenerateClass. for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin(); IVD; IVD = IVD->getNextIvar()) { // Ignore unnamed bit-fields. if (!IVD->getDeclName()) continue; llvm::Constant *Ivar[5]; Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD, ComputeIvarBaseOffset(CGM, ID, IVD)); Ivar[1] = GetMethodVarName(IVD->getIdentifier()); Ivar[2] = GetMethodVarType(IVD); llvm::Type *FieldTy = CGM.getTypes().ConvertTypeForMem(IVD->getType()); unsigned Size = CGM.getDataLayout().getTypeAllocSize(FieldTy); unsigned Align = CGM.getContext().getPreferredTypeAlign( IVD->getType().getTypePtr()) >> 3; Align = llvm::Log2_32(Align); Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align); // NOTE. Size of a bitfield does not match gcc's, because of the // way bitfields are treated special in each. But I am told that // 'size' for bitfield ivars is ignored by the runtime so it does // not matter. If it matters, there is enough info to get the // bitfield right! Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size); Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar)); } // Return null for empty list. 
if (Ivars.empty()) return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy); llvm::Constant *Values[3]; unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy); Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size); Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size()); llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy, Ivars.size()); Values[2] = llvm::ConstantArray::get(AT, Ivars); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_"; llvm::GlobalVariable *GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false, llvm::GlobalValue::PrivateLinkage, Init, Prefix + OID->getObjCRuntimeNameAsString()); GV->setAlignment( CGM.getDataLayout().getABITypeAlignment(Init->getType())); GV->setSection("__DATA, __objc_const"); CGM.addCompilerUsedGlobal(GV); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy); } llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef( const ObjCProtocolDecl *PD) { llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()]; if (!Entry) { // We use the initializer as a marker of whether this is a forward // reference or not. At module finalization we add the empty // contents for protocols which were referenced but never defined. Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false, llvm::GlobalValue::ExternalLinkage, nullptr, "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString()); Entry->setSection("__DATA,__datacoal_nt,coalesced"); } return Entry; } /// GetOrEmitProtocol - Generate the protocol meta-data: /// @code /// struct _protocol_t { /// id isa; // NULL /// const char * const protocol_name; /// const struct _protocol_list_t * protocol_list; // super protocols /// const struct method_list_t * const instance_methods; /// const struct method_list_t * const class_methods; /// const struct method_list_t *optionalInstanceMethods; /// const struct method_list_t *optionalClassMethods; /// const struct _prop_list_t * properties; /// const uint32_t size; // sizeof(struct _protocol_t) /// const uint32_t flags; // = 0 /// const char ** extendedMethodTypes; /// const char *demangledName; /// } /// @endcode /// llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol( const ObjCProtocolDecl *PD) { llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()]; // Early exit if a defining object has already been generated. if (Entry && Entry->hasInitializer()) return Entry; // Use the protocol definition, if there is one. if (const ObjCProtocolDecl *Def = PD->getDefinition()) PD = Def; // Construct method lists. 
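// (Illustrative example: for
//    @protocol P
//    - (void)req;
//    @optional
//    - (void)opt;
//    @end
//  "req" lands in InstanceMethods and "opt" in OptInstanceMethods below, and
//  both contribute an extended @encode string to MethodTypesExt.)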
std::vector<llvm::Constant*> InstanceMethods, ClassMethods; std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods; std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt; for (const auto *MD : PD->instance_methods()) { llvm::Constant *C = GetMethodDescriptionConstant(MD); if (!C) return GetOrEmitProtocolRef(PD); if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { OptInstanceMethods.push_back(C); OptMethodTypesExt.push_back(GetMethodVarType(MD, true)); } else { InstanceMethods.push_back(C); MethodTypesExt.push_back(GetMethodVarType(MD, true)); } } for (const auto *MD : PD->class_methods()) { llvm::Constant *C = GetMethodDescriptionConstant(MD); if (!C) return GetOrEmitProtocolRef(PD); if (MD->getImplementationControl() == ObjCMethodDecl::Optional) { OptClassMethods.push_back(C); OptMethodTypesExt.push_back(GetMethodVarType(MD, true)); } else { ClassMethods.push_back(C); MethodTypesExt.push_back(GetMethodVarType(MD, true)); } } MethodTypesExt.insert(MethodTypesExt.end(), OptMethodTypesExt.begin(), OptMethodTypesExt.end()); llvm::Constant *Values[12]; // isa is NULL Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy); Values[1] = GetClassName(PD->getObjCRuntimeNameAsString()); Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getObjCRuntimeNameAsString(), PD->protocol_begin(), PD->protocol_end()); Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_" + PD->getObjCRuntimeNameAsString(), "__DATA, __objc_const", InstanceMethods); Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_" + PD->getObjCRuntimeNameAsString(), "__DATA, __objc_const", ClassMethods); Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_" + PD->getObjCRuntimeNameAsString(), "__DATA, __objc_const", OptInstanceMethods); Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_" + PD->getObjCRuntimeNameAsString(), "__DATA, __objc_const", OptClassMethods); Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(), nullptr, PD, ObjCTypes); uint32_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolnfABITy); Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size); Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy); Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_" + PD->getObjCRuntimeNameAsString(), MethodTypesExt, ObjCTypes); // const char *demangledName; Values[11] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy); llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy, Values); if (Entry) { // Already created, fix the linkage and update the initializer. 
Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage); Entry->setInitializer(Init); } else { Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false, llvm::GlobalValue::WeakAnyLinkage, Init, "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString()); Entry->setAlignment( CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABITy)); Entry->setSection("__DATA,__datacoal_nt,coalesced"); Protocols[PD->getIdentifier()] = Entry; } Entry->setVisibility(llvm::GlobalValue::HiddenVisibility); CGM.addCompilerUsedGlobal(Entry); // Use this protocol meta-data to build protocol list table in section // __DATA, __objc_protolist llvm::GlobalVariable *PTGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy, false, llvm::GlobalValue::WeakAnyLinkage, Entry, "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString()); PTGV->setAlignment( CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy)); PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip"); PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility); CGM.addCompilerUsedGlobal(PTGV); return Entry; } /// EmitProtocolList - Generate protocol list meta-data: /// @code /// struct _protocol_list_t { /// long protocol_count; // Note, this is 32/64 bit /// struct _protocol_t[protocol_count]; /// } /// @endcode /// llvm::Constant * CGObjCNonFragileABIMac::EmitProtocolList(Twine Name, ObjCProtocolDecl::protocol_iterator begin, ObjCProtocolDecl::protocol_iterator end) { SmallVector<llvm::Constant *, 16> ProtocolRefs; // Just return null for empty protocol lists if (begin == end) return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy); // FIXME: We shouldn't need to do this lookup here, should we? SmallString<256> TmpName; Name.toVector(TmpName); llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(TmpName.str(), true); if (GV) return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy); for (; begin != end; ++begin) ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented??? // This list is null terminated. ProtocolRefs.push_back(llvm::Constant::getNullValue( ObjCTypes.ProtocolnfABIPtrTy)); llvm::Constant *Values[2]; Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1); Values[1] = llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy, ProtocolRefs.size()), ProtocolRefs); llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values); GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false, llvm::GlobalValue::PrivateLinkage, Init, Name); GV->setSection("__DATA, __objc_const"); GV->setAlignment( CGM.getDataLayout().getABITypeAlignment(Init->getType())); CGM.addCompilerUsedGlobal(GV); return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy); } /// GetMethodDescriptionConstant - This routine build following meta-data: /// struct _objc_method { /// SEL _cmd; /// char *method_type; /// char *_imp; /// } llvm::Constant * CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) { llvm::Constant *Desc[3]; Desc[0] = llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()), ObjCTypes.SelectorPtrTy); Desc[1] = GetMethodVarType(MD); if (!Desc[1]) return nullptr; // Protocol methods have no implementation. So, this entry is always NULL. Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy); return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc); } /// EmitObjCValueForIvar - Code Gen for nonfragile ivar reference. 
/// This code gen. amounts to generating code for:
/// @code
/// (type *)((char *)base + _OBJC_IVAR_$_.ivar);
/// @endcode
///
LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
                                               CodeGen::CodeGenFunction &CGF,
                                               QualType ObjectTy,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers) {
  ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
  llvm::Value *Offset = EmitIvarOffset(CGF, ID, Ivar);
  return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
                                  Offset);
}

llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
  CodeGen::CodeGenFunction &CGF,
  const ObjCInterfaceDecl *Interface,
  const ObjCIvarDecl *Ivar) {
  llvm::Value *IvarOffsetValue = ObjCIvarOffsetVariable(Interface, Ivar);
  IvarOffsetValue = CGF.Builder.CreateLoad(IvarOffsetValue, "ivar");
  if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
    cast<llvm::LoadInst>(IvarOffsetValue)
        ->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
                      llvm::MDNode::get(VMContext, None));

  // This could be 32bit int or 64bit integer depending on the architecture.
  // Cast it to 64bit integer value, if it is a 32bit integer ivar offset value
  // as this is what the caller always expects.
  if (ObjCTypes.IvarOffsetVarTy == ObjCTypes.IntTy)
    IvarOffsetValue = CGF.Builder.CreateIntCast(
        IvarOffsetValue, ObjCTypes.LongTy, true, "ivar.conv");
  return IvarOffsetValue;
}

static void appendSelectorForMessageRefTable(std::string &buffer,
                                             Selector selector) {
  if (selector.isUnarySelector()) {
    buffer += selector.getNameForSlot(0);
    return;
  }

  for (unsigned i = 0, e = selector.getNumArgs(); i != e; ++i) {
    buffer += selector.getNameForSlot(i);
    buffer += '_';
  }
}

/// Emit a "v-table" message send. We emit a weak hidden-visibility
/// struct, initially containing the selector pointer and a pointer to
/// a "fixup" variant of the appropriate objc_msgSend. To call, we
/// load and call the function pointer, passing the address of the
/// struct as the second parameter. The runtime determines whether
/// the selector is currently emitted using vtable dispatch; if so, it
/// substitutes a stub function which simply tail-calls through the
/// appropriate vtable slot, and if not, it substitutes a stub function
/// which tail-calls objc_msgSend. Both stubs adjust the selector
/// argument to correctly point to the selector.
RValue
CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
                                              ReturnValueSlot returnSlot,
                                              QualType resultType,
                                              Selector selector,
                                              llvm::Value *arg0,
                                              QualType arg0Type,
                                              bool isSuper,
                                              const CallArgList &formalArgs,
                                              const ObjCMethodDecl *method) {
  // Compute the actual arguments.
  CallArgList args;

  // First argument: the receiver / super-call structure.
  if (!isSuper)
    arg0 = CGF.Builder.CreateBitCast(arg0, ObjCTypes.ObjectPtrTy);
  args.add(RValue::get(arg0), arg0Type);

  // Second argument: a pointer to the message ref structure. Leave
  // the actual argument value blank for now.
  args.add(RValue::get(nullptr), ObjCTypes.MessageRefCPtrTy);

  args.insert(args.end(), formalArgs.begin(), formalArgs.end());

  MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);

  NullReturnState nullReturn;

  // Find the function to call and the mangled name for the message
  // ref structure. Using a different mangled name wouldn't actually
  // be a problem; it would just be a waste.
  //
  // The runtime currently never uses vtable dispatch for anything
  // except normal, non-super message-sends.
  // FIXME: don't use this for that.
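  // (Naming example, illustrative: a non-super, non-stret send of
  // objectForKey: produces a message-ref global named
  // "\01l_objc_msgSend_fixup_objectForKey_"; colons become underscores via
  // appendSelectorForMessageRefTable above.)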
llvm::Constant *fn = nullptr; std::string messageRefName("\01l_"); if (CGM.ReturnSlotInterferesWithArgs(MSI.CallInfo)) { if (isSuper) { fn = ObjCTypes.getMessageSendSuper2StretFixupFn(); messageRefName += "objc_msgSendSuper2_stret_fixup"; } else { nullReturn.init(CGF, arg0); fn = ObjCTypes.getMessageSendStretFixupFn(); messageRefName += "objc_msgSend_stret_fixup"; } } else if (!isSuper && CGM.ReturnTypeUsesFPRet(resultType)) { fn = ObjCTypes.getMessageSendFpretFixupFn(); messageRefName += "objc_msgSend_fpret_fixup"; } else { if (isSuper) { fn = ObjCTypes.getMessageSendSuper2FixupFn(); messageRefName += "objc_msgSendSuper2_fixup"; } else { fn = ObjCTypes.getMessageSendFixupFn(); messageRefName += "objc_msgSend_fixup"; } } assert(fn && "CGObjCNonFragileABIMac::EmitMessageSend"); messageRefName += '_'; // Append the selector name, except use underscores anywhere we // would have used colons. appendSelectorForMessageRefTable(messageRefName, selector); llvm::GlobalVariable *messageRef = CGM.getModule().getGlobalVariable(messageRefName); if (!messageRef) { // Build the message ref structure. llvm::Constant *values[] = { fn, GetMethodVarName(selector) }; llvm::Constant *init = llvm::ConstantStruct::getAnon(values); messageRef = new llvm::GlobalVariable(CGM.getModule(), init->getType(), /*constant*/ false, llvm::GlobalValue::WeakAnyLinkage, init, messageRefName); messageRef->setVisibility(llvm::GlobalValue::HiddenVisibility); messageRef->setAlignment(16); messageRef->setSection("__DATA, __objc_msgrefs, coalesced"); } bool requiresnullCheck = false; if (CGM.getLangOpts().ObjCAutoRefCount && method) for (const auto *ParamDecl : method->params()) { if (ParamDecl->hasAttr<NSConsumedAttr>()) { if (!nullReturn.NullBB) nullReturn.init(CGF, arg0); requiresnullCheck = true; break; } } llvm::Value *mref = CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy); // Update the message ref argument. args[1].RV = RValue::get(mref); // Load the function to call from the message ref table. llvm::Value *callee = CGF.Builder.CreateStructGEP(ObjCTypes.MessageRefTy, mref, 0); callee = CGF.Builder.CreateLoad(callee, "msgSend_fn"); callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType); RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args); return nullReturn.complete(CGF, result, resultType, formalArgs, requiresnullCheck ? method : nullptr); } /// Generate code for a message send expression in the nonfragile abi. CodeGen::RValue CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, llvm::Value *Receiver, const CallArgList &CallArgs, const ObjCInterfaceDecl *Class, const ObjCMethodDecl *Method) { return isVTableDispatchedSelector(Sel) ? EmitVTableMessageSend(CGF, Return, ResultType, Sel, Receiver, CGF.getContext().getObjCIdType(), false, CallArgs, Method) : EmitMessageSend(CGF, Return, ResultType, EmitSelector(CGF, Sel), Receiver, CGF.getContext().getObjCIdType(), false, CallArgs, Method, ObjCTypes); } llvm::GlobalVariable * CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name, bool Weak) { llvm::GlobalValue::LinkageTypes L = Weak ? 
llvm::GlobalValue::ExternalWeakLinkage : llvm::GlobalValue::ExternalLinkage; llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name); if (!GV) GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy, false, L, nullptr, Name); assert(GV->getLinkage() == L); return GV; } llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF, IdentifierInfo *II, bool Weak, const ObjCInterfaceDecl *ID) { llvm::GlobalVariable *&Entry = ClassReferences[II]; if (!Entry) { std::string ClassName( getClassSymbolPrefix() + (ID ? ID->getObjCRuntimeNameAsString() : II->getName()).str()); llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, Weak); Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false, llvm::GlobalValue::PrivateLinkage, ClassGV, "OBJC_CLASSLIST_REFERENCES_$_"); Entry->setAlignment( CGM.getDataLayout().getABITypeAlignment( ObjCTypes.ClassnfABIPtrTy)); Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip"); CGM.addCompilerUsedGlobal(Entry); } return CGF.Builder.CreateLoad(Entry); } llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID) { return EmitClassRefFromId(CGF, ID->getIdentifier(), ID->isWeakImported(), ID); } llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef( CodeGenFunction &CGF) { IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool"); return EmitClassRefFromId(CGF, II, false, 0); } llvm::Value * CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID) { llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()]; if (!Entry) { llvm::SmallString<64> ClassName(getClassSymbolPrefix()); ClassName += ID->getObjCRuntimeNameAsString(); llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(), ID->isWeakImported()); Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false, llvm::GlobalValue::PrivateLinkage, ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_"); Entry->setAlignment( CGM.getDataLayout().getABITypeAlignment( ObjCTypes.ClassnfABIPtrTy)); Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip"); CGM.addCompilerUsedGlobal(Entry); } return CGF.Builder.CreateLoad(Entry); } /// EmitMetaClassRef - Return a Value * of the address of _class_t /// meta-data /// llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID, bool Weak) { llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()]; if (!Entry) { llvm::SmallString<64> MetaClassName(getMetaclassSymbolPrefix()); MetaClassName += ID->getObjCRuntimeNameAsString(); llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName.str(), Weak); Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false, llvm::GlobalValue::PrivateLinkage, MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_"); Entry->setAlignment( CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABIPtrTy)); Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip"); CGM.addCompilerUsedGlobal(Entry); } return CGF.Builder.CreateLoad(Entry); } /// GetClass - Return a reference to the class for the given interface /// decl. 
llvm::Value *CGObjCNonFragileABIMac::GetClass(CodeGenFunction &CGF, const ObjCInterfaceDecl *ID) { if (ID->isWeakImported()) { llvm::SmallString<64> ClassName(getClassSymbolPrefix()); ClassName += ID->getObjCRuntimeNameAsString(); llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(), true); (void)ClassGV; assert(ClassGV->hasExternalWeakLinkage()); } return EmitClassRef(CGF, ID); } /// Generates a message send where the super is the receiver. This is /// a message send to self with special delivery semantics indicating /// which class's method should be called. CodeGen::RValue CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, const ObjCInterfaceDecl *Class, bool isCategoryImpl, llvm::Value *Receiver, bool IsClassMessage, const CodeGen::CallArgList &CallArgs, const ObjCMethodDecl *Method) { // ... // Create and init a super structure; this is a (receiver, class) // pair we will pass to objc_msgSendSuper. llvm::Value *ObjCSuper = CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super"); llvm::Value *ReceiverAsObject = CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy); CGF.Builder.CreateStore( ReceiverAsObject, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 0)); // If this is a class message the metaclass is passed as the target. llvm::Value *Target; if (IsClassMessage) Target = EmitMetaClassRef(CGF, Class, Class->isWeakImported()); else Target = EmitSuperClassRef(CGF, Class); // FIXME: We shouldn't need to do this cast, rectify the ASTContext and // ObjCTypes types. llvm::Type *ClassTy = CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType()); Target = CGF.Builder.CreateBitCast(Target, ClassTy); CGF.Builder.CreateStore( Target, CGF.Builder.CreateStructGEP(ObjCTypes.SuperTy, ObjCSuper, 1)); return (isVTableDispatchedSelector(Sel)) ? EmitVTableMessageSend(CGF, Return, ResultType, Sel, ObjCSuper, ObjCTypes.SuperPtrCTy, true, CallArgs, Method) : EmitMessageSend(CGF, Return, ResultType, EmitSelector(CGF, Sel), ObjCSuper, ObjCTypes.SuperPtrCTy, true, CallArgs, Method, ObjCTypes); } llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF, Selector Sel, bool lval) { llvm::GlobalVariable *&Entry = SelectorReferences[Sel]; if (!Entry) { llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel), ObjCTypes.SelectorPtrTy); Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false, llvm::GlobalValue::PrivateLinkage, Casted, "OBJC_SELECTOR_REFERENCES_"); Entry->setExternallyInitialized(true); Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip"); CGM.addCompilerUsedGlobal(Entry); } if (lval) return Entry; llvm::LoadInst* LI = CGF.Builder.CreateLoad(Entry); LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"), llvm::MDNode::get(VMContext, None)); return LI; } /// EmitObjCIvarAssign - Code gen for assigning to a __strong object. /// objc_assign_ivar (id src, id *dst, ptrdiff_t) /// void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst, llvm::Value *ivarOffset) { llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4 ? 
CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy)); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst, ivarOffset }; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args); } /// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object. /// objc_assign_strongCast (id src, id *dst) /// void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign( CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst) { llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy)); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst }; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(), args, "weakassign"); } void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable( CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, llvm::Value *Size) { SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy); DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy); llvm::Value *args[] = { DestPtr, SrcPtr, Size }; CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args); } /// EmitObjCWeakRead - Code gen for loading value of a __weak /// object: objc_read_weak (id *src) /// llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead( CodeGen::CodeGenFunction &CGF, llvm::Value *AddrWeakObj) { llvm::Type* DestTy = cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType(); AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy); llvm::Value *read_weak = CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(), AddrWeakObj, "weakread"); read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy); return read_weak; } /// EmitObjCWeakAssign - Code gen for assigning to a __weak object. /// objc_assign_weak (id src, id *dst) /// void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst) { llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy)); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst }; CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(), args, "weakassign"); } /// EmitObjCGlobalAssign - Code gen for assigning to a __strong object. /// objc_assign_global (id src, id *dst) /// void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dst, bool threadlocal) { llvm::Type * SrcTy = src->getType(); if (!isa<llvm::PointerType>(SrcTy)) { unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy); assert(Size <= 8 && "does not support size > 8"); src = (Size == 4 ? 
CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy) : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy)); src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy); } src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy); dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy); llvm::Value *args[] = { src, dst }; if (!threadlocal) CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(), args, "globalassign"); else CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignThreadLocalFn(), args, "threadlocalassign"); } void CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtSynchronizedStmt &S) { EmitAtSynchronizedStmt(CGF, S, cast<llvm::Function>(ObjCTypes.getSyncEnterFn()), cast<llvm::Function>(ObjCTypes.getSyncExitFn())); } llvm::Constant * CGObjCNonFragileABIMac::GetEHType(QualType T) { // There's a particular fixed type info for 'id'. if (T->isObjCIdType() || T->isObjCQualifiedIdType()) { llvm::Constant *IDEHType = CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id"); if (!IDEHType) IDEHType = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false, llvm::GlobalValue::ExternalLinkage, nullptr, "OBJC_EHTYPE_id"); return IDEHType; } // All other types should be Objective-C interface pointer types. const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>(); assert(PT && "Invalid @catch type."); const ObjCInterfaceType *IT = PT->getInterfaceType(); assert(IT && "Invalid @catch type."); return GetInterfaceEHType(IT->getDecl(), false); } void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtTryStmt &S) { EmitTryCatchStmt(CGF, S, cast<llvm::Function>(ObjCTypes.getObjCBeginCatchFn()), cast<llvm::Function>(ObjCTypes.getObjCEndCatchFn()), cast<llvm::Function>(ObjCTypes.getExceptionRethrowFn())); } /// EmitThrowStmt - Generate code for a throw statement. void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S, bool ClearInsertionPoint) { if (const Expr *ThrowExpr = S.getThrowExpr()) { llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr); Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy); CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception) .setDoesNotReturn(); } else { CGF.EmitRuntimeCallOrInvoke(ObjCTypes.getExceptionRethrowFn()) .setDoesNotReturn(); } CGF.Builder.CreateUnreachable(); if (ClearInsertionPoint) CGF.Builder.ClearInsertionPoint(); } llvm::Constant * CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID, bool ForDefinition) { llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()]; // If we don't need a definition, return the entry if found or check // if we use an external reference. if (!ForDefinition) { if (Entry) return Entry; // If this type (or a super class) has the __objc_exception__ // attribute, emit an external reference. if (hasObjCExceptionAttribute(CGM.getContext(), ID)) return Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false, llvm::GlobalValue::ExternalLinkage, nullptr, ("OBJC_EHTYPE_$_" + ID->getObjCRuntimeNameAsString())); } // Otherwise we need to either make a new entry or fill in the // initializer. 
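// The descriptor built below mirrors the runtime's exception-type layout:
// { vtable slot (objc_ehtype_vtable + 2), class name, class object }. A
// definition lands in __DATA,__objc_const, while a speculative reference is
// emitted weak into __DATA,__datacoal_nt so that copies emitted from
// multiple translation units coalesce.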
assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition"); llvm::SmallString<64> ClassName(getClassSymbolPrefix()); ClassName += ID->getObjCRuntimeNameAsString(); std::string VTableName = "objc_ehtype_vtable"; llvm::GlobalVariable *VTableGV = CGM.getModule().getGlobalVariable(VTableName); if (!VTableGV) VTableGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.Int8PtrTy, false, llvm::GlobalValue::ExternalLinkage, nullptr, VTableName); llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2); llvm::Constant *Values[] = { llvm::ConstantExpr::getGetElementPtr(VTableGV->getValueType(), VTableGV, VTableIdx), GetClassName(ID->getObjCRuntimeNameAsString()), GetClassGlobal(ClassName.str())}; llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values); llvm::GlobalValue::LinkageTypes L = ForDefinition ? llvm::GlobalValue::ExternalLinkage : llvm::GlobalValue::WeakAnyLinkage; if (Entry) { Entry->setInitializer(Init); } else { llvm::SmallString<64> EHTYPEName("OBJC_EHTYPE_$_"); EHTYPEName += ID->getObjCRuntimeNameAsString(); Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false, L, Init, EHTYPEName.str()); } assert(Entry->getLinkage() == L); if (ID->getVisibility() == HiddenVisibility) Entry->setVisibility(llvm::GlobalValue::HiddenVisibility); Entry->setAlignment(CGM.getDataLayout().getABITypeAlignment( ObjCTypes.EHTypeTy)); if (ForDefinition) Entry->setSection("__DATA,__objc_const"); else Entry->setSection("__DATA,__datacoal_nt,coalesced"); return Entry; } /* *** */ CodeGen::CGObjCRuntime * CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) { switch (CGM.getLangOpts().ObjCRuntime.getKind()) { case ObjCRuntime::FragileMacOSX: return new CGObjCMac(CGM); case ObjCRuntime::MacOSX: case ObjCRuntime::iOS: return new CGObjCNonFragileABIMac(CGM); case ObjCRuntime::GNUstep: case ObjCRuntime::GCC: case ObjCRuntime::ObjFW: llvm_unreachable("these runtimes are not Mac runtimes"); } llvm_unreachable("bad runtime"); } #endif // HLSL Change
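// Illustrative sketch of the vtable-dispatch artifacts emitted by
// EmitVTableMessageSend above (assuming 'retain' is in the vtable-dispatched
// selector list): a send like [obj retain] produces a hidden, coalesced
// message ref in __DATA,__objc_msgrefs, roughly
//   \01l_objc_msgSend_fixup_retain = { objc_msgSend_fixup, "retain" }
// and the call site loads slot 0 of that struct and calls it indirectly,
// passing the message-ref pointer as the second argument.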
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGClass.cpp
//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code dealing with C++ code generation of classes // //===----------------------------------------------------------------------===// #include "CGBlocks.h" #include "CGCXXABI.h" #include "CGDebugInfo.h" #include "CGRecordLayout.h" #include "CodeGenFunction.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/EvaluatedExprVisitor.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/TargetBuiltins.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/IR/Intrinsics.h" using namespace clang; using namespace CodeGen; CharUnits CodeGenModule::computeNonVirtualBaseClassOffset( const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start, CastExpr::path_const_iterator End) { CharUnits Offset = CharUnits::Zero(); const ASTContext &Context = getContext(); const CXXRecordDecl *RD = DerivedClass; for (CastExpr::path_const_iterator I = Start; I != End; ++I) { const CXXBaseSpecifier *Base = *I; assert(!Base->isVirtual() && "Should not see virtual bases here!"); // Get the layout. const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); // Add the offset. Offset += Layout.getBaseClassOffset(BaseDecl); RD = BaseDecl; } return Offset; } llvm::Constant * CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd) { assert(PathBegin != PathEnd && "Base path should not be empty!"); CharUnits Offset = computeNonVirtualBaseClassOffset(ClassDecl, PathBegin, PathEnd); if (Offset.isZero()) return nullptr; llvm::Type *PtrDiffTy = Types.ConvertType(getContext().getPointerDiffType()); return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity()); } /// Gets the address of a direct base class within a complete object. /// This should only be used for (1) non-virtual bases or (2) virtual bases /// when the type is known to be complete (e.g. in complete destructors). /// /// The object pointed to by 'This' is assumed to be non-null. llvm::Value * CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This, const CXXRecordDecl *Derived, const CXXRecordDecl *Base, bool BaseIsVirtual) { // 'this' must be a pointer (in some address space) to Derived. assert(This->getType()->isPointerTy() && cast<llvm::PointerType>(This->getType())->getElementType() == ConvertType(Derived)); // Compute the offset of the virtual base. CharUnits Offset; const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived); if (BaseIsVirtual) Offset = Layout.getVBaseClassOffset(Base); else Offset = Layout.getBaseClassOffset(Base); // Shift and cast down to the base type. // TODO: for complete types, this should be possible with a GEP. 
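// (The adjustment below is a byte-offset GEP on an i8* followed by a
// bitcast back to the base type, since the typed struct GEP mentioned in
// the TODO would require a complete layout.)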
llvm::Value *V = This; if (Offset.isPositive()) { V = Builder.CreateBitCast(V, Int8PtrTy); V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity()); } V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo()); return V; } static llvm::Value * ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr, CharUnits nonVirtualOffset, llvm::Value *virtualOffset) { // Assert that we have something to do. assert(!nonVirtualOffset.isZero() || virtualOffset != nullptr); // Compute the offset from the static and dynamic components. llvm::Value *baseOffset; if (!nonVirtualOffset.isZero()) { baseOffset = llvm::ConstantInt::get(CGF.PtrDiffTy, nonVirtualOffset.getQuantity()); if (virtualOffset) { baseOffset = CGF.Builder.CreateAdd(virtualOffset, baseOffset); } } else { baseOffset = virtualOffset; } // Apply the base offset. ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy); ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr"); return ptr; } llvm::Value *CodeGenFunction::GetAddressOfBaseClass( llvm::Value *Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue, SourceLocation Loc) { assert(PathBegin != PathEnd && "Base path should not be empty!"); CastExpr::path_const_iterator Start = PathBegin; const CXXRecordDecl *VBase = nullptr; // Sema has done some convenient canonicalization here: if the // access path involved any virtual steps, the conversion path will // *start* with a step down to the correct virtual base subobject, // and hence will not require any further steps. if ((*Start)->isVirtual()) { VBase = cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl()); ++Start; } // Compute the static offset of the ultimate destination within its // allocating subobject (the virtual base, if there is one, or else // the "complete" object that we see). CharUnits NonVirtualOffset = CGM.computeNonVirtualBaseClassOffset( VBase ? VBase : Derived, Start, PathEnd); // If there's a virtual step, we can sometimes "devirtualize" it. // For now, that's limited to when the derived type is final. // TODO: "devirtualize" this for accesses to known-complete objects. if (VBase && Derived->hasAttr<FinalAttr>()) { const ASTRecordLayout &layout = getContext().getASTRecordLayout(Derived); CharUnits vBaseOffset = layout.getVBaseClassOffset(VBase); NonVirtualOffset += vBaseOffset; VBase = nullptr; // we no longer have a virtual step } // Get the base pointer type. llvm::Type *BasePtrTy = ConvertType((PathEnd[-1])->getType())->getPointerTo( Value->getType()->getPointerAddressSpace()); // HLSL Change: match address space QualType DerivedTy = getContext().getRecordType(Derived); CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy); // If the static offset is zero and we don't have a virtual step, // just do a bitcast; null checks are unnecessary. if (NonVirtualOffset.isZero() && !VBase) { if (sanitizePerformTypeCheck()) { EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign, !NullCheckValue); } return Builder.CreateBitCast(Value, BasePtrTy); } llvm::BasicBlock *origBB = nullptr; llvm::BasicBlock *endBB = nullptr; // Skip over the offset (and the vtable load) if we're supposed to // null-check the pointer. 
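// When a null check is requested, the emitted control flow is roughly:
//   entry:        isnull = icmp eq Value, null
//                 br isnull, cast.end, cast.notnull
//   cast.notnull: ...apply offsets and bitcast...; br cast.end
//   cast.end:     "cast.result" = phi [adjusted, cast.notnull], [null, entry]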
if (NullCheckValue) { origBB = Builder.GetInsertBlock(); llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull"); endBB = createBasicBlock("cast.end"); llvm::Value *isNull = Builder.CreateIsNull(Value); Builder.CreateCondBr(isNull, endBB, notNullBB); EmitBlock(notNullBB); } if (sanitizePerformTypeCheck()) { EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign, true); } // Compute the virtual offset. llvm::Value *VirtualOffset = nullptr; if (VBase) { VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase); } // Apply both offsets. Value = ApplyNonVirtualAndVirtualOffset(*this, Value, NonVirtualOffset, VirtualOffset); // Cast to the destination type. Value = Builder.CreateBitCast(Value, BasePtrTy); // Build a phi if we needed a null check. if (NullCheckValue) { llvm::BasicBlock *notNullBB = Builder.GetInsertBlock(); Builder.CreateBr(endBB); EmitBlock(endBB); llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result"); PHI->addIncoming(Value, notNullBB); PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB); Value = PHI; } return Value; } llvm::Value * CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue) { assert(PathBegin != PathEnd && "Base path should not be empty!"); QualType DerivedTy = getContext().getCanonicalType(getContext().getTagDeclType(Derived)); llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo(); llvm::Value *NonVirtualOffset = CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd); if (!NonVirtualOffset) { // No offset, we can just cast back. return Builder.CreateBitCast(Value, DerivedPtrTy); } llvm::BasicBlock *CastNull = nullptr; llvm::BasicBlock *CastNotNull = nullptr; llvm::BasicBlock *CastEnd = nullptr; if (NullCheckValue) { CastNull = createBasicBlock("cast.null"); CastNotNull = createBasicBlock("cast.notnull"); CastEnd = createBasicBlock("cast.end"); llvm::Value *IsNull = Builder.CreateIsNull(Value); Builder.CreateCondBr(IsNull, CastNull, CastNotNull); EmitBlock(CastNotNull); } // Apply the offset. Value = Builder.CreateBitCast(Value, Int8PtrTy); Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset), "sub.ptr"); // Just cast. Value = Builder.CreateBitCast(Value, DerivedPtrTy); if (NullCheckValue) { Builder.CreateBr(CastEnd); EmitBlock(CastNull); Builder.CreateBr(CastEnd); EmitBlock(CastEnd); llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2); PHI->addIncoming(Value, CastNotNull); PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull); Value = PHI; } return Value; } llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD, bool ForVirtualBase, bool Delegating) { if (!CGM.getCXXABI().NeedsVTTParameter(GD)) { // This constructor/destructor does not need a VTT parameter. return nullptr; } const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent(); const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent(); llvm::Value *VTT; uint64_t SubVTTIndex; if (Delegating) { // If this is a delegating constructor call, just load the VTT. return LoadCXXVTT(); } else if (RD == Base) { // If the record matches the base, this is the complete ctor/dtor // variant calling the base variant in a class with virtual bases. 
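// In that case the callee reuses the primary VTT directly, i.e. sub-VTT
// index 0, rather than looking up a secondary VTT below.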
assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) && "doing no-op VTT offset in base dtor/ctor?"); assert(!ForVirtualBase && "Can't have same class as virtual base!"); SubVTTIndex = 0; } else { const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); CharUnits BaseOffset = ForVirtualBase ? Layout.getVBaseClassOffset(Base) : Layout.getBaseClassOffset(Base); SubVTTIndex = CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset)); assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!"); } if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { // A VTT parameter was passed to the constructor, use it. VTT = LoadCXXVTT(); VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex); } else { // We're the complete constructor, so get the VTT by name. VTT = CGM.getVTables().GetAddrOfVTT(RD); VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex); } return VTT; } namespace { /// Call the destructor for a direct base class. struct CallBaseDtor : EHScopeStack::Cleanup { const CXXRecordDecl *BaseClass; bool BaseIsVirtual; CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual) : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {} void Emit(CodeGenFunction &CGF, Flags flags) override { const CXXRecordDecl *DerivedClass = cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent(); const CXXDestructorDecl *D = BaseClass->getDestructor(); llvm::Value *Addr = CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(), DerivedClass, BaseClass, BaseIsVirtual); CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, /*Delegating=*/false, Addr); } }; /// A visitor which checks whether an initializer uses 'this' in a /// way which requires the vtable to be properly set. struct DynamicThisUseChecker : ConstEvaluatedExprVisitor<DynamicThisUseChecker> { typedef ConstEvaluatedExprVisitor<DynamicThisUseChecker> super; bool UsesThis; DynamicThisUseChecker(const ASTContext &C) : super(C), UsesThis(false) {} // Black-list all explicit and implicit references to 'this'. // // Do we need to worry about external references to 'this' derived // from arbitrary code? If so, then anything which runs arbitrary // external code might potentially access the vtable. void VisitCXXThisExpr(const CXXThisExpr *E) { UsesThis = true; } }; } static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) { DynamicThisUseChecker Checker(C); Checker.Visit(Init); return Checker.UsesThis; } static void EmitBaseInitializer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, CXXCtorInitializer *BaseInit, CXXCtorType CtorType) { assert(BaseInit->isBaseInitializer() && "Must have base initializer!"); llvm::Value *ThisPtr = CGF.LoadCXXThis(); const Type *BaseType = BaseInit->getBaseClass(); CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl()); bool isBaseVirtual = BaseInit->isBaseVirtual(); // The base constructor doesn't construct virtual bases. if (CtorType == Ctor_Base && isBaseVirtual) return; // If the initializer for the base (other than the constructor // itself) accesses 'this' in any way, we need to initialize the // vtables. if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit())) CGF.InitializeVTablePointers(ClassDecl); // We can pretend to be a complete class because it only matters for // virtual bases, and we only do virtual bases for complete ctors. 
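// (Ctor_Base already returned early above for virtual bases, so reaching
// this point with a virtual base implies a complete-object constructor.)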
llvm::Value *V = CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl, BaseClassDecl, isBaseVirtual); CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType); AggValueSlot AggSlot = AggValueSlot::forAddr(V, Alignment, Qualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased); CGF.EmitAggExpr(BaseInit->getInit(), AggSlot); if (CGF.CGM.getLangOpts().Exceptions && !BaseClassDecl->hasTrivialDestructor()) CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl, isBaseVirtual); } static void EmitAggMemberInitializer(CodeGenFunction &CGF, LValue LHS, Expr *Init, llvm::Value *ArrayIndexVar, QualType T, ArrayRef<VarDecl *> ArrayIndexes, unsigned Index) { if (Index == ArrayIndexes.size()) { LValue LV = LHS; if (ArrayIndexVar) { // If we have an array index variable, load it and use it as an offset. // Then, increment the value. llvm::Value *Dest = LHS.getAddress(); llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar); Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress"); llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1); Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc"); CGF.Builder.CreateStore(Next, ArrayIndexVar); // Update the LValue. LV.setAddress(Dest); CharUnits Align = CGF.getContext().getTypeAlignInChars(T); LV.setAlignment(std::min(Align, LV.getAlignment())); } switch (CGF.getEvaluationKind(T)) { case TEK_Scalar: CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false); break; case TEK_Complex: CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true); break; case TEK_Aggregate: { AggValueSlot Slot = AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased); CGF.EmitAggExpr(Init, Slot); break; } } return; } const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T); assert(Array && "Array initialization without the array type?"); llvm::Value *IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]); assert(IndexVar && "Array index variable not loaded"); // Initialize this index variable to zero. llvm::Value* Zero = llvm::Constant::getNullValue( CGF.ConvertType(CGF.getContext().getSizeType())); CGF.Builder.CreateStore(Zero, IndexVar); // Start the loop with a block that tests the condition. llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond"); llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end"); CGF.EmitBlock(CondBlock); llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body"); // Generate: if (loop-index < number-of-elements) fall to the loop body, // otherwise, go to the block after the for-loop. uint64_t NumElements = Array->getSize().getZExtValue(); llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar); llvm::Value *NumElementsPtr = llvm::ConstantInt::get(Counter->getType(), NumElements); llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr, "isless"); // If the condition is true, execute the body. CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor); CGF.EmitBlock(ForBody); llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc"); // Inside the loop body recurse to emit the inner loop or, eventually, the // constructor call. EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar, Array->getElementType(), ArrayIndexes, Index + 1); CGF.EmitBlock(ContinueBlock); // Emit the increment of the loop counter. 
llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1); Counter = CGF.Builder.CreateLoad(IndexVar); NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc"); CGF.Builder.CreateStore(NextVal, IndexVar); // Finally, branch back up to the condition for the next iteration. CGF.EmitBranch(CondBlock); // Emit the fall-through block. CGF.EmitBlock(AfterFor, true); } static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) { auto *CD = dyn_cast<CXXConstructorDecl>(D); if (!(CD && CD->isCopyOrMoveConstructor()) && !D->isCopyAssignmentOperator() && !D->isMoveAssignmentOperator()) return false; // We can emit a memcpy for a trivial copy or move constructor/assignment. if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) return true; // We *must* emit a memcpy for a defaulted union copy or move op. if (D->getParent()->isUnion() && D->isDefaulted()) return true; return false; } static void EmitLValueForAnyFieldInitialization(CodeGenFunction &CGF, CXXCtorInitializer *MemberInit, LValue &LHS) { FieldDecl *Field = MemberInit->getAnyMember(); if (MemberInit->isIndirectMemberInitializer()) { // If we are initializing an anonymous union field, drill down to the field. IndirectFieldDecl *IndirectField = MemberInit->getIndirectMember(); for (const auto *I : IndirectField->chain()) LHS = CGF.EmitLValueForFieldInitialization(LHS, cast<FieldDecl>(I)); } else { LHS = CGF.EmitLValueForFieldInitialization(LHS, Field); } } static void EmitMemberInitializer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, CXXCtorInitializer *MemberInit, const CXXConstructorDecl *Constructor, FunctionArgList &Args) { ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation()); assert(MemberInit->isAnyMemberInitializer() && "Must have member initializer!"); assert(MemberInit->getInit() && "Must have initializer!"); // non-static data member initializers. FieldDecl *Field = MemberInit->getAnyMember(); QualType FieldType = Field->getType(); llvm::Value *ThisPtr = CGF.LoadCXXThis(); QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); EmitLValueForAnyFieldInitialization(CGF, MemberInit, LHS); // Special case: if we are in a copy or move constructor, and we are copying // an array of PODs or classes with trivial copy constructors, ignore the // AST and perform the copy we know is equivalent. // FIXME: This is hacky at best... if we had a bit more explicit information // in the AST, we could generalize it more easily. const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(FieldType); if (Array && Constructor->isDefaulted() && Constructor->isCopyOrMoveConstructor()) { QualType BaseElementTy = CGF.getContext().getBaseElementType(Array); CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); if (BaseElementTy.isPODType(CGF.getContext()) || (CE && isMemcpyEquivalentSpecialMember(CE->getConstructor()))) { unsigned SrcArgIndex = CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args); llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex])); LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field); // Copy the aggregate. CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType, LHS.isVolatileQualified()); // Ensure that we destroy the objects if an exception is thrown later in // the constructor. 
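// (This matters e.g. for element types whose copy constructor is trivial
// but whose destructor is not: the bytes were copied above, yet the new
// copies still need destruction if a later initializer throws.)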
QualType::DestructionKind dtorKind = FieldType.isDestructedType(); if (CGF.needsEHCleanup(dtorKind)) CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); return; } } ArrayRef<VarDecl *> ArrayIndexes; if (MemberInit->getNumArrayIndices()) ArrayIndexes = MemberInit->getArrayIndexes(); CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes); } void CodeGenFunction::EmitInitializerForField( FieldDecl *Field, LValue LHS, Expr *Init, ArrayRef<VarDecl *> ArrayIndexes) { QualType FieldType = Field->getType(); switch (getEvaluationKind(FieldType)) { case TEK_Scalar: if (LHS.isSimple()) { EmitExprAsInit(Init, Field, LHS, false); } else { RValue RHS = RValue::get(EmitScalarExpr(Init)); EmitStoreThroughLValue(RHS, LHS); } break; case TEK_Complex: EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true); break; case TEK_Aggregate: { llvm::Value *ArrayIndexVar = nullptr; if (ArrayIndexes.size()) { llvm::Type *SizeTy = ConvertType(getContext().getSizeType()); // The LHS is a pointer to the first object we'll be constructing, as // a flat array. QualType BaseElementTy = getContext().getBaseElementType(FieldType); llvm::Type *BasePtr = ConvertType(BaseElementTy); BasePtr = llvm::PointerType::getUnqual(BasePtr); llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr); LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy); // Create an array index that will be used to walk over all of the // objects we're constructing. ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index"); llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy); Builder.CreateStore(Zero, ArrayIndexVar); // Emit the block variables for the array indices, if any. for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I) EmitAutoVarDecl(*ArrayIndexes[I]); } EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType, ArrayIndexes, 0); } } // Ensure that we destroy this object if an exception is thrown // later in the constructor. QualType::DestructionKind dtorKind = FieldType.isDestructedType(); if (needsEHCleanup(dtorKind)) pushEHDestroy(dtorKind, LHS.getAddress(), FieldType); } /// Checks whether the given constructor is a valid subject for the /// complete-to-base constructor delegation optimization, i.e. /// emitting the complete constructor as a simple call to the base /// constructor. static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) { // Currently we disable the optimization for classes with virtual // bases because (1) the addresses of parameter variables need to be // consistent across all initializers but (2) the delegate function // call necessarily creates a second copy of the parameter variable. // // The limiting example (purely theoretical AFAIK): // struct A { A(int &c) { c++; } }; // struct B : virtual A { // B(int count) : A(count) { printf("%d\n", count); } // }; // ...although even this example could in principle be emitted as a // delegation since the address of the parameter doesn't escape. if (Ctor->getParent()->getNumVBases()) { // TODO: white-list trivial vbase initializers. This case wouldn't // be subject to the restrictions below. // TODO: white-list cases where: // - there are no non-reference parameters to the constructor // - the initializers don't access any non-reference parameters // - the initializers don't take the address of non-reference // parameters // - etc. 
// If we ever add any of the above cases, remember that: // - function-try-blocks will always blacklist this optimization // - we need to perform the constructor prologue and cleanup in // EmitConstructorBody. return false; } // We also disable the optimization for variadic functions because // it's impossible to "re-pass" varargs. if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic()) return false; // FIXME: Decide if we can do a delegation of a delegating constructor. if (Ctor->isDelegatingConstructor()) return false; return true; } // Emit code in ctor (Prologue==true) or dtor (Prologue==false) // to poison the extra field paddings inserted under // -fsanitize-address-field-padding=1|2. void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) { ASTContext &Context = getContext(); const CXXRecordDecl *ClassDecl = Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent() : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent(); if (!ClassDecl->mayInsertExtraPadding()) return; struct SizeAndOffset { uint64_t Size; uint64_t Offset; }; unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits(); const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl); // Populate sizes and offsets of fields. SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount()); for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) SSV[i].Offset = Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity(); size_t NumFields = 0; for (const auto *Field : ClassDecl->fields()) { const FieldDecl *D = Field; std::pair<CharUnits, CharUnits> FieldInfo = Context.getTypeInfoInChars(D->getType()); CharUnits FieldSize = FieldInfo.first; assert(NumFields < SSV.size()); SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity(); NumFields++; } assert(NumFields == SSV.size()); if (SSV.size() <= 1) return; // We will insert calls to __asan_* run-time functions. // LLVM AddressSanitizer pass may decide to inline them later. llvm::Type *Args[2] = {IntPtrTy, IntPtrTy}; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, Args, false); llvm::Constant *F = CGM.CreateRuntimeFunction( FTy, Prologue ? "__asan_poison_intra_object_redzone" : "__asan_unpoison_intra_object_redzone"); llvm::Value *ThisPtr = LoadCXXThis(); ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy); uint64_t TypeSize = Info.getNonVirtualSize().getQuantity(); // For each field check if it has sufficient padding, // if so (un)poison it with a call. for (size_t i = 0; i < SSV.size(); i++) { uint64_t AsanAlignment = 8; uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset; uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size; uint64_t EndOffset = SSV[i].Offset + SSV[i].Size; if (PoisonSize < AsanAlignment || !SSV[i].Size || (NextField % AsanAlignment) != 0) continue; Builder.CreateCall( F, {Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)), Builder.getIntN(PtrSize, PoisonSize)}); } } /// EmitConstructorBody - Emits the body of the current constructor. void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) { EmitAsanPrologueOrEpilogue(true); const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl()); CXXCtorType CtorType = CurGD.getCtorType(); assert((CGM.getTarget().getCXXABI().hasConstructorVariants() || CtorType == Ctor_Complete) && "can only generate complete ctor for this ABI"); // Before we go any further, try the complete->base constructor // delegation optimization. 
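// If IsConstructorDelegationValid above accepts the constructor (no virtual
// bases, not variadic, not delegating), the complete variant is emitted as
// a single forwarding call to the base variant rather than duplicating the
// prologue and body.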
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) && CGM.getTarget().getCXXABI().hasConstructorVariants()) { EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd()); return; } const FunctionDecl *Definition = 0; Stmt *Body = Ctor->getBody(Definition); assert(Definition == Ctor && "emitting wrong constructor body"); #if 1 // HLSL Change - no support for exception handling assert(!(Body && isa<CXXTryStmt>(Body))); #else // Enter the function-try-block before the constructor prologue if // applicable. bool IsTryBody = (Body && isa<CXXTryStmt>(Body)); if (IsTryBody) EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); #endif // HLSL Change - no support for exception handling incrementProfileCounter(Body); RunCleanupsScope RunCleanups(*this); // TODO: in restricted cases, we can emit the vbase initializers of // a complete ctor and then delegate to the base ctor. // Emit the constructor prologue, i.e. the base and member // initializers. EmitCtorPrologue(Ctor, CtorType, Args); #if 1 // HLSL Change - no support for exception handling EmitStmt(Body); #else // Emit the body of the statement. if (IsTryBody) EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); else if (Body) EmitStmt(Body); #endif // HLSL Change - no support for exception handling // Emit any cleanup blocks associated with the member or base // initializers, which includes (along the exceptional path) the // destructors for those members and bases that were fully // constructed. RunCleanups.ForceCleanup(); #if 0 // HLSL Change - no support for exception handling if (IsTryBody) ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); #endif // HLSL Change - no support for exception handling } namespace { /// RAII object to indicate that codegen is copying the value representation /// instead of the object representation. Useful when copying a struct or /// class which has uninitialized members and we're only performing /// lvalue-to-rvalue conversion on the object but not its members. class CopyingValueRepresentation { public: explicit CopyingValueRepresentation(CodeGenFunction &CGF) : CGF(CGF), OldSanOpts(CGF.SanOpts) { CGF.SanOpts.set(SanitizerKind::Bool, false); CGF.SanOpts.set(SanitizerKind::Enum, false); } ~CopyingValueRepresentation() { CGF.SanOpts = OldSanOpts; } private: CodeGenFunction &CGF; SanitizerSet OldSanOpts; }; } namespace { class FieldMemcpyizer { public: FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl, const VarDecl *SrcRec) : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec), RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)), FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0), LastFieldOffset(0), LastAddedFieldIndex(0) {} bool isMemcpyableField(FieldDecl *F) const { // Never memcpy fields when we are adding poisoned paddings. if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding) return false; Qualifiers Qual = F->getType().getQualifiers(); if (Qual.hasVolatile() || Qual.hasObjCLifetime()) return false; return true; } void addMemcpyableField(FieldDecl *F) { if (!FirstField) addInitialField(F); else addNextField(F); } CharUnits getMemcpySize(uint64_t FirstByteOffset) const { unsigned LastFieldSize = LastField->isBitField() ? 
LastField->getBitWidthValue(CGF.getContext()) : CGF.getContext().getTypeSize(LastField->getType()); uint64_t MemcpySizeBits = LastFieldOffset + LastFieldSize - FirstByteOffset + CGF.getContext().getCharWidth() - 1; CharUnits MemcpySize = CGF.getContext().toCharUnitsFromBits(MemcpySizeBits); return MemcpySize; } void emitMemcpy() { // Give the subclass a chance to bail out if it feels the memcpy isn't // worth it (e.g. Hasn't aggregated enough data). if (!FirstField) { return; } uint64_t FirstByteOffset; if (FirstField->isBitField()) { const CGRecordLayout &RL = CGF.getTypes().getCGRecordLayout(FirstField->getParent()); const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField); // FirstFieldOffset is not appropriate for bitfields, // we need to use the storage offset instead. FirstByteOffset = CGF.getContext().toBits(BFInfo.StorageOffset); } else { FirstByteOffset = FirstFieldOffset; } CharUnits MemcpySize = getMemcpySize(FirstByteOffset); QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); llvm::Value *ThisPtr = CGF.LoadCXXThis(); LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); LValue Dest = CGF.EmitLValueForFieldInitialization(DestLV, FirstField); llvm::Value *SrcPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(SrcRec)); LValue SrcLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy); LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField); CharUnits Offset = CGF.getContext().toCharUnitsFromBits(FirstByteOffset); CharUnits Alignment = DestLV.getAlignment().alignmentAtOffset(Offset); emitMemcpyIR(Dest.isBitField() ? Dest.getBitFieldAddr() : Dest.getAddress(), Src.isBitField() ? Src.getBitFieldAddr() : Src.getAddress(), MemcpySize, Alignment); reset(); } void reset() { FirstField = nullptr; } protected: CodeGenFunction &CGF; const CXXRecordDecl *ClassDecl; private: void emitMemcpyIR(llvm::Value *DestPtr, llvm::Value *SrcPtr, CharUnits Size, CharUnits Alignment) { llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType()); llvm::Type *DBP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), DPT->getAddressSpace()); DestPtr = CGF.Builder.CreateBitCast(DestPtr, DBP); llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType()); llvm::Type *SBP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext(), SPT->getAddressSpace()); SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, SBP); CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, Size.getQuantity(), Alignment.getQuantity()); } void addInitialField(FieldDecl *F) { FirstField = F; LastField = F; FirstFieldOffset = RecLayout.getFieldOffset(F->getFieldIndex()); LastFieldOffset = FirstFieldOffset; LastAddedFieldIndex = F->getFieldIndex(); return; } void addNextField(FieldDecl *F) { // For the most part, the following invariant will hold: // F->getFieldIndex() == LastAddedFieldIndex + 1 // The one exception is that Sema won't add a copy-initializer for an // unnamed bitfield, which will show up here as a gap in the sequence. assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 && "Cannot aggregate fields out of order."); LastAddedFieldIndex = F->getFieldIndex(); // The 'first' and 'last' fields are chosen by offset, rather than field // index. This allows the code to support bitfields, as well as regular // fields. 
uint64_t FOffset = RecLayout.getFieldOffset(F->getFieldIndex()); if (FOffset < FirstFieldOffset) { FirstField = F; FirstFieldOffset = FOffset; } else if (FOffset > LastFieldOffset) { LastField = F; LastFieldOffset = FOffset; } } const VarDecl *SrcRec; const ASTRecordLayout &RecLayout; FieldDecl *FirstField; FieldDecl *LastField; uint64_t FirstFieldOffset, LastFieldOffset; unsigned LastAddedFieldIndex; }; class ConstructorMemcpyizer : public FieldMemcpyizer { private: /// Get source argument for copy constructor. Returns null if not a copy /// constructor. static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF, const CXXConstructorDecl *CD, FunctionArgList &Args) { if (CD->isCopyOrMoveConstructor() && CD->isDefaulted()) return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)]; return nullptr; } // Returns true if a CXXCtorInitializer represents a member initialization // that can be rolled into a memcpy. bool isMemberInitMemcpyable(CXXCtorInitializer *MemberInit) const { if (!MemcpyableCtor) return false; FieldDecl *Field = MemberInit->getMember(); assert(Field && "No field for member init."); QualType FieldType = Field->getType(); CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit()); // Bail out on non-memcpyable, not-trivially-copyable members. if (!(CE && isMemcpyEquivalentSpecialMember(CE->getConstructor())) && !(FieldType.isTriviallyCopyableType(CGF.getContext()) || FieldType->isReferenceType())) return false; // Bail out on volatile fields. if (!isMemcpyableField(Field)) return false; // Otherwise we're good. return true; } public: ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD, FunctionArgList &Args) : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)), ConstructorDecl(CD), MemcpyableCtor(CD->isDefaulted() && CD->isCopyOrMoveConstructor() && CGF.getLangOpts().getGC() == LangOptions::NonGC), Args(Args) { } void addMemberInitializer(CXXCtorInitializer *MemberInit) { if (isMemberInitMemcpyable(MemberInit)) { AggregatedInits.push_back(MemberInit); addMemcpyableField(MemberInit->getMember()); } else { emitAggregatedInits(); EmitMemberInitializer(CGF, ConstructorDecl->getParent(), MemberInit, ConstructorDecl, Args); } } void emitAggregatedInits() { if (AggregatedInits.size() <= 1) { // This memcpy is too small to be worthwhile. Fall back on default // codegen. 
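// (A lone initializer goes through EmitMemberInitializer under
// CopyingValueRepresentation, which suppresses the bool/enum sanitizer
// checks because we are copying the value representation, which may be
// uninitialized, rather than loading a checked value.)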
if (!AggregatedInits.empty()) { CopyingValueRepresentation CVR(CGF); EmitMemberInitializer(CGF, ConstructorDecl->getParent(), AggregatedInits[0], ConstructorDecl, Args); AggregatedInits.clear(); } reset(); return; } pushEHDestructors(); emitMemcpy(); AggregatedInits.clear(); } void pushEHDestructors() { llvm::Value *ThisPtr = CGF.LoadCXXThis(); QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl); LValue LHS = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy); for (unsigned i = 0; i < AggregatedInits.size(); ++i) { CXXCtorInitializer *MemberInit = AggregatedInits[i]; QualType FieldType = MemberInit->getAnyMember()->getType(); QualType::DestructionKind dtorKind = FieldType.isDestructedType(); if (!CGF.needsEHCleanup(dtorKind)) continue; LValue FieldLHS = LHS; EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS); CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType); } } void finish() { emitAggregatedInits(); } private: const CXXConstructorDecl *ConstructorDecl; bool MemcpyableCtor; FunctionArgList &Args; SmallVector<CXXCtorInitializer*, 16> AggregatedInits; }; class AssignmentMemcpyizer : public FieldMemcpyizer { private: // Returns the memcpyable field copied by the given statement, if one // exists. Otherwise returns null. FieldDecl *getMemcpyableField(Stmt *S) { if (!AssignmentsMemcpyable) return nullptr; if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) { // Recognise trivial assignments. if (BO->getOpcode() != BO_Assign) return nullptr; MemberExpr *ME = dyn_cast<MemberExpr>(BO->getLHS()); if (!ME) return nullptr; FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl()); if (!Field || !isMemcpyableField(Field)) return nullptr; Stmt *RHS = BO->getRHS(); if (ImplicitCastExpr *EC = dyn_cast<ImplicitCastExpr>(RHS)) RHS = EC->getSubExpr(); if (!RHS) return nullptr; MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS); if (!ME2 || dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field) return nullptr; return Field; } else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) { CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl()); if (!(MD && isMemcpyEquivalentSpecialMember(MD))) return nullptr; MemberExpr *IOA = dyn_cast<MemberExpr>(MCE->getImplicitObjectArgument()); if (!IOA) return nullptr; FieldDecl *Field = dyn_cast<FieldDecl>(IOA->getMemberDecl()); if (!Field || !isMemcpyableField(Field)) return nullptr; MemberExpr *Arg0 = dyn_cast<MemberExpr>(MCE->getArg(0)); if (!Arg0 || Field != dyn_cast<FieldDecl>(Arg0->getMemberDecl())) return nullptr; return Field; } else if (CallExpr *CE = dyn_cast<CallExpr>(S)) { FunctionDecl *FD = dyn_cast<FunctionDecl>(CE->getCalleeDecl()); if (!FD || FD->getBuiltinID() != Builtin::BI__builtin_memcpy) return nullptr; Expr *DstPtr = CE->getArg(0); if (ImplicitCastExpr *DC = dyn_cast<ImplicitCastExpr>(DstPtr)) DstPtr = DC->getSubExpr(); UnaryOperator *DUO = dyn_cast<UnaryOperator>(DstPtr); if (!DUO || DUO->getOpcode() != UO_AddrOf) return nullptr; MemberExpr *ME = dyn_cast<MemberExpr>(DUO->getSubExpr()); if (!ME) return nullptr; FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl()); if (!Field || !isMemcpyableField(Field)) return nullptr; Expr *SrcPtr = CE->getArg(1); if (ImplicitCastExpr *SC = dyn_cast<ImplicitCastExpr>(SrcPtr)) SrcPtr = SC->getSubExpr(); UnaryOperator *SUO = dyn_cast<UnaryOperator>(SrcPtr); if (!SUO || SUO->getOpcode() != UO_AddrOf) return nullptr; MemberExpr *ME2 = dyn_cast<MemberExpr>(SUO->getSubExpr()); if (!ME2 || Field != dyn_cast<FieldDecl>(ME2->getMemberDecl())) return nullptr; return Field; }
return nullptr; } bool AssignmentsMemcpyable; SmallVector<Stmt*, 16> AggregatedStmts; public: AssignmentMemcpyizer(CodeGenFunction &CGF, const CXXMethodDecl *AD, FunctionArgList &Args) : FieldMemcpyizer(CGF, AD->getParent(), Args[Args.size() - 1]), AssignmentsMemcpyable(CGF.getLangOpts().getGC() == LangOptions::NonGC) { assert(Args.size() == 2); } void emitAssignment(Stmt *S) { FieldDecl *F = getMemcpyableField(S); if (F) { addMemcpyableField(F); AggregatedStmts.push_back(S); } else { emitAggregatedStmts(); CGF.EmitStmt(S); } } void emitAggregatedStmts() { if (AggregatedStmts.size() <= 1) { if (!AggregatedStmts.empty()) { CopyingValueRepresentation CVR(CGF); CGF.EmitStmt(AggregatedStmts[0]); } reset(); } emitMemcpy(); AggregatedStmts.clear(); } void finish() { emitAggregatedStmts(); } }; } /// EmitCtorPrologue - This routine generates necessary code to initialize /// base classes and non-static data members belonging to this constructor. void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType CtorType, FunctionArgList &Args) { if (CD->isDelegatingConstructor()) return EmitDelegatingCXXConstructorCall(CD, Args); const CXXRecordDecl *ClassDecl = CD->getParent(); CXXConstructorDecl::init_const_iterator B = CD->init_begin(), E = CD->init_end(); llvm::BasicBlock *BaseCtorContinueBB = nullptr; if (ClassDecl->getNumVBases() && !CGM.getTarget().getCXXABI().hasConstructorVariants()) { // The ABIs that don't have constructor variants need to put a branch // before the virtual base initialization code. BaseCtorContinueBB = CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl); assert(BaseCtorContinueBB); } // Virtual base initializers first. for (; B != E && (*B)->isBaseInitializer() && (*B)->isBaseVirtual(); B++) { EmitBaseInitializer(*this, ClassDecl, *B, CtorType); } if (BaseCtorContinueBB) { // Complete object handler should continue to the remaining initializers. Builder.CreateBr(BaseCtorContinueBB); EmitBlock(BaseCtorContinueBB); } // Then, non-virtual base initializers. for (; B != E && (*B)->isBaseInitializer(); B++) { assert(!(*B)->isBaseVirtual()); EmitBaseInitializer(*this, ClassDecl, *B, CtorType); } InitializeVTablePointers(ClassDecl); // And finally, initialize class members. FieldConstructionScope FCS(*this, CXXThisValue); ConstructorMemcpyizer CM(*this, CD, Args); for (; B != E; B++) { CXXCtorInitializer *Member = (*B); assert(!Member->isBaseInitializer()); assert(Member->isAnyMemberInitializer() && "Delegating initializer on non-delegating constructor"); CM.addMemberInitializer(Member); } CM.finish(); } static bool FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field); static bool HasTrivialDestructorBody(ASTContext &Context, const CXXRecordDecl *BaseClassDecl, const CXXRecordDecl *MostDerivedClassDecl) { // If the destructor is trivial we don't have to check anything else. if (BaseClassDecl->hasTrivialDestructor()) return true; if (!BaseClassDecl->getDestructor()->hasTrivialBody()) return false; // Check fields. for (const auto *Field : BaseClassDecl->fields()) if (!FieldHasTrivialDestructorBody(Context, Field)) return false; // Check non-virtual bases. for (const auto &I : BaseClassDecl->bases()) { if (I.isVirtual()) continue; const CXXRecordDecl *NonVirtualBase = cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); if (!HasTrivialDestructorBody(Context, NonVirtualBase, MostDerivedClassDecl)) return false; } if (BaseClassDecl == MostDerivedClassDecl) { // Check virtual bases. 
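// (Virtual bases are destroyed only by the most-derived class's complete
// destructor, so they affect triviality only at the top of the recursion.)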
for (const auto &I : BaseClassDecl->vbases()) { const CXXRecordDecl *VirtualBase = cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); if (!HasTrivialDestructorBody(Context, VirtualBase, MostDerivedClassDecl)) return false; } } return true; } static bool FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field) { QualType FieldBaseElementType = Context.getBaseElementType(Field->getType()); const RecordType *RT = FieldBaseElementType->getAs<RecordType>(); if (!RT) return true; CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl()); // The destructor for an implicit anonymous union member is never invoked. if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion()) return false; return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl); } /// CanSkipVTablePointerInitialization - Check whether we need to initialize /// any vtable pointers before calling this destructor. static bool CanSkipVTablePointerInitialization(ASTContext &Context, const CXXDestructorDecl *Dtor) { if (!Dtor->hasTrivialBody()) return false; // Check the fields. const CXXRecordDecl *ClassDecl = Dtor->getParent(); for (const auto *Field : ClassDecl->fields()) if (!FieldHasTrivialDestructorBody(Context, Field)) return false; return true; } // Generates function call for handling object poisoning, passing in // references to 'this' and its size as arguments. static void EmitDtorSanitizerCallback(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor) { const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Dtor->getParent()); llvm::Value *Args[] = { CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.VoidPtrTy), llvm::ConstantInt::get(CGF.SizeTy, Layout.getSize().getQuantity())}; llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy}; llvm::FunctionType *FnType = llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false); llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback"); CGF.EmitNounwindRuntimeCall(Fn, Args); } /// EmitDestructorBody - Emits the body of the current destructor. void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) { const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl()); CXXDtorType DtorType = CurGD.getDtorType(); Stmt *Body = Dtor->getBody(); if (Body) incrementProfileCounter(Body); // The call to operator delete in a deleting destructor happens // outside of the function-try-block, which means it's always // possible to delegate the destructor body to the complete // destructor. Do so. if (DtorType == Dtor_Deleting) { EnterDtorCleanups(Dtor, Dtor_Deleting); EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, /*Delegating=*/false, LoadCXXThis()); PopCleanupBlock(); return; } #if 1 // HLSL Change - no support for exception handling assert(!(Body && isa<CXXTryStmt>(Body))); #else // If the body is a function-try-block, enter the try before // anything else. bool isTryBody = (Body && isa<CXXTryStmt>(Body)); if (isTryBody) EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true); #endif // HLSL Change - no support for exception handling EmitAsanPrologueOrEpilogue(false); // Enter the epilogue cleanups. RunCleanupsScope DtorEpilogue(*this); // If this is the complete variant, just invoke the base variant; // the epilogue will destruct the virtual bases. But we can't do // this optimization if the body is a function-try-block, because // we'd introduce *two* handler blocks. In the Microsoft ABI, we // always delegate because we might not have a definition in this TU. 
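// (In this HLSL configuration the function-try-block machinery is compiled
// out above, so the complete variant always reduces to the delegating call
// emitted in the Dtor_Complete case below.)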
switch (DtorType) { case Dtor_Comdat: llvm_unreachable("not expecting a COMDAT"); case Dtor_Deleting: llvm_unreachable("already handled deleting case"); case Dtor_Complete: assert((Body || getTarget().getCXXABI().isMicrosoft()) && "can't emit a dtor without a body for non-Microsoft ABIs"); // Enter the cleanup scopes for virtual bases. EnterDtorCleanups(Dtor, Dtor_Complete); #if 1 // HLSL Change - no support for exception handling EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, /*Delegating=*/false, LoadCXXThis()); #else if (!isTryBody) { EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false, /*Delegating=*/false, LoadCXXThis()); break; } #endif // HLSL Change - no support for exception handling // Fallthrough: act like we're in the base variant. LLVM_FALLTHROUGH; case Dtor_Base: assert(Body); // Enter the cleanup scopes for fields and non-virtual bases. EnterDtorCleanups(Dtor, Dtor_Base); // Initialize the vtable pointers before entering the body. if (!CanSkipVTablePointerInitialization(getContext(), Dtor)) InitializeVTablePointers(Dtor->getParent()); #if 0 // HLSL Change - no support for exception handling if (isTryBody) EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock()); else #endif // HLSL Change - no support for exception handling if (Body) EmitStmt(Body); else { assert(Dtor->isImplicit() && "bodyless dtor not implicit"); // nothing to do besides what's in the epilogue } // -fapple-kext must inline any call to this dtor into // the caller's body. if (getLangOpts().AppleKext) CurFn->addFnAttr(llvm::Attribute::AlwaysInline); break; } // Jump out through the epilogue cleanups. DtorEpilogue.ForceCleanup(); // Exit the try if applicable. #if 0 // HLSL Change - no support for exception handling if (isTryBody) ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true); #endif // HLSL Change - no support for exception handling // Insert memory-poisoning instrumentation. if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor) EmitDtorSanitizerCallback(*this, Dtor); } void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args) { const CXXMethodDecl *AssignOp = cast<CXXMethodDecl>(CurGD.getDecl()); const Stmt *RootS = AssignOp->getBody(); assert(isa<CompoundStmt>(RootS) && "Body of an implicit assignment operator should be compound stmt."); const CompoundStmt *RootCS = cast<CompoundStmt>(RootS); LexicalScope Scope(*this, RootCS->getSourceRange()); AssignmentMemcpyizer AM(*this, AssignOp, Args); for (auto *I : RootCS->body()) AM.emitAssignment(I); AM.finish(); } namespace { /// Call the operator delete associated with the current destructor. 
struct CallDtorDelete : EHScopeStack::Cleanup { CallDtorDelete() {} void Emit(CodeGenFunction &CGF, Flags flags) override { const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); const CXXRecordDecl *ClassDecl = Dtor->getParent(); CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), CGF.getContext().getTagDeclType(ClassDecl)); } }; struct CallDtorDeleteConditional : EHScopeStack::Cleanup { llvm::Value *ShouldDeleteCondition; public: CallDtorDeleteConditional(llvm::Value *ShouldDeleteCondition) : ShouldDeleteCondition(ShouldDeleteCondition) { assert(ShouldDeleteCondition != nullptr); } void Emit(CodeGenFunction &CGF, Flags flags) override { llvm::BasicBlock *callDeleteBB = CGF.createBasicBlock("dtor.call_delete"); llvm::BasicBlock *continueBB = CGF.createBasicBlock("dtor.continue"); llvm::Value *ShouldCallDelete = CGF.Builder.CreateIsNull(ShouldDeleteCondition); CGF.Builder.CreateCondBr(ShouldCallDelete, continueBB, callDeleteBB); CGF.EmitBlock(callDeleteBB); const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl); const CXXRecordDecl *ClassDecl = Dtor->getParent(); CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(), CGF.getContext().getTagDeclType(ClassDecl)); CGF.Builder.CreateBr(continueBB); CGF.EmitBlock(continueBB); } }; class DestroyField : public EHScopeStack::Cleanup { const FieldDecl *field; CodeGenFunction::Destroyer *destroyer; bool useEHCleanupForArray; public: DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer, bool useEHCleanupForArray) : field(field), destroyer(destroyer), useEHCleanupForArray(useEHCleanupForArray) {} void Emit(CodeGenFunction &CGF, Flags flags) override { // Find the address of the field. llvm::Value *thisValue = CGF.LoadCXXThis(); QualType RecordTy = CGF.getContext().getTagDeclType(field->getParent()); LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy); LValue LV = CGF.EmitLValueForField(ThisLV, field); assert(LV.isSimple()); CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer, flags.isForNormalCleanup() && useEHCleanupForArray); } }; } /// \brief Emit all code that comes at the end of class's /// destructor. This is to call destructors on members and base classes /// in reverse order of their construction. void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD, CXXDtorType DtorType) { assert((!DD->isTrivial() || DD->hasAttr<DLLExportAttr>()) && "Should not emit dtor epilogue for non-exported trivial dtor!"); // The deleting-destructor phase just needs to call the appropriate // operator delete that Sema picked up. if (DtorType == Dtor_Deleting) { assert(DD->getOperatorDelete() && "operator delete missing - EnterDtorCleanups"); if (CXXStructorImplicitParamValue) { // If there is an implicit param to the deleting dtor, it's a boolean // telling whether we should call delete at the end of the dtor. EHStack.pushCleanup<CallDtorDeleteConditional>( NormalAndEHCleanup, CXXStructorImplicitParamValue); } else { EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup); } return; } const CXXRecordDecl *ClassDecl = DD->getParent(); // Unions have no bases and do not call field destructors. if (ClassDecl->isUnion()) return; // The complete-destructor phase just destructs all the virtual bases. if (DtorType == Dtor_Complete) { // We push them in the forward order so that they'll be popped in // the reverse order. 
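// (EHStack runs cleanups in LIFO order when the scope unwinds, so pushing
// the virtual bases in declaration order destroys them in the reverse of
// their construction order.)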
for (const auto &Base : ClassDecl->vbases()) { CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); // Ignore trivial destructors. if (BaseClassDecl->hasTrivialDestructor()) continue; EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl, /*BaseIsVirtual*/ true); } return; } assert(DtorType == Dtor_Base); // Destroy non-virtual bases. for (const auto &Base : ClassDecl->bases()) { // Ignore virtual bases. if (Base.isVirtual()) continue; CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl(); // Ignore trivial destructors. if (BaseClassDecl->hasTrivialDestructor()) continue; EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl, /*BaseIsVirtual*/ false); } // Destroy direct fields. for (const auto *Field : ClassDecl->fields()) { QualType type = Field->getType(); QualType::DestructionKind dtorKind = type.isDestructedType(); if (!dtorKind) continue; // Anonymous union members do not have their destructors called. const RecordType *RT = type->getAsUnionType(); if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue; CleanupKind cleanupKind = getCleanupKind(dtorKind); EHStack.pushCleanup<DestroyField>(cleanupKind, Field, getDestroyer(dtorKind), cleanupKind & EHCleanup); } } /// EmitCXXAggrConstructorCall - Emit a loop to call a particular /// constructor for each of several members of an array. /// /// \param ctor the constructor to call for each element /// \param arrayType the type of the array to initialize /// \param arrayBegin an arrayType* /// \param zeroInitialize true if each element should be /// zero-initialized before it is constructed void CodeGenFunction::EmitCXXAggrConstructorCall( const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType, llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) { QualType elementType; llvm::Value *numElements = emitArrayLength(arrayType, elementType, arrayBegin); EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize); } /// EmitCXXAggrConstructorCall - Emit a loop to call a particular /// constructor for each of several members of an array. /// /// \param ctor the constructor to call for each element /// \param numElements the number of elements in the array; /// may be zero /// \param arrayBegin a T*, where T is the type constructed by ctor /// \param zeroInitialize true if each element should be /// zero-initialized before it is constructed void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor, llvm::Value *numElements, llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) { // It's legal for numElements to be zero. This can happen both // dynamically, because x can be zero in 'new A[x]', and statically, // because of GCC extensions that permit zero-length arrays. There // are probably legitimate places where we could assume that this // doesn't happen, but it's not clear that it's worth it. llvm::BranchInst *zeroCheckBranch = nullptr; // Optimize for a constant count. llvm::ConstantInt *constantCount = dyn_cast<llvm::ConstantInt>(numElements); if (constantCount) { // Just skip out if the constant count is zero. if (constantCount->isZero()) return; // Otherwise, emit the check. 
} else { llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop"); llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty"); zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB); EmitBlock(loopBB); } // HLSL Change Begin: Loop on index instead of ptr //// Find the end of the array. //llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements, // "arrayctor.end"); // HLSL Change End // Enter the loop, setting up a phi for the current location to initialize. llvm::BasicBlock *entryBB = Builder.GetInsertBlock(); llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop"); EmitBlock(loopBB); // HLSL Change Begin: Loop on index instead of ptr //llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2, // "arrayctor.cur"); //cur->addIncoming(arrayBegin, entryBB); llvm::PHINode *idx = Builder.CreatePHI(numElements->getType(), 2, "arrayctor.idx"); idx->addIncoming( llvm::ConstantInt::get(numElements->getType(), (uint64_t)0), entryBB); llvm::Value *next = Builder.CreateAdd(idx, llvm::ConstantInt::get(idx->getType(), (uint64_t)1), "arrayctor.next"); llvm::Value *cur = Builder.CreateInBoundsGEP(arrayBegin, {idx}, "arrayctor.cur"); // HLSL Change End // Inside the loop body, emit the constructor call on the array element. QualType type = getContext().getTypeDeclType(ctor->getParent()); // Zero initialize the storage, if requested. if (zeroInitialize) EmitNullInitialization(cur, type); // C++ [class.temporary]p4: // There are two contexts in which temporaries are destroyed at a different // point than the end of the full-expression. The first context is when a // default constructor is called to initialize an element of an array. // If the constructor has one or more default arguments, the destruction of // every temporary created in a default argument expression is sequenced // before the construction of the next array element, if any. { RunCleanupsScope Scope(*this); // Evaluate the constructor and its arguments in a regular // partial-destroy cleanup. if (getLangOpts().Exceptions && !ctor->getParent()->hasTrivialDestructor()) { Destroyer *destroyer = destroyCXXObject; pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer); } EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false, /*Delegating=*/false, cur, E); } // HLSL Change Begin: Loop on index instead of ptr //// Go to the next element. //llvm::Value *next = // Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1), // "arrayctor.next"); //cur->addIncoming(next, Builder.GetInsertBlock()); idx->addIncoming(next, Builder.GetInsertBlock()); // HLSL Change End // Check whether that's the end of the loop. // HLSL Change Begin: Loop on index instead of ptr //llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done"); llvm::Value *done = Builder.CreateICmpEQ(next, numElements, "arrayctor.done"); // HLSL Change End llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont"); llvm::TerminatorInst *TI = cast<llvm::TerminatorInst>( // HLSL Change, capture terminator Builder.CreateCondBr(done, contBB, loopBB)); // HLSL Change Begin: force unroll LoopAttributes loopAttr; loopAttr.HlslUnrollPolicy = LoopAttributes::HlslForceUnroll; LoopInfo loopInfo(loopBB, loopAttr); TI->setMetadata("llvm.loop", loopInfo.getLoopID()); // HLSL Change End // Patch the earlier check to skip over the loop. 
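// (The conditional branch above was created with both successors pointing at
// loopBB because contBB did not exist yet; redirecting successor 0 -- the
// "count is zero" edge -- makes the empty-array case jump straight to the
// continuation block.)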
if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB); EmitBlock(contBB); } void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF, llvm::Value *addr, QualType type) { const RecordType *rtype = type->castAs<RecordType>(); const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl()); const CXXDestructorDecl *dtor = record->getDestructor(); assert(!dtor->isTrivial()); CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false, /*Delegating=*/false, addr); } void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, llvm::Value *This, const CXXConstructExpr *E) { // C++11 [class.mfct.non-static]p2: // If a non-static member function of a class X is called for an object that // is not of type X, or of a type derived from X, the behavior is undefined. // FIXME: Provide a source location here. EmitTypeCheck(CodeGenFunction::TCK_ConstructorCall, SourceLocation(), This, getContext().getRecordType(D->getParent())); if (D->isTrivial() && D->isDefaultConstructor()) { assert(E->getNumArgs() == 0 && "trivial default ctor with args"); return; } // If this is a trivial constructor, just emit what's needed. If this is a // union copy constructor, we must emit a memcpy, because the AST does not // model that copy. if (isMemcpyEquivalentSpecialMember(D)) { assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); const Expr *Arg = E->getArg(0); QualType SrcTy = Arg->getType(); llvm::Value *Src = EmitLValue(Arg).getAddress(); QualType DestTy = getContext().getTypeDeclType(D->getParent()); EmitAggregateCopyCtor(This, Src, DestTy, SrcTy); return; } CallArgList Args; // Push the this ptr. Args.add(RValue::get(This), D->getThisType(getContext())); // Add the rest of the user-supplied arguments. const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>(); EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor()); // Insert any ABI-specific implicit constructor arguments. unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs( *this, D, Type, ForVirtualBase, Delegating, Args); // Emit the call. llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type)); const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs); EmitCall(Info, Callee, ReturnValueSlot(), Args, D); } void CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D, llvm::Value *This, llvm::Value *Src, const CXXConstructExpr *E) { if (isMemcpyEquivalentSpecialMember(D)) { assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor"); assert(D->isCopyOrMoveConstructor() && "trivial 1-arg ctor not a copy/move ctor"); EmitAggregateCopyCtor(This, Src, getContext().getTypeDeclType(D->getParent()), E->arg_begin()->getType()); return; } llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete); assert(D->isInstance() && "Trying to emit a member call expr on a static method!"); const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>(); CallArgList Args; // Push the this ptr. Args.add(RValue::get(This), D->getThisType(getContext())); // Push the src ptr. QualType QT = *(FPT->param_type_begin()); llvm::Type *t = CGM.getTypes().ConvertType(QT); Src = Builder.CreateBitCast(Src, t); Args.add(RValue::get(Src), QT); // Skip over first argument (Src). 
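// (ParamsToSkip=1 below keeps EmitCallArgs aligned with the prototype's
// remaining parameters, since Src was already pushed by hand -- with a
// bitcast to the exact parameter type -- just above.)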
EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(), /*ParamsToSkip*/ 1); EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All), Callee, ReturnValueSlot(), Args, D); } void CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor, CXXCtorType CtorType, const FunctionArgList &Args, SourceLocation Loc) { CallArgList DelegateArgs; FunctionArgList::const_iterator I = Args.begin(), E = Args.end(); assert(I != E && "no parameters to constructor"); // this DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType()); ++I; // vtt if (llvm::Value *VTT = GetVTTParameter(GlobalDecl(Ctor, CtorType), /*ForVirtualBase=*/false, /*Delegating=*/true)) { QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy); DelegateArgs.add(RValue::get(VTT), VoidPP); if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) { assert(I != E && "cannot skip vtt parameter, already done with args"); assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type"); ++I; } } // Explicit arguments. for (; I != E; ++I) { const VarDecl *param = *I; // FIXME: per-argument source location EmitDelegateCallArg(DelegateArgs, param, Loc); } llvm::Value *Callee = CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType)); EmitCall(CGM.getTypes() .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)), Callee, ReturnValueSlot(), DelegateArgs, Ctor); } namespace { struct CallDelegatingCtorDtor : EHScopeStack::Cleanup { const CXXDestructorDecl *Dtor; llvm::Value *Addr; CXXDtorType Type; CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr, CXXDtorType Type) : Dtor(D), Addr(Addr), Type(Type) {} void Emit(CodeGenFunction &CGF, Flags flags) override { CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false, /*Delegating=*/true, Addr); } }; } void CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor, const FunctionArgList &Args) { assert(Ctor->isDelegatingConstructor()); llvm::Value *ThisPtr = LoadCXXThis(); QualType Ty = getContext().getTagDeclType(Ctor->getParent()); CharUnits Alignment = getContext().getTypeAlignInChars(Ty); AggValueSlot AggSlot = AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased); EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot); const CXXRecordDecl *ClassDecl = Ctor->getParent(); if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) { CXXDtorType Type = CurGD.getCtorType() == Ctor_Complete ? 
Dtor_Complete : Dtor_Base; EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup, ClassDecl->getDestructor(), ThisPtr, Type); } } void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, llvm::Value *This) { CGM.getCXXABI().EmitDestructorCall(*this, DD, Type, ForVirtualBase, Delegating, This); } namespace { struct CallLocalDtor : EHScopeStack::Cleanup { const CXXDestructorDecl *Dtor; llvm::Value *Addr; CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr) : Dtor(D), Addr(Addr) {} void Emit(CodeGenFunction &CGF, Flags flags) override { CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false, /*Delegating=*/false, Addr); } }; } void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D, llvm::Value *Addr) { EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr); } void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) { CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl(); if (!ClassDecl) return; if (ClassDecl->hasTrivialDestructor()) return; const CXXDestructorDecl *D = ClassDecl->getDestructor(); assert(D && D->isUsed() && "destructor not marked as used!"); PushDestructorCleanup(D, Addr); } void CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, const CXXRecordDecl *VTableClass) { const CXXRecordDecl *RD = Base.getBase(); // Don't initialize the vtable pointer if the class is marked with the // 'novtable' attribute. if ((RD == VTableClass || RD == NearestVBase) && VTableClass->hasAttr<MSNoVTableAttr>()) return; // Compute the address point. bool NeedsVirtualOffset; llvm::Value *VTableAddressPoint = CGM.getCXXABI().getVTableAddressPointInStructor( *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset); if (!VTableAddressPoint) return; // Compute where to store the address point. llvm::Value *VirtualOffset = nullptr; CharUnits NonVirtualOffset = CharUnits::Zero(); if (NeedsVirtualOffset) { // We need to use the virtual base offset offset because the virtual base // might have a different offset in the most derived class. VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this, LoadCXXThis(), VTableClass, NearestVBase); NonVirtualOffset = OffsetFromNearestVBase; } else { // We can just use the base offset in the complete class. NonVirtualOffset = Base.getBaseOffset(); } // Apply the offsets. llvm::Value *VTableField = LoadCXXThis(); if (!NonVirtualOffset.isZero() || VirtualOffset) VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField, NonVirtualOffset, VirtualOffset); // Finally, store the address point. Use the same LLVM types as the field to // support optimization. llvm::Type *VTablePtrTy = llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true) ->getPointerTo() ->getPointerTo(); VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo()); VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy); llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField); CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr()); } void CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, const CXXRecordDecl *NearestVBase, CharUnits OffsetFromNearestVBase, bool BaseIsNonVirtualPrimaryBase, const CXXRecordDecl *VTableClass, VisitedVirtualBasesSetTy& VBases) { // If this base is a non-virtual primary base the address point has already // been set. 
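// (A non-virtual primary base shares its vptr slot with the class that chose
// it as primary, so the store performed for the derived subobject already
// covers this base; storing again here would be redundant.)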
if (!BaseIsNonVirtualPrimaryBase) { // Initialize the vtable pointer for this base. InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase, VTableClass); } const CXXRecordDecl *RD = Base.getBase(); // Traverse bases. for (const auto &I : RD->bases()) { CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl()); // Ignore classes without a vtable. if (!BaseDecl->isDynamicClass()) continue; CharUnits BaseOffset; CharUnits BaseOffsetFromNearestVBase; bool BaseDeclIsNonVirtualPrimaryBase; if (I.isVirtual()) { // Check if we've visited this virtual base before. if (!VBases.insert(BaseDecl).second) continue; const ASTRecordLayout &Layout = getContext().getASTRecordLayout(VTableClass); BaseOffset = Layout.getVBaseClassOffset(BaseDecl); BaseOffsetFromNearestVBase = CharUnits::Zero(); BaseDeclIsNonVirtualPrimaryBase = false; } else { const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl); BaseOffsetFromNearestVBase = OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl); BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl; } InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset), I.isVirtual() ? BaseDecl : NearestVBase, BaseOffsetFromNearestVBase, BaseDeclIsNonVirtualPrimaryBase, VTableClass, VBases); } } void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) { // Ignore classes without a vtable. if (!RD->isDynamicClass()) return; // Initialize the vtable pointers for this class and all of its bases. VisitedVirtualBasesSetTy VBases; InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()), /*NearestVBase=*/nullptr, /*OffsetFromNearestVBase=*/CharUnits::Zero(), /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases); if (RD->getNumVBases()) CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD); } llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This, llvm::Type *Ty) { llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo()); llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable"); CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr()); return VTable; } // If a class has a single non-virtual base and does not introduce or override // virtual member functions or fields, it will have the same layout as its base. // This function returns the least derived such class. // // Casting an instance of a base class to such a derived class is technically // undefined behavior, but it is a relatively common hack for introducing member // functions on class instances with specific properties (e.g. llvm::Operator) // that works under most compilers and should not have security implications, so // we allow it by default. It can be disabled with -fsanitize=cfi-cast-strict. static const CXXRecordDecl * LeastDerivedClassWithSameLayout(const CXXRecordDecl *RD) { if (!RD->field_empty()) return RD; if (RD->getNumVBases() != 0) return RD; if (RD->getNumBases() != 1) return RD; for (const CXXMethodDecl *MD : RD->methods()) { if (MD->isVirtual()) { // Virtual member functions are only ok if they are implicit destructors // because the implicit destructor will have the same semantics as the // base class's destructor if no fields are added. 
if (isa<CXXDestructorDecl>(MD) && MD->isImplicit()) continue; return RD; } } return LeastDerivedClassWithSameLayout( RD->bases_begin()->getType()->getAsCXXRecordDecl()); } void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXMethodDecl *MD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc) { const CXXRecordDecl *ClassDecl = MD->getParent(); if (!SanOpts.has(SanitizerKind::CFICastStrict)) ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl); EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc); } void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc) { if (!getLangOpts().CPlusPlus) return; auto *ClassTy = T->getAs<RecordType>(); if (!ClassTy) return; const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(ClassTy->getDecl()); if (!ClassDecl->isCompleteDefinition() || !ClassDecl->isDynamicClass()) return; SmallString<64> MangledName; llvm::raw_svector_ostream Out(MangledName); CGM.getCXXABI().getMangleContext().mangleCXXRTTI(T.getUnqualifiedType(), Out); // Blacklist based on the mangled type. if (CGM.getContext().getSanitizerBlacklist().isBlacklistedType(Out.str())) return; if (!SanOpts.has(SanitizerKind::CFICastStrict)) ClassDecl = LeastDerivedClassWithSameLayout(ClassDecl); llvm::BasicBlock *ContBlock = 0; if (MayBeNull) { llvm::Value *DerivedNotNull = Builder.CreateIsNotNull(Derived, "cast.nonnull"); llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check"); ContBlock = createBasicBlock("cast.cont"); Builder.CreateCondBr(DerivedNotNull, CheckBlock, ContBlock); EmitBlock(CheckBlock); } llvm::Value *VTable = GetVTablePtr(Derived, Int8PtrTy); EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc); if (MayBeNull) { Builder.CreateBr(ContBlock); EmitBlock(ContBlock); } } void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD, llvm::Value *VTable, CFITypeCheckKind TCK, SourceLocation Loc) { if (CGM.IsCFIBlacklistedRecord(RD)) return; SanitizerScope SanScope(this); std::string OutName; llvm::raw_string_ostream Out(OutName); CGM.getCXXABI().getMangleContext().mangleCXXVTableBitSet(RD, Out); llvm::Value *BitSetName = llvm::MetadataAsValue::get( getLLVMContext(), llvm::MDString::get(getLLVMContext(), Out.str())); llvm::Value *CastedVTable = Builder.CreateBitCast(VTable, Int8PtrTy); llvm::Value *BitSetTest = Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::bitset_test), {CastedVTable, BitSetName}); SanitizerMask M; switch (TCK) { case CFITCK_VCall: M = SanitizerKind::CFIVCall; break; case CFITCK_NVCall: M = SanitizerKind::CFINVCall; break; case CFITCK_DerivedCast: M = SanitizerKind::CFIDerivedCast; break; case CFITCK_UnrelatedCast: M = SanitizerKind::CFIUnrelatedCast; break; } llvm::Constant *StaticData[] = { EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(QualType(RD->getTypeForDecl(), 0)), llvm::ConstantInt::get(Int8Ty, TCK), }; EmitCheck(std::make_pair(BitSetTest, M), "cfi_bad_type", StaticData, CastedVTable); } // FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do // quite what we want. 
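// For example (illustrative), each of these wrappers is peeled off below so
// the devirtualization logic sees the underlying expression:
//   (b)                 // ParenExpr
//   static_cast<B&>(b)  // an identity cast, CK_NoOp in the AST
//   __extension__ b     // UO_Extension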
static const Expr *skipNoOpCastsAndParens(const Expr *E) { while (true) { if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) { E = PE->getSubExpr(); continue; } if (const CastExpr *CE = dyn_cast<CastExpr>(E)) { if (CE->getCastKind() == CK_NoOp) { E = CE->getSubExpr(); continue; } } if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) { if (UO->getOpcode() == UO_Extension) { E = UO->getSubExpr(); continue; } } return E; } } bool CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base, const CXXMethodDecl *MD) { // When building with -fapple-kext, all calls must go through the vtable since // the kernel linker can do runtime patching of vtables. if (getLangOpts().AppleKext) return false; // If the most derived class is marked final, we know that no subclass can // override this member function and so we can devirtualize it. For example: // // struct A { virtual void f(); } // struct B final : A { }; // // void f(B *b) { // b->f(); // } // const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType(); if (MostDerivedClassDecl->hasAttr<FinalAttr>()) return true; // If the member function is marked 'final', we know that it can't be // overridden and can therefore devirtualize it. if (MD->hasAttr<FinalAttr>()) return true; // Similarly, if the class itself is marked 'final' it can't be overridden // and we can therefore devirtualize the member function call. if (MD->getParent()->hasAttr<FinalAttr>()) return true; Base = skipNoOpCastsAndParens(Base); if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) { if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) { // This is a record decl. We know the type and can devirtualize it. return VD->getType()->isRecordType(); } return false; } // We can devirtualize calls on an object accessed by a class member access // expression, since by C++11 [basic.life]p6 we know that it can't refer to // a derived class object constructed in the same location. if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base)) if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl())) return VD->getType()->isRecordType(); // We can always devirtualize calls on temporary object expressions. if (isa<CXXConstructExpr>(Base)) return true; // And calls on bound temporaries. if (isa<CXXBindTemporaryExpr>(Base)) return true; // Check if this is a call expr that returns a record type. if (const CallExpr *CE = dyn_cast<CallExpr>(Base)) return CE->getCallReturnType(getContext())->isRecordType(); // We can't devirtualize the call. return false; } void CodeGenFunction::EmitForwardingCallToLambda( const CXXMethodDecl *callOperator, CallArgList &callArgs) { // Get the address of the call operator. const CGFunctionInfo &calleeFnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(callOperator); llvm::Value *callee = CGM.GetAddrOfFunction(GlobalDecl(callOperator), CGM.getTypes().GetFunctionType(calleeFnInfo)); // Prepare the return slot. const FunctionProtoType *FPT = callOperator->getType()->castAs<FunctionProtoType>(); QualType resultType = FPT->getReturnType(); ReturnValueSlot returnSlot; if (!resultType->isVoidType() && calleeFnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect && !hasScalarEvaluationKind(calleeFnInfo.getReturnType())) returnSlot = ReturnValueSlot(ReturnValue, resultType.isVolatileQualified()); // We don't need to separately arrange the call arguments because // the call can't be variadic anyway --- it's impossible to forward // variadic arguments. // Now emit our call. 
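// (If the callee returns indirectly, the returnSlot set up above lets the
// call operator write straight into this function's return slot; otherwise
// the returned RValue is copied out just below.)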
RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs, callOperator); // If necessary, copy the returned value into the slot. if (!resultType->isVoidType() && returnSlot.isNull()) EmitReturnOfRValue(RV, resultType); else EmitBranchThroughCleanup(ReturnBlock); } void CodeGenFunction::EmitLambdaBlockInvokeBody() { const BlockDecl *BD = BlockInfo->getBlockDecl(); const VarDecl *variable = BD->capture_begin()->getVariable(); const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl(); // Start building arguments for forwarding call CallArgList CallArgs; QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false); CallArgs.add(RValue::get(ThisPtr), ThisType); // Add the rest of the parameters. for (auto param : BD->params()) EmitDelegateCallArg(CallArgs, param, param->getLocStart()); assert(!Lambda->isGenericLambda() && "generic lambda interconversion to block not implemented"); EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs); } void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) { if (cast<CXXMethodDecl>(CurCodeDecl)->isVariadic()) { // FIXME: Making this work correctly is nasty because it requires either // cloning the body of the call operator or making the call operator forward. CGM.ErrorUnsupported(CurCodeDecl, "lambda conversion to variadic function"); return; } EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody()); } void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) { const CXXRecordDecl *Lambda = MD->getParent(); // Start building arguments for forwarding call CallArgList CallArgs; QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda)); llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType)); CallArgs.add(RValue::get(ThisPtr), ThisType); // Add the rest of the parameters. for (auto Param : MD->params()) EmitDelegateCallArg(CallArgs, Param, Param->getLocStart()); const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator(); // For a generic lambda, find the corresponding call operator specialization // to which the call to the static-invoker shall be forwarded. if (Lambda->isGenericLambda()) { assert(MD->isFunctionTemplateSpecialization()); const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs(); FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate(); void *InsertPos = nullptr; FunctionDecl *CorrespondingCallOpSpecialization = CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos); assert(CorrespondingCallOpSpecialization); CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization); } EmitForwardingCallToLambda(CallOp, CallArgs); } void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) { if (MD->isVariadic()) { // FIXME: Making this work correctly is nasty because it requires either // cloning the body of the call operator or making the call operator forward. CGM.ErrorUnsupported(MD, "lambda conversion to variadic function"); return; } EmitLambdaDelegatingInvokeBody(MD); }
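// (Illustrative, not part of the original file) The static invoker emitted
// above is what makes a capture-less lambda convert to a plain function
// pointer:
//   auto L = [](int x) { return x + 1; };
//   int (*fp)(int) = L;   // fp -> static invoker -> L's operator()
// The invoker forwards with an undef 'this', which is safe only because a
// capture-less lambda's call operator never touches its object.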
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCleanup.cpp
//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains code dealing with the IR generation for cleanups // and related information. // // A "cleanup" is a piece of code which needs to be executed whenever // control transfers out of a particular scope. This can be // conditionalized to occur only on exceptional control flow, only on // normal control flow, or both. // //===----------------------------------------------------------------------===// #include "CGCleanup.h" #include "CodeGenFunction.h" #include "CGHLSLRuntime.h" // HLSL Change using namespace clang; using namespace CodeGen; bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) { if (rv.isScalar()) return DominatingLLVMValue::needsSaving(rv.getScalarVal()); if (rv.isAggregate()) return DominatingLLVMValue::needsSaving(rv.getAggregateAddr()); return true; } DominatingValue<RValue>::saved_type DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) { if (rv.isScalar()) { llvm::Value *V = rv.getScalarVal(); // These automatically dominate and don't need to be saved. if (!DominatingLLVMValue::needsSaving(V)) return saved_type(V, ScalarLiteral); // Everything else needs an alloca. llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue"); CGF.Builder.CreateStore(V, addr); return saved_type(addr, ScalarAddress); } if (rv.isComplex()) { CodeGenFunction::ComplexPairTy V = rv.getComplexVal(); llvm::Type *ComplexTy = llvm::StructType::get(V.first->getType(), V.second->getType(), (void*) nullptr); llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex"); CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(ComplexTy, addr, 0)); CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(ComplexTy, addr, 1)); return saved_type(addr, ComplexAddress); } assert(rv.isAggregate()); llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile? if (!DominatingLLVMValue::needsSaving(V)) return saved_type(V, AggregateLiteral); llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue"); CGF.Builder.CreateStore(V, addr); return saved_type(addr, AggregateAddress); } /// Given a saved r-value produced by SaveRValue, perform the code /// necessary to restore it to usability at the current insertion /// point. RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) { switch (K) { case ScalarLiteral: return RValue::get(Value); case ScalarAddress: return RValue::get(CGF.Builder.CreateLoad(Value)); case AggregateLiteral: return RValue::getAggregate(Value); case AggregateAddress: return RValue::getAggregate(CGF.Builder.CreateLoad(Value)); case ComplexAddress: { llvm::Value *real = CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 0)); llvm::Value *imag = CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(nullptr, Value, 1)); return RValue::getComplex(real, imag); } } llvm_unreachable("bad saved r-value kind"); } /// Push an entry of the given size onto this protected-scope stack. 
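/// The scope buffer grows downwards: StartOfData moves toward StartOfBuffer
/// as scopes are pushed, so the innermost scope always sits at the lowest
/// in-use address and begin() iterates from innermost to outermost.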
char *EHScopeStack::allocate(size_t Size) { if (!StartOfBuffer) { unsigned Capacity = 1024; while (Capacity < Size) Capacity *= 2; StartOfBuffer = new char[Capacity]; StartOfData = EndOfBuffer = StartOfBuffer + Capacity; } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) { unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer; unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer); unsigned NewCapacity = CurrentCapacity; do { NewCapacity *= 2; } while (NewCapacity < UsedCapacity + Size); char *NewStartOfBuffer = new char[NewCapacity]; char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity; char *NewStartOfData = NewEndOfBuffer - UsedCapacity; memcpy(NewStartOfData, StartOfData, UsedCapacity); delete [] StartOfBuffer; StartOfBuffer = NewStartOfBuffer; EndOfBuffer = NewEndOfBuffer; StartOfData = NewStartOfData; } assert(StartOfBuffer + Size <= StartOfData); StartOfData -= Size; return StartOfData; } bool EHScopeStack::containsOnlyLifetimeMarkers( EHScopeStack::stable_iterator Old) const { for (EHScopeStack::iterator it = begin(); stabilize(it) != Old; it++) { EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*it); if (!cleanup || !cleanup->isLifetimeMarker()) return false; } return true; } EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveNormalCleanup() const { for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end(); si != se; ) { EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si)); if (cleanup.isActive()) return si; si = cleanup.getEnclosingNormalCleanup(); } return stable_end(); } EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const { for (stable_iterator si = getInnermostEHScope(), se = stable_end(); si != se; ) { // Skip over inactive cleanups. EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si)); if (cleanup && !cleanup->isActive()) { si = cleanup->getEnclosingEHScope(); continue; } // All other scopes are always active. return si; } return stable_end(); } void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) { assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned"); char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size)); bool IsNormalCleanup = Kind & NormalCleanup; bool IsEHCleanup = Kind & EHCleanup; bool IsActive = !(Kind & InactiveCleanup); EHCleanupScope *Scope = new (Buffer) EHCleanupScope(IsNormalCleanup, IsEHCleanup, IsActive, Size, BranchFixups.size(), InnermostNormalCleanup, InnermostEHScope); if (IsNormalCleanup) InnermostNormalCleanup = stable_begin(); if (IsEHCleanup) InnermostEHScope = stable_begin(); return Scope->getCleanupBuffer(); } void EHScopeStack::popCleanup() { assert(!empty() && "popping exception stack when not empty"); assert(isa<EHCleanupScope>(*begin())); EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin()); InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup(); InnermostEHScope = Cleanup.getEnclosingEHScope(); StartOfData += Cleanup.getAllocatedSize(); // Destroy the cleanup. Cleanup.Destroy(); // Check whether we can shrink the branch-fixups stack. if (!BranchFixups.empty()) { // If we no longer have any normal cleanups, all the fixups are // complete. if (!hasNormalCleanups()) BranchFixups.clear(); // Otherwise we can still trim out unnecessary nulls. 
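// (A "null" fixup is one whose Destination has already been resolved and
// cleared; popNullFixups trims such entries from the top of the fixup stack,
// down to the fixup depth of the innermost remaining normal cleanup.)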
else popNullFixups(); } } EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) { assert(getInnermostEHScope() == stable_end()); char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters)); EHFilterScope *filter = new (buffer) EHFilterScope(numFilters); InnermostEHScope = stable_begin(); return filter; } void EHScopeStack::popFilter() { assert(!empty() && "popping exception stack when not empty"); EHFilterScope &filter = cast<EHFilterScope>(*begin()); StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters()); InnermostEHScope = filter.getEnclosingEHScope(); } EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) { char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers)); EHCatchScope *scope = new (buffer) EHCatchScope(numHandlers, InnermostEHScope); InnermostEHScope = stable_begin(); return scope; } void EHScopeStack::pushTerminate() { char *Buffer = allocate(EHTerminateScope::getSize()); new (Buffer) EHTerminateScope(InnermostEHScope); InnermostEHScope = stable_begin(); } /// Remove any 'null' fixups on the stack. However, we can't pop more /// fixups than the fixup depth on the innermost normal cleanup, or /// else fixups that we try to add to that cleanup will end up in the /// wrong place. We *could* try to shrink fixup depths, but that's /// actually a lot of work for little benefit. void EHScopeStack::popNullFixups() { // We expect this to only be called when there's still an innermost // normal cleanup; otherwise there really shouldn't be any fixups. assert(hasNormalCleanups()); EHScopeStack::iterator it = find(InnermostNormalCleanup); unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth(); assert(BranchFixups.size() >= MinSize && "fixup stack out of order"); while (BranchFixups.size() > MinSize && BranchFixups.back().Destination == nullptr) BranchFixups.pop_back(); } void CodeGenFunction::initFullExprCleanup() { // Create a variable to decide whether the cleanup needs to be run. llvm::AllocaInst *active = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond"); // Initialize it to false at a site that's guaranteed to be run // before each evaluation. setBeforeOutermostConditional(Builder.getFalse(), active); // Initialize it to true at the current location. Builder.CreateStore(Builder.getTrue(), active); // Set that as the active flag in the cleanup. EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin()); assert(!cleanup.getActiveFlag() && "cleanup already has active flag?"); cleanup.setActiveFlag(active); if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup(); if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup(); } void EHScopeStack::Cleanup::anchor() {} /// All the branch fixups on the EH stack have propagated out past the /// outermost normal cleanup; resolve them all by adding cases to the /// given switch instruction. static void ResolveAllBranchFixups(CodeGenFunction &CGF, llvm::SwitchInst *Switch, llvm::BasicBlock *CleanupEntry) { llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded; for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) { // Skip this fixup if its destination isn't set. BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I); if (Fixup.Destination == nullptr) continue; // If there isn't an OptimisticBranchBlock, then InitialBranch is // still pointing directly to its destination; forward it to the // appropriate cleanup entry. This is required in the specific // case of // { std::string s; goto lbl; } // lbl: // i.e. 
where there's an unresolved fixup inside a single cleanup // entry which we're currently popping. if (Fixup.OptimisticBranchBlock == nullptr) { new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex), CGF.getNormalCleanupDestSlot(), Fixup.InitialBranch); Fixup.InitialBranch->setSuccessor(0, CleanupEntry); } // Don't add this case to the switch statement twice. if (!CasesAdded.insert(Fixup.Destination).second) continue; Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex), Fixup.Destination); } CGF.EHStack.clearFixups(); } /// Transitions the terminator of the given exit-block of a cleanup to /// be a cleanup switch. static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF, llvm::BasicBlock *Block) { // If it's a branch, turn it into a switch whose default // destination is its original target. llvm::TerminatorInst *Term = Block->getTerminator(); assert(Term && "can't transition block without terminator"); if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { assert(Br->isUnconditional()); llvm::LoadInst *Load = new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term); llvm::SwitchInst *Switch = llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block); Br->eraseFromParent(); return Switch; } else { return cast<llvm::SwitchInst>(Term); } } void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) { assert(Block && "resolving a null target block"); if (!EHStack.getNumBranchFixups()) return; assert(EHStack.hasNormalCleanups() && "branch fixups exist with no normal cleanups on stack"); llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks; bool ResolvedAny = false; for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) { // Skip this fixup if its destination doesn't match. BranchFixup &Fixup = EHStack.getBranchFixup(I); if (Fixup.Destination != Block) continue; Fixup.Destination = nullptr; ResolvedAny = true; // If it doesn't have an optimistic branch block, LatestBranch is // already pointing to the right place. llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock; if (!BranchBB) continue; // Don't process the same optimistic branch block twice. if (!ModifiedOptimisticBlocks.insert(BranchBB).second) continue; llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB); // Add a case to the switch. Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block); } if (ResolvedAny) EHStack.popNullFixups(); } /// Pops cleanup blocks until the given savepoint is reached. void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) { assert(Old.isValid()); while (EHStack.stable_begin() != Old) { EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); // As long as Old strictly encloses the scope's enclosing normal // cleanup, we're going to emit another normal cleanup which // fallthrough can propagate through. bool FallThroughIsBranchThrough = Old.strictlyEncloses(Scope.getEnclosingNormalCleanup()); PopCleanupBlock(FallThroughIsBranchThrough); } } /// Pops cleanup blocks until the given savepoint is reached, then add the /// cleanups from the given savepoint in the lifetime-extended cleanups stack. void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize) { PopCleanupBlocks(Old); // Move our deferred cleanups onto the EH stack. for (size_t I = OldLifetimeExtendedSize, E = LifetimeExtendedCleanupStack.size(); I != E; /**/) { // Alignment should be guaranteed by the vptrs in the individual cleanups. 
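// (Each record in LifetimeExtendedCleanupStack is a header immediately
// followed by the cleanup object it describes, so the loop below strides by
// sizeof(Header) + Header.getSize() per record.)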
assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) && "misaligned cleanup stack entry"); LifetimeExtendedCleanupHeader &Header = reinterpret_cast<LifetimeExtendedCleanupHeader&>( LifetimeExtendedCleanupStack[I]); I += sizeof(Header); EHStack.pushCopyOfCleanup(Header.getKind(), &LifetimeExtendedCleanupStack[I], Header.getSize()); I += Header.getSize(); } LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize); } static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF, EHCleanupScope &Scope) { assert(Scope.isNormalCleanup()); llvm::BasicBlock *Entry = Scope.getNormalBlock(); if (!Entry) { Entry = CGF.createBasicBlock("cleanup"); Scope.setNormalBlock(Entry); CGF.CGM.getHLSLRuntime().MarkCleanupBlock(CGF, Entry); // HLSL Change } return Entry; } /// Attempts to reduce a cleanup's entry block to a fallthrough. This /// is basically llvm::MergeBlockIntoPredecessor, except /// simplified/optimized for the tighter constraints on cleanup blocks. /// /// Returns the new block, whatever it is. static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF, llvm::BasicBlock *Entry) { llvm::BasicBlock *Pred = Entry->getSinglePredecessor(); if (!Pred) return Entry; llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator()); if (!Br || Br->isConditional()) return Entry; assert(Br->getSuccessor(0) == Entry); // If we were previously inserting at the end of the cleanup entry // block, we'll need to continue inserting at the end of the // predecessor. bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry; assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end()); // Kill the branch. Br->eraseFromParent(); // Replace all uses of the entry with the predecessor, in case there // are phis in the cleanup. Entry->replaceAllUsesWith(Pred); // Merge the blocks. Pred->getInstList().splice(Pred->end(), Entry->getInstList()); // Kill the entry block. Entry->eraseFromParent(); if (WasInsertBlock) CGF.Builder.SetInsertPoint(Pred); return Pred; } static void EmitCleanup(CodeGenFunction &CGF, EHScopeStack::Cleanup *Fn, EHScopeStack::Cleanup::Flags flags, llvm::Value *ActiveFlag) { // Itanium EH cleanups occur within a terminate scope. Microsoft SEH doesn't // have this behavior, and the Microsoft C++ runtime will call terminate for // us if the cleanup throws. bool PushedTerminate = false; if (flags.isForEHCleanup() && !CGF.getTarget().getCXXABI().isMicrosoft()) { CGF.EHStack.pushTerminate(); PushedTerminate = true; } // If there's an active flag, load it and skip the cleanup if it's // false. llvm::BasicBlock *ContBB = nullptr; if (ActiveFlag) { ContBB = CGF.createBasicBlock("cleanup.done"); llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action"); llvm::Value *IsActive = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active"); CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB); CGF.EmitBlock(CleanupBB); } // Ask the cleanup to emit itself. Fn->Emit(CGF, flags); assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?"); // Emit the continuation block if there was an active flag. if (ActiveFlag) CGF.EmitBlock(ContBB); // Leave the terminate scope. if (PushedTerminate) CGF.EHStack.popTerminate(); } static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit, llvm::BasicBlock *From, llvm::BasicBlock *To) { // Exit is the exit block of a cleanup, so it always terminates in // an unconditional branch or a switch. 
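// (A cleanup exit never ends in a conditional branch: control leaves either
// through the single fallthrough edge or through the cleanup-dest switch
// built in PopCleanupBlock, so the two cases below are exhaustive.)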
llvm::TerminatorInst *Term = Exit->getTerminator(); if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) { assert(Br->isUnconditional() && Br->getSuccessor(0) == From); Br->setSuccessor(0, To); } else { llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term); for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I) if (Switch->getSuccessor(I) == From) Switch->setSuccessor(I, To); } } /// We don't need a normal entry block for the given cleanup. /// Optimistic fixup branches can cause these blocks to come into /// existence anyway; if so, destroy it. /// /// The validity of this transformation is very much specific to the /// exact ways in which we form branches to cleanup entries. static void destroyOptimisticNormalEntry(CodeGenFunction &CGF, EHCleanupScope &scope) { llvm::BasicBlock *entry = scope.getNormalBlock(); if (!entry) return; // Replace all the uses with unreachable. llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock(); for (llvm::BasicBlock::use_iterator i = entry->use_begin(), e = entry->use_end(); i != e; ) { llvm::Use &use = *i; ++i; use.set(unreachableBB); // The only uses should be fixup switches. llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser()); if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) { // Replace the switch with a branch. llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si); // The switch operand is a load from the cleanup-dest alloca. llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition()); // Destroy the switch. si->eraseFromParent(); // Destroy the load. assert(condition->getOperand(0) == CGF.NormalCleanupDest); assert(condition->use_empty()); condition->eraseFromParent(); } } assert(entry->use_empty()); delete entry; } /// Pops a cleanup block. If the block includes a normal cleanup, the /// current insertion point is threaded through the cleanup, as are /// any branch fixups on the cleanup. void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) { assert(!EHStack.empty() && "cleanup stack is empty!"); assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!"); EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin()); assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups()); // Remember activation information. bool IsActive = Scope.isActive(); llvm::Value *NormalActiveFlag = Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : nullptr; // HLSL Change - unused // llvm::Value *EHActiveFlag = // Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : nullptr; // Check whether we need an EH cleanup. This is only true if we've // generated a lazy EH cleanup block. 
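// (The EH dispatch block is created on demand the first time an exceptional
// edge needs this cleanup; if no such edge was ever emitted, the cached
// block is null and the EH variant of the cleanup can be skipped entirely.)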
llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock(); assert(Scope.hasEHBranches() == (EHEntry != nullptr)); bool RequiresEHCleanup = (EHEntry != nullptr); //EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope(); // HLSL Change - no support for exception handling // Check the three conditions which might require a normal cleanup: // - whether there are branch fix-ups through this cleanup unsigned FixupDepth = Scope.getFixupDepth(); bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth; // - whether there are branch-throughs or branch-afters bool HasExistingBranches = Scope.hasBranches(); // - whether there's a fallthrough llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock(); bool HasFallthrough = (FallthroughSource != nullptr && IsActive); // Branch-through fall-throughs leave the insertion point set to the // end of the last cleanup, which points to the current scope. The // rest of IR gen doesn't need to worry about this; it only happens // during the execution of PopCleanupBlocks(). bool HasPrebranchedFallthrough = (FallthroughSource && FallthroughSource->getTerminator()); // If this is a normal cleanup, then having a prebranched // fallthrough implies that the fallthrough source unconditionally // jumps here. assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough || (Scope.getNormalBlock() && FallthroughSource->getTerminator()->getSuccessor(0) == Scope.getNormalBlock())); bool RequiresNormalCleanup = false; if (Scope.isNormalCleanup() && (HasFixups || HasExistingBranches || HasFallthrough)) { RequiresNormalCleanup = true; } // If we have a prebranched fallthrough into an inactive normal // cleanup, rewrite it so that it leads to the appropriate place. if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) { llvm::BasicBlock *prebranchDest; // If the prebranch is semantically branching through the next // cleanup, just forward it to the next block, leaving the // insertion point in the prebranched block. if (FallthroughIsBranchThrough) { EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup()); prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing)); // Otherwise, we need to make a new block. If the normal cleanup // isn't being used at all, we could actually reuse the normal // entry block, but this is simpler, and it avoids conflicts with // dead optimistic fixup branches. } else { prebranchDest = createBasicBlock("forwarded-prebranch"); EmitBlock(prebranchDest); } llvm::BasicBlock *normalEntry = Scope.getNormalBlock(); assert(normalEntry && !normalEntry->use_empty()); ForwardPrebranchedFallthrough(FallthroughSource, normalEntry, prebranchDest); } // If we don't need the cleanup at all, we're done. if (!RequiresNormalCleanup && !RequiresEHCleanup) { destroyOptimisticNormalEntry(*this, Scope); EHStack.popCleanup(); // safe because there are no fixups assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups()); return; } // Copy the cleanup emission data out. Note that SmallVector // guarantees maximal alignment for its buffer regardless of its // type parameter. 
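// (The copy lets us pop the scope off EHStack before running Emit(); the
// cleanup may itself push and pop scopes while emitting, which would
// otherwise invalidate the in-place storage we are reading from.)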
SmallVector<char, 8*sizeof(void*)> CleanupBuffer; CleanupBuffer.reserve(Scope.getCleanupSize()); memcpy(CleanupBuffer.data(), Scope.getCleanupBuffer(), Scope.getCleanupSize()); CleanupBuffer.set_size(Scope.getCleanupSize()); EHScopeStack::Cleanup *Fn = reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data()); EHScopeStack::Cleanup::Flags cleanupFlags; if (Scope.isNormalCleanup()) cleanupFlags.setIsNormalCleanupKind(); if (Scope.isEHCleanup()) cleanupFlags.setIsEHCleanupKind(); if (!RequiresNormalCleanup) { destroyOptimisticNormalEntry(*this, Scope); EHStack.popCleanup(); } else { // If we have a fallthrough and no other need for the cleanup, // emit it directly. if (HasFallthrough && !HasPrebranchedFallthrough && !HasFixups && !HasExistingBranches) { destroyOptimisticNormalEntry(*this, Scope); EHStack.popCleanup(); EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); // Otherwise, the best approach is to thread everything through // the cleanup block and then try to clean up after ourselves. } else { // Force the entry block to exist. llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope); // I. Set up the fallthrough edge in. CGBuilderTy::InsertPoint savedInactiveFallthroughIP; // If there's a fallthrough, we need to store the cleanup // destination index. For fall-throughs this is always zero. if (HasFallthrough) { if (!HasPrebranchedFallthrough) Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot()); // Otherwise, save and clear the IP if we don't have fallthrough // because the cleanup is inactive. } else if (FallthroughSource) { assert(!IsActive && "source without fallthrough for active cleanup"); savedInactiveFallthroughIP = Builder.saveAndClearIP(); } // II. Emit the entry block. This implicitly branches to it if // we have fallthrough. All the fixups and existing branches // should already be branched to it. EmitBlock(NormalEntry); // III. Figure out where we're going and build the cleanup // epilogue. bool HasEnclosingCleanups = (Scope.getEnclosingNormalCleanup() != EHStack.stable_end()); // Compute the branch-through dest if we need it: // - if there are branch-throughs threaded through the scope // - if fall-through is a branch-through // - if there are fixups that will be optimistically forwarded // to the enclosing cleanup llvm::BasicBlock *BranchThroughDest = nullptr; if (Scope.hasBranchThroughs() || (FallthroughSource && FallthroughIsBranchThrough) || (HasFixups && HasEnclosingCleanups)) { assert(HasEnclosingCleanups); EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup()); BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S)); } llvm::BasicBlock *FallthroughDest = nullptr; SmallVector<llvm::Instruction*, 2> InstsToAppend; // If there's exactly one branch-after and no other threads, // we can route it without a switch. if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough && Scope.getNumBranchAfters() == 1) { assert(!BranchThroughDest || !IsActive); // Clean up the possibly dead store to the cleanup dest slot. 
llvm::Instruction *NormalCleanupDestSlot = cast<llvm::Instruction>(getNormalCleanupDestSlot()); if (NormalCleanupDestSlot->hasOneUse()) { NormalCleanupDestSlot->user_back()->eraseFromParent(); NormalCleanupDestSlot->eraseFromParent(); NormalCleanupDest = nullptr; } llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0); InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter)); // Build a switch-out if we need it: // - if there are branch-afters threaded through the scope // - if fall-through is a branch-after // - if there are fixups that have nowhere left to go and // so must be immediately resolved } else if (Scope.getNumBranchAfters() || (HasFallthrough && !FallthroughIsBranchThrough) || (HasFixups && !HasEnclosingCleanups)) { llvm::BasicBlock *Default = (BranchThroughDest ? BranchThroughDest : getUnreachableBlock()); // TODO: base this on the number of branch-afters and fixups const unsigned SwitchCapacity = 10; llvm::LoadInst *Load = new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest"); llvm::SwitchInst *Switch = llvm::SwitchInst::Create(Load, Default, SwitchCapacity); InstsToAppend.push_back(Load); InstsToAppend.push_back(Switch); // Branch-after fallthrough. if (FallthroughSource && !FallthroughIsBranchThrough) { FallthroughDest = createBasicBlock("cleanup.cont"); if (HasFallthrough) Switch->addCase(Builder.getInt32(0), FallthroughDest); } for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) { Switch->addCase(Scope.getBranchAfterIndex(I), Scope.getBranchAfterBlock(I)); } // If there aren't any enclosing cleanups, we can resolve all // the fixups now. if (HasFixups && !HasEnclosingCleanups) ResolveAllBranchFixups(*this, Switch, NormalEntry); } else { // We should always have a branch-through destination in this case. assert(BranchThroughDest); InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest)); } // IV. Pop the cleanup and emit it. EHStack.popCleanup(); assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups); EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag); // Append the prepared cleanup prologue from above. llvm::BasicBlock *NormalExit = Builder.GetInsertBlock(); for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I) NormalExit->getInstList().push_back(InstsToAppend[I]); // Optimistically hope that any fixups will continue falling through. for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E; ++I) { BranchFixup &Fixup = EHStack.getBranchFixup(I); if (!Fixup.Destination) continue; if (!Fixup.OptimisticBranchBlock) { new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex), getNormalCleanupDestSlot(), Fixup.InitialBranch); Fixup.InitialBranch->setSuccessor(0, NormalEntry); } Fixup.OptimisticBranchBlock = NormalExit; } // V. Set up the fallthrough edge out. // Case 1: a fallthrough source exists but doesn't branch to the // cleanup because the cleanup is inactive. if (!HasFallthrough && FallthroughSource) { // Prebranched fallthrough was forwarded earlier. // Non-prebranched fallthrough doesn't need to be forwarded. // Either way, all we need to do is restore the IP we cleared before. assert(!IsActive); Builder.restoreIP(savedInactiveFallthroughIP); // Case 2: a fallthrough source exists and should branch to the // cleanup, but we're not supposed to branch through to the next // cleanup. 
} else if (HasFallthrough && FallthroughDest) { assert(!FallthroughIsBranchThrough); EmitBlock(FallthroughDest); // Case 3: a fallthrough source exists and should branch to the // cleanup and then through to the next. } else if (HasFallthrough) { // Everything is already set up for this. // Case 4: no fallthrough source exists. } else { Builder.ClearInsertionPoint(); } // VI. Assorted cleaning. // Check whether we can merge NormalEntry into a single predecessor. // This might invalidate (non-IR) pointers to NormalEntry. llvm::BasicBlock *NewNormalEntry = SimplifyCleanupEntry(*this, NormalEntry); // If it did invalidate those pointers, and NormalEntry was the same // as NormalExit, go back and patch up the fixups. if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit) for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E; ++I) EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry; } } assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0); // Emit the EH cleanup if required. #if 1 // HLSL Change - no support for exception handling assert(!RequiresEHCleanup && "nothing in HLSL requires EH cleanup"); #else if (RequiresEHCleanup) { CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); EmitBlock(EHEntry); // We only actually emit the cleanup code if the cleanup is either // active or was used before it was deactivated. if (EHActiveFlag || IsActive) { cleanupFlags.setIsForEHCleanup(); EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag); } Builder.CreateBr(getEHDispatchBlock(EHParent)); Builder.restoreIP(SavedIP); SimplifyCleanupEntry(*this, EHEntry); } #endif // HLSL Change - no support for exception handling } /// isObviouslyBranchWithoutCleanups - Return true if a branch to the /// specified destination obviously has no cleanups to run. 'false' is always /// a conservatively correct answer for this method. bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const { assert(Dest.getScopeDepth().encloses(EHStack.stable_begin()) && "stale jump destination"); // Calculate the innermost active normal cleanup. EHScopeStack::stable_iterator TopCleanup = EHStack.getInnermostActiveNormalCleanup(); // If we're not in an active normal cleanup scope, or if the // destination scope is within the innermost active normal cleanup // scope, we don't need to worry about fixups. if (TopCleanup == EHStack.stable_end() || TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid return true; // Otherwise, we might need some cleanups. return false; } /// Terminate the current block by emitting a branch which might leave /// the current cleanup-protected scope. The target scope may not yet /// be known, in which case this will require a fixup. /// /// As a side-effect, this method clears the insertion point. void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest, llvm::BranchInst *PreExistingBr) { assert(Dest.getScopeDepth().encloses(EHStack.stable_begin()) && "stale jump destination"); if (!HaveInsertPoint()) return; // Create the branch. // HLSL Change Begin - use pre-generated branch if exists llvm::BranchInst *BI = PreExistingBr ? PreExistingBr : Builder.CreateBr(Dest.getBlock()); // HLSL Change End - use pre-generated branch if exists // Calculate the innermost active normal cleanup. 
EHScopeStack::stable_iterator TopCleanup = EHStack.getInnermostActiveNormalCleanup(); // If we're not in an active normal cleanup scope, or if the // destination scope is within the innermost active normal cleanup // scope, we don't need to worry about fixups. if (TopCleanup == EHStack.stable_end() || TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid Builder.ClearInsertionPoint(); return; } // If we can't resolve the destination cleanup scope, just add this // to the current cleanup scope as a branch fixup. if (!Dest.getScopeDepth().isValid()) { BranchFixup &Fixup = EHStack.addBranchFixup(); Fixup.Destination = Dest.getBlock(); Fixup.DestinationIndex = Dest.getDestIndex(); Fixup.InitialBranch = BI; Fixup.OptimisticBranchBlock = nullptr; Builder.ClearInsertionPoint(); return; } // Otherwise, thread through all the normal cleanups in scope. // Store the index at the start. llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex()); new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI); // Adjust BI to point to the first cleanup block. { EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(TopCleanup)); BI->setSuccessor(0, CreateNormalEntry(*this, Scope)); } // Add this destination to all the scopes involved. EHScopeStack::stable_iterator I = TopCleanup; EHScopeStack::stable_iterator E = Dest.getScopeDepth(); if (E.strictlyEncloses(I)) { while (true) { EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I)); assert(Scope.isNormalCleanup()); I = Scope.getEnclosingNormalCleanup(); // If this is the last cleanup we're propagating through, tell it // that there's a resolved jump moving through it. if (!E.strictlyEncloses(I)) { Scope.addBranchAfter(Index, Dest.getBlock()); break; } // Otherwise, tell the scope that there's a jump propagating // through it. If this isn't new information, all the rest of // the work has been done before. if (!Scope.addBranchThrough(Dest.getBlock())) break; } } Builder.ClearInsertionPoint(); } static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack, EHScopeStack::stable_iterator C) { // If we needed a normal block for any reason, that counts. if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock()) return true; // Check whether any enclosed cleanups were needed. for (EHScopeStack::stable_iterator I = EHStack.getInnermostNormalCleanup(); I != C; ) { assert(C.strictlyEncloses(I)); EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I)); if (S.getNormalBlock()) return true; I = S.getEnclosingNormalCleanup(); } return false; } static bool IsUsedAsEHCleanup(EHScopeStack &EHStack, EHScopeStack::stable_iterator cleanup) { // If we needed an EH block for any reason, that counts. if (EHStack.find(cleanup)->hasEHBranches()) return true; // Check whether any enclosed cleanups were needed. for (EHScopeStack::stable_iterator i = EHStack.getInnermostEHScope(); i != cleanup; ) { assert(cleanup.strictlyEncloses(i)); EHScope &scope = *EHStack.find(i); if (scope.hasEHBranches()) return true; i = scope.getEnclosingEHScope(); } return false; } enum ForActivation_t { ForActivation, ForDeactivation }; /// The given cleanup block is changing activation state. Configure a /// cleanup variable if necessary. /// /// It would be good if we had some way of determining if there were /// extra uses *after* the change-over point.
static void SetupCleanupBlockActivation(CodeGenFunction &CGF, EHScopeStack::stable_iterator C, ForActivation_t kind, llvm::Instruction *dominatingIP) { EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C)); // We always need the flag if we're activating the cleanup in a // conditional context, because we have to assume that the current // location doesn't necessarily dominate the cleanup's code. bool isActivatedInConditional = (kind == ForActivation && CGF.isInConditionalBranch()); bool needFlag = false; // Calculate whether the cleanup was used: // - as a normal cleanup if (Scope.isNormalCleanup() && (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) { Scope.setTestFlagInNormalCleanup(); needFlag = true; } // - as an EH cleanup if (Scope.isEHCleanup() && (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) { Scope.setTestFlagInEHCleanup(); needFlag = true; } // If it hasn't yet been used as either, we're done. if (!needFlag) return; llvm::AllocaInst *var = Scope.getActiveFlag(); if (!var) { var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive"); Scope.setActiveFlag(var); assert(dominatingIP && "no existing variable and no dominating IP!"); // Initialize to true or false depending on whether it was // active up to this point. llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation); // If we're in a conditional block, ignore the dominating IP and // use the outermost conditional branch. if (CGF.isInConditionalBranch()) { CGF.setBeforeOutermostConditional(value, var); } else { new llvm::StoreInst(value, var, dominatingIP); } } CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var); } /// Activate a cleanup that was created in an inactivated state. void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C, llvm::Instruction *dominatingIP) { assert(C != EHStack.stable_end() && "activating bottom of stack?"); EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); assert(!Scope.isActive() && "double activation"); SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP); Scope.setActive(true); } /// Deactivate a cleanup that was created in an active state. void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C, llvm::Instruction *dominatingIP) { assert(C != EHStack.stable_end() && "deactivating bottom of stack?"); EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C)); assert(Scope.isActive() && "double deactivation"); // If it's the top of the stack, just pop it. if (C == EHStack.stable_begin()) { // If it's a normal cleanup, we need to pretend that the // fallthrough is unreachable. CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP(); PopCleanupBlock(); Builder.restoreIP(SavedIP); return; } // Otherwise, follow the general case. SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP); Scope.setActive(false); } llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() { if (!NormalCleanupDest) NormalCleanupDest = CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot"); return NormalCleanupDest; } /// Emits all the code to cause the given temporary to be cleaned up. void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType, llvm::Value *Ptr) { pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject, /*useEHCleanup*/ true); }
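// A minimal sketch of the source-level pattern the machinery above exists to
// handle (hypothetical example, not from this file): a scope exit whose
// destructor must run on more than one control-flow path.
//
//   struct Guard { ~Guard(); };
//   void f(bool b) {
//     for (;;) {
//       Guard g;          // pushes a normal cleanup for ~Guard()
//       if (b) break;     // EmitBranchThroughCleanup: stores a dest index
//                         // and threads the branch through the cleanup
//     }                   // the fallthrough to the loop back-edge is a
//   }                     // second path through the same cleanup
//
// With one branch-after ('break') plus a fallthrough, PopCleanupBlock emits
// ~Guard() once and routes control out via the cleanup.dest.slot switch
// constructed above.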
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This provides C++ code generation targeting the Microsoft Visual C++ ABI. // The class in this file generates structures that follow the Microsoft // Visual C++ ABI, which is actually not very well documented at all outside // of Microsoft. // //===----------------------------------------------------------------------===// #include "CGCXXABI.h" #include "CGVTables.h" #include "CodeGenModule.h" #include "CodeGenTypes.h" #include "TargetInfo.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/VTableBuilder.h" #include "llvm/ADT/StringExtras.h" #include "llvm/ADT/StringSet.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Intrinsics.h" using namespace clang; using namespace CodeGen; namespace { /// Holds all the vbtable globals for a given class. struct VBTableGlobals { const VPtrInfoVector *VBTables; SmallVector<llvm::GlobalVariable *, 2> Globals; }; class MicrosoftCXXABI : public CGCXXABI { public: MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM), BaseClassDescriptorType(nullptr), ClassHierarchyDescriptorType(nullptr), CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr), ThrowInfoType(nullptr), CatchHandlerTypeType(nullptr) {} bool HasThisReturn(GlobalDecl GD) const override; bool hasMostDerivedReturn(GlobalDecl GD) const override; bool classifyReturnType(CGFunctionInfo &FI) const override; RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override; bool isSRetParameterAfterThis() const override { return true; } size_t getSrcArgforCopyCtor(const CXXConstructorDecl *CD, FunctionArgList &Args) const override { assert(Args.size() >= 2 && "expected the arglist to have at least two args!"); // The 'most_derived' parameter goes second if the ctor is variadic and // has v-bases. 
if (CD->getParent()->getNumVBases() > 0 && CD->getType()->castAs<FunctionProtoType>()->isVariadic()) return 2; return 1; } StringRef GetPureVirtualCallName() override { return "_purecall"; } StringRef GetDeletedVirtualCallName() override { return "_purecall"; } void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, llvm::Value *Ptr, QualType ElementType, const CXXDestructorDecl *Dtor) override; void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; llvm::GlobalVariable *getMSCompleteObjectLocator(const CXXRecordDecl *RD, const VPtrInfo *Info); llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; llvm::Constant * getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) override; bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; void EmitBadTypeidCall(CodeGenFunction &CGF) override; llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, llvm::Value *ThisPtr, llvm::Type *StdTypeInfoPtrTy) override; bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, QualType SrcRecordTy) override; llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) override; llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy, QualType DestTy) override; bool EmitBadCastCall(CodeGenFunction &CGF) override; llvm::Value * GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) override; llvm::BasicBlock * EmitCtorCompleteObjectHandler(CodeGenFunction &CGF, const CXXRecordDecl *RD) override; void initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF, const CXXRecordDecl *RD) override; void EmitCXXConstructors(const CXXConstructorDecl *D) override; // Background on MSVC destructors // ============================== // // Both Itanium and MSVC ABIs have destructor variants. The variant names // roughly correspond in the following way: // Itanium Microsoft // Base -> no name, just ~Class // Complete -> vbase destructor // Deleting -> scalar deleting destructor // vector deleting destructor // // The base and complete destructors are the same as in Itanium, although the // complete destructor does not accept a VTT parameter when there are virtual // bases. A separate mechanism involving vtordisps is used to ensure that // virtual methods of destroyed subobjects are not called. // // The deleting destructors accept an i32 bitfield as a second parameter. Bit // 1 indicates if the memory should be deleted. Bit 2 indicates if the this // pointer points to an array. The scalar deleting destructor assumes that // bit 2 is zero, and therefore does not contain a loop. // // For virtual destructors, only one entry is reserved in the vftable, and it // always points to the vector deleting destructor. The vector deleting // destructor is the most general, so it can be used to destroy objects in // place, delete single heap objects, or delete arrays. // // A TU defining a non-inline destructor is only guaranteed to emit a base // destructor, and all of the other variants are emitted on an as-needed basis // in COMDATs. 
Because a non-base destructor can be emitted in a TU that // lacks a definition for the destructor, non-base destructors must always // delegate to or alias the base destructor. void buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl<CanQualType> &ArgTys) override; /// Non-base dtors should be emitted as delegating thunks in this ABI. bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const override { return DT != Dtor_Base; } void EmitCXXDestructors(const CXXDestructorDecl *D) override; const CXXRecordDecl * getThisArgumentTypeForMethod(const CXXMethodDecl *MD) override { MD = MD->getCanonicalDecl(); if (MD->isVirtual() && !isa<CXXDestructorDecl>(MD)) { MicrosoftVTableContext::MethodVFTableLocation ML = CGM.getMicrosoftVTableContext().getMethodVFTableLocation(MD); // The vbases might be ordered differently in the final overrider object // and the complete object, so the "this" argument may sometimes point to // memory that has no particular type (e.g. past the complete object). // In this case, we just use a generic pointer type. // FIXME: might want to have a more precise type in the non-virtual // multiple inheritance case. if (ML.VBase || !ML.VFPtrOffset.isZero()) return nullptr; } return MD->getParent(); } llvm::Value * adjustThisArgumentForVirtualFunctionCall(CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, bool VirtualCall) override; void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params) override; llvm::Value *adjustThisParameterInVirtualFunctionPrologue( CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) override; void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override; unsigned addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, CallArgList &Args) override; void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, llvm::Value *This) override; void emitVTableBitSetEntries(VPtrInfo *Info, const CXXRecordDecl *RD, llvm::GlobalVariable *VTable); void emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD) override; llvm::Value *getVTableAddressPointInStructor( CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) override; llvm::Constant * getVTableAddressPointForConstExpr(BaseSubobject Base, const CXXRecordDecl *VTableClass) override; llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) override; llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, llvm::Type *Ty, SourceLocation Loc) override; llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType, llvm::Value *This, const CXXMemberCallExpr *CE) override; void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF, GlobalDecl GD, CallArgList &CallArgs) override { assert(GD.getDtorType() == Dtor_Deleting && "Only deleting destructor thunks are available in this ABI"); CallArgs.add(RValue::get(getStructorImplicitParamValue(CGF)), getContext().IntTy); } void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; llvm::GlobalVariable * getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD, llvm::GlobalVariable::LinkageTypes Linkage); llvm::GlobalVariable * getAddrOfVirtualDisplacementMap(const CXXRecordDecl *SrcRD, const CXXRecordDecl 
*DstRD) { SmallString<256> OutName; llvm::raw_svector_ostream Out(OutName); getMangleContext().mangleCXXVirtualDisplacementMap(SrcRD, DstRD, Out); Out.flush(); StringRef MangledName = OutName.str(); if (auto *VDispMap = CGM.getModule().getNamedGlobal(MangledName)) return VDispMap; MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext(); unsigned NumEntries = 1 + SrcRD->getNumVBases(); SmallVector<llvm::Constant *, 4> Map(NumEntries, llvm::UndefValue::get(CGM.IntTy)); Map[0] = llvm::ConstantInt::get(CGM.IntTy, 0); bool AnyDifferent = false; for (const auto &I : SrcRD->vbases()) { const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl(); if (!DstRD->isVirtuallyDerivedFrom(VBase)) continue; unsigned SrcVBIndex = VTContext.getVBTableIndex(SrcRD, VBase); unsigned DstVBIndex = VTContext.getVBTableIndex(DstRD, VBase); Map[SrcVBIndex] = llvm::ConstantInt::get(CGM.IntTy, DstVBIndex * 4); AnyDifferent |= SrcVBIndex != DstVBIndex; } // This map would be useless, don't use it. if (!AnyDifferent) return nullptr; llvm::ArrayType *VDispMapTy = llvm::ArrayType::get(CGM.IntTy, Map.size()); llvm::Constant *Init = llvm::ConstantArray::get(VDispMapTy, Map); llvm::GlobalValue::LinkageTypes Linkage = SrcRD->isExternallyVisible() && DstRD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage : llvm::GlobalValue::InternalLinkage; auto *VDispMap = new llvm::GlobalVariable( CGM.getModule(), VDispMapTy, /*Constant=*/true, Linkage, /*Initializer=*/Init, MangledName); return VDispMap; } void emitVBTableDefinition(const VPtrInfo &VBT, const CXXRecordDecl *RD, llvm::GlobalVariable *GV) const; void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, bool ReturnAdjustment) override { // Never dllimport/dllexport thunks. Thunk->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass); GVALinkage Linkage = getContext().GetGVALinkageForFunction(cast<FunctionDecl>(GD.getDecl())); if (Linkage == GVA_Internal) Thunk->setLinkage(llvm::GlobalValue::InternalLinkage); else if (ReturnAdjustment) Thunk->setLinkage(llvm::GlobalValue::WeakODRLinkage); else Thunk->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage); } llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This, const ThisAdjustment &TA) override; llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret, const ReturnAdjustment &RA) override; void EmitThreadLocalInitFuncs( CodeGenModule &CGM, ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>> CXXThreadLocals, ArrayRef<llvm::Function *> CXXThreadLocalInits, ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) override; bool usesThreadWrapperFunction() const override { return false; } LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType) override; void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit) override; void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::Constant *Dtor, llvm::Constant *Addr) override; // ==== Notes on array cookies ========= // // MSVC seems to only use cookies when the class has a destructor; a // two-argument usual array deallocation function isn't sufficient. 
// // For example, this code prints "100" and "1": // struct A { // char x; // void *operator new[](size_t sz) { // printf("%u\n", sz); // return malloc(sz); // } // void operator delete[](void *p, size_t sz) { // printf("%u\n", sz); // free(p); // } // }; // int main() { // A *p = new A[100]; // delete[] p; // } // Whereas it prints "104" and "104" if you give A a destructor. bool requiresArrayCookie(const CXXDeleteExpr *expr, QualType elementType) override; bool requiresArrayCookie(const CXXNewExpr *expr) override; CharUnits getArrayCookieSizeImpl(QualType type) override; llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF, llvm::Value *NewPtr, llvm::Value *NumElements, const CXXNewExpr *expr, QualType ElementType) override; llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr, CharUnits cookieSize) override; friend struct MSRTTIBuilder; bool isImageRelative() const { return CGM.getTarget().getPointerWidth(/*AddressSpace=*/0) == 64; } // 5 routines for constructing the llvm types for MS RTTI structs. llvm::StructType *getTypeDescriptorType(StringRef TypeInfoString) { llvm::SmallString<32> TDTypeName("rtti.TypeDescriptor"); TDTypeName += llvm::utostr(TypeInfoString.size()); llvm::StructType *&TypeDescriptorType = TypeDescriptorTypeMap[TypeInfoString.size()]; if (TypeDescriptorType) return TypeDescriptorType; llvm::Type *FieldTypes[] = { CGM.Int8PtrPtrTy, CGM.Int8PtrTy, llvm::ArrayType::get(CGM.Int8Ty, TypeInfoString.size() + 1)}; TypeDescriptorType = llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, TDTypeName); return TypeDescriptorType; } llvm::Type *getImageRelativeType(llvm::Type *PtrType) { if (!isImageRelative()) return PtrType; return CGM.IntTy; } llvm::StructType *getBaseClassDescriptorType() { if (BaseClassDescriptorType) return BaseClassDescriptorType; llvm::Type *FieldTypes[] = { getImageRelativeType(CGM.Int8PtrTy), CGM.IntTy, CGM.IntTy, CGM.IntTy, CGM.IntTy, CGM.IntTy, getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()), }; BaseClassDescriptorType = llvm::StructType::create( CGM.getLLVMContext(), FieldTypes, "rtti.BaseClassDescriptor"); return BaseClassDescriptorType; } llvm::StructType *getClassHierarchyDescriptorType() { if (ClassHierarchyDescriptorType) return ClassHierarchyDescriptorType; // Forward-declare RTTIClassHierarchyDescriptor to break a cycle. 
ClassHierarchyDescriptorType = llvm::StructType::create( CGM.getLLVMContext(), "rtti.ClassHierarchyDescriptor"); llvm::Type *FieldTypes[] = { CGM.IntTy, CGM.IntTy, CGM.IntTy, getImageRelativeType( getBaseClassDescriptorType()->getPointerTo()->getPointerTo()), }; ClassHierarchyDescriptorType->setBody(FieldTypes); return ClassHierarchyDescriptorType; } llvm::StructType *getCompleteObjectLocatorType() { if (CompleteObjectLocatorType) return CompleteObjectLocatorType; CompleteObjectLocatorType = llvm::StructType::create( CGM.getLLVMContext(), "rtti.CompleteObjectLocator"); llvm::Type *FieldTypes[] = { CGM.IntTy, CGM.IntTy, CGM.IntTy, getImageRelativeType(CGM.Int8PtrTy), getImageRelativeType(getClassHierarchyDescriptorType()->getPointerTo()), getImageRelativeType(CompleteObjectLocatorType), }; llvm::ArrayRef<llvm::Type *> FieldTypesRef(FieldTypes); if (!isImageRelative()) FieldTypesRef = FieldTypesRef.drop_back(); CompleteObjectLocatorType->setBody(FieldTypesRef); return CompleteObjectLocatorType; } llvm::GlobalVariable *getImageBase() { StringRef Name = "__ImageBase"; if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name)) return GV; return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage, /*Initializer=*/nullptr, Name); } llvm::Constant *getImageRelativeConstant(llvm::Constant *PtrVal) { if (!isImageRelative()) return PtrVal; if (PtrVal->isNullValue()) return llvm::Constant::getNullValue(CGM.IntTy); llvm::Constant *ImageBaseAsInt = llvm::ConstantExpr::getPtrToInt(getImageBase(), CGM.IntPtrTy); llvm::Constant *PtrValAsInt = llvm::ConstantExpr::getPtrToInt(PtrVal, CGM.IntPtrTy); llvm::Constant *Diff = llvm::ConstantExpr::getSub(PtrValAsInt, ImageBaseAsInt, /*HasNUW=*/true, /*HasNSW=*/true); return llvm::ConstantExpr::getTrunc(Diff, CGM.IntTy); } private: MicrosoftMangleContext &getMangleContext() { return cast<MicrosoftMangleContext>(CodeGen::CGCXXABI::getMangleContext()); } llvm::Constant *getZeroInt() { return llvm::ConstantInt::get(CGM.IntTy, 0); } llvm::Constant *getAllOnesInt() { return llvm::Constant::getAllOnesValue(CGM.IntTy); } llvm::Constant *getConstantOrZeroInt(llvm::Constant *C) { return C ? C : getZeroInt(); } llvm::Value *getValueOrZeroInt(llvm::Value *C) { return C ? C : getZeroInt(); } CharUnits getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD); void GetNullMemberPointerFields(const MemberPointerType *MPT, llvm::SmallVectorImpl<llvm::Constant *> &fields); /// \brief Shared code for virtual base adjustment. Returns the offset from /// the vbptr to the virtual base. Optionally returns the address of the /// vbptr itself. llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF, llvm::Value *Base, llvm::Value *VBPtrOffset, llvm::Value *VBTableOffset, llvm::Value **VBPtr = nullptr); llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF, llvm::Value *Base, int32_t VBPtrOffset, int32_t VBTableOffset, llvm::Value **VBPtr = nullptr) { assert(VBTableOffset % 4 == 0 && "should be byte offset into table of i32s"); llvm::Value *VBPOffset = llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset), *VBTOffset = llvm::ConstantInt::get(CGM.IntTy, VBTableOffset); return GetVBaseOffsetFromVBPtr(CGF, Base, VBPOffset, VBTOffset, VBPtr); } std::pair<llvm::Value *, llvm::Value *> performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy); /// \brief Performs a full virtual base adjustment. Used to dereference /// pointers to members of virtual bases. 
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD, llvm::Value *Base, llvm::Value *VirtualBaseAdjustmentOffset, llvm::Value *VBPtrOffset /* optional */); /// \brief Emits a full member pointer with the fields common to data and /// function member pointers. llvm::Constant *EmitFullMemberPointer(llvm::Constant *FirstField, bool IsMemberFunction, const CXXRecordDecl *RD, CharUnits NonVirtualBaseAdjustment, unsigned VBTableIndex); bool MemberPointerConstantIsNull(const MemberPointerType *MPT, llvm::Constant *MP); /// \brief - Initialize all vbptrs of 'this' with RD as the complete type. void EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD); /// \brief Caching wrapper around VBTableBuilder::enumerateVBTables(). const VBTableGlobals &enumerateVBTables(const CXXRecordDecl *RD); /// \brief Generate a thunk for calling a virtual member function MD. llvm::Function *EmitVirtualMemPtrThunk( const CXXMethodDecl *MD, const MicrosoftVTableContext::MethodVFTableLocation &ML); public: llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override; bool isZeroInitializable(const MemberPointerType *MPT) override; bool isMemberPointerConvertible(const MemberPointerType *MPT) const override { const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); return RD->hasAttr<MSInheritanceAttr>(); } bool isTypeInfoCalculable(QualType Ty) const override { if (!CGCXXABI::isTypeInfoCalculable(Ty)) return false; if (const auto *MPT = Ty->getAs<MemberPointerType>()) { const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); if (!RD->hasAttr<MSInheritanceAttr>()) return false; } return true; } llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override; llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, CharUnits offset) override; llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override; llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override; llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality) override; llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT) override; llvm::Value * EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr, const MemberPointerType *MPT) override; llvm::Value *EmitNonNullMemberPointerConversion( const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, llvm::Value *Src, CGBuilderTy &Builder); llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src) override; llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, llvm::Constant *Src) override; llvm::Constant *EmitMemberPointerConversion( const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, llvm::Constant *Src); llvm::Value * EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E, llvm::Value *&This, llvm::Value *MemPtr, const MemberPointerType *MPT) override; void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override; llvm::StructType *getCatchHandlerTypeType() { if (!CatchHandlerTypeType) { llvm::Type *FieldTypes[] = { CGM.IntTy, // Flags CGM.Int8PtrTy, // TypeDescriptor }; CatchHandlerTypeType = llvm::StructType::create( 
CGM.getLLVMContext(), FieldTypes, "eh.CatchHandlerType"); } return CatchHandlerTypeType; } llvm::StructType *getCatchableTypeType() { if (CatchableTypeType) return CatchableTypeType; llvm::Type *FieldTypes[] = { CGM.IntTy, // Flags getImageRelativeType(CGM.Int8PtrTy), // TypeDescriptor CGM.IntTy, // NonVirtualAdjustment CGM.IntTy, // OffsetToVBPtr CGM.IntTy, // VBTableIndex CGM.IntTy, // Size getImageRelativeType(CGM.Int8PtrTy) // CopyCtor }; CatchableTypeType = llvm::StructType::create( CGM.getLLVMContext(), FieldTypes, "eh.CatchableType"); return CatchableTypeType; } llvm::StructType *getCatchableTypeArrayType(uint32_t NumEntries) { llvm::StructType *&CatchableTypeArrayType = CatchableTypeArrayTypeMap[NumEntries]; if (CatchableTypeArrayType) return CatchableTypeArrayType; llvm::SmallString<23> CTATypeName("eh.CatchableTypeArray."); CTATypeName += llvm::utostr(NumEntries); llvm::Type *CTType = getImageRelativeType(getCatchableTypeType()->getPointerTo()); llvm::Type *FieldTypes[] = { CGM.IntTy, // NumEntries llvm::ArrayType::get(CTType, NumEntries) // CatchableTypes }; CatchableTypeArrayType = llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, CTATypeName); return CatchableTypeArrayType; } llvm::StructType *getThrowInfoType() { if (ThrowInfoType) return ThrowInfoType; llvm::Type *FieldTypes[] = { CGM.IntTy, // Flags getImageRelativeType(CGM.Int8PtrTy), // CleanupFn getImageRelativeType(CGM.Int8PtrTy), // ForwardCompat getImageRelativeType(CGM.Int8PtrTy) // CatchableTypeArray }; ThrowInfoType = llvm::StructType::create(CGM.getLLVMContext(), FieldTypes, "eh.ThrowInfo"); return ThrowInfoType; } llvm::Constant *getThrowFn() { // _CxxThrowException is passed an exception object and a ThrowInfo object // which describes the exception. llvm::Type *Args[] = {CGM.Int8PtrTy, getThrowInfoType()->getPointerTo()}; llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false); auto *Fn = cast<llvm::Function>( CGM.CreateRuntimeFunction(FTy, "_CxxThrowException")); // _CxxThrowException is stdcall on 32-bit x86 platforms. if (CGM.getTarget().getTriple().getArch() == llvm::Triple::x86) Fn->setCallingConv(llvm::CallingConv::X86_StdCall); return Fn; } llvm::Function *getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT); llvm::Constant *getCatchableType(QualType T, uint32_t NVOffset = 0, int32_t VBPtrOffset = -1, uint32_t VBIndex = 0); llvm::GlobalVariable *getCatchableTypeArray(QualType T); llvm::GlobalVariable *getThrowInfo(QualType T) override; private: typedef std::pair<const CXXRecordDecl *, CharUnits> VFTableIdTy; typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalVariable *> VTablesMapTy; typedef llvm::DenseMap<VFTableIdTy, llvm::GlobalValue *> VFTablesMapTy; /// \brief All the vftables that have been referenced. VFTablesMapTy VFTablesMap; VTablesMapTy VTablesMap; /// \brief This set holds the record decls we've deferred vtable emission for. llvm::SmallPtrSet<const CXXRecordDecl *, 4> DeferredVFTables; /// \brief All the vbtables which have been referenced. llvm::DenseMap<const CXXRecordDecl *, VBTableGlobals> VBTablesMap; /// Info on the global variable used to guard initialization of static locals. /// The BitIndex field is only used for externally invisible declarations. struct GuardInfo { GuardInfo() : Guard(nullptr), BitIndex(0) {} llvm::GlobalVariable *Guard; unsigned BitIndex; }; /// Map from DeclContext to the current guard variable. We assume that the /// AST is visited in source code order. 
llvm::DenseMap<const DeclContext *, GuardInfo> GuardVariableMap; llvm::DenseMap<const DeclContext *, GuardInfo> ThreadLocalGuardVariableMap; llvm::DenseMap<const DeclContext *, unsigned> ThreadSafeGuardNumMap; llvm::DenseMap<size_t, llvm::StructType *> TypeDescriptorTypeMap; llvm::StructType *BaseClassDescriptorType; llvm::StructType *ClassHierarchyDescriptorType; llvm::StructType *CompleteObjectLocatorType; llvm::DenseMap<QualType, llvm::GlobalVariable *> CatchableTypeArrays; llvm::StructType *CatchableTypeType; llvm::DenseMap<uint32_t, llvm::StructType *> CatchableTypeArrayTypeMap; llvm::StructType *ThrowInfoType; llvm::StructType *CatchHandlerTypeType; }; } CGCXXABI::RecordArgABI MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const { switch (CGM.getTarget().getTriple().getArch()) { default: // FIXME: Implement for other architectures. return RAA_Default; case llvm::Triple::x86: // All record arguments are passed in memory on x86. Decide whether to // construct the object directly in argument memory, or to construct the // argument elsewhere and copy the bytes during the call. // If C++ prohibits us from making a copy, construct the arguments directly // into argument memory. if (!canCopyArgument(RD)) return RAA_DirectInMemory; // Otherwise, construct the argument into a temporary and copy the bytes // into the outgoing argument memory. return RAA_Default; case llvm::Triple::x86_64: // Win64 passes objects with non-trivial copy ctors indirectly. if (RD->hasNonTrivialCopyConstructor()) return RAA_Indirect; // If an object has a destructor, we'd really like to pass it indirectly // because it allows us to elide copies. Unfortunately, MSVC makes that // impossible for small types, which it will pass in a single register or // stack slot. Most objects with dtors are large-ish, so handle that early. // We can't call out all large objects as being indirect because there are // multiple x64 calling conventions and the C++ ABI code shouldn't dictate // how we pass large POD types. if (RD->hasNonTrivialDestructor() && getContext().getTypeSize(RD->getTypeForDecl()) > 64) return RAA_Indirect; // We have a trivial copy constructor or no copy constructors, but we have // to make sure it isn't deleted. bool CopyDeleted = false; for (const CXXConstructorDecl *CD : RD->ctors()) { if (CD->isCopyConstructor()) { assert(CD->isTrivial()); // We had at least one undeleted trivial copy ctor. Return directly. if (!CD->isDeleted()) return RAA_Default; CopyDeleted = true; } } // The trivial copy constructor was deleted. Return indirectly. if (CopyDeleted) return RAA_Indirect; // There were no copy ctors. Return in RAX. return RAA_Default; } llvm_unreachable("invalid enum"); } void MicrosoftCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, llvm::Value *Ptr, QualType ElementType, const CXXDestructorDecl *Dtor) { // FIXME: Provide a source location here even though there's no // CXXMemberCallExpr for dtor call. bool UseGlobalDelete = DE->isGlobalDelete(); CXXDtorType DtorType = UseGlobalDelete ? 
Dtor_Complete : Dtor_Deleting; llvm::Value *MDThis = EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr); if (UseGlobalDelete) CGF.EmitDeleteCall(DE->getOperatorDelete(), MDThis, ElementType); } void MicrosoftCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) { llvm::Value *Args[] = { llvm::ConstantPointerNull::get(CGM.Int8PtrTy), llvm::ConstantPointerNull::get(getThrowInfoType()->getPointerTo())}; auto *Fn = getThrowFn(); if (isNoReturn) CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, Args); else CGF.EmitRuntimeCallOrInvoke(Fn, Args); } namespace { struct CallEndCatchMSVC : EHScopeStack::Cleanup { CallEndCatchMSVC() {} void Emit(CodeGenFunction &CGF, Flags flags) override { CGF.EmitNounwindRuntimeCall( CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_endcatch)); } }; } void MicrosoftCXXABI::emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) { // In the MS ABI, the runtime handles the copy, and the catch handler is // responsible for destruction. VarDecl *CatchParam = S->getExceptionDecl(); llvm::Value *Exn = CGF.getExceptionFromSlot(); llvm::Function *BeginCatch = CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_begincatch); // If this is a catch-all or the catch parameter is unnamed, we don't need to // emit an alloca to the object. if (!CatchParam || !CatchParam->getDeclName()) { llvm::Value *Args[2] = {Exn, llvm::Constant::getNullValue(CGF.Int8PtrTy)}; CGF.EmitNounwindRuntimeCall(BeginCatch, Args); CGF.EHStack.pushCleanup<CallEndCatchMSVC>(NormalCleanup); return; } CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam); llvm::Value *ParamAddr = CGF.Builder.CreateBitCast(var.getObjectAddress(CGF), CGF.Int8PtrTy); llvm::Value *Args[2] = {Exn, ParamAddr}; CGF.EmitNounwindRuntimeCall(BeginCatch, Args); CGF.EHStack.pushCleanup<CallEndCatchMSVC>(NormalCleanup); CGF.EmitAutoVarCleanups(var); } std::pair<llvm::Value *, llvm::Value *> MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy) { Value = CGF.Builder.CreateBitCast(Value, CGF.Int8PtrTy); const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); const ASTContext &Context = getContext(); if (Context.getASTRecordLayout(SrcDecl).hasExtendableVFPtr()) return std::make_pair(Value, llvm::ConstantInt::get(CGF.Int32Ty, 0)); // Perform a base adjustment. 
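// An illustrative case for the adjustment below (hypothetical example, not
// from this file): a type whose only vfptr lives in a virtual base.
//
//   struct Poly { virtual ~Poly(); };
//   struct X : virtual Poly { int n; };
//
// X has no vfptr at offset zero, so typeid/dynamic_cast on an X* must first
// hop through X's vbtable to the polymorphic Poly subobject; the search over
// vbases below locates that subobject.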
const CXXBaseSpecifier *PolymorphicBase = std::find_if( SrcDecl->vbases_begin(), SrcDecl->vbases_end(), [&](const CXXBaseSpecifier &Base) { const CXXRecordDecl *BaseDecl = Base.getType()->getAsCXXRecordDecl(); return Context.getASTRecordLayout(BaseDecl).hasExtendableVFPtr(); }); llvm::Value *Offset = GetVirtualBaseClassOffset( CGF, Value, SrcDecl, PolymorphicBase->getType()->getAsCXXRecordDecl()); Value = CGF.Builder.CreateInBoundsGEP(Value, Offset); Offset = CGF.Builder.CreateTrunc(Offset, CGF.Int32Ty); return std::make_pair(Value, Offset); } bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) { const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); return IsDeref && !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr(); } static llvm::CallSite emitRTtypeidCall(CodeGenFunction &CGF, llvm::Value *Argument) { llvm::Type *ArgTypes[] = {CGF.Int8PtrTy}; llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false); llvm::Value *Args[] = {Argument}; llvm::Constant *Fn = CGF.CGM.CreateRuntimeFunction(FTy, "__RTtypeid"); return CGF.EmitRuntimeCallOrInvoke(Fn, Args); } void MicrosoftCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) { llvm::CallSite Call = emitRTtypeidCall(CGF, llvm::Constant::getNullValue(CGM.VoidPtrTy)); Call.setDoesNotReturn(); CGF.Builder.CreateUnreachable(); } llvm::Value *MicrosoftCXXABI::EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, llvm::Value *ThisPtr, llvm::Type *StdTypeInfoPtrTy) { llvm::Value *Offset; std::tie(ThisPtr, Offset) = performBaseAdjustment(CGF, ThisPtr, SrcRecordTy); return CGF.Builder.CreateBitCast( emitRTtypeidCall(CGF, ThisPtr).getInstruction(), StdTypeInfoPtrTy); } bool MicrosoftCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, QualType SrcRecordTy) { const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl(); return SrcIsPtr && !getContext().getASTRecordLayout(SrcDecl).hasExtendableVFPtr(); } llvm::Value *MicrosoftCXXABI::EmitDynamicCastCall( CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) { llvm::Type *DestLTy = CGF.ConvertType(DestTy); llvm::Value *SrcRTTI = CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType()); llvm::Value *DestRTTI = CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType()); llvm::Value *Offset; std::tie(Value, Offset) = performBaseAdjustment(CGF, Value, SrcRecordTy); // PVOID __RTDynamicCast( // PVOID inptr, // LONG VfDelta, // PVOID SrcType, // PVOID TargetType, // BOOL isReference) llvm::Type *ArgTypes[] = {CGF.Int8PtrTy, CGF.Int32Ty, CGF.Int8PtrTy, CGF.Int8PtrTy, CGF.Int32Ty}; llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false), "__RTDynamicCast"); llvm::Value *Args[] = { Value, Offset, SrcRTTI, DestRTTI, llvm::ConstantInt::get(CGF.Int32Ty, DestTy->isReferenceType())}; Value = CGF.EmitRuntimeCallOrInvoke(Function, Args).getInstruction(); return CGF.Builder.CreateBitCast(Value, DestLTy); } llvm::Value * MicrosoftCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy, QualType DestTy) { llvm::Value *Offset; std::tie(Value, Offset) = performBaseAdjustment(CGF, Value, SrcRecordTy); // PVOID __RTCastToVoid( // PVOID inptr) llvm::Type *ArgTypes[] = {CGF.Int8PtrTy}; llvm::Constant *Function = CGF.CGM.CreateRuntimeFunction( llvm::FunctionType::get(CGF.Int8PtrTy, ArgTypes, false), "__RTCastToVoid"); llvm::Value *Args[] = {Value}; return 
CGF.EmitRuntimeCall(Function, Args); } bool MicrosoftCXXABI::EmitBadCastCall(CodeGenFunction &CGF) { return false; } llvm::Value *MicrosoftCXXABI::GetVirtualBaseClassOffset( CodeGenFunction &CGF, llvm::Value *This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) { const ASTContext &Context = getContext(); int64_t VBPtrChars = Context.getASTRecordLayout(ClassDecl).getVBPtrOffset().getQuantity(); llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars); CharUnits IntSize = Context.getTypeSizeInChars(Context.IntTy); CharUnits VBTableChars = IntSize * CGM.getMicrosoftVTableContext().getVBTableIndex(ClassDecl, BaseClassDecl); llvm::Value *VBTableOffset = llvm::ConstantInt::get(CGM.IntTy, VBTableChars.getQuantity()); llvm::Value *VBPtrToNewBase = GetVBaseOffsetFromVBPtr(CGF, This, VBPtrOffset, VBTableOffset); VBPtrToNewBase = CGF.Builder.CreateSExtOrBitCast(VBPtrToNewBase, CGM.PtrDiffTy); return CGF.Builder.CreateNSWAdd(VBPtrOffset, VBPtrToNewBase); } bool MicrosoftCXXABI::HasThisReturn(GlobalDecl GD) const { return isa<CXXConstructorDecl>(GD.getDecl()); } static bool isDeletingDtor(GlobalDecl GD) { return isa<CXXDestructorDecl>(GD.getDecl()) && GD.getDtorType() == Dtor_Deleting; } bool MicrosoftCXXABI::hasMostDerivedReturn(GlobalDecl GD) const { return isDeletingDtor(GD); } bool MicrosoftCXXABI::classifyReturnType(CGFunctionInfo &FI) const { const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl(); if (!RD) return false; if (FI.isInstanceMethod()) { // If it's an instance method, aggregates are always returned indirectly via // the second parameter. FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false); FI.getReturnInfo().setSRetAfterThis(FI.isInstanceMethod()); return true; } else if (!RD->isPOD()) { // If it's a free function, non-POD types are returned indirectly. FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false); return true; } // Otherwise, use the C ABI rules. return false; } llvm::BasicBlock * MicrosoftCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF, const CXXRecordDecl *RD) { llvm::Value *IsMostDerivedClass = getStructorImplicitParamValue(CGF); assert(IsMostDerivedClass && "ctor for a class with virtual bases must have an implicit parameter"); llvm::Value *IsCompleteObject = CGF.Builder.CreateIsNotNull(IsMostDerivedClass, "is_complete_object"); llvm::BasicBlock *CallVbaseCtorsBB = CGF.createBasicBlock("ctor.init_vbases"); llvm::BasicBlock *SkipVbaseCtorsBB = CGF.createBasicBlock("ctor.skip_vbases"); CGF.Builder.CreateCondBr(IsCompleteObject, CallVbaseCtorsBB, SkipVbaseCtorsBB); CGF.EmitBlock(CallVbaseCtorsBB); // Fill in the vbtable pointers here. EmitVBPtrStores(CGF, RD); // CGF will put the base ctor calls in this basic block for us later. return SkipVbaseCtorsBB; } void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers( CodeGenFunction &CGF, const CXXRecordDecl *RD) { // In most cases, an override for a vbase virtual method can adjust // the "this" parameter by applying a constant offset. // However, this is not enough while a constructor or a destructor of some // class X is being executed if all the following conditions are met: // - X has virtual bases, (1) // - X overrides a virtual method M of a vbase Y, (2) // - X itself is a vbase of the most derived class. // // If (1) and (2) are true, the vtorDisp for vbase Y is a hidden member of X // which holds the extra amount of "this" adjustment we must do when we use // the X vftables (i.e. during X ctor or dtor). 
// Outside the ctors and dtors, the values of vtorDisps are zero. const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD); typedef ASTRecordLayout::VBaseOffsetsMapTy VBOffsets; const VBOffsets &VBaseMap = Layout.getVBaseOffsetsMap(); CGBuilderTy &Builder = CGF.Builder; unsigned AS = cast<llvm::PointerType>(getThisValue(CGF)->getType())->getAddressSpace(); llvm::Value *Int8This = nullptr; // Initialize lazily. for (VBOffsets::const_iterator I = VBaseMap.begin(), E = VBaseMap.end(); I != E; ++I) { if (!I->second.hasVtorDisp()) continue; llvm::Value *VBaseOffset = GetVirtualBaseClassOffset(CGF, getThisValue(CGF), RD, I->first); // FIXME: it doesn't look right that we SExt in GetVirtualBaseClassOffset() // just to Trunc back immediately. VBaseOffset = Builder.CreateTruncOrBitCast(VBaseOffset, CGF.Int32Ty); uint64_t ConstantVBaseOffset = Layout.getVBaseClassOffset(I->first).getQuantity(); // vtorDisp_for_vbase = vbptr[vbase_idx] - offsetof(RD, vbase). llvm::Value *VtorDispValue = Builder.CreateSub( VBaseOffset, llvm::ConstantInt::get(CGM.Int32Ty, ConstantVBaseOffset), "vtordisp.value"); if (!Int8This) Int8This = Builder.CreateBitCast(getThisValue(CGF), CGF.Int8Ty->getPointerTo(AS)); llvm::Value *VtorDispPtr = Builder.CreateInBoundsGEP(Int8This, VBaseOffset); // vtorDisp is always the 32-bits before the vbase in the class layout. VtorDispPtr = Builder.CreateConstGEP1_32(VtorDispPtr, -4); VtorDispPtr = Builder.CreateBitCast( VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr"); Builder.CreateStore(VtorDispValue, VtorDispPtr); } } static bool hasDefaultCXXMethodCC(ASTContext &Context, const CXXMethodDecl *MD) { CallingConv ExpectedCallingConv = Context.getDefaultCallingConvention( /*IsVariadic=*/false, /*IsCXXMethod=*/true); CallingConv ActualCallingConv = MD->getType()->getAs<FunctionProtoType>()->getCallConv(); return ExpectedCallingConv == ActualCallingConv; } void MicrosoftCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) { // There's only one constructor type in this ABI. CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete)); // Exported default constructors either have a simple call-site where they use // the typical calling convention and have a single 'this' pointer for an // argument -or- they get a wrapper function which appropriately thunks to the // real default constructor. This thunk is the default constructor closure. 
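// As an illustration (hypothetical example, not from this file), a class like
//
//   struct __declspec(dllexport) A {
//     A(int x = 0); // default-constructible, but not with the plain
//   };              // 'this'-only signature
//
// gets a default constructor closure: a small exported thunk that simply
// calls A::A(0), so importing modules can default-construct A without
// knowing the default argument.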
if (D->hasAttr<DLLExportAttr>() && D->isDefaultConstructor()) if (!hasDefaultCXXMethodCC(getContext(), D) || D->getNumParams() != 0) { llvm::Function *Fn = getAddrOfCXXCtorClosure(D, Ctor_DefaultClosure); Fn->setLinkage(llvm::GlobalValue::WeakODRLinkage); Fn->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); } } void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF, const CXXRecordDecl *RD) { llvm::Value *ThisInt8Ptr = CGF.Builder.CreateBitCast(getThisValue(CGF), CGM.Int8PtrTy, "this.int8"); const ASTContext &Context = getContext(); const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); const VBTableGlobals &VBGlobals = enumerateVBTables(RD); for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) { const VPtrInfo *VBT = (*VBGlobals.VBTables)[I]; llvm::GlobalVariable *GV = VBGlobals.Globals[I]; const ASTRecordLayout &SubobjectLayout = Context.getASTRecordLayout(VBT->BaseWithVPtr); CharUnits Offs = VBT->NonVirtualOffset; Offs += SubobjectLayout.getVBPtrOffset(); if (VBT->getVBaseWithVPtr()) Offs += Layout.getVBaseClassOffset(VBT->getVBaseWithVPtr()); llvm::Value *VBPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ThisInt8Ptr, Offs.getQuantity()); llvm::Value *GVPtr = CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0); VBPtr = CGF.Builder.CreateBitCast(VBPtr, GVPtr->getType()->getPointerTo(0), "vbptr." + VBT->ReusingBase->getName()); CGF.Builder.CreateStore(GVPtr, VBPtr); } } void MicrosoftCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl<CanQualType> &ArgTys) { // TODO: 'for base' flag if (T == StructorType::Deleting) { // The scalar deleting destructor takes an implicit int parameter. ArgTys.push_back(getContext().IntTy); } auto *CD = dyn_cast<CXXConstructorDecl>(MD); if (!CD) return; // All parameters are already in place except is_most_derived, which goes // after 'this' if it's variadic and last if it's not. const CXXRecordDecl *Class = CD->getParent(); const FunctionProtoType *FPT = CD->getType()->castAs<FunctionProtoType>(); if (Class->getNumVBases()) { if (FPT->isVariadic()) ArgTys.insert(ArgTys.begin() + 1, getContext().IntTy); else ArgTys.push_back(getContext().IntTy); } } void MicrosoftCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) { // The TU defining a dtor is only guaranteed to emit a base destructor. All // other destructor variants are delegating thunks. CGM.EmitGlobal(GlobalDecl(D, Dtor_Base)); } CharUnits MicrosoftCXXABI::getVirtualFunctionPrologueThisAdjustment(GlobalDecl GD) { GD = GD.getCanonicalDecl(); const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); GlobalDecl LookupGD = GD; if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { // Complete destructors take a pointer to the complete object as a // parameter, thus don't need this adjustment. if (GD.getDtorType() == Dtor_Complete) return CharUnits(); // There's no Dtor_Base in vftable but it shares the this adjustment with // the deleting one, so look it up instead. LookupGD = GlobalDecl(DD, Dtor_Deleting); } MicrosoftVTableContext::MethodVFTableLocation ML = CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD); CharUnits Adjustment = ML.VFPtrOffset; // Normal virtual instance methods need to adjust from the vfptr that first // defined the virtual method to the virtual base subobject, but destructors // do not. The vector deleting destructor thunk applies this adjustment for // us if necessary. 
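// For illustration (hypothetical example, not from this file), the
// adjustment is nonzero for an overrider introduced by a non-primary base:
//
//   struct A { virtual void f(); };
//   struct B { virtual void g(); };
//   struct C : A, B { void g() override; };
//
// C::g is entered with 'this' pointing at the B subobject (the vfptr that
// first declared g), so ML.VFPtrOffset is the offset of B within C and the
// prologue shifts 'this' back by that amount to recover the C object.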
if (isa<CXXDestructorDecl>(MD)) Adjustment = CharUnits::Zero(); if (ML.VBase) { const ASTRecordLayout &DerivedLayout = getContext().getASTRecordLayout(MD->getParent()); Adjustment += DerivedLayout.getVBaseClassOffset(ML.VBase); } return Adjustment; } llvm::Value *MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall( CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, bool VirtualCall) { if (!VirtualCall) { // If the call of a virtual function is not virtual, we just have to // compensate for the adjustment the virtual function does in its prologue. CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD); if (Adjustment.isZero()) return This; unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace(); llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS); This = CGF.Builder.CreateBitCast(This, charPtrTy); assert(Adjustment.isPositive()); return CGF.Builder.CreateConstGEP1_32(This, Adjustment.getQuantity()); } GD = GD.getCanonicalDecl(); const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); GlobalDecl LookupGD = GD; if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) { // Complete dtors take a pointer to the complete object, // thus don't need adjustment. if (GD.getDtorType() == Dtor_Complete) return This; // There's only Dtor_Deleting in vftable but it shares the this adjustment // with the base one, so look up the deleting one instead. LookupGD = GlobalDecl(DD, Dtor_Deleting); } MicrosoftVTableContext::MethodVFTableLocation ML = CGM.getMicrosoftVTableContext().getMethodVFTableLocation(LookupGD); unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace(); llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS); CharUnits StaticOffset = ML.VFPtrOffset; // Base destructors expect 'this' to point to the beginning of the base // subobject, not the first vfptr that happens to contain the virtual dtor. // However, we still need to apply the virtual base adjustment. if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) StaticOffset = CharUnits::Zero(); if (ML.VBase) { This = CGF.Builder.CreateBitCast(This, charPtrTy); llvm::Value *VBaseOffset = GetVirtualBaseClassOffset(CGF, This, MD->getParent(), ML.VBase); This = CGF.Builder.CreateInBoundsGEP(This, VBaseOffset); } if (!StaticOffset.isZero()) { assert(StaticOffset.isPositive()); This = CGF.Builder.CreateBitCast(This, charPtrTy); if (ML.VBase) { // Non-virtual adjustment might result in a pointer outside the allocated // object, e.g. if the final overrider class is laid out after the virtual // base that declares a method in the most derived class. // FIXME: Update the code that emits this adjustment in thunks prologues. This = CGF.Builder.CreateConstGEP1_32(This, StaticOffset.getQuantity()); } else { This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This, StaticOffset.getQuantity()); } } return This; } void MicrosoftCXXABI::addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params) { ASTContext &Context = getContext(); const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl()); assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)); if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) { ImplicitParamDecl *IsMostDerived = ImplicitParamDecl::Create(Context, nullptr, CGF.CurGD.getDecl()->getLocation(), &Context.Idents.get("is_most_derived"), Context.IntTy); // The 'most_derived' parameter goes second if the ctor is variadic and last // if it's not. Dtors can't be variadic. 
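// For illustration (hypothetical constructor, not from this file), the
// lowered signatures look roughly like:
//   C::C(C *this, int x, int is_most_derived);       // non-variadic: last
//   C::C(C *this, int is_most_derived, int x, ...);  // variadic: after 'this'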
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); if (FPT->isVariadic()) Params.insert(Params.begin() + 1, IsMostDerived); else Params.push_back(IsMostDerived); getStructorImplicitParamDecl(CGF) = IsMostDerived; } else if (isDeletingDtor(CGF.CurGD)) { ImplicitParamDecl *ShouldDelete = ImplicitParamDecl::Create(Context, nullptr, CGF.CurGD.getDecl()->getLocation(), &Context.Idents.get("should_call_delete"), Context.IntTy); Params.push_back(ShouldDelete); getStructorImplicitParamDecl(CGF) = ShouldDelete; } } llvm::Value *MicrosoftCXXABI::adjustThisParameterInVirtualFunctionPrologue( CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) { // In this ABI, every virtual function takes a pointer to one of the // subobjects that first defines it as the 'this' parameter, rather than a // pointer to the final overrider subobject. Thus, we need to adjust it back // to the final overrider subobject before use. // See comments in the MicrosoftVFTableContext implementation for the details. CharUnits Adjustment = getVirtualFunctionPrologueThisAdjustment(GD); if (Adjustment.isZero()) return This; unsigned AS = cast<llvm::PointerType>(This->getType())->getAddressSpace(); llvm::Type *charPtrTy = CGF.Int8Ty->getPointerTo(AS), *thisTy = This->getType(); This = CGF.Builder.CreateBitCast(This, charPtrTy); assert(Adjustment.isPositive()); This = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, This, -Adjustment.getQuantity()); return CGF.Builder.CreateBitCast(This, thisTy); } void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) { EmitThisParam(CGF); /// If this is a function that the ABI specifies returns 'this', initialize /// the return slot to 'this' at the start of the function. /// /// Unlike the setting of return types, this is done within the ABI /// implementation instead of by clients of CGCXXABI because: /// 1) getThisValue is currently protected /// 2) in theory, an ABI could implement 'this' returns some other way; /// HasThisReturn only specifies a contract, not the implementation if (HasThisReturn(CGF.CurGD)) CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue); else if (hasMostDerivedReturn(CGF.CurGD)) CGF.Builder.CreateStore(CGF.EmitCastToVoidPtr(getThisValue(CGF)), CGF.ReturnValue); const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl()); if (isa<CXXConstructorDecl>(MD) && MD->getParent()->getNumVBases()) { assert(getStructorImplicitParamDecl(CGF) && "no implicit parameter for a constructor with virtual bases?"); getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad( CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "is_most_derived"); } if (isDeletingDtor(CGF.CurGD)) { assert(getStructorImplicitParamDecl(CGF) && "no implicit parameter for a deleting destructor?"); getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad( CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "should_call_delete"); } } unsigned MicrosoftCXXABI::addImplicitConstructorArgs( CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, CallArgList &Args) { assert(Type == Ctor_Complete || Type == Ctor_Base); // Check if we need a 'most_derived' parameter. if (!D->getParent()->getNumVBases()) return 0; // Add the 'most_derived' argument second if we are variadic or last if not. 
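// Sketch of the caller side (the values follow from the code below): a
// complete-object construction such as 'C c;' passes is_most_derived = 1,
// so virtual bases are initialized exactly once, while delegations to
// base-subobject constructors pass 0.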
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>(); llvm::Value *MostDerivedArg = llvm::ConstantInt::get(CGM.Int32Ty, Type == Ctor_Complete); RValue RV = RValue::get(MostDerivedArg); if (MostDerivedArg) { if (FPT->isVariadic()) Args.insert(Args.begin() + 1, CallArg(RV, getContext().IntTy, /*needscopy=*/false)); else Args.add(RV, getContext().IntTy); } return 1; // Added one arg. } void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, llvm::Value *This) { llvm::Value *Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)); if (DD->isVirtual()) { assert(Type != CXXDtorType::Dtor_Deleting && "The deleting destructor should only be called via a virtual call"); This = adjustThisArgumentForVirtualFunctionCall(CGF, GlobalDecl(DD, Type), This, false); } CGF.EmitCXXStructorCall(DD, Callee, ReturnValueSlot(), This, /*ImplicitParam=*/nullptr, /*ImplicitParamTy=*/QualType(), nullptr, getFromDtorType(Type)); } void MicrosoftCXXABI::emitVTableBitSetEntries(VPtrInfo *Info, const CXXRecordDecl *RD, llvm::GlobalVariable *VTable) { if (!getContext().getLangOpts().Sanitize.has(SanitizerKind::CFIVCall) && !getContext().getLangOpts().Sanitize.has(SanitizerKind::CFINVCall) && !getContext().getLangOpts().Sanitize.has(SanitizerKind::CFIDerivedCast) && !getContext().getLangOpts().Sanitize.has(SanitizerKind::CFIUnrelatedCast)) return; llvm::NamedMDNode *BitsetsMD = CGM.getModule().getOrInsertNamedMetadata("llvm.bitsets"); // The location of the first virtual function pointer in the virtual table, // aka the "address point" on Itanium. This is at offset 0 if RTTI is // disabled, or sizeof(void*) if RTTI is enabled. CharUnits AddressPoint = getContext().getLangOpts().RTTIData ? getContext().toCharUnitsFromBits( getContext().getTargetInfo().getPointerWidth(0)) : CharUnits::Zero(); if (Info->PathToBaseWithVPtr.empty()) { if (!CGM.IsCFIBlacklistedRecord(RD)) BitsetsMD->addOperand( CGM.CreateVTableBitSetEntry(VTable, AddressPoint, RD)); return; } // Add a bitset entry for the least derived base belonging to this vftable. if (!CGM.IsCFIBlacklistedRecord(Info->PathToBaseWithVPtr.back())) BitsetsMD->addOperand(CGM.CreateVTableBitSetEntry( VTable, AddressPoint, Info->PathToBaseWithVPtr.back())); // Add a bitset entry for each derived class that is laid out at the same // offset as the least derived base. for (unsigned I = Info->PathToBaseWithVPtr.size() - 1; I != 0; --I) { const CXXRecordDecl *DerivedRD = Info->PathToBaseWithVPtr[I - 1]; const CXXRecordDecl *BaseRD = Info->PathToBaseWithVPtr[I]; const ASTRecordLayout &Layout = getContext().getASTRecordLayout(DerivedRD); CharUnits Offset; auto VBI = Layout.getVBaseOffsetsMap().find(BaseRD); if (VBI == Layout.getVBaseOffsetsMap().end()) Offset = Layout.getBaseClassOffset(BaseRD); else Offset = VBI->second.VBaseOffset; if (!Offset.isZero()) return; if (!CGM.IsCFIBlacklistedRecord(DerivedRD)) BitsetsMD->addOperand( CGM.CreateVTableBitSetEntry(VTable, AddressPoint, DerivedRD)); } // Finally do the same for the most derived class. 
if (Info->FullOffsetInMDC.isZero() && !CGM.IsCFIBlacklistedRecord(RD)) BitsetsMD->addOperand( CGM.CreateVTableBitSetEntry(VTable, AddressPoint, RD)); } void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD) { MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext(); const VPtrInfoVector &VFPtrs = VFTContext.getVFPtrOffsets(RD); for (VPtrInfo *Info : VFPtrs) { llvm::GlobalVariable *VTable = getAddrOfVTable(RD, Info->FullOffsetInMDC); if (VTable->hasInitializer()) continue; llvm::Constant *RTTI = getContext().getLangOpts().RTTIData ? getMSCompleteObjectLocator(RD, Info) : nullptr; const VTableLayout &VTLayout = VFTContext.getVFTableLayout(RD, Info->FullOffsetInMDC); llvm::Constant *Init = CGVT.CreateVTableInitializer( RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(), VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks(), RTTI); VTable->setInitializer(Init); emitVTableBitSetEntries(Info, RD, VTable); } } llvm::Value *MicrosoftCXXABI::getVTableAddressPointInStructor( CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) { NeedsVirtualOffset = (NearestVBase != nullptr); (void)getAddrOfVTable(VTableClass, Base.getBaseOffset()); VFTableIdTy ID(VTableClass, Base.getBaseOffset()); llvm::GlobalValue *VTableAddressPoint = VFTablesMap[ID]; if (!VTableAddressPoint) { assert(Base.getBase()->getNumVBases() && !getContext().getASTRecordLayout(Base.getBase()).hasOwnVFPtr()); } return VTableAddressPoint; } static void mangleVFTableName(MicrosoftMangleContext &MangleContext, const CXXRecordDecl *RD, const VPtrInfo *VFPtr, SmallString<256> &Name) { llvm::raw_svector_ostream Out(Name); MangleContext.mangleCXXVFTable(RD, VFPtr->MangledPath, Out); } llvm::Constant *MicrosoftCXXABI::getVTableAddressPointForConstExpr( BaseSubobject Base, const CXXRecordDecl *VTableClass) { (void)getAddrOfVTable(VTableClass, Base.getBaseOffset()); VFTableIdTy ID(VTableClass, Base.getBaseOffset()); llvm::GlobalValue *VFTable = VFTablesMap[ID]; assert(VFTable && "Couldn't find a vftable for the given base?"); return VFTable; } llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) { // getAddrOfVTable may return 0 if asked to get an address of a vtable which // shouldn't be used in the given record type. We want to cache this result in // VFTablesMap, thus a simple zero check is not sufficient. VFTableIdTy ID(RD, VPtrOffset); VTablesMapTy::iterator I; bool Inserted; std::tie(I, Inserted) = VTablesMap.insert(std::make_pair(ID, nullptr)); if (!Inserted) return I->second; llvm::GlobalVariable *&VTable = I->second; MicrosoftVTableContext &VTContext = CGM.getMicrosoftVTableContext(); const VPtrInfoVector &VFPtrs = VTContext.getVFPtrOffsets(RD); if (DeferredVFTables.insert(RD).second) { // We haven't processed this record type before. // Queue up this v-table for possible deferred emission. CGM.addDeferredVTable(RD); #ifndef NDEBUG // Create all the vftables at once in order to make sure each vftable has // a unique mangled name. 
llvm::StringSet<> ObservedMangledNames; for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) { SmallString<256> Name; mangleVFTableName(getMangleContext(), RD, VFPtrs[J], Name); if (!ObservedMangledNames.insert(Name.str()).second) llvm_unreachable("Already saw this mangling before?"); } #endif } VPtrInfo *const *VFPtrI = std::find_if(VFPtrs.begin(), VFPtrs.end(), [&](VPtrInfo *VPI) { return VPI->FullOffsetInMDC == VPtrOffset; }); if (VFPtrI == VFPtrs.end()) { VFTablesMap[ID] = nullptr; return nullptr; } VPtrInfo *VFPtr = *VFPtrI; SmallString<256> VFTableName; mangleVFTableName(getMangleContext(), RD, VFPtr, VFTableName); llvm::GlobalValue::LinkageTypes VFTableLinkage = CGM.getVTableLinkage(RD); bool VFTableComesFromAnotherTU = llvm::GlobalValue::isAvailableExternallyLinkage(VFTableLinkage) || llvm::GlobalValue::isExternalLinkage(VFTableLinkage); bool VTableAliasIsRequired = !VFTableComesFromAnotherTU && getContext().getLangOpts().RTTIData; if (llvm::GlobalValue *VFTable = CGM.getModule().getNamedGlobal(VFTableName)) { VFTablesMap[ID] = VFTable; return VTableAliasIsRequired ? cast<llvm::GlobalVariable>( cast<llvm::GlobalAlias>(VFTable)->getBaseObject()) : cast<llvm::GlobalVariable>(VFTable); } uint64_t NumVTableSlots = VTContext.getVFTableLayout(RD, VFPtr->FullOffsetInMDC) .getNumVTableComponents(); llvm::GlobalValue::LinkageTypes VTableLinkage = VTableAliasIsRequired ? llvm::GlobalValue::PrivateLinkage : VFTableLinkage; StringRef VTableName = VTableAliasIsRequired ? StringRef() : VFTableName.str(); llvm::ArrayType *VTableType = llvm::ArrayType::get(CGM.Int8PtrTy, NumVTableSlots); // Create a backing variable for the contents of VTable. The VTable may // or may not include space for a pointer to RTTI data. llvm::GlobalValue *VFTable; VTable = new llvm::GlobalVariable(CGM.getModule(), VTableType, /*isConstant=*/true, VTableLinkage, /*Initializer=*/nullptr, VTableName); VTable->setUnnamedAddr(true); llvm::Comdat *C = nullptr; if (!VFTableComesFromAnotherTU && (llvm::GlobalValue::isWeakForLinker(VFTableLinkage) || (llvm::GlobalValue::isLocalLinkage(VFTableLinkage) && VTableAliasIsRequired))) C = CGM.getModule().getOrInsertComdat(VFTableName.str()); // Only insert a pointer into the VFTable for RTTI data if we are not // importing it. We never reference the RTTI data directly so there is no // need to make room for it. if (VTableAliasIsRequired) { llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0), llvm::ConstantInt::get(CGM.IntTy, 1)}; // Create a GEP which points just after the first entry in the VFTable, // this should be the location of the first virtual method. llvm::Constant *VTableGEP = llvm::ConstantExpr::getInBoundsGetElementPtr( VTable->getValueType(), VTable, GEPIndices); if (llvm::GlobalValue::isWeakForLinker(VFTableLinkage)) { VFTableLinkage = llvm::GlobalValue::ExternalLinkage; if (C) C->setSelectionKind(llvm::Comdat::Largest); } VFTable = llvm::GlobalAlias::create( cast<llvm::PointerType>(VTableGEP->getType()), VFTableLinkage, VFTableName.str(), VTableGEP, &CGM.getModule()); VFTable->setUnnamedAddr(true); } else { // We don't need a GlobalAlias to be a symbol for the VTable if we won't // be referencing any RTTI data. // The GlobalVariable will end up being an appropriate definition of the // VFTable.
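// For contrast, a rough sketch of the alias case above (mangled names and
// exact alias syntax are illustrative only):
//   @backing = private unnamed_addr constant [2 x i8*]
//       [i8* <complete object locator>, i8* <&S::f>]
//   @"\01??_7S@@6B@" = unnamed_addr alias ... getelementptr(@backing, 0, 1)
// i.e. the public vftable symbol points just past the RTTI slot, at the
// first virtual method entry, whereas in this branch the GlobalVariable
// itself carries the vftable name.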
VFTable = VTable; } if (C) VTable->setComdat(C); if (RD->hasAttr<DLLImportAttr>()) VFTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); else if (RD->hasAttr<DLLExportAttr>()) VFTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); VFTablesMap[ID] = VFTable; return VTable; } // Compute the identity of the most derived class whose virtual table is located // at the given offset into RD. static const CXXRecordDecl *getClassAtVTableLocation(ASTContext &Ctx, const CXXRecordDecl *RD, CharUnits Offset) { if (Offset.isZero()) return RD; const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD); const CXXRecordDecl *MaxBase = nullptr; CharUnits MaxBaseOffset; for (auto &&B : RD->bases()) { const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl(); CharUnits BaseOffset = Layout.getBaseClassOffset(Base); if (BaseOffset <= Offset && BaseOffset >= MaxBaseOffset) { MaxBase = Base; MaxBaseOffset = BaseOffset; } } for (auto &&B : RD->vbases()) { const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl(); CharUnits BaseOffset = Layout.getVBaseClassOffset(Base); if (BaseOffset <= Offset && BaseOffset >= MaxBaseOffset) { MaxBase = Base; MaxBaseOffset = BaseOffset; } } assert(MaxBase); return getClassAtVTableLocation(Ctx, MaxBase, Offset - MaxBaseOffset); } // Compute the identity of the most derived class whose virtual table is located // at the MethodVFTableLocation ML. static const CXXRecordDecl * getClassAtVTableLocation(ASTContext &Ctx, GlobalDecl GD, MicrosoftVTableContext::MethodVFTableLocation &ML) { const CXXRecordDecl *RD = ML.VBase; if (!RD) RD = cast<CXXMethodDecl>(GD.getDecl())->getParent(); return getClassAtVTableLocation(Ctx, RD, ML.VFPtrOffset); } llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, llvm::Type *Ty, SourceLocation Loc) { GD = GD.getCanonicalDecl(); CGBuilderTy &Builder = CGF.Builder; Ty = Ty->getPointerTo()->getPointerTo(); llvm::Value *VPtr = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true); llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty); MicrosoftVTableContext::MethodVFTableLocation ML = CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD); if (CGF.SanOpts.has(SanitizerKind::CFIVCall)) CGF.EmitVTablePtrCheck(getClassAtVTableLocation(getContext(), GD, ML), VTable, CodeGenFunction::CFITCK_VCall, Loc); llvm::Value *VFuncPtr = Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn"); return Builder.CreateLoad(VFuncPtr); } llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall( CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType, llvm::Value *This, const CXXMemberCallExpr *CE) { assert(CE == nullptr || CE->arg_begin() == CE->arg_end()); assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete); // We have only one destructor in the vftable but can get both behaviors // by passing an implicit int parameter. GlobalDecl GD(Dtor, Dtor_Deleting); const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration( Dtor, StructorType::Deleting); llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo); llvm::Value *Callee = getVirtualFunctionPointer( CGF, GD, This, Ty, CE ? 
CE->getLocStart() : SourceLocation()); ASTContext &Context = getContext(); llvm::Value *ImplicitParam = llvm::ConstantInt::get( llvm::IntegerType::getInt32Ty(CGF.getLLVMContext()), DtorType == Dtor_Deleting); This = adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true); RValue RV = CGF.EmitCXXStructorCall(Dtor, Callee, ReturnValueSlot(), This, ImplicitParam, Context.IntTy, CE, StructorType::Deleting); return RV.getScalarVal(); } const VBTableGlobals & MicrosoftCXXABI::enumerateVBTables(const CXXRecordDecl *RD) { // At this layer, we can key the cache off of a single class, which is much // easier than caching each vbtable individually. llvm::DenseMap<const CXXRecordDecl*, VBTableGlobals>::iterator Entry; bool Added; std::tie(Entry, Added) = VBTablesMap.insert(std::make_pair(RD, VBTableGlobals())); VBTableGlobals &VBGlobals = Entry->second; if (!Added) return VBGlobals; MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext(); VBGlobals.VBTables = &Context.enumerateVBTables(RD); // Cache the globals for all vbtables so we don't have to recompute the // mangled names. llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD); for (VPtrInfoVector::const_iterator I = VBGlobals.VBTables->begin(), E = VBGlobals.VBTables->end(); I != E; ++I) { VBGlobals.Globals.push_back(getAddrOfVBTable(**I, RD, Linkage)); } return VBGlobals; } llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk( const CXXMethodDecl *MD, const MicrosoftVTableContext::MethodVFTableLocation &ML) { assert(!isa<CXXConstructorDecl>(MD) && !isa<CXXDestructorDecl>(MD) && "can't form pointers to ctors or virtual dtors"); // Calculate the mangled name. SmallString<256> ThunkName; llvm::raw_svector_ostream Out(ThunkName); getMangleContext().mangleVirtualMemPtrThunk(MD, Out); Out.flush(); // If the thunk has been generated previously, just return it. if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName)) return cast<llvm::Function>(GV); // Create the llvm::Function. const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSMemberPointerThunk(MD); llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo); llvm::Function *ThunkFn = llvm::Function::Create(ThunkTy, llvm::Function::ExternalLinkage, ThunkName.str(), &CGM.getModule()); assert(ThunkFn->getName() == ThunkName && "name was uniqued!"); ThunkFn->setLinkage(MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage : llvm::GlobalValue::InternalLinkage); if (MD->isExternallyVisible()) ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName())); CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn); CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn); // Add the "thunk" attribute so that LLVM knows that the return type is // meaningless. These thunks can be used to call functions with differing // return types, and the caller is required to cast the prototype // appropriately to extract the correct value. ThunkFn->addFnAttr("thunk"); // These thunks can be compared, so they are not unnamed. ThunkFn->setUnnamedAddr(false); // Start codegen. CodeGenFunction CGF(CGM); CGF.CurGD = GlobalDecl(MD); CGF.CurFuncIsThunk = true; // Build FunctionArgs, but only include the implicit 'this' parameter // declaration. FunctionArgList FunctionArgs; buildThisParam(CGF, FunctionArgs); // Start defining the function. CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo, FunctionArgs, MD->getLocation(), SourceLocation()); EmitThisParam(CGF); // Load the vfptr and then callee from the vftable. 
// The callee should have adjusted 'this' so that the vfptr is at offset zero. llvm::Value *VTable = CGF.GetVTablePtr( getThisValue(CGF), ThunkTy->getPointerTo()->getPointerTo()); llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn"); llvm::Value *Callee = CGF.Builder.CreateLoad(VFuncPtr); CGF.EmitMustTailThunk(MD, getThisValue(CGF), Callee); return ThunkFn; } void MicrosoftCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) { const VBTableGlobals &VBGlobals = enumerateVBTables(RD); for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) { const VPtrInfo *VBT = (*VBGlobals.VBTables)[I]; llvm::GlobalVariable *GV = VBGlobals.Globals[I]; if (GV->isDeclaration()) emitVBTableDefinition(*VBT, RD, GV); } } llvm::GlobalVariable * MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD, llvm::GlobalVariable::LinkageTypes Linkage) { SmallString<256> OutName; llvm::raw_svector_ostream Out(OutName); getMangleContext().mangleCXXVBTable(RD, VBT.MangledPath, Out); Out.flush(); StringRef Name = OutName.str(); llvm::ArrayType *VBTableType = llvm::ArrayType::get(CGM.IntTy, 1 + VBT.ReusingBase->getNumVBases()); assert(!CGM.getModule().getNamedGlobal(Name) && "vbtable with this name already exists: mangling bug?"); llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(Name, VBTableType, Linkage); GV->setUnnamedAddr(true); if (RD->hasAttr<DLLImportAttr>()) GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass); else if (RD->hasAttr<DLLExportAttr>()) GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass); if (!GV->hasExternalLinkage()) emitVBTableDefinition(VBT, RD, GV); return GV; } void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT, const CXXRecordDecl *RD, llvm::GlobalVariable *GV) const { const CXXRecordDecl *ReusingBase = VBT.ReusingBase; assert(RD->getNumVBases() && ReusingBase->getNumVBases() && "should only emit vbtables for classes with vbtables"); const ASTRecordLayout &BaseLayout = getContext().getASTRecordLayout(VBT.BaseWithVPtr); const ASTRecordLayout &DerivedLayout = getContext().getASTRecordLayout(RD); SmallVector<llvm::Constant *, 4> Offsets(1 + ReusingBase->getNumVBases(), nullptr); // The offset from ReusingBase's vbptr to itself always leads. CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset(); Offsets[0] = llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity()); MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext(); for (const auto &I : ReusingBase->vbases()) { const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl(); CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase); assert(!Offset.isNegative()); // Make it relative to the subobject vbptr.
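// For a concrete picture, hypothetical classes:
//   struct A { ... }; struct B { ... };
//   struct C : virtual A, virtual B { int c; };
// C's vbtable would be roughly { -vbptr_offset, offset_of_A, offset_of_B },
// where each entry is a 32-bit displacement measured from C's vbptr: entry
// 0 points back at the subobject that introduced the vbptr, and the later
// entries are indexed by vbtable index, as computed below.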
CharUnits CompleteVBPtrOffset = VBT.NonVirtualOffset + VBPtrOffset; if (VBT.getVBaseWithVPtr()) CompleteVBPtrOffset += DerivedLayout.getVBaseClassOffset(VBT.getVBaseWithVPtr()); Offset -= CompleteVBPtrOffset; unsigned VBIndex = Context.getVBTableIndex(ReusingBase, VBase); assert(Offsets[VBIndex] == nullptr && "The same vbindex seen twice?"); Offsets[VBIndex] = llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity()); } assert(Offsets.size() == cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType()) ->getElementType())->getNumElements()); llvm::ArrayType *VBTableType = llvm::ArrayType::get(CGM.IntTy, Offsets.size()); llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets); GV->setInitializer(Init); } llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This, const ThisAdjustment &TA) { if (TA.isEmpty()) return This; llvm::Value *V = CGF.Builder.CreateBitCast(This, CGF.Int8PtrTy); if (!TA.Virtual.isEmpty()) { assert(TA.Virtual.Microsoft.VtordispOffset < 0); // Adjust the this argument based on the vtordisp value. llvm::Value *VtorDispPtr = CGF.Builder.CreateConstGEP1_32(V, TA.Virtual.Microsoft.VtordispOffset); VtorDispPtr = CGF.Builder.CreateBitCast(VtorDispPtr, CGF.Int32Ty->getPointerTo()); llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp"); V = CGF.Builder.CreateGEP(V, CGF.Builder.CreateNeg(VtorDisp)); if (TA.Virtual.Microsoft.VBPtrOffset) { // If the final overrider is defined in a virtual base other than the one // that holds the vfptr, we have to use a vtordispex thunk which looks up // the vbtable of the derived class. assert(TA.Virtual.Microsoft.VBPtrOffset > 0); assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0); llvm::Value *VBPtr; llvm::Value *VBaseOffset = GetVBaseOffsetFromVBPtr(CGF, V, -TA.Virtual.Microsoft.VBPtrOffset, TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr); V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset); } } if (TA.NonVirtual) { // Non-virtual adjustment might result in a pointer outside the allocated // object, e.g. if the final overrider class is laid out after the virtual // base that declares a method in the most derived class. V = CGF.Builder.CreateConstGEP1_32(V, TA.NonVirtual); } // Don't need to bitcast back, the call CodeGen will handle this. return V; } llvm::Value * MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret, const ReturnAdjustment &RA) { if (RA.isEmpty()) return Ret; llvm::Value *V = CGF.Builder.CreateBitCast(Ret, CGF.Int8PtrTy); if (RA.Virtual.Microsoft.VBIndex) { assert(RA.Virtual.Microsoft.VBIndex > 0); const ASTContext &Context = getContext(); int32_t IntSize = Context.getTypeSizeInChars(Context.IntTy).getQuantity(); llvm::Value *VBPtr; llvm::Value *VBaseOffset = GetVBaseOffsetFromVBPtr(CGF, V, RA.Virtual.Microsoft.VBPtrOffset, IntSize * RA.Virtual.Microsoft.VBIndex, &VBPtr); V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset); } if (RA.NonVirtual) V = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, V, RA.NonVirtual); // Cast back to the original type. return CGF.Builder.CreateBitCast(V, Ret->getType()); } bool MicrosoftCXXABI::requiresArrayCookie(const CXXDeleteExpr *expr, QualType elementType) { // Microsoft seems to completely ignore the possibility of a // two-argument usual deallocation function. return elementType.isDestructedType(); } bool MicrosoftCXXABI::requiresArrayCookie(const CXXNewExpr *expr) { // Microsoft seems to completely ignore the possibility of a // two-argument usual deallocation function. 
return expr->getAllocatedType().isDestructedType(); } CharUnits MicrosoftCXXABI::getArrayCookieSizeImpl(QualType type) { // The array cookie is always a size_t; we then pad that out to the // alignment of the element type. ASTContext &Ctx = getContext(); return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()), Ctx.getTypeAlignInChars(type)); } llvm::Value *MicrosoftCXXABI::readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr, CharUnits cookieSize) { unsigned AS = allocPtr->getType()->getPointerAddressSpace(); llvm::Value *numElementsPtr = CGF.Builder.CreateBitCast(allocPtr, CGF.SizeTy->getPointerTo(AS)); return CGF.Builder.CreateLoad(numElementsPtr); } llvm::Value* MicrosoftCXXABI::InitializeArrayCookie(CodeGenFunction &CGF, llvm::Value *newPtr, llvm::Value *numElements, const CXXNewExpr *expr, QualType elementType) { assert(requiresArrayCookie(expr)); // The size of the cookie. CharUnits cookieSize = getArrayCookieSizeImpl(elementType); // Compute an offset to the cookie. llvm::Value *cookiePtr = newPtr; // Write the number of elements into the appropriate slot. unsigned AS = newPtr->getType()->getPointerAddressSpace(); llvm::Value *numElementsPtr = CGF.Builder.CreateBitCast(cookiePtr, CGF.SizeTy->getPointerTo(AS)); CGF.Builder.CreateStore(numElements, numElementsPtr); // Finally, compute a pointer to the actual data buffer by skipping // over the cookie completely. return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr, cookieSize.getQuantity()); } static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD, llvm::Constant *Dtor, llvm::Constant *Addr) { // Create a function which calls the destructor. llvm::Constant *DtorStub = CGF.createAtExitStub(VD, Dtor, Addr); // extern "C" int __tlregdtor(void (*f)(void)); llvm::FunctionType *TLRegDtorTy = llvm::FunctionType::get( CGF.IntTy, DtorStub->getType(), /*IsVarArg=*/false); llvm::Constant *TLRegDtor = CGF.CGM.CreateRuntimeFunction(TLRegDtorTy, "__tlregdtor"); if (llvm::Function *TLRegDtorFn = dyn_cast<llvm::Function>(TLRegDtor)) TLRegDtorFn->setDoesNotThrow(); CGF.EmitNounwindRuntimeCall(TLRegDtor, DtorStub); } void MicrosoftCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::Constant *Dtor, llvm::Constant *Addr) { if (D.getTLSKind()) return emitGlobalDtorWithTLRegDtor(CGF, D, Dtor, Addr); // The default behavior is to use atexit. CGF.registerGlobalDtorWithAtExit(D, Dtor, Addr); } void MicrosoftCXXABI::EmitThreadLocalInitFuncs( CodeGenModule &CGM, ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>> CXXThreadLocals, ArrayRef<llvm::Function *> CXXThreadLocalInits, ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) { // This will create a GV in the .CRT$XDU section. It will point to our // initialization function. The CRT will call all of these function // pointers at start-up time and, eventually, at thread-creation time. auto AddToXDU = [&CGM](llvm::Function *InitFunc) { llvm::GlobalVariable *InitFuncPtr = new llvm::GlobalVariable( CGM.getModule(), InitFunc->getType(), /*IsConstant=*/true, llvm::GlobalVariable::InternalLinkage, InitFunc, Twine(InitFunc->getName(), "$initializer$")); InitFuncPtr->setSection(".CRT$XDU"); // This variable has discardable linkage, we have to add it to @llvm.used to // ensure it won't get discarded. 
CGM.addUsedGlobal(InitFuncPtr); return InitFuncPtr; }; std::vector<llvm::Function *> NonComdatInits; for (size_t I = 0, E = CXXThreadLocalInitVars.size(); I != E; ++I) { llvm::GlobalVariable *GV = CXXThreadLocalInitVars[I]; llvm::Function *F = CXXThreadLocalInits[I]; // If the GV is already in a comdat group, then we have to join it. if (llvm::Comdat *C = GV->getComdat()) AddToXDU(F)->setComdat(C); else NonComdatInits.push_back(F); } if (!NonComdatInits.empty()) { llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); llvm::Function *InitFunc = CGM.CreateGlobalInitOrDestructFunction( FTy, "__tls_init", SourceLocation(), /*TLS=*/true); CodeGenFunction(CGM).GenerateCXXGlobalInitFunc(InitFunc, NonComdatInits); AddToXDU(InitFunc); } } LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType) { CGF.CGM.ErrorUnsupported(VD, "thread wrappers"); return LValue(); } static llvm::GlobalVariable *getInitThreadEpochPtr(CodeGenModule &CGM) { StringRef VarName("_Init_thread_epoch"); if (auto *GV = CGM.getModule().getNamedGlobal(VarName)) return GV; auto *GV = new llvm::GlobalVariable( CGM.getModule(), CGM.IntTy, /*Constant=*/false, llvm::GlobalVariable::ExternalLinkage, /*Initializer=*/nullptr, VarName, /*InsertBefore=*/nullptr, llvm::GlobalVariable::GeneralDynamicTLSModel); GV->setAlignment(CGM.getTarget().getIntAlign() / 8); return GV; } static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) { llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), CGM.IntTy->getPointerTo(), /*isVarArg=*/false); return CGM.CreateRuntimeFunction( FTy, "_Init_thread_header", llvm::AttributeSet::get(CGM.getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoUnwind)); } static llvm::Constant *getInitThreadFooterFn(CodeGenModule &CGM) { llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), CGM.IntTy->getPointerTo(), /*isVarArg=*/false); return CGM.CreateRuntimeFunction( FTy, "_Init_thread_footer", llvm::AttributeSet::get(CGM.getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoUnwind)); } static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) { llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()), CGM.IntTy->getPointerTo(), /*isVarArg=*/false); return CGM.CreateRuntimeFunction( FTy, "_Init_thread_abort", llvm::AttributeSet::get(CGM.getLLVMContext(), llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoUnwind)); } namespace { struct ResetGuardBit : EHScopeStack::Cleanup { llvm::GlobalVariable *Guard; unsigned GuardNum; ResetGuardBit(llvm::GlobalVariable *Guard, unsigned GuardNum) : Guard(Guard), GuardNum(GuardNum) {} void Emit(CodeGenFunction &CGF, Flags flags) override { // Reset the bit in the mask so that the static variable may be // reinitialized. CGBuilderTy &Builder = CGF.Builder; llvm::LoadInst *LI = Builder.CreateLoad(Guard); llvm::ConstantInt *Mask = llvm::ConstantInt::get(CGF.IntTy, ~((uint64_t)1U << (uint64_t)GuardNum)); // HLSL Change - explicit uint64_t Builder.CreateStore(Builder.CreateAnd(LI, Mask), Guard); } }; struct CallInitThreadAbort : EHScopeStack::Cleanup { llvm::GlobalVariable *Guard; CallInitThreadAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {} void Emit(CodeGenFunction &CGF, Flags flags) override { // Calling _Init_thread_abort will reset the guard's state. 
CGF.EmitNounwindRuntimeCall(getInitThreadAbortFn(CGF.CGM), Guard); } }; } void MicrosoftCXXABI::EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, llvm::GlobalVariable *GV, bool PerformInit) { // MSVC only uses guards for static locals. if (!D.isStaticLocal()) { assert(GV->hasWeakLinkage() || GV->hasLinkOnceLinkage()); // GlobalOpt is allowed to discard the initializer, so use linkonce_odr. llvm::Function *F = CGF.CurFn; F->setLinkage(llvm::GlobalValue::LinkOnceODRLinkage); F->setComdat(CGM.getModule().getOrInsertComdat(F->getName())); CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit); return; } bool ThreadlocalStatic = D.getTLSKind(); bool ThreadsafeStatic = getContext().getLangOpts().ThreadsafeStatics; // Thread-safe static variables which aren't thread-specific have a // per-variable guard. bool HasPerVariableGuard = ThreadsafeStatic && !ThreadlocalStatic; CGBuilderTy &Builder = CGF.Builder; llvm::IntegerType *GuardTy = CGF.Int32Ty; llvm::ConstantInt *Zero = llvm::ConstantInt::get(GuardTy, 0); // Get the guard variable for this function if we have one already. GuardInfo *GI = nullptr; if (ThreadlocalStatic) GI = &ThreadLocalGuardVariableMap[D.getDeclContext()]; else if (!ThreadsafeStatic) GI = &GuardVariableMap[D.getDeclContext()]; llvm::GlobalVariable *GuardVar = GI ? GI->Guard : nullptr; unsigned GuardNum; if (D.isExternallyVisible()) { // Externally visible variables have to be numbered in Sema to properly // handle unreachable VarDecls. GuardNum = getContext().getStaticLocalNumber(&D); assert(GuardNum > 0); GuardNum--; } else if (HasPerVariableGuard) { GuardNum = ThreadSafeGuardNumMap[D.getDeclContext()]++; } else { // Non-externally visible variables are numbered here in CodeGen. GuardNum = GI->BitIndex++; } if (!HasPerVariableGuard && GuardNum >= 32) { if (D.isExternallyVisible()) ErrorUnsupportedABI(CGF, "more than 32 guarded initializations"); GuardNum %= 32; GuardVar = nullptr; } if (!GuardVar) { // Mangle the name for the guard. SmallString<256> GuardName; { llvm::raw_svector_ostream Out(GuardName); if (HasPerVariableGuard) getMangleContext().mangleThreadSafeStaticGuardVariable(&D, GuardNum, Out); else getMangleContext().mangleStaticGuardVariable(&D, Out); Out.flush(); } // Create the guard variable with a zero-initializer. Just absorb linkage, // visibility and dll storage class from the guarded variable. GuardVar = new llvm::GlobalVariable(CGM.getModule(), GuardTy, /*isConstant=*/false, GV->getLinkage(), Zero, GuardName.str()); GuardVar->setVisibility(GV->getVisibility()); GuardVar->setDLLStorageClass(GV->getDLLStorageClass()); if (GuardVar->isWeakForLinker()) GuardVar->setComdat( CGM.getModule().getOrInsertComdat(GuardVar->getName())); if (D.getTLSKind()) GuardVar->setThreadLocal(true); if (GI && !HasPerVariableGuard) GI->Guard = GuardVar; } assert(GuardVar->getLinkage() == GV->getLinkage() && "static local from the same function had different linkage"); if (!HasPerVariableGuard) { // Pseudo code for the test: // if (!(GuardVar & MyGuardBit)) { // GuardVar |= MyGuardBit; // ... initialize the object ...; // } // Test our bit from the guard variable. 
llvm::ConstantInt *Bit = llvm::ConstantInt::get(GuardTy, (uint64_t)(1U) << GuardNum); llvm::LoadInst *LI = Builder.CreateLoad(GuardVar); llvm::Value *IsInitialized = Builder.CreateICmpNE(Builder.CreateAnd(LI, Bit), Zero); llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init"); llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end"); Builder.CreateCondBr(IsInitialized, EndBlock, InitBlock); // Set our bit in the guard variable and emit the initializer and add a global // destructor if appropriate. CGF.EmitBlock(InitBlock); Builder.CreateStore(Builder.CreateOr(LI, Bit), GuardVar); CGF.EHStack.pushCleanup<ResetGuardBit>(EHCleanup, GuardVar, GuardNum); CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit); CGF.PopCleanupBlock(); Builder.CreateBr(EndBlock); // Continue. CGF.EmitBlock(EndBlock); } else { // Pseudo code for the test: // if (TSS > _Init_thread_epoch) { // _Init_thread_header(&TSS); // if (TSS == -1) { // ... initialize the object ...; // _Init_thread_footer(&TSS); // } // } // // The algorithm is almost identical to what can be found in the appendix // found in N2325. unsigned IntAlign = CGM.getTarget().getIntAlign() / 8; // This BasicBlock determines whether or not we have any work to do. llvm::LoadInst *FirstGuardLoad = Builder.CreateAlignedLoad(GuardVar, IntAlign); FirstGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered); llvm::LoadInst *InitThreadEpoch = Builder.CreateLoad(getInitThreadEpochPtr(CGM)); llvm::Value *IsUninitialized = Builder.CreateICmpSGT(FirstGuardLoad, InitThreadEpoch); llvm::BasicBlock *AttemptInitBlock = CGF.createBasicBlock("init.attempt"); llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end"); Builder.CreateCondBr(IsUninitialized, AttemptInitBlock, EndBlock); // This BasicBlock attempts to determine whether or not this thread is // responsible for doing the initialization. CGF.EmitBlock(AttemptInitBlock); CGF.EmitNounwindRuntimeCall(getInitThreadHeaderFn(CGM), GuardVar); llvm::LoadInst *SecondGuardLoad = Builder.CreateAlignedLoad(GuardVar, IntAlign); SecondGuardLoad->setOrdering(llvm::AtomicOrdering::Unordered); llvm::Value *ShouldDoInit = Builder.CreateICmpEQ(SecondGuardLoad, getAllOnesInt()); llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init"); Builder.CreateCondBr(ShouldDoInit, InitBlock, EndBlock); // Ok, we ended up getting selected as the initializing thread. CGF.EmitBlock(InitBlock); CGF.EHStack.pushCleanup<CallInitThreadAbort>(EHCleanup, GuardVar); CGF.EmitCXXGlobalVarDeclInit(D, GV, PerformInit); CGF.PopCleanupBlock(); CGF.EmitNounwindRuntimeCall(getInitThreadFooterFn(CGM), GuardVar); Builder.CreateBr(EndBlock); CGF.EmitBlock(EndBlock); } } bool MicrosoftCXXABI::isZeroInitializable(const MemberPointerType *MPT) { // Null-ness for function memptrs only depends on the first field, which is // the function pointer. The rest don't matter, so we can zero initialize. if (MPT->isMemberFunctionPointer()) return true; // The virtual base adjustment field is always -1 for null, so if we have one // we can't zero initialize. The field offset is sometimes also -1 if 0 is a // valid field offset.
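// As a quick reference for the representations handled here and built in
// ConvertMemberPointerType below (a summary derived from the hasXXXField
// predicates; "nv-adjust" exists for member functions only):
//   data,     single/multiple: { field-offset }
//   data,     virtual:         { field-offset, vbtable-index }
//   data,     unspecified:     { field-offset, vbptr-offset, vbtable-index }
//   function, single:          { fn-pointer }
//   function, multiple:        { fn-pointer, nv-adjust }
//   function, virtual:         { fn-pointer, nv-adjust, vbtable-index }
//   function, unspecified:     { fn-pointer, nv-adjust, vbptr-offset, vbtable-index }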
const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); return (!MSInheritanceAttr::hasVBTableOffsetField(Inheritance) && RD->nullFieldOffsetIsZero()); } llvm::Type * MicrosoftCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); llvm::SmallVector<llvm::Type *, 4> fields; if (MPT->isMemberFunctionPointer()) fields.push_back(CGM.VoidPtrTy); // FunctionPointerOrVirtualThunk else fields.push_back(CGM.IntTy); // FieldOffset if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(), Inheritance)) fields.push_back(CGM.IntTy); if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) fields.push_back(CGM.IntTy); if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance)) fields.push_back(CGM.IntTy); // VirtualBaseAdjustmentOffset if (fields.size() == 1) return fields[0]; return llvm::StructType::get(CGM.getLLVMContext(), fields); } void MicrosoftCXXABI:: GetNullMemberPointerFields(const MemberPointerType *MPT, llvm::SmallVectorImpl<llvm::Constant *> &fields) { assert(fields.empty()); const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); if (MPT->isMemberFunctionPointer()) { // FunctionPointerOrVirtualThunk fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy)); } else { if (RD->nullFieldOffsetIsZero()) fields.push_back(getZeroInt()); // FieldOffset else fields.push_back(getAllOnesInt()); // FieldOffset } if (MSInheritanceAttr::hasNVOffsetField(MPT->isMemberFunctionPointer(), Inheritance)) fields.push_back(getZeroInt()); if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) fields.push_back(getZeroInt()); if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance)) fields.push_back(getAllOnesInt()); } llvm::Constant * MicrosoftCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { llvm::SmallVector<llvm::Constant *, 4> fields; GetNullMemberPointerFields(MPT, fields); if (fields.size() == 1) return fields[0]; llvm::Constant *Res = llvm::ConstantStruct::getAnon(fields); assert(Res->getType() == ConvertMemberPointerType(MPT)); return Res; } llvm::Constant * MicrosoftCXXABI::EmitFullMemberPointer(llvm::Constant *FirstField, bool IsMemberFunction, const CXXRecordDecl *RD, CharUnits NonVirtualBaseAdjustment, unsigned VBTableIndex) { MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); // Single inheritance class member pointers are represented as scalars instead // of aggregates. if (MSInheritanceAttr::hasOnlyOneField(IsMemberFunction, Inheritance)) return FirstField; llvm::SmallVector<llvm::Constant *, 4> fields; fields.push_back(FirstField); if (MSInheritanceAttr::hasNVOffsetField(IsMemberFunction, Inheritance)) fields.push_back(llvm::ConstantInt::get( CGM.IntTy, NonVirtualBaseAdjustment.getQuantity())); if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) { CharUnits Offs = CharUnits::Zero(); if (VBTableIndex) Offs = getContext().getASTRecordLayout(RD).getVBPtrOffset(); fields.push_back(llvm::ConstantInt::get(CGM.IntTy, Offs.getQuantity())); } // The rest of the fields are adjusted by conversions to a more derived class.
if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance)) fields.push_back(llvm::ConstantInt::get(CGM.IntTy, VBTableIndex)); return llvm::ConstantStruct::getAnon(fields); } llvm::Constant * MicrosoftCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT, CharUnits offset) { const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); if (RD->getMSInheritanceModel() == MSInheritanceAttr::Keyword_virtual_inheritance) offset -= getContext().getOffsetOfBaseWithVBPtr(RD); llvm::Constant *FirstField = llvm::ConstantInt::get(CGM.IntTy, offset.getQuantity()); return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/false, RD, CharUnits::Zero(), /*VBTableIndex=*/0); } llvm::Constant *MicrosoftCXXABI::EmitMemberPointer(const APValue &MP, QualType MPType) { const MemberPointerType *DstTy = MPType->castAs<MemberPointerType>(); const ValueDecl *MPD = MP.getMemberPointerDecl(); if (!MPD) return EmitNullMemberPointer(DstTy); ASTContext &Ctx = getContext(); ArrayRef<const CXXRecordDecl *> MemberPointerPath = MP.getMemberPointerPath(); llvm::Constant *C; if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) { C = EmitMemberFunctionPointer(MD); } else { CharUnits FieldOffset = Ctx.toCharUnitsFromBits(Ctx.getFieldOffset(MPD)); C = EmitMemberDataPointer(DstTy, FieldOffset); } if (!MemberPointerPath.empty()) { const CXXRecordDecl *SrcRD = cast<CXXRecordDecl>(MPD->getDeclContext()); const Type *SrcRecTy = Ctx.getTypeDeclType(SrcRD).getTypePtr(); const MemberPointerType *SrcTy = Ctx.getMemberPointerType(DstTy->getPointeeType(), SrcRecTy) ->castAs<MemberPointerType>(); bool DerivedMember = MP.isMemberPointerToDerivedMember(); SmallVector<const CXXBaseSpecifier *, 4> DerivedToBasePath; const CXXRecordDecl *PrevRD = SrcRD; for (const CXXRecordDecl *PathElem : MemberPointerPath) { const CXXRecordDecl *Base = nullptr; const CXXRecordDecl *Derived = nullptr; if (DerivedMember) { Base = PathElem; Derived = PrevRD; } else { Base = PrevRD; Derived = PathElem; } for (const CXXBaseSpecifier &BS : Derived->bases()) if (BS.getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base->getCanonicalDecl()) DerivedToBasePath.push_back(&BS); PrevRD = PathElem; } assert(DerivedToBasePath.size() == MemberPointerPath.size()); CastKind CK = DerivedMember ? CK_DerivedToBaseMemberPointer : CK_BaseToDerivedMemberPointer; C = EmitMemberPointerConversion(SrcTy, DstTy, CK, DerivedToBasePath.begin(), DerivedToBasePath.end(), C); } return C; } llvm::Constant * MicrosoftCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) { assert(MD->isInstance() && "Member function must not be static!"); MD = MD->getCanonicalDecl(); CharUnits NonVirtualBaseAdjustment = CharUnits::Zero(); const CXXRecordDecl *RD = MD->getParent()->getMostRecentDecl(); CodeGenTypes &Types = CGM.getTypes(); unsigned VBTableIndex = 0; llvm::Constant *FirstField; const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>(); if (!MD->isVirtual()) { llvm::Type *Ty; // Check whether the function has a computable LLVM signature. if (Types.isFuncTypeConvertible(FPT)) { // The function has a computable LLVM signature; use the correct type. Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD)); } else { // Use an arbitrary non-function type to tell GetAddrOfFunction that the // function type is incomplete. 
Ty = CGM.PtrDiffTy; } FirstField = CGM.GetAddrOfFunction(MD, Ty); } else { auto &VTableContext = CGM.getMicrosoftVTableContext(); MicrosoftVTableContext::MethodVFTableLocation ML = VTableContext.getMethodVFTableLocation(MD); FirstField = EmitVirtualMemPtrThunk(MD, ML); // Include the vfptr adjustment if the method is in a non-primary vftable. NonVirtualBaseAdjustment += ML.VFPtrOffset; if (ML.VBase) VBTableIndex = VTableContext.getVBTableIndex(RD, ML.VBase) * 4; } if (VBTableIndex == 0 && RD->getMSInheritanceModel() == MSInheritanceAttr::Keyword_virtual_inheritance) NonVirtualBaseAdjustment -= getContext().getOffsetOfBaseWithVBPtr(RD); // The rest of the fields are common with data member pointers. FirstField = llvm::ConstantExpr::getBitCast(FirstField, CGM.VoidPtrTy); return EmitFullMemberPointer(FirstField, /*IsMemberFunction=*/true, RD, NonVirtualBaseAdjustment, VBTableIndex); } /// Member pointers are the same if they're either bitwise identical *or* both /// null. Null-ness for function members is determined by the first field, /// while for data member pointers we must compare all fields. llvm::Value * MicrosoftCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality) { CGBuilderTy &Builder = CGF.Builder; // Handle != comparisons by switching the sense of all boolean operations. llvm::ICmpInst::Predicate Eq; llvm::Instruction::BinaryOps And, Or; if (Inequality) { Eq = llvm::ICmpInst::ICMP_NE; And = llvm::Instruction::Or; Or = llvm::Instruction::And; } else { Eq = llvm::ICmpInst::ICMP_EQ; And = llvm::Instruction::And; Or = llvm::Instruction::Or; } // If this is a single field member pointer (single inheritance), this is a // single icmp. const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); if (MSInheritanceAttr::hasOnlyOneField(MPT->isMemberFunctionPointer(), Inheritance)) return Builder.CreateICmp(Eq, L, R); // Compare the first field. llvm::Value *L0 = Builder.CreateExtractValue(L, 0, "lhs.0"); llvm::Value *R0 = Builder.CreateExtractValue(R, 0, "rhs.0"); llvm::Value *Cmp0 = Builder.CreateICmp(Eq, L0, R0, "memptr.cmp.first"); // Compare everything other than the first field. llvm::Value *Res = nullptr; llvm::StructType *LType = cast<llvm::StructType>(L->getType()); for (unsigned I = 1, E = LType->getNumElements(); I != E; ++I) { llvm::Value *LF = Builder.CreateExtractValue(L, I); llvm::Value *RF = Builder.CreateExtractValue(R, I); llvm::Value *Cmp = Builder.CreateICmp(Eq, LF, RF, "memptr.cmp.rest"); if (Res) Res = Builder.CreateBinOp(And, Res, Cmp); else Res = Cmp; } // Check if the first field is 0 if this is a function pointer. if (MPT->isMemberFunctionPointer()) { // (l1 == r1 && ...) || l0 == 0 llvm::Value *Zero = llvm::Constant::getNullValue(L0->getType()); llvm::Value *IsZero = Builder.CreateICmp(Eq, L0, Zero, "memptr.cmp.iszero"); Res = Builder.CreateBinOp(Or, Res, IsZero); } // Combine the comparison of the first field, which must always be true for // this comparison to succeed. return Builder.CreateBinOp(And, Res, Cmp0, "memptr.cmp"); } llvm::Value * MicrosoftCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT) { CGBuilderTy &Builder = CGF.Builder; llvm::SmallVector<llvm::Constant *, 4> fields; // We only need one field for member functions.
if (MPT->isMemberFunctionPointer()) fields.push_back(llvm::Constant::getNullValue(CGM.VoidPtrTy)); else GetNullMemberPointerFields(MPT, fields); assert(!fields.empty()); llvm::Value *FirstField = MemPtr; if (MemPtr->getType()->isStructTy()) FirstField = Builder.CreateExtractValue(MemPtr, 0); llvm::Value *Res = Builder.CreateICmpNE(FirstField, fields[0], "memptr.cmp0"); // For function member pointers, we only need to test the function pointer // field. The other fields if any can be garbage. if (MPT->isMemberFunctionPointer()) return Res; // Otherwise, emit a series of compares and combine the results. for (int I = 1, E = fields.size(); I < E; ++I) { llvm::Value *Field = Builder.CreateExtractValue(MemPtr, I); llvm::Value *Next = Builder.CreateICmpNE(Field, fields[I], "memptr.cmp"); Res = Builder.CreateOr(Res, Next, "memptr.tobool"); } return Res; } bool MicrosoftCXXABI::MemberPointerConstantIsNull(const MemberPointerType *MPT, llvm::Constant *Val) { // Function pointers are null if the pointer in the first field is null. if (MPT->isMemberFunctionPointer()) { llvm::Constant *FirstField = Val->getType()->isStructTy() ? Val->getAggregateElement(0U) : Val; return FirstField->isNullValue(); } // If it's not a function pointer and it's zero initializable, we can easily // check zero. if (isZeroInitializable(MPT) && Val->isNullValue()) return true; // Otherwise, break down all the fields for comparison. Hopefully these // little Constants are reused, while a big null struct might not be. llvm::SmallVector<llvm::Constant *, 4> Fields; GetNullMemberPointerFields(MPT, Fields); if (Fields.size() == 1) { assert(Val->getType()->isIntegerTy()); return Val == Fields[0]; } unsigned I, E; for (I = 0, E = Fields.size(); I != E; ++I) { if (Val->getAggregateElement(I) != Fields[I]) break; } return I == E; } llvm::Value * MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF, llvm::Value *This, llvm::Value *VBPtrOffset, llvm::Value *VBTableOffset, llvm::Value **VBPtrOut) { CGBuilderTy &Builder = CGF.Builder; // Load the vbtable pointer from the vbptr in the instance. This = Builder.CreateBitCast(This, CGM.Int8PtrTy); llvm::Value *VBPtr = Builder.CreateInBoundsGEP(This, VBPtrOffset, "vbptr"); if (VBPtrOut) *VBPtrOut = VBPtr; VBPtr = Builder.CreateBitCast(VBPtr, CGM.Int32Ty->getPointerTo(0)->getPointerTo(0)); llvm::Value *VBTable = Builder.CreateLoad(VBPtr, "vbtable"); // Translate from byte offset to table index. It improves analyzability. llvm::Value *VBTableIndex = Builder.CreateAShr( VBTableOffset, llvm::ConstantInt::get(VBTableOffset->getType(), 2), "vbtindex", /*isExact=*/true); // Load an i32 offset from the vb-table. llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex); VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0)); return Builder.CreateLoad(VBaseOffs, "vbase_offs"); } // Returns an adjusted base cast to i8*, since we do more address arithmetic on // it. llvm::Value *MicrosoftCXXABI::AdjustVirtualBase( CodeGenFunction &CGF, const Expr *E, const CXXRecordDecl *RD, llvm::Value *Base, llvm::Value *VBTableOffset, llvm::Value *VBPtrOffset) { CGBuilderTy &Builder = CGF.Builder; Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy); llvm::BasicBlock *OriginalBB = nullptr; llvm::BasicBlock *SkipAdjustBB = nullptr; llvm::BasicBlock *VBaseAdjustBB = nullptr; // In the unspecified inheritance model, there might not be a vbtable at all, // in which case we need to skip the virtual base lookup. 
// If there is a vbtable, the first entry is a no-op entry that gives back the original base, so look for a virtual base adjustment offset of zero. if (VBPtrOffset) { OriginalBB = Builder.GetInsertBlock(); VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust"); SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust"); llvm::Value *IsVirtual = Builder.CreateICmpNE(VBTableOffset, getZeroInt(), "memptr.is_vbase"); Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB); CGF.EmitBlock(VBaseAdjustBB); } // If we weren't given a dynamic vbptr offset, RD should be complete and we'll // know the vbptr offset. if (!VBPtrOffset) { CharUnits offs = CharUnits::Zero(); if (!RD->hasDefinition()) { DiagnosticsEngine &Diags = CGF.CGM.getDiags(); unsigned DiagID = Diags.getCustomDiagID( DiagnosticsEngine::Error, "member pointer representation requires a " "complete class type for %0 to perform this expression"); Diags.Report(E->getExprLoc(), DiagID) << RD << E->getSourceRange(); } else if (RD->getNumVBases()) offs = getContext().getASTRecordLayout(RD).getVBPtrOffset(); VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity()); } llvm::Value *VBPtr = nullptr; llvm::Value *VBaseOffs = GetVBaseOffsetFromVBPtr(CGF, Base, VBPtrOffset, VBTableOffset, &VBPtr); llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs); // Merge control flow with the case where we didn't have to adjust. if (VBaseAdjustBB) { Builder.CreateBr(SkipAdjustBB); CGF.EmitBlock(SkipAdjustBB); llvm::PHINode *Phi = Builder.CreatePHI(CGM.Int8PtrTy, 2, "memptr.base"); Phi->addIncoming(Base, OriginalBB); Phi->addIncoming(AdjustedBase, VBaseAdjustBB); return Phi; } return AdjustedBase; } llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress( CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr, const MemberPointerType *MPT) { assert(MPT->isMemberDataPointer()); unsigned AS = Base->getType()->getPointerAddressSpace(); llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS); CGBuilderTy &Builder = CGF.Builder; const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); // Extract the fields we need, regardless of model. We'll apply them if we // have them. llvm::Value *FieldOffset = MemPtr; llvm::Value *VirtualBaseAdjustmentOffset = nullptr; llvm::Value *VBPtrOffset = nullptr; if (MemPtr->getType()->isStructTy()) { // We need to extract values. unsigned I = 0; FieldOffset = Builder.CreateExtractValue(MemPtr, I++); if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++); if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance)) VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++); } if (VirtualBaseAdjustmentOffset) { Base = AdjustVirtualBase(CGF, E, RD, Base, VirtualBaseAdjustmentOffset, VBPtrOffset); } // Cast to char*. Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS)); // Apply the offset, which we assume is non-null. llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, FieldOffset, "memptr.offset"); // Cast the address to the appropriate pointer type, adopting the address // space of the base pointer.
return Builder.CreateBitCast(Addr, PType); } llvm::Value * MicrosoftCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src) { assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || E->getCastKind() == CK_BaseToDerivedMemberPointer || E->getCastKind() == CK_ReinterpretMemberPointer); // Use constant emission if we can. if (isa<llvm::Constant>(Src)) return EmitMemberPointerConversion(E, cast<llvm::Constant>(Src)); // We may be adding or dropping fields from the member pointer, so we need // both types and the inheritance models of both records. const MemberPointerType *SrcTy = E->getSubExpr()->getType()->castAs<MemberPointerType>(); const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>(); bool IsFunc = SrcTy->isMemberFunctionPointer(); // If the classes use the same null representation, reinterpret_cast is a nop. bool IsReinterpret = E->getCastKind() == CK_ReinterpretMemberPointer; if (IsReinterpret && IsFunc) return Src; CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl(); CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl(); if (IsReinterpret && SrcRD->nullFieldOffsetIsZero() == DstRD->nullFieldOffsetIsZero()) return Src; CGBuilderTy &Builder = CGF.Builder; // Branch past the conversion if Src is null. llvm::Value *IsNotNull = EmitMemberPointerIsNotNull(CGF, Src, SrcTy); llvm::Constant *DstNull = EmitNullMemberPointer(DstTy); // C++ 5.2.10p9: The null member pointer value is converted to the null member // pointer value of the destination type. if (IsReinterpret) { // For reinterpret casts, sema ensures that src and dst are both functions // or data and have the same size, which means the LLVM types should match. assert(Src->getType() == DstNull->getType()); return Builder.CreateSelect(IsNotNull, Src, DstNull); } llvm::BasicBlock *OriginalBB = Builder.GetInsertBlock(); llvm::BasicBlock *ConvertBB = CGF.createBasicBlock("memptr.convert"); llvm::BasicBlock *ContinueBB = CGF.createBasicBlock("memptr.converted"); Builder.CreateCondBr(IsNotNull, ConvertBB, ContinueBB); CGF.EmitBlock(ConvertBB); llvm::Value *Dst = EmitNonNullMemberPointerConversion( SrcTy, DstTy, E->getCastKind(), E->path_begin(), E->path_end(), Src, Builder); Builder.CreateBr(ContinueBB); // In the continuation, choose between DstNull and Dst. CGF.EmitBlock(ContinueBB); llvm::PHINode *Phi = Builder.CreatePHI(DstNull->getType(), 2, "memptr.converted"); Phi->addIncoming(DstNull, OriginalBB); Phi->addIncoming(Dst, ConvertBB); return Phi; } llvm::Value *MicrosoftCXXABI::EmitNonNullMemberPointerConversion( const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, llvm::Value *Src, CGBuilderTy &Builder) { const CXXRecordDecl *SrcRD = SrcTy->getMostRecentCXXRecordDecl(); const CXXRecordDecl *DstRD = DstTy->getMostRecentCXXRecordDecl(); MSInheritanceAttr::Spelling SrcInheritance = SrcRD->getMSInheritanceModel(); MSInheritanceAttr::Spelling DstInheritance = DstRD->getMSInheritanceModel(); bool IsFunc = SrcTy->isMemberFunctionPointer(); bool IsConstant = isa<llvm::Constant>(Src); // Decompose src. llvm::Value *FirstField = Src; llvm::Value *NonVirtualBaseAdjustment = getZeroInt(); llvm::Value *VirtualBaseAdjustmentOffset = getZeroInt(); llvm::Value *VBPtrOffset = getZeroInt(); if (!MSInheritanceAttr::hasOnlyOneField(IsFunc, SrcInheritance)) { // We need to extract values. 
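// Sketch of the aggregate being decomposed here, inferred from the extraction // order below: { FirstField, [NonVirtualBaseAdjustment], [VBPtrOffset], // [VirtualBaseAdjustmentOffset] }, where each bracketed field is present only // when the source inheritance model requires it.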
unsigned I = 0; FirstField = Builder.CreateExtractValue(Src, I++); if (MSInheritanceAttr::hasNVOffsetField(IsFunc, SrcInheritance)) NonVirtualBaseAdjustment = Builder.CreateExtractValue(Src, I++); if (MSInheritanceAttr::hasVBPtrOffsetField(SrcInheritance)) VBPtrOffset = Builder.CreateExtractValue(Src, I++); if (MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance)) VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(Src, I++); } bool IsDerivedToBase = (CK == CK_DerivedToBaseMemberPointer); const MemberPointerType *DerivedTy = IsDerivedToBase ? SrcTy : DstTy; const CXXRecordDecl *DerivedClass = DerivedTy->getMostRecentCXXRecordDecl(); // For data pointers, we adjust the field offset directly. For functions, we // have a separate field. llvm::Value *&NVAdjustField = IsFunc ? NonVirtualBaseAdjustment : FirstField; // The virtual inheritance model has a quirk: the virtual base table is always // referenced when dereferencing a member pointer even if the member pointer // is non-virtual. This is accounted for by adjusting the non-virtual offset // to point backwards to the top of the MDC from the first VBase. Undo this // adjustment to normalize the member pointer. llvm::Value *SrcVBIndexEqZero = Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt()); if (SrcInheritance == MSInheritanceAttr::Keyword_virtual_inheritance) { if (int64_t SrcOffsetToFirstVBase = getContext().getOffsetOfBaseWithVBPtr(SrcRD).getQuantity()) { llvm::Value *UndoSrcAdjustment = Builder.CreateSelect( SrcVBIndexEqZero, llvm::ConstantInt::get(CGM.IntTy, SrcOffsetToFirstVBase), getZeroInt()); NVAdjustField = Builder.CreateNSWAdd(NVAdjustField, UndoSrcAdjustment); } } // A non-zero vbindex implies that we are dealing with a source member in a // floating virtual base in addition to some non-virtual offset. If the // vbindex is zero, we are dealing with a source that exists in a non-virtual, // fixed, base. The difference between these two cases is that the vbindex + // nvoffset *always* point to the member regardless of what context they are // evaluated in so long as the vbindex is adjusted. A member inside a fixed // base requires explicit nv adjustment. llvm::Constant *BaseClassOffset = llvm::ConstantInt::get( CGM.IntTy, CGM.computeNonVirtualBaseClassOffset(DerivedClass, PathBegin, PathEnd) .getQuantity()); llvm::Value *NVDisp; if (IsDerivedToBase) NVDisp = Builder.CreateNSWSub(NVAdjustField, BaseClassOffset, "adj"); else NVDisp = Builder.CreateNSWAdd(NVAdjustField, BaseClassOffset, "adj"); NVAdjustField = Builder.CreateSelect(SrcVBIndexEqZero, NVDisp, getZeroInt()); // Update the vbindex to an appropriate value in the destination because // SrcRD's vbtable might not be a strict prefix of the one in DstRD. 
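// Worked example (illustrative): a source vbtable byte offset of 8 becomes // vbtable index 8 / 4 = 2, and entry 2 of the virtual displacement map below // supplies the byte offset to use against the destination's vbtable.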
llvm::Value *DstVBIndexEqZero = SrcVBIndexEqZero; if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance) && MSInheritanceAttr::hasVBTableOffsetField(SrcInheritance)) { if (llvm::GlobalVariable *VDispMap = getAddrOfVirtualDisplacementMap(SrcRD, DstRD)) { llvm::Value *VBIndex = Builder.CreateExactUDiv( VirtualBaseAdjustmentOffset, llvm::ConstantInt::get(CGM.IntTy, 4)); if (IsConstant) { llvm::Constant *Mapping = VDispMap->getInitializer(); VirtualBaseAdjustmentOffset = Mapping->getAggregateElement(cast<llvm::Constant>(VBIndex)); } else { llvm::Value *Idxs[] = {getZeroInt(), VBIndex}; VirtualBaseAdjustmentOffset = Builder.CreateLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs)); } DstVBIndexEqZero = Builder.CreateICmpEQ(VirtualBaseAdjustmentOffset, getZeroInt()); } } // Set the VBPtrOffset to zero if the vbindex is zero. Otherwise, initialize // it to the offset of the vbptr. if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance)) { llvm::Value *DstVBPtrOffset = llvm::ConstantInt::get( CGM.IntTy, getContext().getASTRecordLayout(DstRD).getVBPtrOffset().getQuantity()); VBPtrOffset = Builder.CreateSelect(DstVBIndexEqZero, getZeroInt(), DstVBPtrOffset); } // Likewise, apply a similar adjustment so that dereferencing the member // pointer correctly accounts for the distance between the start of the first // virtual base and the top of the MDC. if (DstInheritance == MSInheritanceAttr::Keyword_virtual_inheritance) { if (int64_t DstOffsetToFirstVBase = getContext().getOffsetOfBaseWithVBPtr(DstRD).getQuantity()) { llvm::Value *DoDstAdjustment = Builder.CreateSelect( DstVBIndexEqZero, llvm::ConstantInt::get(CGM.IntTy, DstOffsetToFirstVBase), getZeroInt()); NVAdjustField = Builder.CreateNSWSub(NVAdjustField, DoDstAdjustment); } } // Recompose dst from the null struct and the adjusted fields from src. llvm::Value *Dst; if (MSInheritanceAttr::hasOnlyOneField(IsFunc, DstInheritance)) { Dst = FirstField; } else { Dst = llvm::UndefValue::get(ConvertMemberPointerType(DstTy)); unsigned Idx = 0; Dst = Builder.CreateInsertValue(Dst, FirstField, Idx++); if (MSInheritanceAttr::hasNVOffsetField(IsFunc, DstInheritance)) Dst = Builder.CreateInsertValue(Dst, NonVirtualBaseAdjustment, Idx++); if (MSInheritanceAttr::hasVBPtrOffsetField(DstInheritance)) Dst = Builder.CreateInsertValue(Dst, VBPtrOffset, Idx++); if (MSInheritanceAttr::hasVBTableOffsetField(DstInheritance)) Dst = Builder.CreateInsertValue(Dst, VirtualBaseAdjustmentOffset, Idx++); } return Dst; } llvm::Constant * MicrosoftCXXABI::EmitMemberPointerConversion(const CastExpr *E, llvm::Constant *Src) { const MemberPointerType *SrcTy = E->getSubExpr()->getType()->castAs<MemberPointerType>(); const MemberPointerType *DstTy = E->getType()->castAs<MemberPointerType>(); CastKind CK = E->getCastKind(); return EmitMemberPointerConversion(SrcTy, DstTy, CK, E->path_begin(), E->path_end(), Src); } llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion( const MemberPointerType *SrcTy, const MemberPointerType *DstTy, CastKind CK, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, llvm::Constant *Src) { assert(CK == CK_DerivedToBaseMemberPointer || CK == CK_BaseToDerivedMemberPointer || CK == CK_ReinterpretMemberPointer); // If src is null, emit a new null for dst. We can't return src because dst // might have a new representation. if (MemberPointerConstantIsNull(SrcTy, Src)) return EmitNullMemberPointer(DstTy); // We don't need to do anything for reinterpret_casts of non-null member // pointers. 
We should only get here when the two type representations have // the same size. if (CK == CK_ReinterpretMemberPointer) return Src; CGBuilderTy Builder(CGM.getLLVMContext()); auto *Dst = cast<llvm::Constant>(EmitNonNullMemberPointerConversion( SrcTy, DstTy, CK, PathBegin, PathEnd, Src, Builder)); return Dst; } llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer( CodeGenFunction &CGF, const Expr *E, llvm::Value *&This, llvm::Value *MemPtr, const MemberPointerType *MPT) { assert(MPT->isMemberFunctionPointer()); const FunctionProtoType *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>(); const CXXRecordDecl *RD = MPT->getMostRecentCXXRecordDecl(); llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType( CGM.getTypes().arrangeCXXMethodType(RD, FPT)); CGBuilderTy &Builder = CGF.Builder; MSInheritanceAttr::Spelling Inheritance = RD->getMSInheritanceModel(); // Extract the fields we need, regardless of model. We'll apply them if we // have them. llvm::Value *FunctionPointer = MemPtr; llvm::Value *NonVirtualBaseAdjustment = nullptr; llvm::Value *VirtualBaseAdjustmentOffset = nullptr; llvm::Value *VBPtrOffset = nullptr; if (MemPtr->getType()->isStructTy()) { // We need to extract values. unsigned I = 0; FunctionPointer = Builder.CreateExtractValue(MemPtr, I++); if (MSInheritanceAttr::hasNVOffsetField(MPT, Inheritance)) NonVirtualBaseAdjustment = Builder.CreateExtractValue(MemPtr, I++); if (MSInheritanceAttr::hasVBPtrOffsetField(Inheritance)) VBPtrOffset = Builder.CreateExtractValue(MemPtr, I++); if (MSInheritanceAttr::hasVBTableOffsetField(Inheritance)) VirtualBaseAdjustmentOffset = Builder.CreateExtractValue(MemPtr, I++); } if (VirtualBaseAdjustmentOffset) { This = AdjustVirtualBase(CGF, E, RD, This, VirtualBaseAdjustmentOffset, VBPtrOffset); } if (NonVirtualBaseAdjustment) { // Apply the adjustment and cast back to the original struct type. llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy()); Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment); This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted"); } return Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo()); } CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) { return new MicrosoftCXXABI(CGM); } // MS RTTI Overview: // The run time type information emitted by cl.exe contains 5 distinct types of // structures. Many of them reference each other. // // TypeInfo: Static classes that are returned by typeid. // // CompleteObjectLocator: Referenced by vftables. They contain information // required for dynamic casting, including OffsetFromTop. They also contain // a reference to the TypeInfo for the type and a reference to the // ClassHierarchyDescriptor for the type. // // ClassHierarchyDescriptor: Contains information about a class hierarchy. // Used during dynamic_cast to walk a class hierarchy. References a base // class array and the size of said array. // // BaseClassArray: Contains a list of classes in a hierarchy. BaseClassArray is // somewhat of a misnomer because the most derived class is also in the list // as well as multiple copies of virtual bases (if they occur multiple times // in the hierarchy.) The BaseClassArray contains one BaseClassDescriptor for // every path in the hierarchy, in pre-order depth first order. Note, we do // not declare a specific llvm type for BaseClassArray, it's merely an array // of BaseClassDescriptor pointers. // // BaseClassDescriptor: Contains information about a class in a class hierarchy. 
// BaseClassDescriptor is also somewhat of a misnomer for the same reason that // BaseClassArray is. It contains information about a class within a // hierarchy, such as whether this base is ambiguous and what its offset in // the vbtable is. The names of the BaseClassDescriptors have all of their fields // mangled into them so they can be aggressively deduplicated by the linker. static llvm::GlobalVariable *getTypeInfoVTable(CodeGenModule &CGM) { StringRef MangledName("\01??_7type_info@@6B@"); if (auto VTable = CGM.getModule().getNamedGlobal(MangledName)) return VTable; return new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy, /*Constant=*/true, llvm::GlobalVariable::ExternalLinkage, /*Initializer=*/nullptr, MangledName); } namespace { /// \brief A helper struct that stores information about a class in a class /// hierarchy. The information stored in these structs is used during /// the generation of ClassHierarchyDescriptors and BaseClassDescriptors. // During RTTI creation, MSRTTIClasses are stored in a contiguous array with // implicit depth first pre-order tree connectivity. getFirstChild and // getNextSibling allow us to walk the tree efficiently. struct MSRTTIClass { enum { IsPrivateOnPath = 1 | 8, IsAmbiguous = 2, IsPrivate = 4, IsVirtual = 16, HasHierarchyDescriptor = 64 }; MSRTTIClass(const CXXRecordDecl *RD) : RD(RD) {} uint32_t initialize(const MSRTTIClass *Parent, const CXXBaseSpecifier *Specifier); MSRTTIClass *getFirstChild() { return this + 1; } static MSRTTIClass *getNextChild(MSRTTIClass *Child) { return Child + 1 + Child->NumBases; } const CXXRecordDecl *RD, *VirtualRoot; uint32_t Flags, NumBases, OffsetInVBase; }; /// \brief Recursively initialize the base class array. uint32_t MSRTTIClass::initialize(const MSRTTIClass *Parent, const CXXBaseSpecifier *Specifier) { Flags = HasHierarchyDescriptor; if (!Parent) { VirtualRoot = nullptr; OffsetInVBase = 0; } else { if (Specifier->getAccessSpecifier() != AS_public) Flags |= IsPrivate | IsPrivateOnPath; if (Specifier->isVirtual()) { Flags |= IsVirtual; VirtualRoot = RD; OffsetInVBase = 0; } else { if (Parent->Flags & IsPrivateOnPath) Flags |= IsPrivateOnPath; VirtualRoot = Parent->VirtualRoot; OffsetInVBase = Parent->OffsetInVBase + RD->getASTContext() .getASTRecordLayout(Parent->RD).getBaseClassOffset(RD).getQuantity(); } } NumBases = 0; MSRTTIClass *Child = getFirstChild(); for (const CXXBaseSpecifier &Base : RD->bases()) { NumBases += Child->initialize(this, &Base) + 1; Child = getNextChild(Child); } return NumBases; } static llvm::GlobalValue::LinkageTypes getLinkageForRTTI(QualType Ty) { switch (Ty->getLinkage()) { case NoLinkage: case InternalLinkage: case UniqueExternalLinkage: return llvm::GlobalValue::InternalLinkage; case VisibleNoLinkage: case ExternalLinkage: return llvm::GlobalValue::LinkOnceODRLinkage; } llvm_unreachable("Invalid linkage!"); } /// \brief An ephemeral helper class for building MS RTTI types. It caches some /// calls to the module and information about the most derived class in a /// hierarchy. 
struct MSRTTIBuilder { enum { HasBranchingHierarchy = 1, HasVirtualBranchingHierarchy = 2, HasAmbiguousBases = 4 }; MSRTTIBuilder(MicrosoftCXXABI &ABI, const CXXRecordDecl *RD) : CGM(ABI.CGM), Context(CGM.getContext()), VMContext(CGM.getLLVMContext()), Module(CGM.getModule()), RD(RD), Linkage(getLinkageForRTTI(CGM.getContext().getTagDeclType(RD))), ABI(ABI) {} llvm::GlobalVariable *getBaseClassDescriptor(const MSRTTIClass &Classes); llvm::GlobalVariable * getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes); llvm::GlobalVariable *getClassHierarchyDescriptor(); llvm::GlobalVariable *getCompleteObjectLocator(const VPtrInfo *Info); CodeGenModule &CGM; ASTContext &Context; llvm::LLVMContext &VMContext; llvm::Module &Module; const CXXRecordDecl *RD; llvm::GlobalVariable::LinkageTypes Linkage; MicrosoftCXXABI &ABI; }; } // namespace /// \brief Recursively serializes a class hierarchy in pre-order depth first /// order. static void serializeClassHierarchy(SmallVectorImpl<MSRTTIClass> &Classes, const CXXRecordDecl *RD) { Classes.push_back(MSRTTIClass(RD)); for (const CXXBaseSpecifier &Base : RD->bases()) serializeClassHierarchy(Classes, Base.getType()->getAsCXXRecordDecl()); } /// \brief Find ambiguity among base classes. static void detectAmbiguousBases(SmallVectorImpl<MSRTTIClass> &Classes) { llvm::SmallPtrSet<const CXXRecordDecl *, 8> VirtualBases; llvm::SmallPtrSet<const CXXRecordDecl *, 8> UniqueBases; llvm::SmallPtrSet<const CXXRecordDecl *, 8> AmbiguousBases; for (MSRTTIClass *Class = &Classes.front(); Class <= &Classes.back();) { if ((Class->Flags & MSRTTIClass::IsVirtual) && !VirtualBases.insert(Class->RD).second) { Class = MSRTTIClass::getNextChild(Class); continue; } if (!UniqueBases.insert(Class->RD).second) AmbiguousBases.insert(Class->RD); Class++; } if (AmbiguousBases.empty()) return; for (MSRTTIClass &Class : Classes) if (AmbiguousBases.count(Class.RD)) Class.Flags |= MSRTTIClass::IsAmbiguous; } llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() { SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); ABI.getMangleContext().mangleCXXRTTIClassHierarchyDescriptor(RD, Out); } // Check to see if we've already declared this ClassHierarchyDescriptor. if (auto CHD = Module.getNamedGlobal(MangledName)) return CHD; // Serialize the class hierarchy and initialize the CHD Fields. SmallVector<MSRTTIClass, 8> Classes; serializeClassHierarchy(Classes, RD); Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr); detectAmbiguousBases(Classes); int Flags = 0; for (auto Class : Classes) { if (Class.RD->getNumBases() > 1) Flags |= HasBranchingHierarchy; // Note: cl.exe does not calculate "HasAmbiguousBases" correctly. We // believe the field isn't actually used. if (Class.Flags & MSRTTIClass::IsAmbiguous) Flags |= HasAmbiguousBases; } if ((Flags & HasBranchingHierarchy) && RD->getNumVBases() != 0) Flags |= HasVirtualBranchingHierarchy; // These gep indices are used to get the address of the first element of the // base class array. 
llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0), llvm::ConstantInt::get(CGM.IntTy, 0)}; // Forward-declare the class hierarchy descriptor auto Type = ABI.getClassHierarchyDescriptorType(); auto CHD = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage, /*Initializer=*/nullptr, StringRef(MangledName)); if (CHD->isWeakForLinker()) CHD->setComdat(CGM.getModule().getOrInsertComdat(CHD->getName())); auto *Bases = getBaseClassArray(Classes); // Initialize the base class ClassHierarchyDescriptor. llvm::Constant *Fields[] = { llvm::ConstantInt::get(CGM.IntTy, 0), // Unknown llvm::ConstantInt::get(CGM.IntTy, Flags), llvm::ConstantInt::get(CGM.IntTy, Classes.size()), ABI.getImageRelativeConstant(llvm::ConstantExpr::getInBoundsGetElementPtr( Bases->getValueType(), Bases, llvm::ArrayRef<llvm::Value *>(GEPIndices))), }; CHD->setInitializer(llvm::ConstantStruct::get(Type, Fields)); return CHD; } llvm::GlobalVariable * MSRTTIBuilder::getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes) { SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); ABI.getMangleContext().mangleCXXRTTIBaseClassArray(RD, Out); } // Forward-declare the base class array. // cl.exe pads the base class array with 1 (in 32 bit mode) or 4 (in 64 bit // mode) bytes of padding. We provide a pointer sized amount of padding by // adding +1 to Classes.size(). The sections have pointer alignment and are // marked pick-any so it shouldn't matter. llvm::Type *PtrType = ABI.getImageRelativeType( ABI.getBaseClassDescriptorType()->getPointerTo()); auto *ArrType = llvm::ArrayType::get(PtrType, Classes.size() + 1); auto *BCA = new llvm::GlobalVariable(Module, ArrType, /*Constant=*/true, Linkage, /*Initializer=*/nullptr, StringRef(MangledName)); if (BCA->isWeakForLinker()) BCA->setComdat(CGM.getModule().getOrInsertComdat(BCA->getName())); // Initialize the BaseClassArray. SmallVector<llvm::Constant *, 8> BaseClassArrayData; for (MSRTTIClass &Class : Classes) BaseClassArrayData.push_back( ABI.getImageRelativeConstant(getBaseClassDescriptor(Class))); BaseClassArrayData.push_back(llvm::Constant::getNullValue(PtrType)); BCA->setInitializer(llvm::ConstantArray::get(ArrType, BaseClassArrayData)); return BCA; } llvm::GlobalVariable * MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) { // Compute the fields for the BaseClassDescriptor. They are computed up front // because they are mangled into the name of the object. uint32_t OffsetInVBTable = 0; int32_t VBPtrOffset = -1; if (Class.VirtualRoot) { auto &VTableContext = CGM.getMicrosoftVTableContext(); OffsetInVBTable = VTableContext.getVBTableIndex(RD, Class.VirtualRoot) * 4; VBPtrOffset = Context.getASTRecordLayout(RD).getVBPtrOffset().getQuantity(); } SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); ABI.getMangleContext().mangleCXXRTTIBaseClassDescriptor( Class.RD, Class.OffsetInVBase, VBPtrOffset, OffsetInVBTable, Class.Flags, Out); } // Check to see if we've already declared this object. if (auto BCD = Module.getNamedGlobal(MangledName)) return BCD; // Forward-declare the base class descriptor. auto Type = ABI.getBaseClassDescriptorType(); auto BCD = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage, /*Initializer=*/nullptr, StringRef(MangledName)); if (BCD->isWeakForLinker()) BCD->setComdat(CGM.getModule().getOrInsertComdat(BCD->getName())); // Initialize the BaseClassDescriptor. 
llvm::Constant *Fields[] = { ABI.getImageRelativeConstant( ABI.getAddrOfRTTIDescriptor(Context.getTypeDeclType(Class.RD))), llvm::ConstantInt::get(CGM.IntTy, Class.NumBases), llvm::ConstantInt::get(CGM.IntTy, Class.OffsetInVBase), llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset), llvm::ConstantInt::get(CGM.IntTy, OffsetInVBTable), llvm::ConstantInt::get(CGM.IntTy, Class.Flags), ABI.getImageRelativeConstant( MSRTTIBuilder(ABI, Class.RD).getClassHierarchyDescriptor()), }; BCD->setInitializer(llvm::ConstantStruct::get(Type, Fields)); return BCD; } llvm::GlobalVariable * MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo *Info) { SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); ABI.getMangleContext().mangleCXXRTTICompleteObjectLocator(RD, Info->MangledPath, Out); } // Check to see if we've already computed this complete object locator. if (auto COL = Module.getNamedGlobal(MangledName)) return COL; // Compute the fields of the complete object locator. int OffsetToTop = Info->FullOffsetInMDC.getQuantity(); int VFPtrOffset = 0; // The offset includes the vtordisp if one exists. if (const CXXRecordDecl *VBase = Info->getVBaseWithVPtr()) if (Context.getASTRecordLayout(RD) .getVBaseOffsetsMap() .find(VBase) ->second.hasVtorDisp()) VFPtrOffset = Info->NonVirtualOffset.getQuantity() + 4; // Forward-declare the complete object locator. llvm::StructType *Type = ABI.getCompleteObjectLocatorType(); auto COL = new llvm::GlobalVariable(Module, Type, /*Constant=*/true, Linkage, /*Initializer=*/nullptr, StringRef(MangledName)); // Initialize the CompleteObjectLocator. llvm::Constant *Fields[] = { llvm::ConstantInt::get(CGM.IntTy, ABI.isImageRelative()), llvm::ConstantInt::get(CGM.IntTy, OffsetToTop), llvm::ConstantInt::get(CGM.IntTy, VFPtrOffset), ABI.getImageRelativeConstant( CGM.GetAddrOfRTTIDescriptor(Context.getTypeDeclType(RD))), ABI.getImageRelativeConstant(getClassHierarchyDescriptor()), ABI.getImageRelativeConstant(COL), }; llvm::ArrayRef<llvm::Constant *> FieldsRef(Fields); if (!ABI.isImageRelative()) FieldsRef = FieldsRef.drop_back(); COL->setInitializer(llvm::ConstantStruct::get(Type, FieldsRef)); if (COL->isWeakForLinker()) COL->setComdat(CGM.getModule().getOrInsertComdat(COL->getName())); return COL; } static QualType decomposeTypeForEH(ASTContext &Context, QualType T, bool &IsConst, bool &IsVolatile) { T = Context.getExceptionObjectType(T); // C++14 [except.handle]p3: // A handler is a match for an exception object of type E if [...] // - the handler is of type cv T or const T& where T is a pointer type and // E is a pointer type that can be converted to T by [...] // - a qualification conversion IsConst = false; IsVolatile = false; QualType PointeeType = T->getPointeeType(); if (!PointeeType.isNull()) { IsConst = PointeeType.isConstQualified(); IsVolatile = PointeeType.isVolatileQualified(); } // Member pointer types like "const int A::*" are represented by having RTTI // for "int A::*" and separately storing the const qualifier. if (const auto *MPTy = T->getAs<MemberPointerType>()) T = Context.getMemberPointerType(PointeeType.getUnqualifiedType(), MPTy->getClass()); // Pointer types like "const int * const *" are represented by having RTTI // for "const int **" and separately storing the const qualifier. 
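// Example (illustrative): for a handler of type `const int *`, IsConst is set // from the pointee above and T is rewritten below to `int *`, so the RTTI // descriptor is emitted for the unqualified type while the qualifiers are // encoded separately in the Flags bitfield.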
if (T->isPointerType()) T = Context.getPointerType(PointeeType.getUnqualifiedType()); return T; } llvm::Constant * MicrosoftCXXABI::getAddrOfCXXCatchHandlerType(QualType Type, QualType CatchHandlerType) { // TypeDescriptors for exceptions never have qualified pointer types, // qualifiers are stored separately in order to support qualification // conversions. bool IsConst, IsVolatile; Type = decomposeTypeForEH(getContext(), Type, IsConst, IsVolatile); bool IsReference = CatchHandlerType->isReferenceType(); uint32_t Flags = 0; if (IsConst) Flags |= 1; if (IsVolatile) Flags |= 2; if (IsReference) Flags |= 8; SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); getMangleContext().mangleCXXCatchHandlerType(Type, Flags, Out); } if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName)) return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy); llvm::Constant *Fields[] = { llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags getAddrOfRTTIDescriptor(Type), // TypeDescriptor }; llvm::StructType *CatchHandlerTypeType = getCatchHandlerTypeType(); auto *Var = new llvm::GlobalVariable( CGM.getModule(), CatchHandlerTypeType, /*Constant=*/true, llvm::GlobalValue::PrivateLinkage, llvm::ConstantStruct::get(CatchHandlerTypeType, Fields), StringRef(MangledName)); Var->setUnnamedAddr(true); Var->setSection("llvm.metadata"); return Var; } /// \brief Gets a TypeDescriptor. Returns a llvm::Constant * rather than a /// llvm::GlobalVariable * because different type descriptors have different /// types, and need to be abstracted. They are abstracted by casting the /// address to an Int8PtrTy. llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) { SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); getMangleContext().mangleCXXRTTI(Type, Out); } // Check to see if we've already declared this TypeDescriptor. if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName)) return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy); // Compute the fields for the TypeDescriptor. SmallString<256> TypeInfoString; { llvm::raw_svector_ostream Out(TypeInfoString); getMangleContext().mangleCXXRTTIName(Type, Out); } // Declare and initialize the TypeDescriptor. llvm::Constant *Fields[] = { getTypeInfoVTable(CGM), // VFPtr llvm::ConstantPointerNull::get(CGM.Int8PtrTy), // Runtime data llvm::ConstantDataArray::getString(CGM.getLLVMContext(), TypeInfoString)}; llvm::StructType *TypeDescriptorType = getTypeDescriptorType(TypeInfoString); auto *Var = new llvm::GlobalVariable( CGM.getModule(), TypeDescriptorType, /*Constant=*/false, getLinkageForRTTI(Type), llvm::ConstantStruct::get(TypeDescriptorType, Fields), StringRef(MangledName)); if (Var->isWeakForLinker()) Var->setComdat(CGM.getModule().getOrInsertComdat(Var->getName())); return llvm::ConstantExpr::getBitCast(Var, CGM.Int8PtrTy); } /// \brief Gets or creates a Microsoft CompleteObjectLocator. llvm::GlobalVariable * MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD, const VPtrInfo *Info) { return MSRTTIBuilder(*this, RD).getCompleteObjectLocator(Info); } static void emitCXXConstructor(CodeGenModule &CGM, const CXXConstructorDecl *ctor, StructorType ctorType) { // There are no constructor variants, always emit the complete constructor. 
llvm::Function *Fn = CGM.codegenCXXStructor(ctor, StructorType::Complete); CGM.maybeSetTrivialComdat(*ctor, *Fn); } static void emitCXXDestructor(CodeGenModule &CGM, const CXXDestructorDecl *dtor, StructorType dtorType) { // The complete destructor is equivalent to the base destructor for // classes with no virtual bases, so try to emit it as an alias. if (!dtor->getParent()->getNumVBases() && (dtorType == StructorType::Complete || dtorType == StructorType::Base)) { bool ProducedAlias = !CGM.TryEmitDefinitionAsAlias( GlobalDecl(dtor, Dtor_Complete), GlobalDecl(dtor, Dtor_Base), true); if (ProducedAlias) { if (dtorType == StructorType::Complete) return; if (dtor->isVirtual()) CGM.getVTables().EmitThunks(GlobalDecl(dtor, Dtor_Complete)); } } // The base destructor is equivalent to the base destructor of its // base class if there is exactly one non-virtual base class with a // non-trivial destructor, there are no fields with a non-trivial // destructor, and the body of the destructor is trivial. if (dtorType == StructorType::Base && !CGM.TryEmitBaseDestructorAsAlias(dtor)) return; llvm::Function *Fn = CGM.codegenCXXStructor(dtor, dtorType); if (Fn->isWeakForLinker()) Fn->setComdat(CGM.getModule().getOrInsertComdat(Fn->getName())); } void MicrosoftCXXABI::emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) { if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { emitCXXConstructor(CGM, CD, Type); return; } emitCXXDestructor(CGM, cast<CXXDestructorDecl>(MD), Type); } llvm::Function * MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD, CXXCtorType CT) { assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); // Calculate the mangled name. SmallString<256> ThunkName; llvm::raw_svector_ostream Out(ThunkName); getMangleContext().mangleCXXCtor(CD, CT, Out); Out.flush(); // If the thunk has been generated previously, just return it. if (llvm::GlobalValue *GV = CGM.getModule().getNamedValue(ThunkName)) return cast<llvm::Function>(GV); // Create the llvm::Function. const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeMSCtorClosure(CD, CT); llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo); const CXXRecordDecl *RD = CD->getParent(); QualType RecordTy = getContext().getRecordType(RD); llvm::Function *ThunkFn = llvm::Function::Create( ThunkTy, getLinkageForRTTI(RecordTy), ThunkName.str(), &CGM.getModule()); ThunkFn->setCallingConv(static_cast<llvm::CallingConv::ID>( FnInfo.getEffectiveCallingConvention())); if (ThunkFn->isWeakForLinker()) ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName())); bool IsCopy = CT == Ctor_CopyingClosure; // Start codegen. CodeGenFunction CGF(CGM); CGF.CurGD = GlobalDecl(CD, Ctor_Complete); // Build FunctionArgs. FunctionArgList FunctionArgs; // A constructor always starts with a 'this' pointer as its first argument. buildThisParam(CGF, FunctionArgs); // Following the 'this' pointer is a reference to the source object that we // are copying from. ImplicitParamDecl SrcParam( getContext(), nullptr, SourceLocation(), &getContext().Idents.get("src"), getContext().getLValueReferenceType(RecordTy, /*SpelledAsLValue=*/true)); if (IsCopy) FunctionArgs.push_back(&SrcParam); // Constructors for classes which utilize virtual bases have an additional // parameter which indicates whether or not it is being delegated to by a more // derived constructor. 
ImplicitParamDecl IsMostDerived(getContext(), nullptr, SourceLocation(), &getContext().Idents.get("is_most_derived"), getContext().IntTy); // Only add the parameter to the list if this class has virtual bases. if (RD->getNumVBases() > 0) FunctionArgs.push_back(&IsMostDerived); // Start defining the function. CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo, FunctionArgs, CD->getLocation(), SourceLocation()); EmitThisParam(CGF); llvm::Value *This = getThisValue(CGF); llvm::Value *SrcVal = IsCopy ? CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&SrcParam), "src") : nullptr; CallArgList Args; // Push the this ptr. Args.add(RValue::get(This), CD->getThisType(getContext())); // Push the src ptr. if (SrcVal) Args.add(RValue::get(SrcVal), SrcParam.getType()); // Add the rest of the default arguments. std::vector<Stmt *> ArgVec; for (unsigned I = IsCopy ? 1 : 0, E = CD->getNumParams(); I != E; ++I) { Stmt *DefaultArg = getContext().getDefaultArgExprForConstructor(CD, I); assert(DefaultArg && "sema forgot to instantiate default args"); ArgVec.push_back(DefaultArg); } CodeGenFunction::RunCleanupsScope Cleanups(CGF); const auto *FPT = CD->getType()->castAs<FunctionProtoType>(); ConstExprIterator ArgBegin(ArgVec.data()), ArgEnd(ArgVec.data() + ArgVec.size()); CGF.EmitCallArgs(Args, FPT, ArgBegin, ArgEnd, CD, IsCopy ? 1 : 0); // Insert any ABI-specific implicit constructor arguments. unsigned ExtraArgs = addImplicitConstructorArgs(CGF, CD, Ctor_Complete, /*ForVirtualBase=*/false, /*Delegating=*/false, Args); // Call the constructor with our arguments. llvm::Value *CalleeFn = CGM.getAddrOfCXXStructor(CD, StructorType::Complete); const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall( Args, CD, Ctor_Complete, ExtraArgs); CGF.EmitCall(CalleeInfo, CalleeFn, ReturnValueSlot(), Args, CD); Cleanups.ForceCleanup(); // Emit the ret instruction and remove any temporary instructions created for // the aid of CodeGen. CGF.FinishFunction(SourceLocation()); return ThunkFn; } llvm::Constant *MicrosoftCXXABI::getCatchableType(QualType T, uint32_t NVOffset, int32_t VBPtrOffset, uint32_t VBIndex) { assert(!T->isReferenceType()); CXXRecordDecl *RD = T->getAsCXXRecordDecl(); const CXXConstructorDecl *CD = RD ? CGM.getContext().getCopyConstructorForExceptionObject(RD) : nullptr; CXXCtorType CT = Ctor_Complete; if (CD) if (!hasDefaultCXXMethodCC(getContext(), CD) || CD->getNumParams() != 1) CT = Ctor_CopyingClosure; uint32_t Size = getContext().getTypeSizeInChars(T).getQuantity(); SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); getMangleContext().mangleCXXCatchableType(T, CD, CT, Size, NVOffset, VBPtrOffset, VBIndex, Out); } if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName)) return getImageRelativeConstant(GV); // The TypeDescriptor is used by the runtime to determine if a catch handler // is appropriate for the exception object. llvm::Constant *TD = getImageRelativeConstant(getAddrOfRTTIDescriptor(T)); // The runtime is responsible for calling the copy constructor if the // exception is caught by value. 
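// Note, inferred from the checks above: the constant emitted below is the // complete copy constructor when that constructor has the default calling // convention and exactly one parameter, a copying-closure thunk otherwise, and // null when there is no copy constructor at all (e.g. for scalar types).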
llvm::Constant *CopyCtor; if (CD) { if (CT == Ctor_CopyingClosure) CopyCtor = getAddrOfCXXCtorClosure(CD, Ctor_CopyingClosure); else CopyCtor = CGM.getAddrOfCXXStructor(CD, StructorType::Complete); CopyCtor = llvm::ConstantExpr::getBitCast(CopyCtor, CGM.Int8PtrTy); } else { CopyCtor = llvm::Constant::getNullValue(CGM.Int8PtrTy); } CopyCtor = getImageRelativeConstant(CopyCtor); bool IsScalar = !RD; bool HasVirtualBases = false; bool IsStdBadAlloc = false; // std::bad_alloc is special for some reason. QualType PointeeType = T; if (T->isPointerType()) PointeeType = T->getPointeeType(); if (const CXXRecordDecl *RD = PointeeType->getAsCXXRecordDecl()) { HasVirtualBases = RD->getNumVBases() > 0; if (IdentifierInfo *II = RD->getIdentifier()) IsStdBadAlloc = II->isStr("bad_alloc") && RD->isInStdNamespace(); } // Encode the relevant CatchableType properties into the Flags bitfield. // FIXME: Figure out how bits 2 or 8 can get set. uint32_t Flags = 0; if (IsScalar) Flags |= 1; if (HasVirtualBases) Flags |= 4; if (IsStdBadAlloc) Flags |= 16; llvm::Constant *Fields[] = { llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags TD, // TypeDescriptor llvm::ConstantInt::get(CGM.IntTy, NVOffset), // NonVirtualAdjustment llvm::ConstantInt::get(CGM.IntTy, VBPtrOffset), // OffsetToVBPtr llvm::ConstantInt::get(CGM.IntTy, VBIndex), // VBTableIndex llvm::ConstantInt::get(CGM.IntTy, Size), // Size CopyCtor // CopyCtor }; llvm::StructType *CTType = getCatchableTypeType(); auto *GV = new llvm::GlobalVariable( CGM.getModule(), CTType, /*Constant=*/true, getLinkageForRTTI(T), llvm::ConstantStruct::get(CTType, Fields), StringRef(MangledName)); GV->setUnnamedAddr(true); GV->setSection(".xdata"); if (GV->isWeakForLinker()) GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName())); return getImageRelativeConstant(GV); } llvm::GlobalVariable *MicrosoftCXXABI::getCatchableTypeArray(QualType T) { assert(!T->isReferenceType()); // See if we've already generated a CatchableTypeArray for this type before. llvm::GlobalVariable *&CTA = CatchableTypeArrays[T]; if (CTA) return CTA; // Ensure that we don't have duplicate entries in our CatchableTypeArray by // using a SmallSetVector. Duplicates may arise due to virtual bases // occurring more than once in the hierarchy. llvm::SmallSetVector<llvm::Constant *, 2> CatchableTypes; // C++14 [except.handle]p3: // A handler is a match for an exception object of type E if [...] // - the handler is of type cv T or cv T& and T is an unambiguous public // base class of E, or // - the handler is of type cv T or const T& where T is a pointer type and // E is a pointer type that can be converted to T by [...] // - a standard pointer conversion (4.10) not involving conversions to // pointers to private or protected or ambiguous classes const CXXRecordDecl *MostDerivedClass = nullptr; bool IsPointer = T->isPointerType(); if (IsPointer) MostDerivedClass = T->getPointeeType()->getAsCXXRecordDecl(); else MostDerivedClass = T->getAsCXXRecordDecl(); // Collect all the unambiguous public bases of the MostDerivedClass. 
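// Illustrative example: for `struct D : public B {};` thrown by value, the // walk below produces CatchableType entries for both D (offset 0) and B (at // its offset within D), and the SmallSetVector keeps the array duplicate-free.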
if (MostDerivedClass) { const ASTContext &Context = getContext(); const ASTRecordLayout &MostDerivedLayout = Context.getASTRecordLayout(MostDerivedClass); MicrosoftVTableContext &VTableContext = CGM.getMicrosoftVTableContext(); SmallVector<MSRTTIClass, 8> Classes; serializeClassHierarchy(Classes, MostDerivedClass); Classes.front().initialize(/*Parent=*/nullptr, /*Specifier=*/nullptr); detectAmbiguousBases(Classes); for (const MSRTTIClass &Class : Classes) { // Skip any ambiguous or private bases. if (Class.Flags & (MSRTTIClass::IsPrivateOnPath | MSRTTIClass::IsAmbiguous)) continue; // Write down how to convert from a derived pointer to a base pointer. uint32_t OffsetInVBTable = 0; int32_t VBPtrOffset = -1; if (Class.VirtualRoot) { OffsetInVBTable = VTableContext.getVBTableIndex(MostDerivedClass, Class.VirtualRoot)*4; VBPtrOffset = MostDerivedLayout.getVBPtrOffset().getQuantity(); } // Turn our record back into a pointer if the exception object is a // pointer. QualType RTTITy = QualType(Class.RD->getTypeForDecl(), 0); if (IsPointer) RTTITy = Context.getPointerType(RTTITy); CatchableTypes.insert(getCatchableType(RTTITy, Class.OffsetInVBase, VBPtrOffset, OffsetInVBTable)); } } // C++14 [except.handle]p3: // A handler is a match for an exception object of type E if // - The handler is of type cv T or cv T& and E and T are the same type // (ignoring the top-level cv-qualifiers) CatchableTypes.insert(getCatchableType(T)); // C++14 [except.handle]p3: // A handler is a match for an exception object of type E if // - the handler is of type cv T or const T& where T is a pointer type and // E is a pointer type that can be converted to T by [...] // - a standard pointer conversion (4.10) not involving conversions to // pointers to private or protected or ambiguous classes // // C++14 [conv.ptr]p2: // A prvalue of type "pointer to cv T," where T is an object type, can be // converted to a prvalue of type "pointer to cv void". if (IsPointer && T->getPointeeType()->isObjectType()) CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy)); // C++14 [except.handle]p3: // A handler is a match for an exception object of type E if [...] // - the handler is of type cv T or const T& where T is a pointer or // pointer to member type and E is std::nullptr_t. // // We cannot possibly list all possible pointer types here, making this // implementation incompatible with the standard. However, MSVC includes an // entry for pointer-to-void in this case. Let's do the same. 
if (T->isNullPtrType()) CatchableTypes.insert(getCatchableType(getContext().VoidPtrTy)); uint32_t NumEntries = CatchableTypes.size(); llvm::Type *CTType = getImageRelativeType(getCatchableTypeType()->getPointerTo()); llvm::ArrayType *AT = llvm::ArrayType::get(CTType, NumEntries); llvm::StructType *CTAType = getCatchableTypeArrayType(NumEntries); llvm::Constant *Fields[] = { llvm::ConstantInt::get(CGM.IntTy, NumEntries), // NumEntries llvm::ConstantArray::get( AT, llvm::makeArrayRef(CatchableTypes.begin(), CatchableTypes.end())) // CatchableTypes }; SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); getMangleContext().mangleCXXCatchableTypeArray(T, NumEntries, Out); } CTA = new llvm::GlobalVariable( CGM.getModule(), CTAType, /*Constant=*/true, getLinkageForRTTI(T), llvm::ConstantStruct::get(CTAType, Fields), StringRef(MangledName)); CTA->setUnnamedAddr(true); CTA->setSection(".xdata"); if (CTA->isWeakForLinker()) CTA->setComdat(CGM.getModule().getOrInsertComdat(CTA->getName())); return CTA; } llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) { bool IsConst, IsVolatile; T = decomposeTypeForEH(getContext(), T, IsConst, IsVolatile); // The CatchableTypeArray enumerates the various (CV-unqualified) types that // the exception object may be caught as. llvm::GlobalVariable *CTA = getCatchableTypeArray(T); // The first field in a CatchableTypeArray is the number of CatchableTypes. // This is used as a component of the mangled name which means that we need to // know what it is in order to see if we have previously generated the // ThrowInfo. uint32_t NumEntries = cast<llvm::ConstantInt>(CTA->getInitializer()->getAggregateElement(0U)) ->getLimitedValue(); SmallString<256> MangledName; { llvm::raw_svector_ostream Out(MangledName); getMangleContext().mangleCXXThrowInfo(T, IsConst, IsVolatile, NumEntries, Out); } // Reuse a previously generated ThrowInfo if we have generated an appropriate // one before. if (llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(MangledName)) return GV; // The RTTI TypeDescriptor uses an unqualified type but catch clauses must // be at least as CV qualified. Encode this requirement into the Flags // bitfield. uint32_t Flags = 0; if (IsConst) Flags |= 1; if (IsVolatile) Flags |= 2; // The cleanup-function (a destructor) must be called when the exception // object's lifetime ends. llvm::Constant *CleanupFn = llvm::Constant::getNullValue(CGM.Int8PtrTy); if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl()) if (CXXDestructorDecl *DtorD = RD->getDestructor()) if (!DtorD->isTrivial()) CleanupFn = llvm::ConstantExpr::getBitCast( CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete), CGM.Int8PtrTy); // This is unused as far as we can tell, initialize it to null. 
llvm::Constant *ForwardCompat = getImageRelativeConstant(llvm::Constant::getNullValue(CGM.Int8PtrTy)); llvm::Constant *PointerToCatchableTypes = getImageRelativeConstant( llvm::ConstantExpr::getBitCast(CTA, CGM.Int8PtrTy)); llvm::StructType *TIType = getThrowInfoType(); llvm::Constant *Fields[] = { llvm::ConstantInt::get(CGM.IntTy, Flags), // Flags getImageRelativeConstant(CleanupFn), // CleanupFn ForwardCompat, // ForwardCompat PointerToCatchableTypes // CatchableTypeArray }; auto *GV = new llvm::GlobalVariable( CGM.getModule(), TIType, /*Constant=*/true, getLinkageForRTTI(T), llvm::ConstantStruct::get(TIType, Fields), StringRef(MangledName)); GV->setUnnamedAddr(true); GV->setSection(".xdata"); if (GV->isWeakForLinker()) GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName())); return GV; } void MicrosoftCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) { const Expr *SubExpr = E->getSubExpr(); QualType ThrowType = SubExpr->getType(); // The exception object lives on the stack and its address is passed to the // runtime function. llvm::AllocaInst *AI = CGF.CreateMemTemp(ThrowType); CGF.EmitAnyExprToMem(SubExpr, AI, ThrowType.getQualifiers(), /*IsInit=*/true); // The so-called ThrowInfo is used to describe how the exception object may be // caught. llvm::GlobalVariable *TI = getThrowInfo(ThrowType); // Call into the runtime to throw the exception. llvm::Value *Args[] = {CGF.Builder.CreateBitCast(AI, CGM.Int8PtrTy), TI}; CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(), Args); }
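// Illustrative summary (an assumption for exposition: `_CxxThrowException` is // the usual MSVC runtime entry point, and getThrowFn is not shown in this // excerpt): the sequence emitted above corresponds roughly to // `SomeType tmp = <subexpr>;` followed by // `_CxxThrowException(&tmp, &ThrowInfoForSomeType);`, which never returns.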
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGVTables.h
//===--- CGVTables.h - Emit LLVM Code for C++ vtables -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code dealing with C++ code generation of virtual tables. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGVTABLES_H #define LLVM_CLANG_LIB_CODEGEN_CGVTABLES_H #include "clang/AST/BaseSubobject.h" #include "clang/AST/CharUnits.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/VTableBuilder.h" #include "clang/Basic/ABI.h" #include "llvm/ADT/DenseMap.h" #include "llvm/IR/GlobalVariable.h" namespace clang { class CXXRecordDecl; namespace CodeGen { class CodeGenModule; class CodeGenVTables { CodeGenModule &CGM; VTableContextBase *VTContext; /// VTableAddressPointsMapTy - Address points for a single vtable. typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy; typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy; typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy; /// SubVTTIndicies - Contains indices into the various sub-VTTs. SubVTTIndiciesMapTy SubVTTIndicies; typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SecondaryVirtualPointerIndicesMapTy; /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer /// indices. SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices; /// emitThunk - Emit a single thunk. void emitThunk(GlobalDecl GD, const ThunkInfo &Thunk, bool ForVTable); /// maybeEmitThunkForVTable - Emit the given thunk for the vtable if needed by /// the ABI. void maybeEmitThunkForVTable(GlobalDecl GD, const ThunkInfo &Thunk); public: /// CreateVTableInitializer - Create a vtable initializer for the given record /// decl. /// \param Components - The vtable components; this is really an array of /// VTableComponents. llvm::Constant *CreateVTableInitializer( const CXXRecordDecl *RD, const VTableComponent *Components, unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks, unsigned NumVTableThunks, llvm::Constant *RTTI); CodeGenVTables(CodeGenModule &CGM); ItaniumVTableContext &getItaniumVTableContext() { return *cast<ItaniumVTableContext>(VTContext); } MicrosoftVTableContext &getMicrosoftVTableContext() { return *cast<MicrosoftVTableContext>(VTContext); } /// getSubVTTIndex - Return the index of the sub-VTT for the base class of the /// given record decl. uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base); /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the /// virtual pointer for the given subobject is located. uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD, BaseSubobject Base); /// getAddressPoint - Get the address point of the given subobject in the /// class decl. uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD); /// GenerateConstructionVTable - Generate a construction vtable for the given /// base subobject. llvm::GlobalVariable * GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual, llvm::GlobalVariable::LinkageTypes Linkage, VTableAddressPointsMapTy& AddressPoints); /// GetAddrOfVTT - Get the address of the VTT for the given record decl. 
llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD); /// EmitVTTDefinition - Emit the definition of the given vtable. void EmitVTTDefinition(llvm::GlobalVariable *VTT, llvm::GlobalVariable::LinkageTypes Linkage, const CXXRecordDecl *RD); /// EmitThunks - Emit the associated thunks for the given global decl. void EmitThunks(GlobalDecl GD); /// GenerateClassData - Generate all the class data required to be /// generated upon definition of a KeyFunction. This includes the /// vtable, the RTTI data structure (if RTTI is enabled) and the VTT /// (if the class has virtual bases). void GenerateClassData(const CXXRecordDecl *RD); bool isVTableExternal(const CXXRecordDecl *RD); }; } // end namespace CodeGen } // end namespace clang #endif
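// Usage sketch (illustrative, not part of the original header): callers reach // this class through the module, e.g. // `CGM.getVTables().EmitThunks(GlobalDecl(Dtor, Dtor_Complete));`, as the // Microsoft ABI destructor emission above does for virtual destructors whose // complete variant was emitted as an alias.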
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGStmt.cpp
//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code to emit Stmt nodes as LLVM code. // //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" #include "CGDebugInfo.h" #include "CodeGenModule.h" #include "TargetInfo.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/PrettyStackTrace.h" #include "clang/Basic/TargetInfo.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/SemaDiagnostic.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/Intrinsics.h" #include "CGHLSLRuntime.h" // HLSL Change #include "CGHLSLMSHelper.h" // HLSL Change using namespace clang; using namespace CodeGen; //===----------------------------------------------------------------------===// // Statement Emission //===----------------------------------------------------------------------===// void CodeGenFunction::EmitStopPoint(const Stmt *S) { if (CGDebugInfo *DI = getDebugInfo()) { SourceLocation Loc; Loc = S->getLocStart(); DI->EmitLocation(Builder, Loc); LastStopPoint = Loc; } } void CodeGenFunction::EmitStmt(const Stmt *S) { assert(S && "Null statement?"); PGO.setCurrentStmt(S); // These statements have their own debug info handling. if (EmitSimpleStmt(S)) return; // Check if we are generating unreachable code. if (!HaveInsertPoint()) { // If so, and the statement doesn't contain a label, then we do not need to // generate actual code. This is safe because (1) the current point is // unreachable, so we don't need to execute the code, and (2) we've already // handled the statements which update internal data structures (like the // local variable map) which could be used by subsequent statements. if (!ContainsLabel(S)) { // Verify that any decl statements were handled as simple, they may be in // scope of subsequent reachable statements. assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!"); return; } // Otherwise, make a new block to hold the code. EnsureInsertPoint(); } // Generate a stoppoint if we are emitting debug info. EmitStopPoint(S); switch (S->getStmtClass()) { case Stmt::NoStmtClass: case Stmt::CXXCatchStmtClass: case Stmt::SEHExceptStmtClass: case Stmt::SEHFinallyStmtClass: case Stmt::MSDependentExistsStmtClass: llvm_unreachable("invalid statement class to emit generically"); case Stmt::NullStmtClass: case Stmt::CompoundStmtClass: case Stmt::DeclStmtClass: case Stmt::LabelStmtClass: case Stmt::AttributedStmtClass: case Stmt::GotoStmtClass: case Stmt::BreakStmtClass: case Stmt::ContinueStmtClass: case Stmt::DefaultStmtClass: case Stmt::CaseStmtClass: case Stmt::SEHLeaveStmtClass: llvm_unreachable("should have emitted these statements as simple"); #define STMT(Type, Base) #define ABSTRACT_STMT(Op) #define EXPR(Type, Base) \ case Stmt::Type##Class: #include "clang/AST/StmtNodes.inc" { // Remember the block we came in on. llvm::BasicBlock *incoming = Builder.GetInsertBlock(); assert(incoming && "expression emission must have an insertion point"); EmitIgnoredExpr(cast<Expr>(S)); llvm::BasicBlock *outgoing = Builder.GetInsertBlock(); assert(outgoing && "expression emission cleared block!"); // The expression emitters assume (reasonably!) 
that the insertion // point is always set. To maintain that, the call-emission code // for noreturn functions has to enter a new block with no // predecessors. We want to kill that block and mark the current // insertion point unreachable in the common case of a call like // "exit();". Since expression emission doesn't otherwise create // blocks with no predecessors, we can just test for that. // However, we must be careful not to do this to our incoming // block, because *statement* emission does sometimes create // reachable blocks which will have no predecessors until later in // the function. This occurs with, e.g., labels that are not // reachable by fallthrough. if (incoming != outgoing && outgoing->use_empty()) { outgoing->eraseFromParent(); Builder.ClearInsertionPoint(); } break; } case Stmt::IndirectGotoStmtClass: EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break; case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break; case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break; case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break; case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break; case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break; case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break; #if 1 // HLSL Change - no support for assembler, captures, ObjC, exception handling, for-range, openmp default: break; #else case Stmt::GCCAsmStmtClass: // Intentional fall-through. case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break; case Stmt::CapturedStmtClass: { const CapturedStmt *CS = cast<CapturedStmt>(S); EmitCapturedStmt(*CS, CS->getCapturedRegionKind()); } break; case Stmt::ObjCAtTryStmtClass: EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S)); break; case Stmt::ObjCAtCatchStmtClass: llvm_unreachable( "@catch statements should be handled by EmitObjCAtTryStmt"); case Stmt::ObjCAtFinallyStmtClass: llvm_unreachable( "@finally statements should be handled by EmitObjCAtTryStmt"); case Stmt::ObjCAtThrowStmtClass: EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S)); break; case Stmt::ObjCAtSynchronizedStmtClass: EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S)); break; case Stmt::ObjCForCollectionStmtClass: EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S)); break; case Stmt::ObjCAutoreleasePoolStmtClass: EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S)); break; case Stmt::CXXTryStmtClass: EmitCXXTryStmt(cast<CXXTryStmt>(*S)); break; case Stmt::CXXForRangeStmtClass: EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S)); break; case Stmt::SEHTryStmtClass: EmitSEHTryStmt(cast<SEHTryStmt>(*S)); break; case Stmt::OMPParallelDirectiveClass: EmitOMPParallelDirective(cast<OMPParallelDirective>(*S)); break; case Stmt::OMPSimdDirectiveClass: EmitOMPSimdDirective(cast<OMPSimdDirective>(*S)); break; case Stmt::OMPForDirectiveClass: EmitOMPForDirective(cast<OMPForDirective>(*S)); break; case Stmt::OMPForSimdDirectiveClass: EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S)); break; case Stmt::OMPSectionsDirectiveClass: EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S)); break; case Stmt::OMPSectionDirectiveClass: EmitOMPSectionDirective(cast<OMPSectionDirective>(*S)); break; case Stmt::OMPSingleDirectiveClass: EmitOMPSingleDirective(cast<OMPSingleDirective>(*S)); break; case Stmt::OMPMasterDirectiveClass: EmitOMPMasterDirective(cast<OMPMasterDirective>(*S)); break; case Stmt::OMPCriticalDirectiveClass: EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S)); break; case 
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
#endif // HLSL Change - no support for assembler, captures, ObjC, exception handling, for-range, openmp
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S));
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S));
    break;
  // HLSL Change Begins.
  case Stmt::DiscardStmtClass:
    EmitDiscardStmt(cast<DiscardStmt>(*S));
    break;
  // HLSL Change Ends.
#if 0 // HLSL Change - no support for exception handling
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
#endif
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S,
                                               bool GetLast,
                                               AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
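  // Note: LexicalScope below is an RAII helper; its destructor runs any
  // cleanups (and closes any debug scopes) pushed while the body was being
  // emitted, so the depth recorded here is restored on every exit path.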
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

llvm::Value*
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {
  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end()-GetLast; I != E; ++I)
    EmitStmt(*I);

  llvm::Value *RetAlloca = nullptr;
  if (GetLast) {
    // We have to special case labels here.  They are statements, but when put
    // at the end of a statement expression, they yield the value of their
    // subexpression.  Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    const Stmt *LastStmt = S.body_back();
    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
      EmitLabel(LS->getDecl());
      LastStmt = LS->getSubStmt();
    }

    EnsureInsertPoint();

    QualType ExprTy = cast<Expr>(LastStmt)->getType();
    if (hasAggregateEvaluationKind(ExprTy)) {
      EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
    } else {
      // We can't return an RValue here because there might be cleanups at
      // the end of the StmtExpr.  Because of that, we have to emit the result
      // here into a temporary alloca.
      RetAlloca = CreateMemTemp(ExprTy);
      EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
                       /*IsInit*/false);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB,
                                               CGHLSLMSHelper::Scope *LoopScope) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI != BB->begin())
    return;

  // HLSL CHANGE
  // Need to update the loop scope block when we forward the
  // branch to the new bb. Otherwise we keep a pointer to
  // the old deleted block.
  assert(!LoopScope || LoopScope->loopContinueBB == BB);
  if (LoopScope)
    LoopScope->loopContinueBB = BI->getSuccessor(0);
  // HLSL CHANGE

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB, BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
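    // (After the branch the insertion point is cleared below, so anything
    // emitted next must first open a new block via EmitBlock or
    // EnsureInsertPoint.)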
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());
  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  const Stmt *SubStmt = S.getSubStmt();
  switch (SubStmt->getStmtClass()) {
  case Stmt::DoStmtClass:
    EmitDoStmt(cast<DoStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::ForStmtClass:
    EmitForStmt(cast<ForStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::WhileStmtClass:
    EmitWhileStmt(cast<WhileStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*SubStmt), S.getAttrs());
    break;
  // HLSL Change Begins.
  case Stmt::IfStmtClass:
    EmitIfStmt(cast<IfStmt>(*SubStmt), S.getAttrs());
    break;
  case Stmt::SwitchStmtClass:
    EmitSwitchStmt(cast<SwitchStmt>(*SubStmt), S.getAttrs());
    break;
  // HLSL Change Ends.
  default:
    EmitStmt(SubStmt);
  }
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info).
  // We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

// HLSL Change Begins.
void CodeGenFunction::EmitDiscardStmt(const DiscardStmt &S) {
  CGM.getHLSLRuntime().EmitHLSLDiscard(*this);
}
// HLSL Change Ends.

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S,
                                 ArrayRef<const Attr *> Attrs) { // HLSL Change
  // HLSL Change Begins
  // Skip unreachable if.
  if (!HaveInsertPoint())
    return;
  // HLSL Change Ends

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped  = S.getElse();
    if (!CondConstant)  // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (!ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      // HLSL Change Begin.
      if (getLangOpts().HLSL) {
        // Emit the condition to make sure it is not short-circuited away.
        EmitScalarExpr(S.getCond());
      }
      // HLSL Change End.
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // HLSL Change Begins
  llvm::TerminatorInst *TI =
      cast<llvm::TerminatorInst>(*ThenBlock->user_begin());
  CGM.getHLSLRuntime().AddControlFlowHint(*this, S, TI, Attrs);
  CGM.getHLSLRuntime().MarkIfStmt(*this, ContBlock);
  // HLSL Change Ends

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // HLSL Change Begin.
  CGM.getHLSLRuntime().MarkScopeEnd(*this);
  // HLSL Change End.

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitCondBrHints(llvm::LLVMContext &Context,
                                      llvm::BranchInst *CondBr,
                                      ArrayRef<const Attr *> Attrs) {
  // Return if there are no hints.
  if (Attrs.empty())
    return;

  // Add vectorize and unroll hints to the metadata on the conditional branch.
  //
  // FIXME: Should this really start with a size of 1?
  SmallVector<llvm::Metadata *, 2> Metadata(1);
  for (const auto *Attr : Attrs) {
    const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr);

    // Skip non loop hint attributes
    if (!LH)
      continue;

    LoopHintAttr::OptionType Option = LH->getOption();
    LoopHintAttr::LoopHintState State = LH->getState();
    const char *MetadataName;
    switch (Option) {
    case LoopHintAttr::Vectorize:
    case LoopHintAttr::VectorizeWidth:
      MetadataName = "llvm.loop.vectorize.width";
      break;
    case LoopHintAttr::Interleave:
    case LoopHintAttr::InterleaveCount:
      MetadataName = "llvm.loop.interleave.count";
      break;
    case LoopHintAttr::Unroll:
      // With the unroll loop hint, a non-zero value indicates full unrolling.
      MetadataName = State == LoopHintAttr::Disable
                         ? "llvm.loop.unroll.disable"
                         : "llvm.loop.unroll.full";
      break;
    case LoopHintAttr::UnrollCount:
      MetadataName = "llvm.loop.unroll.count";
      break;
    }

    Expr *ValueExpr = LH->getValue();
    int ValueInt = 1;
    if (ValueExpr) {
      llvm::APSInt ValueAPS =
          ValueExpr->EvaluateKnownConstInt(CGM.getContext());
      ValueInt = static_cast<int>(ValueAPS.getSExtValue());
    }

    llvm::Constant *Value;
    llvm::MDString *Name;
    switch (Option) {
    case LoopHintAttr::Vectorize:
    case LoopHintAttr::Interleave:
      if (State != LoopHintAttr::Disable) {
        // FIXME: In the future I will modify the behavior of the metadata
        // so we can enable/disable vectorization and interleaving separately.
        Name = llvm::MDString::get(Context, "llvm.loop.vectorize.enable");
        Value = Builder.getTrue();
        break;
      }
      // Vectorization/interleaving is disabled, set width/count to 1.
      ValueInt = 1;
      LLVM_FALLTHROUGH; // HLSL Change
    case LoopHintAttr::VectorizeWidth:
    case LoopHintAttr::InterleaveCount:
    case LoopHintAttr::UnrollCount:
      Name = llvm::MDString::get(Context, MetadataName);
      Value = llvm::ConstantInt::get(Int32Ty, ValueInt);
      break;
    case LoopHintAttr::Unroll:
      Name = llvm::MDString::get(Context, MetadataName);
      Value = nullptr;
      break;
    }

    SmallVector<llvm::Metadata *, 2> OpValues;
    OpValues.push_back(Name);
    if (Value)
      OpValues.push_back(llvm::ConstantAsMetadata::get(Value));

    // Set or overwrite metadata indicated by Name.
    Metadata.push_back(llvm::MDNode::get(Context, OpValues));
  }

  // FIXME: This condition is never false.  Should it be an assert?
  if (
      // HLSL Change Begin.
      // We only want to enter this if we found an llvm loop attribute, and we
      // know we found one if the metadata size > 1.
      Metadata.size() > 1
      // HLSL Change End.
      ) {
    // Add llvm.loop MDNode to CondBr.
    llvm::MDNode *LoopID = llvm::MDNode::get(Context, Metadata);
    LoopID->replaceOperandWith(0, LoopID); // First op points to itself.

    CondBr->setMetadata("llvm.loop", LoopID);
  }
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
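  // Block layout for a while loop, matching the blocks created below:
  //
  //   while.cond:  evaluate condition; br while.body / while.end
  //   while.body:  loop body; br while.cond
  //   while.end:   fall-through after the loop
  //
  // 'continue' targets while.cond and 'break' targets while.end.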
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  LoopStack.push(LoopHeader.getBlock(), WhileAttrs);

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // HLSL Change Begin.
  CGM.getHLSLRuntime().MarkLoopStmt(*this, LoopHeader.getBlock(),
                                    LoopExit.getBlock());
  // HLSL Change End.

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");

    llvm::BranchInst *CondBr = Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    // Attach metadata to loop body conditional branch.
    EmitCondBrHints(LoopBody->getContext(), CondBr, WhileAttrs);
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // HLSL Change Begin.
  CGHLSLMSHelper::Scope *LoopScope = CGM.getHLSLRuntime().MarkScopeEnd(*this);
  // HLSL Change End.

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock(), LoopScope);
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // HLSL Change Begin.
  CGM.getHLSLRuntime().MarkLoopStmt(*this, LoopCond.getBlock(),
                                    LoopExit.getBlock());
  // HLSL Change End.

  // Emit the body of the loop.
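  // Unlike a while loop, the body block comes first and 'continue'
  // targets do.cond rather than the body:
  //
  //   do.body:  loop body; falls through to do.cond
  //   do.cond:  evaluate condition; br do.body / do.end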
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  LoopStack.push(LoopBody, DoAttrs);

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    llvm::BranchInst *CondBr = Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));

    // Attach metadata to loop body conditional branch.
    EmitCondBrHints(LoopBody->getContext(), CondBr, DoAttrs);
  }

  LoopStack.pop();

  // HLSL Change Begin.
  CGHLSLMSHelper::Scope *LoopScope = CGM.getHLSLRuntime().MarkScopeEnd(*this);
  // HLSL Change End.

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock(), LoopScope);
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  LoopStack.push(CondBlock, ForAttrs);

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // HLSL Change Begin.
  CGM.getHLSLRuntime().MarkLoopStmt(*this, Continue.getBlock(),
                                    LoopExit.getBlock());
  // HLSL Change End.

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitAutoVarDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
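    // Full block layout once everything below is emitted:
    //
    //   for.cond:  evaluate condition; br for.body / for.end
    //   for.body:  loop body; falls through to for.inc (if any)
    //   for.inc:   increment; br for.cond
    //   for.end:   fall-through after the loop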
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::BranchInst *CondBr = Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    // Attach metadata to loop body conditional branch.
    EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // HLSL Change Begin.
  CGM.getHLSLRuntime().MarkScopeEnd(*this);
  // HLSL Change End.

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  LoopStack.push(CondBlock, ForAttrs);

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::BranchInst *CondBr = Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  // Attach metadata to loop body conditional branch.
  EmitCondBrHints(ForBody->getContext(), CondBr, ForAttrs);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
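  // Passing true for IsFinished lets EmitBlock delete the exit block
  // outright if nothing ever branched to it (e.g. an infinite loop with
  // no break).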
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
  } else {
    EmitStoreOfComplex(RV.getComplexVal(),
                       MakeNaturalAlignAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void.  Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Treat block literals in a return expression as if they appeared
  // in their own scope.  This permits a small, easily-implemented
  // exception to our over-conservative rules about not jumping to
  // statements following block literals with non-trivial cleanups.
  RunCleanupsScope cleanupScope(*this);
  if (const ExprWithCleanups *cleanups =
        dyn_cast_or_null<ExprWithCleanups>(RV)) {
    enterFullExpression(cleanups);
    RV = cleanups->getSubExpr();
  }

  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable()) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      // HLSL Change Begins.
      if (hlsl::IsHLSLMatType(RV->getType())) {
        CGM.getHLSLRuntime().EmitHLSLMatrixStore(*this, EmitScalarExpr(RV),
                                                 ReturnValue, FnRetTy);
      } else
      // HLSL Change Ends.
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV,
                     MakeNaturalAlignAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate: {
      CodeGenFunction::HLSLOutParamScope OutParamScope(*this);
      // HLSL Change Begins.
      auto MapTemp = [&](const VarDecl *LocalVD, llvm::Value *TmpArg) {
        OutParamScope.addTemp(LocalVD, TmpArg);
      };
      RV = CGM.getHLSLRuntime().CheckReturnStmtGLCMismatch(*this, RV, S,
                                                           FnRetTy, MapTemp);
      // HLSL Change Ends.
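      // Emit the aggregate result directly into the return slot so no
      // extra temporary copy is needed.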
      CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
      EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment,
                                            Qualifiers(),
                                            AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased));
      break;
    }
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();

  // HLSL Change Begin.
  CGM.getHLSLRuntime().MarkReturnStmt(*this, Builder.GetInsertBlock());
  // HLSL Change End.

  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  // HLSL Change Begin - incorporate unconditional branch blocks into loops
  // If it has a continue location, it's a loop.
  llvm::BasicBlock *lastContinueBlock =
      BreakContinueStack.back().ContinueBlock.getBlock();
  if (lastContinueBlock &&
      (BreakContinueStack.size() < 2 ||
       lastContinueBlock !=
           BreakContinueStack.end()[-2].ContinueBlock.getBlock())) {
    // We execute this if
    //  - we are in an unnested loop, or
    //  - we are in a nested control construct but the continue block of the
    //    enclosing loop is different from the current continue block.
    // The second condition can happen for switch statements inside loops,
    // which share the same continue block.
    llvm::BasicBlock *lastBreakBlock =
        BreakContinueStack.back().BreakBlock.getBlock();
    llvm::BranchInst *condBr = CGM.getHLSLRuntime().EmitHLSLCondBreak(
        *this, CurFn, lastBreakBlock, lastContinueBlock);
    // Insertion of lifetime.start/end intrinsics may require a cleanup, so we
    // pass the branch that we already generated into the handler.
    EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock, condBr);
    Builder.ClearInsertionPoint();
  } else
  // HLSL Change End - incorporate unconditional branch blocks into loops
  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
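  // For a small range such as 'case 1 ... 4:' this emits one shared
  // destination block and four individual switch cases pointing at it;
  // ranges wider than 64 values instead get the explicit bounds check
  // emitted further down.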
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      LHS++;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
    Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
    Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided.  This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement.  For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }

  llvm::ConstantInt *CaseVal =
    Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().ProfileInstrGenerate &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
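    // In that case 'case N: break;' lowers to a switch case aimed straight
    // at the break target, with no intermediate block at all.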
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //  case 1:
  //    case 2:
  //      case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive.  It also causes
  // deep recursion which can run into stack depth limitations.  Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
      Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().ProfileInstrGenerate) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, &S);
    }

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit.  Basically,
/// on a switch (5) we want to find these statements:
///    case 5:
///      printf(...);    <--
///      ++i;            <--
///      break;
///
/// and add them to the ResultStmts vector.  If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough.  If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels.  If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                      SmallVectorImpl<const Stmt*> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for, then
  // we're in business.  Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a switch statement, then it might contain the SwitchCase, the
  // break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    if (Case) {
      // Keep track of whether we see a skipped declaration.  The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= isa<DeclStmt>(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) the statement doesn't
          // have the case and is skippable, or 2) it does contain the case
          // value and also contains the break to exit the switch.  In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case started to include statements.  Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    for (; I != E; ++I) {
      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmt, keep adding them afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion.  We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    return Case ? CSFC_Success : CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc.  If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement.  Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great.  Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant.  See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to.  We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case.  Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc.  If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it.  This can fail for a variety of reasons.  Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S,
                                     ArrayRef<const Attr *> Attrs) { // HLSL Change
  // HLSL Change Begins
  // Skip unreachable switch.
  if (!HaveInsertPoint())
    return;
  // HLSL Change Ends

  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
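  // For example, 'switch (4) { case 4: foo(); break; default: bar(); }'
  // emits only the call to foo(), with no switch instruction at all.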
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitAutoVarDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case.  Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);
  if (S.getConditionVariable())
    EmitAutoVarDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement.  We also need to create a default block now so that
  // explicit case ranges tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);

  // HLSL Change Begins
  llvm::TerminatorInst *TI = cast<llvm::TerminatorInst>(SwitchInsn);
  CGM.getHLSLRuntime().AddControlFlowHint(*this, S, TI, Attrs);
  CGM.getHLSLRuntime().MarkSwitchStmt(*this, SwitchInsn,
                                      SwitchExit.getBlock());
  // HLSL Change Ends

  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first.  We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  }
  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to NextBlock.  If BreakContinueStack is non-empty
  // then reuse last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // HLSL Change Begin.
  CGM.getHLSLRuntime().MarkScopeEnd(*this);
  // HLSL Change End.

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
    if (SwitchWeights->size() > 1)
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*SwitchWeights));
    delete SwitchWeights;
  }
  SwitchInsn = SavedSwitchInsn;
  SwitchWeights = SavedSwitchWeights;
  CaseRangeBlock = SavedCRBlock;
}

static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alt constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case '&':
    case '%':
      Result += *Constraint;
      while (Constraint[1] && Constraint[1] == *Constraint)
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint,
                                               &(*OutCons)[0],
                                               OutCons->size(), Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}

/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
/// as using a particular register add that as a constraint that will be used
/// in this asm stmt.
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt, const bool EarlyClobber) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) &&
      !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  return (EarlyClobber ?
"&{" : "{") + Register.str() + "}"; } llvm::Value* CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info, LValue InputValue, QualType InputType, std::string &ConstraintStr, SourceLocation Loc) { llvm::Value *Arg; if (Info.allowsRegister() || !Info.allowsMemory()) { if (CodeGenFunction::hasScalarEvaluationKind(InputType)) { Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal(); } else { llvm::Type *Ty = ConvertType(InputType); uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty); if (Size <= 64 && llvm::isPowerOf2_64(Size)) { Ty = llvm::IntegerType::get(getLLVMContext(), Size); Ty = llvm::PointerType::getUnqual(Ty); Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(), Ty)); } else { Arg = InputValue.getAddress(); ConstraintStr += '*'; } } } else { Arg = InputValue.getAddress(); ConstraintStr += '*'; } return Arg; } llvm::Value* CodeGenFunction::EmitAsmInput( const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr, std::string &ConstraintStr) { // If this can't be a register or memory, i.e., has to be a constant // (immediate or symbolic), try to emit it as such. if (!Info.allowsRegister() && !Info.allowsMemory()) { llvm::APSInt Result; if (InputExpr->EvaluateAsInt(Result, getContext())) return llvm::ConstantInt::get(getLLVMContext(), Result); assert(!Info.requiresImmediateConstant() && "Required-immediate inlineasm arg isn't constant?"); } if (Info.allowsRegister() || !Info.allowsMemory()) if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType())) return EmitScalarExpr(InputExpr); InputExpr = InputExpr->IgnoreParenNoopCasts(getContext()); LValue Dest = EmitLValue(InputExpr); return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr, InputExpr->getExprLoc()); } /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline /// asm call instruction. The !srcloc MDNode contains a list of constant /// integers which are the source locations of the start of each line in the /// asm. static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str, CodeGenFunction &CGF) { SmallVector<llvm::Metadata *, 8> Locs; // Add the location of the first line to the MDNode. Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( CGF.Int32Ty, Str->getLocStart().getRawEncoding()))); StringRef StrVal = Str->getString(); if (!StrVal.empty()) { const SourceManager &SM = CGF.CGM.getContext().getSourceManager(); const LangOptions &LangOpts = CGF.CGM.getLangOpts(); // Add the location of the start of each subsequent line of the asm to the // MDNode. for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) { if (StrVal[i] != '\n') continue; SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts, CGF.getTarget()); Locs.push_back(llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding()))); } } return llvm::MDNode::get(CGF.getLLVMContext(), Locs); } void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { // Assemble the final asm string. std::string AsmString = S.generateAsmString(getContext()); // Get all the output and input constraints together. 
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos; SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos; for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { StringRef Name; if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) Name = GAS->getOutputName(i); TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name); bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid; assert(IsValid && "Failed to parse output constraint"); OutputConstraintInfos.push_back(Info); } for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { StringRef Name; if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S)) Name = GAS->getInputName(i); TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name); bool IsValid = getTarget().validateInputConstraint(OutputConstraintInfos.data(), S.getNumOutputs(), Info); assert(IsValid && "Failed to parse input constraint"); (void)IsValid; InputConstraintInfos.push_back(Info); } std::string Constraints; std::vector<LValue> ResultRegDests; std::vector<QualType> ResultRegQualTys; std::vector<llvm::Type *> ResultRegTypes; std::vector<llvm::Type *> ResultTruncRegTypes; std::vector<llvm::Type *> ArgTypes; std::vector<llvm::Value*> Args; // Keep track of inout constraints. std::string InOutConstraints; std::vector<llvm::Value*> InOutArgs; std::vector<llvm::Type*> InOutArgTypes; // An inline asm can be marked readonly if it meets the following conditions: // - it doesn't have any sideeffects // - it doesn't clobber memory // - it doesn't return a value by-reference // It can be marked readnone if it doesn't have any input memory constraints // in addition to meeting the conditions listed above. bool ReadOnly = true, ReadNone = true; for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i]; // Simplify the output constraint. std::string OutputConstraint(S.getOutputConstraint(i)); OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, getTarget()); const Expr *OutExpr = S.getOutputExpr(i); OutExpr = OutExpr->IgnoreParenNoopCasts(getContext()); OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr, getTarget(), CGM, S, Info.earlyClobber()); LValue Dest = EmitLValue(OutExpr); if (!Constraints.empty()) Constraints += ','; // If this is a register output, then make the inline asm return it // by-value. If this is a memory result, return the value by-reference. if (!Info.allowsMemory() && hasScalarEvaluationKind(OutExpr->getType())) { Constraints += "=" + OutputConstraint; ResultRegQualTys.push_back(OutExpr->getType()); ResultRegDests.push_back(Dest); ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType())); ResultTruncRegTypes.push_back(ResultRegTypes.back()); // If this output is tied to an input, and if the input is larger, then // we need to set the actual result type of the inline asm node to be the // same as the input type. if (Info.hasMatchingInput()) { unsigned InputNo; for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) { TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo]; if (Input.hasTiedOperand() && Input.getTiedOperand() == i) break; } assert(InputNo != S.getNumInputs() && "Didn't find matching input!"); QualType InputTy = S.getInputExpr(InputNo)->getType(); QualType OutputType = OutExpr->getType(); uint64_t InputSize = getContext().getTypeSize(InputTy); if (getContext().getTypeSize(OutputType) < InputSize) { // Form the asm to return the value as a larger integer or fp type. 
ResultRegTypes.back() = ConvertType(InputTy); } } if (llvm::Type* AdjTy = getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, ResultRegTypes.back())) ResultRegTypes.back() = AdjTy; else { CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) << OutExpr->getType() << OutputConstraint; } } else { ArgTypes.push_back(Dest.getAddress()->getType()); Args.push_back(Dest.getAddress()); Constraints += "=*"; Constraints += OutputConstraint; ReadOnly = ReadNone = false; } if (Info.isReadWrite()) { InOutConstraints += ','; const Expr *InputExpr = S.getOutputExpr(i); llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(), InOutConstraints, InputExpr->getExprLoc()); if (llvm::Type* AdjTy = getTargetHooks().adjustInlineAsmType(*this, OutputConstraint, Arg->getType())) Arg = Builder.CreateBitCast(Arg, AdjTy); if (Info.allowsRegister()) InOutConstraints += llvm::utostr(i); else InOutConstraints += OutputConstraint; InOutArgTypes.push_back(Arg->getType()); InOutArgs.push_back(Arg); } } // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX) // to the return value slot. Only do this when returning in registers. if (isa<MSAsmStmt>(&S)) { const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); if (RetAI.isDirect() || RetAI.isExtend()) { // Make a fake lvalue for the return value slot. LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy); CGM.getTargetCodeGenInfo().addReturnRegisterOutputs( *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes, ResultRegDests, AsmString, S.getNumOutputs()); SawAsmBlock = true; } } for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { const Expr *InputExpr = S.getInputExpr(i); TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i]; if (Info.allowsMemory()) ReadNone = false; if (!Constraints.empty()) Constraints += ','; // Simplify the input constraint. std::string InputConstraint(S.getInputConstraint(i)); InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(), &OutputConstraintInfos); InputConstraint = AddVariableConstraints( InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()), getTarget(), CGM, S, false /* No EarlyClobber */); llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints); // If this input argument is tied to a larger output result, extend the // input to be the same size as the output. The LLVM backend wants to see // the input and output of a matching constraint be the same size. Note // that GCC does not define what the top bits are here. We use zext because // that is usually cheaper, but LLVM IR should really get an anyext someday. if (Info.hasTiedOperand()) { unsigned Output = Info.getTiedOperand(); QualType OutputType = S.getOutputExpr(Output)->getType(); QualType InputTy = InputExpr->getType(); if (getContext().getTypeSize(OutputType) > getContext().getTypeSize(InputTy)) { // Use ptrtoint as appropriate so that we can do our extension. 
if (isa<llvm::PointerType>(Arg->getType())) Arg = Builder.CreatePtrToInt(Arg, IntPtrTy); llvm::Type *OutputTy = ConvertType(OutputType); if (isa<llvm::IntegerType>(OutputTy)) Arg = Builder.CreateZExt(Arg, OutputTy); else if (isa<llvm::PointerType>(OutputTy)) Arg = Builder.CreateZExt(Arg, IntPtrTy); else { assert(OutputTy->isFloatingPointTy() && "Unexpected output type"); Arg = Builder.CreateFPExt(Arg, OutputTy); } } } if (llvm::Type* AdjTy = getTargetHooks().adjustInlineAsmType(*this, InputConstraint, Arg->getType())) Arg = Builder.CreateBitCast(Arg, AdjTy); else CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input) << InputExpr->getType() << InputConstraint; ArgTypes.push_back(Arg->getType()); Args.push_back(Arg); Constraints += InputConstraint; } // Append the "input" part of inout constraints last. for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { ArgTypes.push_back(InOutArgTypes[i]); Args.push_back(InOutArgs[i]); } Constraints += InOutConstraints; // Clobbers for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { StringRef Clobber = S.getClobber(i); if (Clobber == "memory") ReadOnly = ReadNone = false; else if (Clobber != "cc") Clobber = getTarget().getNormalizedGCCRegisterName(Clobber); if (!Constraints.empty()) Constraints += ','; Constraints += "~{"; Constraints += Clobber; Constraints += '}'; } // Add machine specific clobbers std::string MachineClobbers = getTarget().getClobbers(); if (!MachineClobbers.empty()) { if (!Constraints.empty()) Constraints += ','; Constraints += MachineClobbers; } llvm::Type *ResultType; if (ResultRegTypes.empty()) ResultType = VoidTy; else if (ResultRegTypes.size() == 1) ResultType = ResultRegTypes[0]; else ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes); llvm::FunctionType *FTy = llvm::FunctionType::get(ResultType, ArgTypes, false); bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0; llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ? llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT; llvm::InlineAsm *IA = llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect, /* IsAlignStack */ false, AsmDialect); llvm::CallInst *Result = Builder.CreateCall(IA, Args); Result->addAttribute(llvm::AttributeSet::FunctionIndex, llvm::Attribute::NoUnwind); // Attach readnone and readonly attributes. if (!HasSideEffect) { if (ReadNone) Result->addAttribute(llvm::AttributeSet::FunctionIndex, llvm::Attribute::ReadNone); else if (ReadOnly) Result->addAttribute(llvm::AttributeSet::FunctionIndex, llvm::Attribute::ReadOnly); } // Slap the source location of the inline asm into a !srcloc metadata on the // call. if (const GCCAsmStmt *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S)) { Result->setMetadata("srcloc", getAsmSrcLocInfo(gccAsmStmt->getAsmString(), *this)); } else { // At least put the line number on MS inline asm blobs. auto Loc = llvm::ConstantInt::get(Int32Ty, S.getAsmLoc().getRawEncoding()); Result->setMetadata("srcloc", llvm::MDNode::get(getLLVMContext(), llvm::ConstantAsMetadata::get(Loc))); } // Extract all of the register value results from the asm. 
std::vector<llvm::Value*> RegResults; if (ResultRegTypes.size() == 1) { RegResults.push_back(Result); } else { for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) { llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult"); RegResults.push_back(Tmp); } } assert(RegResults.size() == ResultRegTypes.size()); assert(RegResults.size() == ResultTruncRegTypes.size()); assert(RegResults.size() == ResultRegDests.size()); for (unsigned i = 0, e = RegResults.size(); i != e; ++i) { llvm::Value *Tmp = RegResults[i]; // If the result type of the LLVM IR asm doesn't match the result type of // the expression, do the conversion. if (ResultRegTypes[i] != ResultTruncRegTypes[i]) { llvm::Type *TruncTy = ResultTruncRegTypes[i]; // Truncate the integer result to the right size, note that TruncTy can be // a pointer. if (TruncTy->isFloatingPointTy()) Tmp = Builder.CreateFPTrunc(Tmp, TruncTy); else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) { uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy); Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize)); Tmp = Builder.CreateIntToPtr(Tmp, TruncTy); } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) { uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType()); Tmp = Builder.CreatePtrToInt(Tmp, llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize)); Tmp = Builder.CreateTrunc(Tmp, TruncTy); } else if (TruncTy->isIntegerTy()) { Tmp = Builder.CreateTrunc(Tmp, TruncTy); } else if (TruncTy->isVectorTy()) { Tmp = Builder.CreateBitCast(Tmp, TruncTy); } } EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]); } } LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) { const RecordDecl *RD = S.getCapturedRecordDecl(); QualType RecordTy = getContext().getRecordType(RD); // Initialize the captured struct. LValue SlotLV = MakeNaturalAlignAddrLValue( CreateMemTemp(RecordTy, "agg.captured"), RecordTy); RecordDecl::field_iterator CurField = RD->field_begin(); for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(), E = S.capture_init_end(); I != E; ++I, ++CurField) { LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField); if (CurField->hasCapturedVLAType()) { auto VAT = CurField->getCapturedVLAType(); EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV); } else { EmitInitializerForField(*CurField, LV, *I, None); } } return SlotLV; } /// Generate an outlined function for the body of a CapturedStmt, store any /// captured variables into the captured struct, and call the outlined function. llvm::Function * CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) { LValue CapStruct = InitCapturedStruct(S); // Emit the CapturedDecl CodeGenFunction CGF(CGM, true); CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K)); llvm::Function *F = CGF.GenerateCapturedStmtFunction(S); delete CGF.CapturedStmtInfo; // Emit call to the helper function. EmitCallOrInvoke(F, CapStruct.getAddress()); return F; } llvm::Value * CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) { LValue CapStruct = InitCapturedStruct(S); return CapStruct.getAddress(); } /// Creates the outlined function for a CapturedStmt. 
llvm::Function * CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) { assert(CapturedStmtInfo && "CapturedStmtInfo should be set when generating the captured function"); const CapturedDecl *CD = S.getCapturedDecl(); const RecordDecl *RD = S.getCapturedRecordDecl(); SourceLocation Loc = S.getLocStart(); assert(CD->hasBody() && "missing CapturedDecl body"); // Build the argument list. ASTContext &Ctx = CGM.getContext(); FunctionArgList Args; Args.append(CD->param_begin(), CD->param_end()); // Create the function declaration. FunctionType::ExtInfo ExtInfo; const CGFunctionInfo &FuncInfo = CGM.getTypes().arrangeFreeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo, /*IsVariadic=*/false); llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo); llvm::Function *F = llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage, CapturedStmtInfo->getHelperName(), &CGM.getModule()); CGM.SetInternalFunctionAttributes(CD, F, FuncInfo); if (CD->isNothrow()) F->addFnAttr(llvm::Attribute::NoUnwind); // Generate the function. StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(), CD->getBody()->getLocStart()); // Set the context parameter in CapturedStmtInfo. llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()]; assert(DeclPtr && "missing context parameter for CapturedStmt"); CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr)); // Initialize variable-length arrays. LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(), Ctx.getTagDeclType(RD)); for (auto *FD : RD->fields()) { if (FD->hasCapturedVLAType()) { auto *ExprArg = EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getLocStart()).getScalarVal(); auto VAT = FD->getCapturedVLAType(); VLASizeMap[VAT->getSizeExpr()] = ExprArg; } } // If 'this' is captured, load it into CXXThisValue. if (CapturedStmtInfo->isCXXThisExprCaptured()) { FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl(); LValue ThisLValue = EmitLValueForField(Base, FD); CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal(); } PGO.assignRegionCounters(CD, F); CapturedStmtInfo->EmitBody(*this, CD->getBody()); FinishFunction(CD->getBodyRBrace()); return F; }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGExpr.cpp
//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code to emit Expr nodes as LLVM code. // //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" #include "CGCXXABI.h" #include "CGCall.h" #include "CGDebugInfo.h" #include "CGObjCRuntime.h" #include "CGOpenMPRuntime.h" #include "CGHLSLRuntime.h" // HLSL Change #include "dxc/HLSL/HLOperations.h" // HLSL Change #include "dxc/DXIL/DxilUtil.h" // HLSL Change #include "dxc/DXIL/DxilResource.h" // HLSL Change #include "CGRecordLayout.h" #include "CodeGenModule.h" #include "TargetInfo.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclObjC.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/MDBuilder.h" #include "llvm/Support/ConvertUTF.h" #include "llvm/Support/MathExtras.h" using namespace clang; using namespace CodeGen; //===--------------------------------------------------------------------===// // Miscellaneous Helper Methods //===--------------------------------------------------------------------===// llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) { unsigned addressSpace = cast<llvm::PointerType>(value->getType())->getAddressSpace(); llvm::PointerType *destType = Int8PtrTy; if (addressSpace) destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace); if (value->getType() == destType) return value; return Builder.CreateBitCast(value, destType); } /// CreateTempAlloca - This creates a alloca and inserts it into the entry /// block. llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, const Twine &Name) { if (!Builder.isNamePreserving()) return new llvm::AllocaInst(Ty, nullptr, "", AllocaInsertPt); return new llvm::AllocaInst(Ty, nullptr, Name, AllocaInsertPt); } void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var, llvm::Value *Init) { auto *Store = new llvm::StoreInst(Init, Var); llvm::BasicBlock *Block = AllocaInsertPt->getParent(); Block->getInstList().insertAfter(&*AllocaInsertPt, Store); } llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty, const Twine &Name) { llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name); // FIXME: Should we prefer the preferred type alignment here? CharUnits Align = getContext().getTypeAlignInChars(Ty); Alloc->setAlignment(Align.getQuantity()); return Alloc; } llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty, const Twine &Name) { llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name); // FIXME: Should we prefer the preferred type alignment here? CharUnits Align = getContext().getTypeAlignInChars(Ty); Alloc->setAlignment(Align.getQuantity()); return Alloc; } /// EvaluateExprAsBool - Perform the usual unary conversions on the specified /// expression and compare the result against zero, returning an Int1Ty value. 
llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { PGO.setCurrentStmt(E); if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) { llvm::Value *MemPtr = EmitScalarExpr(E); return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT); } QualType BoolTy = getContext().BoolTy; if (!E->getType()->isAnyComplexType()) return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy); return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy); } /// EmitIgnoredExpr - Emit code to compute the specified expression, /// ignoring the result. void CodeGenFunction::EmitIgnoredExpr(const Expr *E) { if (E->isRValue()) return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true); // Just emit it as an l-value and drop the result. EmitLValue(E); } /// EmitAnyExpr - Emit code to compute the specified expression which /// can have any type. The result is returned as an RValue struct. /// If this is an aggregate expression, AggSlot indicates where the /// result should be returned. RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot aggSlot, bool ignoreResult) { switch (getEvaluationKind(E->getType())) { case TEK_Scalar: return RValue::get(EmitScalarExpr(E, ignoreResult)); case TEK_Complex: return RValue::getComplex(EmitComplexExpr(E, ignoreResult, ignoreResult)); case TEK_Aggregate: if (!ignoreResult && aggSlot.isIgnored()) aggSlot = CreateAggTemp(E->getType(), "agg-temp"); EmitAggExpr(E, aggSlot); return aggSlot.asRValue(); } llvm_unreachable("bad evaluation kind"); } /// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will /// always be accessible even if no aggregate location is provided. RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) { AggValueSlot AggSlot = AggValueSlot::ignored(); if (hasAggregateEvaluationKind(E->getType())) AggSlot = CreateAggTemp(E->getType(), "agg.tmp"); return EmitAnyExpr(E, AggSlot); } /// EmitAnyExprToMem - Evaluate an expression into a given memory /// location. void CodeGenFunction::EmitAnyExprToMem(const Expr *E, llvm::Value *Location, Qualifiers Quals, bool IsInit) { // FIXME: This function should take an LValue as an argument. switch (getEvaluationKind(E->getType())) { case TEK_Complex: EmitComplexExprIntoLValue(E, MakeNaturalAlignAddrLValue(Location, E->getType()), /*isInit*/ false); return; case TEK_Aggregate: { CharUnits Alignment = getContext().getTypeAlignInChars(E->getType()); EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals, AggValueSlot::IsDestructed_t(IsInit), AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsAliased_t(!IsInit))); return; } case TEK_Scalar: { RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false)); LValue LV = MakeAddrLValue(Location, E->getType()); EmitStoreThroughLValue(RV, LV); return; } } llvm_unreachable("bad evaluation kind"); } static void pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *E, llvm::Value *ReferenceTemporary) { // Objective-C++ ARC: // If we are binding a reference to a temporary that has ownership, we // need to perform retain/release operations on the temporary. // // FIXME: This should be looking at E, not M. if (CGF.getLangOpts().ObjCAutoRefCount && M->getType()->isObjCLifetimeType()) { QualType ObjCARCReferenceLifetimeType = M->getType(); switch (Qualifiers::ObjCLifetime Lifetime = ObjCARCReferenceLifetimeType.getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: // Carry on to normal cleanup handling. 
break; case Qualifiers::OCL_Autoreleasing: // Nothing to do; cleaned up by an autorelease pool. return; case Qualifiers::OCL_Strong: case Qualifiers::OCL_Weak: switch (StorageDuration Duration = M->getStorageDuration()) { case SD_Static: // Note: we intentionally do not register a cleanup to release // the object on program termination. return; case SD_Thread: // FIXME: We should probably register a cleanup in this case. return; case SD_Automatic: case SD_FullExpression: CodeGenFunction::Destroyer *Destroy; CleanupKind CleanupKind; if (Lifetime == Qualifiers::OCL_Strong) { const ValueDecl *VD = M->getExtendingDecl(); bool Precise = VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>(); CleanupKind = CGF.getARCCleanupKind(); Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise : &CodeGenFunction::destroyARCStrongImprecise; } else { // __weak objects always get EH cleanups; otherwise, exceptions // could cause really nasty crashes instead of mere leaks. CleanupKind = NormalAndEHCleanup; Destroy = &CodeGenFunction::destroyARCWeak; } if (Duration == SD_FullExpression) CGF.pushDestroy(CleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType, *Destroy, CleanupKind & EHCleanup); else CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType, *Destroy, CleanupKind & EHCleanup); return; case SD_Dynamic: llvm_unreachable("temporary cannot have dynamic storage duration"); } llvm_unreachable("unknown storage duration"); } } CXXDestructorDecl *ReferenceTemporaryDtor = nullptr; if (const RecordType *RT = E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) { // Get the destructor for the reference temporary. auto *ClassDecl = cast<CXXRecordDecl>(RT->getDecl()); if (!ClassDecl->hasTrivialDestructor()) ReferenceTemporaryDtor = ClassDecl->getDestructor(); } if (!ReferenceTemporaryDtor) return; // Call the destructor for the temporary. switch (M->getStorageDuration()) { case SD_Static: case SD_Thread: { llvm::Constant *CleanupFn; llvm::Constant *CleanupArg; if (E->getType()->isArrayType()) { CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper( cast<llvm::Constant>(ReferenceTemporary), E->getType(), CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions, dyn_cast_or_null<VarDecl>(M->getExtendingDecl())); CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy); } else { CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor, StructorType::Complete); CleanupArg = cast<llvm::Constant>(ReferenceTemporary); } CGF.CGM.getCXXABI().registerGlobalDtor( CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg); break; } case SD_FullExpression: CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions); break; case SD_Automatic: CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(), CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions); break; case SD_Dynamic: llvm_unreachable("temporary cannot have dynamic storage duration"); } } static llvm::Value * createReferenceTemporary(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M, const Expr *Inner) { switch (M->getStorageDuration()) { case SD_FullExpression: case SD_Automatic: { // If we have a constant temporary array or record try to promote it into a // constant global under the same rules a normal constant would've been // promoted. This is easier on the optimizer and generally emits fewer // instructions. 
QualType Ty = Inner->getType(); if (CGF.CGM.getCodeGenOpts().MergeAllConstants && (Ty->isArrayType() || Ty->isRecordType()) && CGF.CGM.isTypeConstant(Ty, true)) if (llvm::Constant *Init = CGF.CGM.EmitConstantExpr(Inner, Ty, &CGF)) { auto *GV = new llvm::GlobalVariable( CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp"); GV->setAlignment( CGF.getContext().getTypeAlignInChars(Ty).getQuantity()); // FIXME: Should we put the new global into a COMDAT? return GV; } return CGF.CreateMemTemp(Ty, "ref.tmp"); } case SD_Thread: case SD_Static: return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner); case SD_Dynamic: llvm_unreachable("temporary can't have dynamic storage duration"); } llvm_unreachable("unknown storage duration"); } LValue CodeGenFunction:: EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) { const Expr *E = M->GetTemporaryExpr(); // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so // as that will cause the lifetime adjustment to be lost for ARC if (getLangOpts().ObjCAutoRefCount && M->getType()->isObjCLifetimeType() && M->getType().getObjCLifetime() != Qualifiers::OCL_None && M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) { llvm::Value *Object = createReferenceTemporary(*this, M, E); if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) { Object = llvm::ConstantExpr::getBitCast( Var, ConvertTypeForMem(E->getType())->getPointerTo()); // We should not have emitted the initializer for this temporary as a // constant. assert(!Var->hasInitializer()); Var->setInitializer(CGM.EmitNullConstant(E->getType())); } LValue RefTempDst = MakeAddrLValue(Object, M->getType()); switch (getEvaluationKind(E->getType())) { default: llvm_unreachable("expected scalar or aggregate expression"); case TEK_Scalar: EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false); break; case TEK_Aggregate: { CharUnits Alignment = getContext().getTypeAlignInChars(E->getType()); EmitAggExpr(E, AggValueSlot::forAddr(Object, Alignment, E->getType().getQualifiers(), AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased)); break; } } pushTemporaryCleanup(*this, M, E, Object); return RefTempDst; } SmallVector<const Expr *, 2> CommaLHSs; SmallVector<SubobjectAdjustment, 2> Adjustments; E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); for (const auto &Ignored : CommaLHSs) EmitIgnoredExpr(Ignored); if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) { if (opaque->getType()->isRecordType()) { assert(Adjustments.empty()); return EmitOpaqueValueLValue(opaque); } } // Create and initialize the reference temporary. llvm::Value *Object = createReferenceTemporary(*this, M, E); if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object)) { Object = llvm::ConstantExpr::getBitCast( Var, ConvertTypeForMem(E->getType())->getPointerTo()); // If the temporary is a global and has a constant initializer or is a // constant temporary that we promoted to a global, we may have already // initialized it. if (!Var->hasInitializer()) { Var->setInitializer(CGM.EmitNullConstant(E->getType())); EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); } } else { EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true); } pushTemporaryCleanup(*this, M, E, Object); // Perform derived-to-base casts and/or field accesses, to get from the // temporary object we created (and, potentially, for which we extended // the lifetime) to the subobject we're binding the reference to. 
for (unsigned I = Adjustments.size(); I != 0; --I) { SubobjectAdjustment &Adjustment = Adjustments[I-1]; switch (Adjustment.Kind) { case SubobjectAdjustment::DerivedToBaseAdjustment: Object = GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass, Adjustment.DerivedToBase.BasePath->path_begin(), Adjustment.DerivedToBase.BasePath->path_end(), /*NullCheckValue=*/ false, E->getExprLoc()); break; case SubobjectAdjustment::FieldAdjustment: { LValue LV = MakeAddrLValue(Object, E->getType()); LV = EmitLValueForField(LV, Adjustment.Field); assert(LV.isSimple() && "materialized temporary field is not a simple lvalue"); Object = LV.getAddress(); break; } case SubobjectAdjustment::MemberPointerAdjustment: { llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS); Object = CGM.getCXXABI().EmitMemberDataPointerAddress( *this, E, Object, Ptr, Adjustment.Ptr.MPT); break; } } } return MakeAddrLValue(Object, M->getType()); } RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) { // Emit the expression as an lvalue. LValue LV = EmitLValue(E); assert(LV.isSimple()); llvm::Value *Value = LV.getAddress(); if (sanitizePerformTypeCheck() && !E->getType()->isFunctionType()) { // C++11 [dcl.ref]p5 (as amended by core issue 453): // If a glvalue to which a reference is directly bound designates neither // an existing object or function of an appropriate type nor a region of // storage of suitable size and alignment to contain an object of the // reference's type, the behavior is undefined. QualType Ty = E->getType(); EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty); } return RValue::get(Value); } /// getAccessedFieldNo - Given an encoded value and a result number, return the /// input field number being accessed. unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts) { return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx)) ->getZExtValue(); } /// Emit the hash_16_bytes function from include/llvm/ADT/Hashing.h. static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low, llvm::Value *High) { llvm::Value *KMul = Builder.getInt64(0x9ddfea08eb382d69ULL); llvm::Value *K47 = Builder.getInt64(47); llvm::Value *A0 = Builder.CreateMul(Builder.CreateXor(Low, High), KMul); llvm::Value *A1 = Builder.CreateXor(Builder.CreateLShr(A0, K47), A0); llvm::Value *B0 = Builder.CreateMul(Builder.CreateXor(High, A1), KMul); llvm::Value *B1 = Builder.CreateXor(Builder.CreateLShr(B0, K47), B0); return Builder.CreateMul(B1, KMul); } bool CodeGenFunction::sanitizePerformTypeCheck() const { return SanOpts.has(SanitizerKind::Null) || SanOpts.has(SanitizerKind::Alignment) || SanOpts.has(SanitizerKind::ObjectSize) || SanOpts.has(SanitizerKind::Vptr); } void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *Address, QualType Ty, CharUnits Alignment, bool SkipNullCheck) { if (!sanitizePerformTypeCheck()) return; // Don't check pointers outside the default address space. The null check // isn't correct, the object-size check isn't supported by LLVM, and we can't // communicate the addresses to the runtime handler for the vptr check. 
if (Address->getType()->getPointerAddressSpace()) return; SanitizerScope SanScope(this); SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks; llvm::BasicBlock *Done = nullptr; bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast || TCK == TCK_UpcastToVirtualBase; if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) && !SkipNullCheck) { // The glvalue must not be an empty glvalue. llvm::Value *IsNonNull = Builder.CreateICmpNE( Address, llvm::Constant::getNullValue(Address->getType())); if (AllowNullPointers) { // When performing pointer casts, it's OK if the value is null. // Skip the remaining checks in that case. Done = createBasicBlock("null"); llvm::BasicBlock *Rest = createBasicBlock("not.null"); Builder.CreateCondBr(IsNonNull, Rest, Done); EmitBlock(Rest); } else { Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null)); } } if (SanOpts.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) { uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity(); // The glvalue must refer to a large enough storage region. // FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation // to check this. // FIXME: Get object address space llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy }; llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys); llvm::Value *Min = Builder.getFalse(); llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy); llvm::Value *LargeEnough = Builder.CreateICmpUGE(Builder.CreateCall(F, {CastAddr, Min}), llvm::ConstantInt::get(IntPtrTy, Size)); Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize)); } uint64_t AlignVal = 0; if (SanOpts.has(SanitizerKind::Alignment)) { AlignVal = Alignment.getQuantity(); if (!Ty->isIncompleteType() && !AlignVal) AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity(); // The glvalue must be suitably aligned. if (AlignVal) { llvm::Value *Align = Builder.CreateAnd(Builder.CreatePtrToInt(Address, IntPtrTy), llvm::ConstantInt::get(IntPtrTy, AlignVal - 1)); llvm::Value *Aligned = Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0)); Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment)); } } if (Checks.size() > 0) { llvm::Constant *StaticData[] = { EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty), llvm::ConstantInt::get(SizeTy, AlignVal), llvm::ConstantInt::get(Int8Ty, TCK) }; EmitCheck(Checks, "type_mismatch", StaticData, Address); } // If possible, check that the vptr indicates that there is a subobject of // type Ty at offset zero within this object. // // C++11 [basic.life]p5,6: // [For storage which does not refer to an object within its lifetime] // The program has undefined behavior if: // -- the [pointer or glvalue] is used to access a non-static data member // or call a non-static member function CXXRecordDecl *RD = Ty->getAsCXXRecordDecl(); if (SanOpts.has(SanitizerKind::Vptr) && (TCK == TCK_MemberAccess || TCK == TCK_MemberCall || TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference || TCK == TCK_UpcastToVirtualBase) && RD && RD->hasDefinition() && RD->isDynamicClass()) { // Compute a hash of the mangled name of the type. // // FIXME: This is not guaranteed to be deterministic! Move to a // fingerprinting mechanism once LLVM provides one. For the time // being the implementation happens to be deterministic. 
SmallString<64> MangledName; llvm::raw_svector_ostream Out(MangledName); CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(), Out); // Blacklist based on the mangled type. if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType( Out.str())) { llvm::hash_code TypeHash = hash_value(Out.str()); // Load the vptr, and compute hash_16_bytes(TypeHash, vptr). llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash); llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0); llvm::Value *VPtrAddr = Builder.CreateBitCast(Address, VPtrTy); llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr); llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty); llvm::Value *Hash = emitHash16Bytes(Builder, Low, High); Hash = Builder.CreateTrunc(Hash, IntPtrTy); // Look the hash up in our cache. const int CacheSize = 128; llvm::Type *HashTable = llvm::ArrayType::get(IntPtrTy, CacheSize); llvm::Value *Cache = CGM.CreateRuntimeVariable(HashTable, "__ubsan_vptr_type_cache"); llvm::Value *Slot = Builder.CreateAnd(Hash, llvm::ConstantInt::get(IntPtrTy, CacheSize-1)); llvm::Value *Indices[] = { Builder.getInt32(0), Slot }; llvm::Value *CacheVal = Builder.CreateLoad(Builder.CreateInBoundsGEP(Cache, Indices)); // If the hash isn't in the cache, call a runtime handler to perform the // hard work of checking whether the vptr is for an object of the right // type. This will either fill in the cache and return, or produce a // diagnostic. llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash); llvm::Constant *StaticData[] = { EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty), CGM.GetAddrOfRTTIDescriptor(Ty.getUnqualifiedType()), llvm::ConstantInt::get(Int8Ty, TCK) }; llvm::Value *DynamicData[] = { Address, Hash }; EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr), "dynamic_type_cache_miss", StaticData, DynamicData); } } if (Done) { Builder.CreateBr(Done); EmitBlock(Done); } } /// Determine whether this expression refers to a flexible array member in a /// struct. We disable array bounds checks for such members. static bool isFlexibleArrayMemberExpr(const Expr *E) { // For compatibility with existing code, we treat arrays of length 0 or // 1 as flexible array members. const ArrayType *AT = E->getType()->castAsArrayTypeUnsafe(); if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) { if (CAT->getSize().ugt(1)) return false; } else if (!isa<IncompleteArrayType>(AT)) return false; E = E->IgnoreParens(); // A flexible array member must be the last member in the class. if (const auto *ME = dyn_cast<MemberExpr>(E)) { // FIXME: If the base type of the member expr is not FD->getParent(), // this should not be treated as a flexible array member access. if (const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) { RecordDecl::field_iterator FI( DeclContext::decl_iterator(const_cast<FieldDecl *>(FD))); return ++FI == FD->getParent()->field_end(); } } return false; } /// If Base is known to point to the start of an array, return the length of /// that array. Return 0 if the length cannot be determined. static llvm::Value *getArrayIndexingBound( CodeGenFunction &CGF, const Expr *Base, QualType &IndexedType) { // For the vector indexing extension, the bound is the number of elements. 
if (const VectorType *VT = Base->getType()->getAs<VectorType>()) { IndexedType = Base->getType(); return CGF.Builder.getInt32(VT->getNumElements()); } Base = Base->IgnoreParens(); if (const auto *CE = dyn_cast<CastExpr>(Base)) { if (CE->getCastKind() == CK_ArrayToPointerDecay && !isFlexibleArrayMemberExpr(CE->getSubExpr())) { IndexedType = CE->getSubExpr()->getType(); const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe(); if (const auto *CAT = dyn_cast<ConstantArrayType>(AT)) return CGF.Builder.getInt(CAT->getSize()); else if (const auto *VAT = dyn_cast<VariableArrayType>(AT)) return CGF.getVLASize(VAT).first; } } return nullptr; } void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed) { assert(SanOpts.has(SanitizerKind::ArrayBounds) && "should not be called unless adding bounds checks"); SanitizerScope SanScope(this); QualType IndexedType; llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType); if (!Bound) return; bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType(); llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned); llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false); llvm::Constant *StaticData[] = { EmitCheckSourceLocation(E->getExprLoc()), EmitCheckTypeDescriptor(IndexedType), EmitCheckTypeDescriptor(IndexType) }; llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal) : Builder.CreateICmpULE(IndexVal, BoundVal); EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds", StaticData, Index); } CodeGenFunction::ComplexPairTy CodeGenFunction:: EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre) { ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc()); llvm::Value *NextVal; if (isa<llvm::IntegerType>(InVal.first->getType())) { uint64_t AmountVal = isInc ? 1 : -1; NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true); // Add the inc/dec to the real part. NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); } else { QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType(); llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1); if (!isInc) FVal.changeSign(); NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal); // Add the inc/dec to the real part. NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); } ComplexPairTy IncVal(NextVal, InVal.second); // Store the updated result through the lvalue. EmitStoreOfComplex(IncVal, LV, /*init*/ false); // If this is a postinc, return the value read from memory, otherwise use the // updated value. return isPre ? IncVal : InVal; } //===----------------------------------------------------------------------===// // LValue Expression Emission //===----------------------------------------------------------------------===// RValue CodeGenFunction::GetUndefRValue(QualType Ty) { if (Ty->isVoidType()) return RValue::get(nullptr); switch (getEvaluationKind(Ty)) { case TEK_Complex: { llvm::Type *EltTy = ConvertType(Ty->castAs<ComplexType>()->getElementType()); llvm::Value *U = llvm::UndefValue::get(EltTy); return RValue::getComplex(std::make_pair(U, U)); } // If this is a use of an undefined aggregate type, the aggregate must have an // identifiable address. Just because the contents of the value are undefined // doesn't mean that the address can't be taken and compared. 
case TEK_Aggregate: { llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp"); return RValue::getAggregate(DestPtr); } case TEK_Scalar: return RValue::get(llvm::UndefValue::get(ConvertType(Ty))); } llvm_unreachable("bad evaluation kind"); } RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E, const char *Name) { ErrorUnsupported(E, Name); return GetUndefRValue(E->getType()); } LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, const char *Name) { ErrorUnsupported(E, Name); llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType()); } LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) { LValue LV; if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E)) LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true); else LV = EmitLValue(E); if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) EmitTypeCheck(TCK, E->getExprLoc(), LV.getAddress(), E->getType(), LV.getAlignment()); return LV; } /// EmitLValue - Emit code to compute a designator that specifies the location /// of the expression. /// /// This can return one of two things: a simple address or a bitfield reference. /// In either case, the LLVM Value* in the LValue structure is guaranteed to be /// an LLVM pointer type. /// /// If this returns a bitfield reference, nothing about the pointee type of the /// LLVM value is known: For example, it may not be a pointer to an integer. /// /// If this returns a normal address, and if the lvalue's C type is fixed size, /// this method guarantees that the returned pointer type will point to an LLVM /// type of the same size of the lvalue's type. If the lvalue has a variable /// length type, this is not possible. 
/// LValue CodeGenFunction::EmitLValue(const Expr *E) { ApplyDebugLocation DL(*this, E); switch (E->getStmtClass()) { default: return EmitUnsupportedLValue(E, "l-value expression"); case Expr::ObjCPropertyRefExprClass: llvm_unreachable("cannot emit a property reference directly"); case Expr::ObjCSelectorExprClass: return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E)); case Expr::ObjCIsaExprClass: return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E)); case Expr::BinaryOperatorClass: return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); case Expr::CompoundAssignOperatorClass: { QualType Ty = E->getType(); if (const AtomicType *AT = Ty->getAs<AtomicType>()) Ty = AT->getValueType(); if (!Ty->isAnyComplexType()) return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E)); } case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: case Expr::UserDefinedLiteralClass: return EmitCallExprLValue(cast<CallExpr>(E)); case Expr::VAArgExprClass: return EmitVAArgExprLValue(cast<VAArgExpr>(E)); case Expr::DeclRefExprClass: return EmitDeclRefLValue(cast<DeclRefExpr>(E)); case Expr::ParenExprClass: return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); case Expr::GenericSelectionExprClass: return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr()); case Expr::PredefinedExprClass: return EmitPredefinedLValue(cast<PredefinedExpr>(E)); case Expr::StringLiteralClass: return EmitStringLiteralLValue(cast<StringLiteral>(E)); case Expr::ObjCEncodeExprClass: return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); case Expr::PseudoObjectExprClass: return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E)); case Expr::InitListExprClass: return EmitInitListLValue(cast<InitListExpr>(E)); case Expr::CXXTemporaryObjectExprClass: case Expr::CXXConstructExprClass: return EmitCXXConstructLValue(cast<CXXConstructExpr>(E)); case Expr::CXXBindTemporaryExprClass: return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E)); case Expr::CXXUuidofExprClass: return EmitCXXUuidofLValue(cast<CXXUuidofExpr>(E)); case Expr::LambdaExprClass: return EmitLambdaLValue(cast<LambdaExpr>(E)); case Expr::ExprWithCleanupsClass: { const auto *cleanups = cast<ExprWithCleanups>(E); enterFullExpression(cleanups); RunCleanupsScope Scope(*this); return EmitLValue(cleanups->getSubExpr()); } case Expr::CXXDefaultArgExprClass: return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr()); case Expr::CXXDefaultInitExprClass: { CXXDefaultInitExprScope Scope(*this); return EmitLValue(cast<CXXDefaultInitExpr>(E)->getExpr()); } case Expr::CXXTypeidExprClass: return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E)); case Expr::ObjCMessageExprClass: return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); case Expr::ObjCIvarRefExprClass: return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); case Expr::StmtExprClass: return EmitStmtExprLValue(cast<StmtExpr>(E)); case Expr::UnaryOperatorClass: return EmitUnaryOpLValue(cast<UnaryOperator>(E)); case Expr::ArraySubscriptExprClass: return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); case Expr::ExtVectorElementExprClass: return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); // HLSL Change Starts case Expr::ExtMatrixElementExprClass: return EmitExtMatrixElementExpr(cast<ExtMatrixElementExpr>(E)); case Expr::HLSLVectorElementExprClass: return EmitHLSLVectorElementExpr(cast<HLSLVectorElementExpr>(E)); case Expr::CXXThisExprClass: return MakeAddrLValue(LoadCXXThis(), 
E->getType()); // HLSL Change Ends case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E)); case Expr::CompoundLiteralExprClass: return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); case Expr::ConditionalOperatorClass: return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E)); case Expr::BinaryConditionalOperatorClass: return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E)); case Expr::ChooseExprClass: return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr()); case Expr::OpaqueValueExprClass: return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E)); case Expr::SubstNonTypeTemplateParmExprClass: return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement()); case Expr::ImplicitCastExprClass: case Expr::CStyleCastExprClass: case Expr::CXXFunctionalCastExprClass: case Expr::CXXStaticCastExprClass: case Expr::CXXDynamicCastExprClass: case Expr::CXXReinterpretCastExprClass: case Expr::CXXConstCastExprClass: case Expr::ObjCBridgedCastExprClass: return EmitCastLValue(cast<CastExpr>(E)); case Expr::MaterializeTemporaryExprClass: return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E)); } } /// Given an object of the given canonical type, can we safely copy a /// value out of it based on its initializer? static bool isConstantEmittableObjectType(QualType type) { assert(type.isCanonical()); assert(!type->isReferenceType()); // Must be const-qualified but non-volatile. Qualifiers qs = type.getLocalQualifiers(); if (!qs.hasConst() || qs.hasVolatile()) return false; // Otherwise, all object types satisfy this except C++ classes with // mutable subobjects or non-trivial copy/destroy behavior. if (const auto *RT = dyn_cast<RecordType>(type)) if (const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) if (RD->hasMutableFields() || !RD->isTrivial()) return false; return true; } /// Can we constant-emit a load of a reference to a variable of the /// given type? This is different from predicates like /// Decl::isUsableInConstantExpressions because we do want it to apply /// in situations that don't necessarily satisfy the language's rules /// for this (e.g. C++'s ODR-use rules). For example, we want to able /// to do this with const float variables even if those variables /// aren't marked 'constexpr'. enum ConstantEmissionKind { CEK_None, CEK_AsReferenceOnly, CEK_AsValueOrReference, CEK_AsValueOnly }; static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) { type = type.getCanonicalType(); if (const auto *ref = dyn_cast<ReferenceType>(type)) { if (isConstantEmittableObjectType(ref->getPointeeType())) return CEK_AsValueOrReference; return CEK_AsReferenceOnly; } if (isConstantEmittableObjectType(type)) return CEK_AsValueOnly; return CEK_None; } /// Try to emit a reference to the given value without producing it as /// an l-value. This is actually more than an optimization: we can't /// produce an l-value for variables that we never actually captured /// in a block or lambda, which means const int variables or constexpr /// literals or similar. CodeGenFunction::ConstantEmission CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) { ValueDecl *value = refExpr->getDecl(); // The value needs to be an enum constant or a constant variable. 
ConstantEmissionKind CEK; if (isa<ParmVarDecl>(value)) { CEK = CEK_None; } else if (auto *var = dyn_cast<VarDecl>(value)) { CEK = checkVarTypeForConstantEmission(var->getType()); } else if (isa<EnumConstantDecl>(value)) { CEK = CEK_AsValueOnly; } else { CEK = CEK_None; } if (CEK == CEK_None) return ConstantEmission(); Expr::EvalResult result; bool resultIsReference; QualType resultType; // It's best to evaluate all the way as an r-value if that's permitted. if (CEK != CEK_AsReferenceOnly && refExpr->EvaluateAsRValue(result, getContext())) { resultIsReference = false; resultType = refExpr->getType(); // Otherwise, try to evaluate as an l-value. } else if (CEK != CEK_AsValueOnly && refExpr->EvaluateAsLValue(result, getContext())) { resultIsReference = true; resultType = value->getType(); // Failure. } else { return ConstantEmission(); } // In any case, if the initializer has side-effects, abandon ship. if (result.HasSideEffects) return ConstantEmission(); // Emit as a constant. llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this); // Make sure we emit a debug reference to the global variable. // This should probably fire even for if (isa<VarDecl>(value)) { if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value))) EmitDeclRefExprDbgValue(refExpr, C); } else { assert(isa<EnumConstantDecl>(value)); EmitDeclRefExprDbgValue(refExpr, C); } // If we emitted a reference constant, we need to dereference that. if (resultIsReference) return ConstantEmission::forReference(C); return ConstantEmission::forValue(C); } llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue, SourceLocation Loc) { return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(), lvalue.getAlignment().getQuantity(), lvalue.getType(), Loc, lvalue.getTBAAInfo(), lvalue.getTBAABaseType(), lvalue.getTBAAOffset()); } static bool hasBooleanRepresentation(QualType Ty) { if (Ty->isBooleanType()) return true; if (const EnumType *ET = Ty->getAs<EnumType>()) return ET->getDecl()->getIntegerType()->isBooleanType(); if (const AtomicType *AT = Ty->getAs<AtomicType>()) return hasBooleanRepresentation(AT->getValueType()); return false; } // HLSL Change Begin. static bool hasBooleanScalarOrVectorRepresentation(QualType Ty) { if (hlsl::IsHLSLVecType(Ty)) return hasBooleanRepresentation(hlsl::GetElementTypeOrType(Ty)); return hasBooleanRepresentation(Ty); } // HLSL Change End. 
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty, llvm::APInt &Min, llvm::APInt &End, bool StrictEnums) { const EnumType *ET = Ty->getAs<EnumType>(); bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums && ET && !ET->getDecl()->isFixed(); bool IsBool = hasBooleanRepresentation(Ty); if (!IsBool && !IsRegularCPlusPlusEnum) return false; if (IsBool) { Min = llvm::APInt(CGF.getContext().getTypeSize(Ty), 0); End = llvm::APInt(CGF.getContext().getTypeSize(Ty), 2); } else { const EnumDecl *ED = ET->getDecl(); llvm::Type *LTy = CGF.ConvertTypeForMem(ED->getIntegerType()); unsigned Bitwidth = LTy->getScalarSizeInBits(); unsigned NumNegativeBits = ED->getNumNegativeBits(); unsigned NumPositiveBits = ED->getNumPositiveBits(); if (NumNegativeBits) { unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1); assert(NumBits <= Bitwidth); End = llvm::APInt(Bitwidth, 1) << (NumBits - 1); Min = -End; } else { assert(NumPositiveBits <= Bitwidth); End = llvm::APInt(Bitwidth, 1) << NumPositiveBits; Min = llvm::APInt(Bitwidth, 0); } } return true; } llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) { llvm::APInt Min, End; if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums)) return nullptr; llvm::MDBuilder MDHelper(getLLVMContext()); return MDHelper.createRange(Min, End); } static bool ShouldEmitRangeMD(llvm::Value *Value, QualType Ty) { if (hasBooleanRepresentation(Ty)) return cast<llvm::IntegerType>(Value->getType())->getBitWidth() != 1; return true; } llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, unsigned Alignment, QualType Ty, SourceLocation Loc, llvm::MDNode *TBAAInfo, QualType TBAABaseType, uint64_t TBAAOffset) { // For better performance, handle vector loads differently. if (Ty->isVectorType()) { llvm::Value *V; const llvm::Type *EltTy = cast<llvm::PointerType>(Addr->getType())->getElementType(); const auto *VTy = cast<llvm::VectorType>(EltTy); // Handle vectors of size 3, like size 4 for better performance. if (VTy->getNumElements() == 3) { // Bitcast to vec4 type. llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(), 4); llvm::PointerType *ptVec4Ty = llvm::PointerType::get(vec4Ty, (cast<llvm::PointerType>( Addr->getType()))->getAddressSpace()); llvm::Value *Cast = Builder.CreateBitCast(Addr, ptVec4Ty, "castToVec4"); // Now load value. llvm::Value *LoadVal = Builder.CreateLoad(Cast, Volatile, "loadVec4"); // Shuffle vector to get vec3. llvm::Constant *Mask[] = { llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0), llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 1), llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 2) }; llvm::Value *MaskV = llvm::ConstantVector::get(Mask); V = Builder.CreateShuffleVector(LoadVal, llvm::UndefValue::get(vec4Ty), MaskV, "extractVec"); return EmitFromMemory(V, Ty); } } // Atomic operations have to be done on integral types. if (Ty->isAtomicType() || typeIsSuitableForInlineAtomic(Ty, Volatile)) { LValue lvalue = LValue::MakeAddr(Addr, Ty, CharUnits::fromQuantity(Alignment), getContext(), TBAAInfo); return EmitAtomicLoad(lvalue, Loc).getScalarVal(); } // HLSL Change Begins if (hlsl::IsHLSLMatType(Ty)) { // Use matrix load to keep major info. 
return CGM.getHLSLRuntime().EmitHLSLMatrixLoad(*this, Addr, Ty); } // HLSL Change Ends llvm::LoadInst *Load = Builder.CreateLoad(Addr); if (Volatile) Load->setVolatile(true); if (Alignment) Load->setAlignment(Alignment); if (TBAAInfo) { llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo, TBAAOffset); if (TBAAPath) CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/); } bool NeedsBoolCheck = SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty); bool NeedsEnumCheck = SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>(); if (NeedsBoolCheck || NeedsEnumCheck) { SanitizerScope SanScope(this); llvm::APInt Min, End; if (getRangeForType(*this, Ty, Min, End, true)) { --End; llvm::Value *Check; if (!Min) Check = Builder.CreateICmpULE( Load, llvm::ConstantInt::get(getLLVMContext(), End)); else { llvm::Value *Upper = Builder.CreateICmpSLE( Load, llvm::ConstantInt::get(getLLVMContext(), End)); llvm::Value *Lower = Builder.CreateICmpSGE( Load, llvm::ConstantInt::get(getLLVMContext(), Min)); Check = Builder.CreateAnd(Upper, Lower); } llvm::Constant *StaticArgs[] = { EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty) }; SanitizerMask Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool; EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs, EmitCheckValue(Load)); } } else if (CGM.getCodeGenOpts().OptimizationLevel > 0 && ShouldEmitRangeMD(Load, Ty)) if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty)) Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo); return EmitFromMemory(Load, Ty); } llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) { // HLSL Change Begin. // Bool scalar and vectors have a different representation in memory than in registers. if (hasBooleanScalarOrVectorRepresentation(Ty)) { if (Value->getType()->getScalarType()->isIntegerTy(1)) return Builder.CreateZExt(Value, ConvertTypeForMem(Ty), "frombool"); } // HLSL Change End. return Value; } llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) { // HLSL Change Begin. // Bool scalar and vectors have a different representation in memory than in registers. if (hasBooleanScalarOrVectorRepresentation(Ty)) { // Use ne v, 0 to convert to i1 instead of trunc. return Builder.CreateICmpNE( Value, llvm::ConstantVector::getNullValue(Value->getType()), "tobool"); } // HLSL Change End. return Value; } void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, bool Volatile, unsigned Alignment, QualType Ty, llvm::MDNode *TBAAInfo, bool isInit, QualType TBAABaseType, uint64_t TBAAOffset) { // Handle vectors differently to get better performance. if (Ty->isVectorType()) { llvm::Type *SrcTy = Value->getType(); auto *VecTy = cast<llvm::VectorType>(SrcTy); // Handle vec3 special. if (VecTy->getNumElements() == 3) { llvm::LLVMContext &VMContext = getLLVMContext(); // Our source is a vec3, do a shuffle vector to make it a vec4. 
      SmallVector<llvm::Constant*, 4> Mask;
      Mask.push_back(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
      Mask.push_back(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
      Mask.push_back(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2));
      Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Value = Builder.CreateShuffleVector(Value,
                                          llvm::UndefValue::get(VecTy),
                                          MaskV, "extractVec");
      SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
    }
    auto *DstPtr = cast<llvm::PointerType>(Addr->getType());
    if (DstPtr->getElementType() != SrcTy) {
      llvm::Type *MemTy =
          llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace());
      Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
    }
  }

  Value = EmitToMemory(Value, Ty);

  if (Ty->isAtomicType() ||
      (!isInit && typeIsSuitableForInlineAtomic(Ty, Volatile))) {
    EmitAtomicStore(RValue::get(Value),
                    LValue::MakeAddr(Addr, Ty,
                                     CharUnits::fromQuantity(Alignment),
                                     getContext(), TBAAInfo),
                    isInit);
    return;
  }

  // HLSL Change Begins
  if (hlsl::IsHLSLMatType(Ty)) {
    // Use matrix store to keep major info.
    CGM.getHLSLRuntime().EmitHLSLMatrixStore(*this, Value, Addr, Ty);
    return;
  }
  // HLSL Change Ends

  llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
  if (Alignment)
    Store->setAlignment(Alignment);
  if (TBAAInfo) {
    llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
                                                      TBAAOffset);
    if (TBAAPath)
      CGM.DecorateInstruction(Store, TBAAPath, false/*ConvertTypeToTag*/);
  }
}

void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
                                        bool isInit) {
  EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
                    lvalue.getAlignment().getQuantity(), lvalue.getType(),
                    lvalue.getTBAAInfo(), isInit, lvalue.getTBAABaseType(),
                    lvalue.getTBAAOffset());
}

// HLSL Change Begin - find immediate value for literal.
static llvm::Value *GetStoredValue(llvm::Value *Ptr) {
  llvm::Value *V = nullptr;
  for (llvm::User *U : Ptr->users()) {
    if (llvm::StoreInst *ST = dyn_cast<llvm::StoreInst>(U)) {
      if (V) {
        // More than one store.
        // Skip.
        V = nullptr;
        break;
      }
      V = ST->getValueOperand();
    }
  }
  return V;
}
static bool IsLiteralType(QualType QT) {
  if (const BuiltinType *BTy = QT->getAs<BuiltinType>()) {
    if (BTy->getKind() == BuiltinType::LitFloat ||
        BTy->getKind() == BuiltinType::LitInt)
      return true;
  }
  return false;
}
// HLSL Change End.

/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
  if (LV.isObjCWeak()) {
    // load of a __weak object.
    llvm::Value *AddrWeakObj = LV.getAddress();
    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
                                                             AddrWeakObj));
  }
  if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
    llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
    Object = EmitObjCConsumeObject(LV.getType(), Object);
    return RValue::get(Object);
  }

  if (LV.isSimple()) {
    assert(!LV.getType()->isFunctionType());

    // HLSL Change Begin - find immediate value for literal.
    if (IsLiteralType(LV.getType())) {
      // The value must be stored only once.
      // Scan all uses to find it.
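      // A literal int/float temporary is initialized exactly once and never
      // reassigned, so if a unique store exists, its operand is the literal's
      // value.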
      llvm::Value *Ptr = LV.getAddress();
      if (llvm::Value *V = GetStoredValue(Ptr)) {
        return RValue::get(V);
      }
    }

    if (hlsl::IsHLSLAggregateType(LV.getType())) {
      // We cannot load the value because we don't expect to ever have
      // user-defined struct or array-typed llvm registers, only pointers to
      // them. To preserve the snapshot semantics of LValue loads, we copy the
      // value to a temporary and return a pointer to it.
      llvm::Value *Alloca = CreateMemTemp(LV.getType(), "rval");
      auto CharSizeAlignPair = getContext().getTypeInfoInChars(LV.getType());
      Builder.CreateMemCpy(Alloca, LV.getAddress(),
          static_cast<uint64_t>(CharSizeAlignPair.first.getQuantity()),
          static_cast<unsigned>(CharSizeAlignPair.second.getQuantity()));
      return RValue::get(Alloca);
    }
    // HLSL Change End.

    // Everything needs a load.
    return RValue::get(EmitLoadOfScalar(LV, Loc));
  }

  if (LV.isVectorElt()) {
    // HLSL Change Begin - find immediate value for literal.
    if (IsLiteralType(LV.getType())) {
      // The value must be stored only once.
      // Scan all uses to find it.
      llvm::Value *Ptr = LV.getAddress();
      if (llvm::Value *V = GetStoredValue(Ptr)) {
        return RValue::get(
            Builder.CreateExtractElement(V, LV.getVectorIdx(), "vecext"));
      }
    }
    // HLSL Change End.
    llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
                                              LV.isVolatileQualified());
    Load->setAlignment(LV.getAlignment().getQuantity());
    return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
                                                    "vecext"));
  }

  // If this is a reference to a subset of the elements of a vector, either
  // shuffle the input or extract/insert them as appropriate.
  if (LV.isExtVectorElt())
    return EmitLoadOfExtVectorElementLValue(LV);

  // Global Register variables always invoke intrinsics
  if (LV.isGlobalReg())
    return EmitLoadOfGlobalRegLValue(LV);

  // HLSL Change Starts
  if (LV.isExtMatrixElt())
    return EmitLoadOfExtMatrixElementLValue(LV);
  // HLSL Change Ends

  assert(LV.isBitField() && "Unknown LValue type!");
  return EmitLoadOfBitfieldLValue(LV);
}

RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
  const CGBitFieldInfo &Info = LV.getBitFieldInfo();
  CharUnits Align = LV.getAlignment().alignmentAtOffset(Info.StorageOffset);

  // Get the output type.
  llvm::Type *ResLTy = ConvertType(LV.getType());

  llvm::Value *Ptr = LV.getBitFieldAddr();
  llvm::Value *Val = Builder.CreateAlignedLoad(Ptr, Align.getQuantity(),
                                               LV.isVolatileQualified(),
                                               "bf.load");

  if (Info.IsSigned) {
    assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
    unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
    if (HighBits)
      Val = Builder.CreateShl(Val, HighBits, "bf.shl");
    if (Info.Offset + HighBits)
      Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
  } else {
    if (Info.Offset)
      Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
    if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
      Val = Builder.CreateAnd(Val,
                              llvm::APInt::getLowBitsSet(Info.StorageSize,
                                                         Info.Size),
                              "bf.clear");
  }
  Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");

  return RValue::get(Val);
}

// If this is a reference to a subset of the elements of a vector, create an
// appropriate shufflevector.
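// For example, loading v.zx from a <4 x T> vector becomes a shufflevector of
// the loaded value with mask <2, 0>.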
RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
  llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
                                            LV.isVolatileQualified());
  Load->setAlignment(LV.getAlignment().getQuantity());
  llvm::Value *Vec = Load;
  Vec = EmitFromMemory(Vec, LV.getType()); // HLSL Change

  const llvm::Constant *Elts = LV.getExtVectorElts();

  // If the result of the expression is a non-vector type, we must be
  // extracting a single element. Just codegen as an extractelement.
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  // HLSL Change Starts
  if (ExprVT == nullptr && getContext().getLangOpts().HLSL)
    ExprVT = hlsl::ConvertHLSLVecMatTypeToExtVectorType(getContext(),
                                                        LV.getType());
  // HLSL Change Ends

  // HLSL Change Begin - find immediate value for literal.
  QualType QT = LV.getType();
  if (ExprVT) {
    QT = ExprVT->getElementType();
  }
  if (IsLiteralType(QT)) {
    // The value must be stored only once.
    // Scan all uses to find it.
    llvm::Value *Ptr = LV.getExtVectorAddr();
    if (llvm::Value *V = GetStoredValue(Ptr)) {
      Vec = V;
    }
  }
  // HLSL Change End.

  if (!ExprVT) {
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    return RValue::get(Builder.CreateExtractElement(Vec, Elt));
  }

  // Always use shuffle vector to try to retain the original program structure
  unsigned NumResultElts = ExprVT->getNumElements();

  SmallVector<llvm::Constant*, 4> Mask;
  for (unsigned i = 0; i != NumResultElts; ++i)
    Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));

  llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
  Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
                                    MaskV);
  return RValue::get(Vec);
}

/// @brief Generates lvalue for partial ext_vector access.
llvm::Value *CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
  llvm::Value *VectorAddress = LV.getExtVectorAddr();
  const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
  QualType EQT = ExprVT->getElementType();
  llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
  llvm::Type *VectorElementPtrToTy = VectorElementTy->getPointerTo();

  llvm::Value *CastToPointerElement =
      Builder.CreateBitCast(VectorAddress, VectorElementPtrToTy,
                            "conv.ptr.element");

  const llvm::Constant *Elts = LV.getExtVectorElts();
  unsigned ix = getAccessedFieldNo(0, Elts);

  llvm::Value *VectorBasePtrPlusIx =
      Builder.CreateInBoundsGEP(CastToPointerElement,
                                llvm::ConstantInt::get(SizeTy, ix), "add.ptr");

  return VectorBasePtrPlusIx;
}

/// @brief Loads of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
  assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
  llvm::Value *Call = Builder.CreateCall(
      F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
  if (OrigTy->isPointerTy())
    Call = Builder.CreateIntToPtr(Call, OrigTy);
  return RValue::get(Call);
}

// HLSL Change Starts
RValue CodeGenFunction::EmitLoadOfExtMatrixElementLValue(LValue LV) {
  // TODO: Matrix swizzle members - emit
  return RValue();
}
// HLSL Change Ends

/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
                                             bool isInit) {
  if (!Dst.isSimple()) {
    if (Dst.isVectorElt()) {
      // Read/modify/write the vector, inserting the new element.
      llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
                                                Dst.isVolatileQualified());
      Load->setAlignment(Dst.getAlignment().getQuantity());
      llvm::Value *Vec = Load;
      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
                                        Dst.getVectorIdx(), "vecins");
      llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
                                                   Dst.isVolatileQualified());
      Store->setAlignment(Dst.getAlignment().getQuantity());
      return;
    }

    // If this is an update of extended vector elements, insert them as
    // appropriate.
    if (Dst.isExtVectorElt())
      return EmitStoreThroughExtVectorComponentLValue(Src, Dst);

    if (Dst.isGlobalReg())
      return EmitStoreThroughGlobalRegLValue(Src, Dst);

    assert(Dst.isBitField() && "Unknown LValue type");
    return EmitStoreThroughBitfieldLValue(Src, Dst);
  }

  // There's special magic for assigning into an ARC-qualified l-value.
  if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
    switch (Lifetime) {
    case Qualifiers::OCL_None:
      llvm_unreachable("present but none");

    case Qualifiers::OCL_ExplicitNone:
      // nothing special
      break;

    case Qualifiers::OCL_Strong:
      EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Weak:
      EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
      return;

    case Qualifiers::OCL_Autoreleasing:
      Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
                                                     Src.getScalarVal()));
      // fall into the normal path
      break;
    }
  }

  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
    // load of a __weak object.
    llvm::Value *LvalueDst = Dst.getAddress();
    llvm::Value *src = Src.getScalarVal();
    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
    return;
  }

  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
    // load of a __strong object.
llvm::Value *LvalueDst = Dst.getAddress(); llvm::Value *src = Src.getScalarVal(); if (Dst.isObjCIvar()) { assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL"); llvm::Type *ResultType = ConvertType(getContext().LongTy); llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp()); llvm::Value *dst = RHS; RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); llvm::Value *LHS = Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast"); llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset"); CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween); } else if (Dst.isGlobalObjCRef()) { CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst, Dst.isThreadLocalRef()); } else CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); return; } assert(Src.isScalar() && "Can't emit an agg store with this method"); EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit); } void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result) { const CGBitFieldInfo &Info = Dst.getBitFieldInfo(); CharUnits Align = Dst.getAlignment().alignmentAtOffset(Info.StorageOffset); llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType()); llvm::Value *Ptr = Dst.getBitFieldAddr(); // Get the source value, truncated to the width of the bit-field. llvm::Value *SrcVal = Src.getScalarVal(); // Cast the source to the storage type and shift it into place. SrcVal = Builder.CreateIntCast(SrcVal, Ptr->getType()->getPointerElementType(), /*IsSigned=*/false); llvm::Value *MaskedVal = SrcVal; // See if there are other bits in the bitfield's storage we'll need to load // and mask together with source before storing. if (Info.StorageSize != Info.Size) { assert(Info.StorageSize > Info.Size && "Invalid bitfield size."); llvm::Value *Val = Builder.CreateAlignedLoad(Ptr, Align.getQuantity(), Dst.isVolatileQualified(), "bf.load"); // Mask the source value as needed. if (!hasBooleanRepresentation(Dst.getType())) SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(Info.StorageSize, Info.Size), "bf.value"); MaskedVal = SrcVal; if (Info.Offset) SrcVal = Builder.CreateShl(SrcVal, Info.Offset, "bf.shl"); // Mask out the original value. Val = Builder.CreateAnd(Val, ~llvm::APInt::getBitsSet(Info.StorageSize, Info.Offset, Info.Offset + Info.Size), "bf.clear"); // Or together the unchanged values and the source value. SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set"); } else { assert(Info.Offset == 0); } // Write the new value back out. Builder.CreateAlignedStore(SrcVal, Ptr, Align.getQuantity(), Dst.isVolatileQualified()); // Return the new value of the bit-field, if requested. if (Result) { llvm::Value *ResultVal = MaskedVal; // Sign extend the value if needed. 
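    // For example, a 3-bit signed field in i32 storage uses shl by 29 followed
    // by ashr by 29 to replicate the field's sign bit through the high bits.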
if (Info.IsSigned) { assert(Info.Size <= Info.StorageSize); unsigned HighBits = Info.StorageSize - Info.Size; if (HighBits) { ResultVal = Builder.CreateShl(ResultVal, HighBits, "bf.result.shl"); ResultVal = Builder.CreateAShr(ResultVal, HighBits, "bf.result.ashr"); } } ResultVal = Builder.CreateIntCast(ResultVal, ResLTy, Info.IsSigned, "bf.result.cast"); *Result = EmitFromMemory(ResultVal, Dst.getType()); } } // HLSL Change - begin static bool IsHLSubscriptOfTypedBuffer(llvm::Value *V) { llvm::CallInst *CI = nullptr; llvm::Function *F = nullptr; if ((CI = dyn_cast<llvm::CallInst>(V)) && (F = CI->getCalledFunction()) && hlsl::GetHLOpcodeGroup(F) == hlsl::HLOpcodeGroup::HLSubscript) { for (llvm::Value *arg : CI->arg_operands()) { llvm::Type *Ty = arg->getType(); if (Ty->isPointerTy()) { std::pair<bool, hlsl::DxilResourceProperties> Result = hlsl::dxilutil::GetHLSLResourceProperties(Ty->getPointerElementType()); if (Result.first && Result.second.isUAV() && // These are the types of buffers that are typed. (hlsl::DxilResource::IsAnyTexture(Result.second.getResourceKind()) || Result.second.getResourceKind() == hlsl::DXIL::ResourceKind::TypedBuffer)) { return true; } } } } return false; } // HLSL Change - end void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst) { // This access turns into a read/modify/write of the vector. Load the input // value now. llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(), Dst.isVolatileQualified()); Load->setAlignment(Dst.getAlignment().getQuantity()); llvm::Value *Vec = Load; const llvm::Constant *Elts = Dst.getExtVectorElts(); llvm::Value *SrcVal = Src.getScalarVal(); // HLSL Change Starts SrcVal = EmitToMemory(SrcVal, Dst.getType()); const VectorType *VTy = Dst.getType()->getAs<VectorType>(); if (VTy == nullptr && getContext().getLangOpts().HLSL) VTy = hlsl::ConvertHLSLVecMatTypeToExtVectorType(getContext(), Dst.getType()); llvm::Value * VecDstPtr = Dst.getExtVectorAddr(); llvm::Value *Zero = Builder.getInt32(0); if (VTy) { llvm::Type *VecTy = VecDstPtr->getType()->getPointerElementType(); unsigned NumSrcElts = VTy->getNumElements(); if (VecTy->getVectorNumElements() == NumSrcElts) { // Full vector write, create one store. for (unsigned i = 0; i < VecTy->getVectorNumElements(); i++) { if (llvm::Constant *Elt = Elts->getAggregateElement(i)) { llvm::Value *SrcElt = Builder.CreateExtractElement(SrcVal, i); Vec = Builder.CreateInsertElement(Vec, SrcElt, Elt); } } Builder.CreateStore(Vec, VecDstPtr); } else { // If the vector pointer comes from subscripting a typed rw buffer (Buffer<>, Texture*<>, etc.), // insert the elements from the load. // // This is to avoid the final DXIL producing a load+store for each component later down the line, // as there's no clean way to associate the geps+store with each other. // if (IsHLSubscriptOfTypedBuffer(VecDstPtr)) { llvm::Value *vec = Load; for (unsigned i = 0; i < VecTy->getVectorNumElements(); i++) { if (llvm::Constant *Elt = Elts->getAggregateElement(i)) { llvm::Value *SrcElt = Builder.CreateExtractElement(SrcVal, i); vec = Builder.CreateInsertElement(vec, SrcElt, Elt); } } Builder.CreateStore(vec, VecDstPtr); } // Otherwise just do a gep + store for each component that we're writing to. 
      else {
        for (unsigned i = 0; i < VecTy->getVectorNumElements(); i++) {
          if (llvm::Constant *Elt = Elts->getAggregateElement(i)) {
            llvm::Value *EltGEP = Builder.CreateGEP(VecDstPtr, {Zero, Elt});
            llvm::Value *SrcElt = Builder.CreateExtractElement(SrcVal, i);
            Builder.CreateStore(SrcElt, EltGEP);
          }
        }
      }
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    llvm::Value *EltGEP = Builder.CreateGEP(
        VecDstPtr, {Zero, Elts->getAggregateElement((unsigned)0)});
    Builder.CreateStore(SrcVal, EltGEP);
  }
  return;
  // HLSL Change Ends

  if (VTy) { // HLSL Change
    unsigned NumSrcElts = VTy->getNumElements();
    unsigned NumDstElts =
        cast<llvm::VectorType>(Vec->getType())->getNumElements();
    if (NumDstElts == NumSrcElts) {
      // Use shuffle vector if the src and destination have the same number of
      // elements, and restore the vector mask since it is on the side where it
      // will be stored.
      SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);

      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(SrcVal,
                                        llvm::UndefValue::get(Vec->getType()),
                                        MaskV);
    } else if (NumDstElts > NumSrcElts) {
      // Extend the source vector to the same length and then shuffle it
      // into the destination.
      // FIXME: since we're shuffling with undef, can we just use the indices
      //        into that? This could be simpler.
      SmallVector<llvm::Constant*, 4> ExtMask;
      for (unsigned i = 0; i != NumSrcElts; ++i)
        ExtMask.push_back(Builder.getInt32(i));
      ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
      llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
      llvm::Value *ExtSrcVal =
          Builder.CreateShuffleVector(SrcVal,
                                      llvm::UndefValue::get(SrcVal->getType()),
                                      ExtMaskV);
      // build identity
      SmallVector<llvm::Constant*, 4> Mask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        Mask.push_back(Builder.getInt32(i));

      // When the vector size is odd and .odd or .hi is used, the last element
      // of the Elts constant array will be one past the size of the vector.
      // Ignore the last element here, if it is greater than the mask size.
      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
        NumSrcElts--;

      // modify when what gets shuffled in
      for (unsigned i = 0; i != NumSrcElts; ++i)
        Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i + NumDstElts);
      llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
    } else {
      // We should never shorten the vector
      llvm_unreachable("unexpected shorten vector length");
    }
  } else {
    // If the Src is a scalar (not a vector) it must be updating one element.
    unsigned InIdx = getAccessedFieldNo(0, Elts);
    llvm::Value *Elt = llvm::ConstantInt::get(SizeTy, InIdx);
    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
  }

  llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
                                               Dst.isVolatileQualified());
  Store->setAlignment(Dst.getAlignment().getQuantity());
}

/// @brief Stores of global named registers are always calls to intrinsics.
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
  assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
         "Bad type for register variable");
  llvm::MDNode *RegName = cast<llvm::MDNode>(
      cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
  assert(RegName && "Register LValue is not metadata");

  // We accept integer and pointer types only
  llvm::Type *OrigTy = CGM.getTypes().ConvertType(Dst.getType());
  llvm::Type *Ty = OrigTy;
  if (OrigTy->isPointerTy())
    Ty = CGM.getTypes().getDataLayout().getIntPtrType(OrigTy);
  llvm::Type *Types[] = { Ty };

  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
  llvm::Value *Value = Src.getScalarVal();
  if (OrigTy->isPointerTy())
    Value = Builder.CreatePtrToInt(Value, Ty);
  Builder.CreateCall(
      F, {llvm::MetadataAsValue::get(Ty->getContext(), RegName), Value});
}

// setObjCGCLValueClass - sets class of the lvalue for the purpose of
// generating the write-barriers API. It is currently a global, ivar,
// or neither.
static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
                                 LValue &LV,
                                 bool IsMemberAccess=false) {
  if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
    return;

  if (isa<ObjCIvarRefExpr>(E)) {
    QualType ExpTy = E->getType();
    if (IsMemberAccess && ExpTy->isPointerType()) {
      // If ivar is a structure pointer, assigning to field of
      // this struct follows gcc's behavior and makes it a non-ivar
      // write-barrier conservatively.
      ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType()) {
        LV.setObjCIvar(false);
        return;
      }
    }
    LV.setObjCIvar(true);
    auto *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr *>(E));
    LV.setBaseIvarExp(Exp->getBase());
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
      if (VD->hasGlobalStorage()) {
        LV.setGlobalObjCRef(true);
        LV.setThreadLocalRef(VD->getTLSKind() != VarDecl::TLS_None);
      }
    }
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }

  if (const auto *Exp = dyn_cast<UnaryOperator>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ParenExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    if (LV.isObjCIvar()) {
      // If cast is to a structure pointer, follow gcc's behavior and make it
      // a non-ivar write-barrier.
      QualType ExpTy = E->getType();
      if (ExpTy->isPointerType())
        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
      if (ExpTy->isRecordType())
        LV.setObjCIvar(false);
    }
    return;
  }

  if (const auto *Exp = dyn_cast<GenericSelectionExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
    return;
  }

  if (const auto *Exp = dyn_cast<ImplicitCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<CStyleCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
    return;
  }

  if (const auto *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
    if (LV.isObjCIvar() && !LV.isObjCArray())
      // Using array syntax to assign to what an ivar points to is not the
      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
      LV.setObjCIvar(false);
    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
      // Using array syntax to assign to what a global points to is not the
      // same as assigning to the global itself. {id *G;} G[i] = 0;
      LV.setGlobalObjCRef(false);
    return;
  }

  if (const auto *Exp = dyn_cast<MemberExpr>(E)) {
    setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
    // We don't know if member is an 'ivar', but this flag is looked at
    // only in the context of LV.isObjCIvar().
    LV.setObjCArray(E->getType()->isArrayType());
    return;
  }
}

static llvm::Value *
EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
                                llvm::Value *V, llvm::Type *IRType,
                                StringRef Name = StringRef()) {
  unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
  return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}

static LValue EmitThreadPrivateVarDeclLValue(
    CodeGenFunction &CGF, const VarDecl *VD, QualType T, llvm::Value *V,
    llvm::Type *RealVarTy, CharUnits Alignment, SourceLocation Loc) {
  V = CGF.CGM.getOpenMPRuntime().getAddrOfThreadPrivate(CGF, VD, V, Loc);
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  return CGF.MakeAddrLValue(V, T, Alignment);
}

static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
                                      const Expr *E, const VarDecl *VD) {
  QualType T = E->getType();

  // If it's thread_local, emit a call to its wrapper function instead.
  if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
      CGF.CGM.getCXXABI().usesThreadWrapperFunction())
    return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);

  llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
  llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
  V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
  CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
  LValue LV;
  // Emit reference to the private copy of the variable if it is an OpenMP
  // threadprivate variable.
  if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
    return EmitThreadPrivateVarDeclLValue(CGF, VD, T, V, RealVarTy, Alignment,
                                          E->getExprLoc());
  if (VD->getType()->isReferenceType()) {
    llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
    LI->setAlignment(Alignment.getQuantity());
    V = LI;
    LV = CGF.MakeNaturalAlignAddrLValue(V, T);
  } else {
    LV = CGF.MakeAddrLValue(V, T, Alignment);
  }
  setObjCGCLValueClass(CGF.getContext(), E, LV);
  return LV;
}

static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
                                     const Expr *E, const FunctionDecl *FD) {
  llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
  if (!FD->hasPrototype()) {
    if (const FunctionProtoType *Proto =
            FD->getType()->getAs<FunctionProtoType>()) {
      // Ugly case: for a K&R-style definition, the type of the definition
      // isn't the same as the type of a use. Correct for this with a
      // bitcast.
      QualType NoProtoType =
          CGF.getContext().getFunctionNoProtoType(Proto->getReturnType());
      NoProtoType = CGF.getContext().getPointerType(NoProtoType);
      V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
    }
  }
  CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
  return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}

static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
                                      llvm::Value *ThisValue) {
  QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
  LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
  return CGF.EmitLValueForField(LV, FD);
}

/// Named Registers are named metadata pointing to the register name
/// which will be read from/written to as an argument to the intrinsic
/// @llvm.read/write_register.
/// So far, only the name is being passed down, but other options such as /// register type, allocation type or even optimization options could be /// passed down via the metadata node. static LValue EmitGlobalNamedRegister(const VarDecl *VD, CodeGenModule &CGM, CharUnits Alignment) { SmallString<64> Name("llvm.named.register."); AsmLabelAttr *Asm = VD->getAttr<AsmLabelAttr>(); assert(Asm->getLabel().size() < 64-Name.size() && "Register name too big"); Name.append(Asm->getLabel()); llvm::NamedMDNode *M = CGM.getModule().getOrInsertNamedMetadata(Name); if (M->getNumOperands() == 0) { llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(), Asm->getLabel()); llvm::Metadata *Ops[] = {Str}; M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops)); } return LValue::MakeGlobalReg( llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)), VD->getType(), Alignment); } LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { const NamedDecl *ND = E->getDecl(); CharUnits Alignment = getContext().getDeclAlign(ND); QualType T = E->getType(); if (const auto *VD = dyn_cast<VarDecl>(ND)) { // Global Named registers access via intrinsics only if (VD->getStorageClass() == SC_Register && VD->hasAttr<AsmLabelAttr>() && !VD->isLocalVarDecl()) return EmitGlobalNamedRegister(VD, CGM, Alignment); // A DeclRefExpr for a reference initialized by a constant expression can // appear without being odr-used. Directly emit the constant initializer. const Expr *Init = VD->getAnyInitializer(VD); if (Init && !isa<ParmVarDecl>(VD) && VD->getType()->isReferenceType() && VD->isUsableInConstantExpressions(getContext()) && VD->checkInitIsICE()) { llvm::Constant *Val = CGM.EmitConstantValue(*VD->evaluateValue(), VD->getType(), this); assert(Val && "failed to emit reference constant expression"); // FIXME: Eventually we will want to emit vector element references. return MakeAddrLValue(Val, T, Alignment); } // Check for captured variables. if (E->refersToEnclosingVariableOrCapture()) { if (auto *FD = LambdaCaptureFields.lookup(VD)) return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue); else if (CapturedStmtInfo) { if (auto *V = LocalDeclMap.lookup(VD)) return MakeAddrLValue(V, T, Alignment); else return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD), CapturedStmtInfo->getContextValue()); } assert(isa<BlockDecl>(CurCodeDecl)); return MakeAddrLValue(GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>()), T, Alignment); } } // FIXME: We should be able to assert this for FunctionDecls as well! // FIXME: We should be able to assert this for all DeclRefExprs, not just // those with a valid source location. assert((ND->isUsed(false) || !isa<VarDecl>(ND) || !E->getLocation().isValid()) && "Should not use decl without marking it used!"); if (ND->hasAttr<WeakRefAttr>()) { const auto *VD = cast<ValueDecl>(ND); llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD); return MakeAddrLValue(Aliasee, T, Alignment); } if (const auto *VD = dyn_cast<VarDecl>(ND)) { // Check if this is a global variable. if (VD->hasLinkage() || VD->isStaticDataMember()) return EmitGlobalVarDeclLValue(*this, E, VD); bool isBlockVariable = VD->hasAttr<BlocksAttr>(); llvm::Value *V = LocalDeclMap.lookup(VD); if (!V && VD->isStaticLocal()) V = CGM.getOrCreateStaticVarDecl( *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)); // Check if variable is threadprivate. 
if (V && getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>()) return EmitThreadPrivateVarDeclLValue( *this, VD, T, V, getTypes().ConvertTypeForMem(VD->getType()), Alignment, E->getExprLoc()); assert(V && "DeclRefExpr not entered in LocalDeclMap?"); if (isBlockVariable) V = BuildBlockByrefAddress(V, VD); LValue LV; // HLSL Change Begins if (getLangOpts().HLSL) { // In hlsl, the referent type is for out parameter. // No pointer of pointer temp alloca created for it. // So always use V directly. LV = MakeAddrLValue(V, T, Alignment); } else // HLSL Change Ends if (VD->getType()->isReferenceType()) { llvm::LoadInst *LI = Builder.CreateLoad(V); LI->setAlignment(Alignment.getQuantity()); V = LI; LV = MakeNaturalAlignAddrLValue(V, T); } else { LV = MakeAddrLValue(V, T, Alignment); } bool isLocalStorage = VD->hasLocalStorage(); bool NonGCable = isLocalStorage && !VD->getType()->isReferenceType() && !isBlockVariable; if (NonGCable) { LV.getQuals().removeObjCGCAttr(); LV.setNonGC(true); } bool isImpreciseLifetime = (isLocalStorage && !VD->hasAttr<ObjCPreciseLifetimeAttr>()); if (isImpreciseLifetime) LV.setARCPreciseLifetime(ARCImpreciseLifetime); setObjCGCLValueClass(getContext(), E, LV); return LV; } if (const auto *FD = dyn_cast<FunctionDecl>(ND)) return EmitFunctionDeclLValue(*this, E, FD); llvm_unreachable("Unhandled DeclRefExpr"); } LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { // __extension__ doesn't affect lvalue-ness. if (E->getOpcode() == UO_Extension) return EmitLValue(E->getSubExpr()); QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); switch (E->getOpcode()) { default: llvm_unreachable("Unknown unary operator lvalue!"); case UO_Deref: { QualType T = E->getSubExpr()->getType()->getPointeeType(); assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T); LV.getQuals().setAddressSpace(ExprTy.getAddressSpace()); // We should not generate __weak write barrier on indirect reference // of a pointer to object; as in void foo (__weak id *param); *param = 0; // But, we continue to generate __strong write barrier on indirect write // into a pointer to object. if (getLangOpts().ObjC1 && getLangOpts().getGC() != LangOptions::NonGC && LV.isObjCWeak()) LV.setNonGC(!E->isOBJCGCCandidate(getContext())); return LV; } case UO_Real: case UO_Imag: { LValue LV = EmitLValue(E->getSubExpr()); assert(LV.isSimple() && "real/imag on non-ordinary l-value"); llvm::Value *Addr = LV.getAddress(); // __real is valid on scalars. This is a faster way of testing that. // __imag can only produce an rvalue on scalars. 
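    // A _Complex l-value's address points at an LLVM struct of two elements,
    // while a scalar's does not, so checking the pointee type is enough here.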
if (E->getOpcode() == UO_Real && !cast<llvm::PointerType>(Addr->getType()) ->getElementType()->isStructTy()) { assert(E->getSubExpr()->getType()->isArithmeticType()); return LV; } assert(E->getSubExpr()->getType()->isAnyComplexType()); unsigned Idx = E->getOpcode() == UO_Imag; return MakeAddrLValue( Builder.CreateStructGEP(nullptr, LV.getAddress(), Idx, "idx"), ExprTy); } case UO_PreInc: case UO_PreDec: { LValue LV = EmitLValue(E->getSubExpr()); bool isInc = E->getOpcode() == UO_PreInc; if (E->getType()->isAnyComplexType()) EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/); else EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/); return LV; } } } LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E), E->getType()); } LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) { return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E), E->getType()); } LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { auto SL = E->getFunctionName(); assert(SL != nullptr && "No StringLiteral name in PredefinedExpr"); StringRef FnName = CurFn->getName(); if (FnName.startswith("\01")) FnName = FnName.substr(1); StringRef NameItems[] = { PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName}; std::string GVName = llvm::join(NameItems, NameItems + 2, "."); if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) { auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str(), 1); return MakeAddrLValue(C, E->getType()); } auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName); return MakeAddrLValue(C, E->getType()); } /// Emit a type description suitable for use by a runtime sanitizer library. The /// format of a type descriptor is /// /// \code /// { i16 TypeKind, i16 TypeInfo } /// \endcode /// /// followed by an array of i8 containing the type name. TypeKind is 0 for an /// integer, 1 for a floating point value, and -1 for anything else. llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) { // Only emit each type's descriptor once. if (llvm::Constant *C = CGM.getTypeDescriptorFromMap(T)) return C; uint16_t TypeKind = -1; uint16_t TypeInfo = 0; if (T->isIntegerType()) { TypeKind = 0; TypeInfo = (llvm::Log2_32(getContext().getTypeSize(T)) << 1) | (T->isSignedIntegerType() ? 1 : 0); } else if (T->isFloatingType()) { TypeKind = 1; TypeInfo = getContext().getTypeSize(T); } // Format the type name as if for a diagnostic, including quotes and // optionally an 'aka'. SmallString<32> Buffer; CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype, (intptr_t)T.getAsOpaquePtr(), StringRef(), StringRef(), None, Buffer, None); llvm::Constant *Components[] = { Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo), llvm::ConstantDataArray::getString(getLLVMContext(), Buffer) }; llvm::Constant *Descriptor = llvm::ConstantStruct::getAnon(Components); auto *GV = new llvm::GlobalVariable( CGM.getModule(), Descriptor->getType(), /*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor); GV->setUnnamedAddr(true); CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV); // Remember the descriptor for this type. CGM.setTypeDescriptorInMap(T, GV); return GV; } llvm::Value *CodeGenFunction::EmitCheckValue(llvm::Value *V) { llvm::Type *TargetTy = IntPtrTy; // Floating-point types which fit into intptr_t are bitcast to integers // and then passed directly (after zero-extension, if necessary). 
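  // (For example, a float is first bitcast to i32; the integer path below
  // then zero-extends it to the target's intptr_t width.)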
if (V->getType()->isFloatingPointTy()) { unsigned Bits = V->getType()->getPrimitiveSizeInBits(); if (Bits <= TargetTy->getIntegerBitWidth()) V = Builder.CreateBitCast(V, llvm::Type::getIntNTy(getLLVMContext(), Bits)); } // Integers which fit in intptr_t are zero-extended and passed directly. if (V->getType()->isIntegerTy() && V->getType()->getIntegerBitWidth() <= TargetTy->getIntegerBitWidth()) return Builder.CreateZExt(V, TargetTy); // Pointers are passed directly, everything else is passed by address. if (!V->getType()->isPointerTy()) { llvm::Value *Ptr = CreateTempAlloca(V->getType()); Builder.CreateStore(V, Ptr); V = Ptr; } return Builder.CreatePtrToInt(V, TargetTy); } /// \brief Emit a representation of a SourceLocation for passing to a handler /// in a sanitizer runtime library. The format for this data is: /// \code /// struct SourceLocation { /// const char *Filename; /// int32_t Line, Column; /// }; /// \endcode /// For an invalid SourceLocation, the Filename pointer is null. llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) { llvm::Constant *Filename; int Line, Column; PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc); if (PLoc.isValid()) { auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src"); CGM.getSanitizerMetadata()->disableSanitizerForGlobal(FilenameGV); Filename = FilenameGV; Line = PLoc.getLine(); Column = PLoc.getColumn(); } else { Filename = llvm::Constant::getNullValue(Int8PtrTy); Line = Column = 0; } llvm::Constant *Data[] = {Filename, Builder.getInt32(Line), Builder.getInt32(Column)}; return llvm::ConstantStruct::getAnon(Data); } namespace { /// \brief Specify under what conditions this check can be recovered enum class CheckRecoverableKind { /// Always terminate program execution if this check fails. Unrecoverable, /// Check supports recovering, runtime has both fatal (noreturn) and /// non-fatal handlers for this check. Recoverable, /// Runtime conditionally aborts, always need to support recovery. AlwaysRecoverable }; } static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) { assert(llvm::countPopulation(Kind) == 1); switch (Kind) { case SanitizerKind::Vptr: return CheckRecoverableKind::AlwaysRecoverable; case SanitizerKind::Return: case SanitizerKind::Unreachable: return CheckRecoverableKind::Unrecoverable; default: return CheckRecoverableKind::Recoverable; } } static void emitCheckHandlerCall(CodeGenFunction &CGF, llvm::FunctionType *FnType, ArrayRef<llvm::Value *> FnArgs, StringRef CheckName, CheckRecoverableKind RecoverKind, bool IsFatal, llvm::BasicBlock *ContBB) { assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable); bool NeedsAbortSuffix = IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable; std::string FnName = ("__ubsan_handle_" + CheckName + (NeedsAbortSuffix ? 
"_abort" : "")).str(); bool MayReturn = !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable; llvm::AttrBuilder B; if (!MayReturn) { B.addAttribute(llvm::Attribute::NoReturn) .addAttribute(llvm::Attribute::NoUnwind); } B.addAttribute(llvm::Attribute::UWTable); llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction( FnType, FnName, llvm::AttributeSet::get(CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, B)); llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs); if (!MayReturn) { HandlerCall->setDoesNotReturn(); CGF.Builder.CreateUnreachable(); } else { CGF.Builder.CreateBr(ContBB); } } void CodeGenFunction::EmitCheck( ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked, StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs, ArrayRef<llvm::Value *> DynamicArgs) { assert(IsSanitizerScope); assert(Checked.size() > 0); llvm::Value *FatalCond = nullptr; llvm::Value *RecoverableCond = nullptr; llvm::Value *TrapCond = nullptr; for (int i = 0, n = Checked.size(); i < n; ++i) { llvm::Value *Check = Checked[i].first; // -fsanitize-trap= overrides -fsanitize-recover=. llvm::Value *&Cond = CGM.getCodeGenOpts().SanitizeTrap.has(Checked[i].second) ? TrapCond : CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second) ? RecoverableCond : FatalCond; Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check; } if (TrapCond) EmitTrapCheck(TrapCond); if (!FatalCond && !RecoverableCond) return; llvm::Value *JointCond; if (FatalCond && RecoverableCond) JointCond = Builder.CreateAnd(FatalCond, RecoverableCond); else JointCond = FatalCond ? FatalCond : RecoverableCond; assert(JointCond); CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second); assert(SanOpts.has(Checked[0].second)); #ifndef NDEBUG for (int i = 1, n = Checked.size(); i < n; ++i) { assert(RecoverKind == getRecoverableKind(Checked[i].second) && "All recoverable kinds in a single check must be same!"); assert(SanOpts.has(Checked[i].second)); } #endif llvm::BasicBlock *Cont = createBasicBlock("cont"); llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName); llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers); // Give hint that we very much don't expect to execute the handler // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp llvm::MDBuilder MDHelper(getLLVMContext()); llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1); Branch->setMetadata(llvm::LLVMContext::MD_prof, Node); EmitBlock(Handlers); // Emit handler arguments and create handler function type. llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs); auto *InfoPtr = new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false, llvm::GlobalVariable::PrivateLinkage, Info); InfoPtr->setUnnamedAddr(true); CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr); SmallVector<llvm::Value *, 4> Args; SmallVector<llvm::Type *, 4> ArgTypes; Args.reserve(DynamicArgs.size() + 1); ArgTypes.reserve(DynamicArgs.size() + 1); // Handler functions take an i8* pointing to the (handler-specific) static // information block, followed by a sequence of intptr_t arguments // representing operand values. 
Args.push_back(Builder.CreateBitCast(InfoPtr, Int8PtrTy)); ArgTypes.push_back(Int8PtrTy); for (size_t i = 0, n = DynamicArgs.size(); i != n; ++i) { Args.push_back(EmitCheckValue(DynamicArgs[i])); ArgTypes.push_back(IntPtrTy); } llvm::FunctionType *FnType = llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false); if (!FatalCond || !RecoverableCond) { // Simple case: we need to generate a single handler call, either // fatal, or non-fatal. emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, (FatalCond != nullptr), Cont); } else { // Emit two handler calls: first one for set of unrecoverable checks, // another one for recoverable. llvm::BasicBlock *NonFatalHandlerBB = createBasicBlock("non_fatal." + CheckName); llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName); Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB); EmitBlock(FatalHandlerBB); emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true, NonFatalHandlerBB); EmitBlock(NonFatalHandlerBB); emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false, Cont); } EmitBlock(Cont); } void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) { llvm::BasicBlock *Cont = createBasicBlock("cont"); // If we're optimizing, collapse all calls to trap down to just one per // function to save on code size. if (!CGM.getCodeGenOpts().OptimizationLevel || !TrapBB) { TrapBB = createBasicBlock("trap"); Builder.CreateCondBr(Checked, Cont, TrapBB); EmitBlock(TrapBB); llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap); TrapCall->setDoesNotReturn(); TrapCall->setDoesNotThrow(); Builder.CreateUnreachable(); } else { Builder.CreateCondBr(Checked, Cont, TrapBB); } EmitBlock(Cont); } llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) { llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID)); if (!CGM.getCodeGenOpts().TrapFuncName.empty()) TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex, "trap-func-name", CGM.getCodeGenOpts().TrapFuncName); return TrapCall; } /// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an /// array to pointer, return the array subexpression. static const Expr *isSimpleArrayDecayOperand(const Expr *E) { // If this isn't just an array->pointer decay, bail out. const auto *CE = dyn_cast<CastExpr>(E); if (!CE || CE->getCastKind() != CK_ArrayToPointerDecay) return nullptr; // If this is a decay from variable width array, bail out. const Expr *SubExpr = CE->getSubExpr(); if (SubExpr->getType()->isVariableArrayType()) return nullptr; return SubExpr; } LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E, bool Accessed) { // The index must always be an integer, which is not an aggregate. Emit it. llvm::Value *Idx = EmitScalarExpr(E->getIdx()); QualType IdxTy = E->getIdx()->getType(); bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType(); if (SanOpts.has(SanitizerKind::ArrayBounds)) EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed); // If the base is a vector type, then we are forming a vector element lvalue // with this subscript. if (E->getBase()->getType()->isVectorType() && !isa<ExtVectorElementExpr>(E->getBase())) { // Emit the vector as an lvalue to get its address. LValue LHS = EmitLValue(E->getBase()); assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(), LHS.getAlignment()); } // Extend or truncate the index type to 32 or 64-bits. 
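  // (i.e. to the target's pointer-width integer type, so all the GEPs below
  // index with one consistent type.)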
if (Idx->getType() != IntPtrTy) Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom"); // HLSL Change Starts const Expr *Array = isSimpleArrayDecayOperand(E->getBase()); assert((!getLangOpts().HLSL || nullptr == Array) && "else array decay snuck in AST for HLSL"); // HLSL Change Ends // We know that the pointer points to a type of the correct size, unless the // size is a VLA or Objective-C interface. llvm::Value *Address = nullptr; CharUnits ArrayAlignment; if (isa<ExtVectorElementExpr>(E->getBase())) { LValue LV = EmitLValue(E->getBase()); Address = EmitExtVectorElementLValue(LV); Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); const VectorType *ExprVT = LV.getType()->getAs<VectorType>(); QualType EQT = ExprVT->getElementType(); return MakeAddrLValue(Address, EQT, getContext().getTypeAlignInChars(EQT)); } else if (const VariableArrayType *vla = getContext().getAsVariableArrayType(E->getType())) { // The base must be a pointer, which is not an aggregate. Emit // it. It needs to be emitted first in case it's what captures // the VLA bounds. Address = EmitScalarExpr(E->getBase()); // The element count here is the total number of non-VLA elements. llvm::Value *numElements = getVLASize(vla).first; // Effectively, the multiply by the VLA size is part of the GEP. // GEP indexes are signed, and scaling an index isn't permitted to // signed-overflow, so we use the same semantics for our explicit // multiply. We suppress this if overflow is not undefined behavior. if (getLangOpts().isSignedOverflowDefined()) { Idx = Builder.CreateMul(Idx, numElements); Address = Builder.CreateGEP(Address, Idx, "arrayidx"); } else { Idx = Builder.CreateNSWMul(Idx, numElements); Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx"); } } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){ // Indexing over an interface, as in "NSString *P; P[4];" llvm::Value *InterfaceSize = llvm::ConstantInt::get(Idx->getType(), getContext().getTypeSizeInChars(OIT).getQuantity()); Idx = Builder.CreateMul(Idx, InterfaceSize); // The base must be a pointer, which is not an aggregate. Emit it. llvm::Value *Base = EmitScalarExpr(E->getBase()); Address = EmitCastToVoidPtr(Base); Address = Builder.CreateGEP(Address, Idx, "arrayidx"); Address = Builder.CreateBitCast(Address, Base->getType()); } else if (!getLangOpts().HLSL && Array) { // HLSL Change - No Array to pointer decay for HLSL // If this is A[i] where A is an array, the frontend will have decayed the // base to be a ArrayToPointerDecay implicit cast. While correct, it is // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a // "gep x, i" here. Emit one "gep A, 0, i". assert(Array->getType()->isArrayType() && "Array to pointer decay must have array source type!"); LValue ArrayLV; // For simple multidimensional array indexing, set the 'accessed' flag for // better bounds-checking of the base expression. if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Array)) ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); else ArrayLV = EmitLValue(Array); llvm::Value *ArrayPtr = ArrayLV.getAddress(); llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); llvm::Value *Args[] = { Zero, Idx }; // Propagate the alignment from the array itself to the result. 
ArrayAlignment = ArrayLV.getAlignment(); if (getLangOpts().isSignedOverflowDefined()) Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx"); else Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx"); } else { // HLSL Change Starts const ArrayType *AT = dyn_cast<ArrayType>(E->getBase()->getType()->getCanonicalTypeUnqualified()); if (getContext().getLangOpts().HLSL && AT) { LValue ArrayLV; // For simple multidimensional array indexing, set the 'accessed' flag for // better bounds-checking of the base expression. if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(E->getBase())) ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true); else ArrayLV = EmitLValue(E->getBase()); llvm::Value *ArrayPtr = ArrayLV.getAddress(); llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0); llvm::Value *Args[] = { Zero, Idx }; // Propagate the alignment from the array itself to the result. ArrayAlignment = ArrayLV.getAlignment(); if (getLangOpts().isSignedOverflowDefined()) Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx"); else Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx"); } else { // HLSL Change Ends // The base must be a pointer, which is not an aggregate. Emit it. llvm::Value *Base = EmitScalarExpr(E->getBase()); if (getLangOpts().isSignedOverflowDefined()) Address = Builder.CreateGEP(Base, Idx, "arrayidx"); else Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); } // HLSL Change } QualType T = E->getBase()->getType()->getPointeeType(); // HLSL Change Starts if (getContext().getLangOpts().HLSL && T.isNull()) { T = QualType(E->getBase()->getType()->getArrayElementTypeNoTypeQual(), 0); } // HLSL Change Ends assert(!T.isNull() && "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); // Limit the alignment to that of the result type. LValue LV; if (!ArrayAlignment.isZero()) { CharUnits Align = getContext().getTypeAlignInChars(T); ArrayAlignment = std::min(Align, ArrayAlignment); LV = MakeAddrLValue(Address, T, ArrayAlignment); } else { LV = MakeNaturalAlignAddrLValue(Address, T); } LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace()); if (getLangOpts().ObjC1 && getLangOpts().getGC() != LangOptions::NonGC) { LV.setNonGC(!E->isOBJCGCCandidate(getContext())); setObjCGCLValueClass(getContext(), E, LV); } return LV; } static llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder, SmallVectorImpl<unsigned> &Elts) { SmallVector<llvm::Constant*, 4> CElts; for (unsigned i = 0, e = Elts.size(); i != e; ++i) CElts.push_back(Builder.getInt32(Elts[i])); return llvm::ConstantVector::get(CElts); } LValue CodeGenFunction:: EmitExtVectorElementExpr(const ExtVectorElementExpr *E) { // Emit the base vector as an l-value. LValue Base; // ExtVectorElementExpr's base can either be a vector or pointer to vector. if (E->isArrow()) { // If it is a pointer to a vector, emit the address and form an lvalue with // it. llvm::Value *Ptr = EmitScalarExpr(E->getBase()); const PointerType *PT = E->getBase()->getType()->getAs<PointerType>(); Base = MakeAddrLValue(Ptr, PT->getPointeeType()); Base.getQuals().removeObjCGCAttr(); } else if (E->getBase()->isGLValue()) { // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), // emit the base as an lvalue. assert(E->getBase()->getType()->isVectorType()); Base = EmitLValue(E->getBase()); } else { // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. 
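    // An rvalue has no address, so spill it to a temporary alloca and build
    // the LValue on that.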
assert(E->getBase()->getType()->isVectorType() && "Result must be a vector"); llvm::Value *Vec = EmitScalarExpr(E->getBase()); // Store the vector to memory (because LValue wants an address). llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType()); Builder.CreateStore(Vec, VecMem); Base = MakeAddrLValue(VecMem, E->getBase()->getType()); } QualType type = E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); // Encode the element access list into a vector of unsigned indices. SmallVector<unsigned, 4> Indices; E->getEncodedElementAccess(Indices); if (Base.isSimple()) { llvm::Constant *CV = GenerateConstantVector(Builder, Indices); return LValue::MakeExtVectorElt(Base.getAddress(), CV, type, Base.getAlignment()); } assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); llvm::Constant *BaseElts = Base.getExtVectorElts(); SmallVector<llvm::Constant *, 4> CElts; for (unsigned i = 0, e = Indices.size(); i != e; ++i) CElts.push_back(BaseElts->getAggregateElement(Indices[i])); llvm::Constant *CV = llvm::ConstantVector::get(CElts); return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type, Base.getAlignment()); } // HLSL Change Starts LValue CodeGenFunction::EmitExtMatrixElementExpr(const ExtMatrixElementExpr *E) { LValue Base; assert(!E->isArrow() && "ExtMatrixElementExpr's base will not be Arrow"); if (E->getBase()->isGLValue()) { // if the base is an lvalue ( as in the case of foo.x.x), // emit the base as an lvalue. const Expr *base = E->getBase(); assert(hlsl::IsHLSLMatType(base->getType())); Base = EmitLValue(base); } else { // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. assert(hlsl::IsHLSLMatType(E->getBase()->getType()) && "Result must be a vector"); llvm::Value *Vec = EmitScalarExpr(E->getBase()); // Store the vector to memory (because LValue wants an address). llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType()); CGM.getHLSLRuntime().EmitHLSLMatrixStore(*this, Vec, VecMem, E->getBase()->getType()); Base = MakeAddrLValue(VecMem, E->getBase()->getType()); } // Encode the element access list into a vector of unsigned indices. SmallVector<unsigned, 4> Indices; E->getEncodedElementAccess(Indices); llvm::Type *ResultTy = ConvertType(getContext().getLValueReferenceType(E->getType())); llvm::Value *matBase = nullptr; llvm::Constant *CV = nullptr; if (Base.isSimple()) { SmallVector<llvm::Constant *, 4> CElts; for (unsigned i = 0, e = Indices.size(); i != e; ++i) CElts.push_back(Builder.getInt32(Indices[i])); CV = llvm::ConstantVector::get(CElts); matBase = Base.getAddress(); } else { assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); llvm::Constant *BaseElts = Base.getExtVectorElts(); SmallVector<llvm::Constant *, 4> CElts; for (unsigned i = 0, e = Indices.size(); i != e; ++i) CElts.push_back(BaseElts->getAggregateElement(Indices[i])); CV = llvm::ConstantVector::get(CElts); matBase = Base.getExtVectorAddr(); } llvm::Value *Result = CGM.getHLSLRuntime().EmitHLSLMatrixElement( *this, ResultTy, {matBase, CV}, E->getBase()->getType()); return MakeAddrLValue(Result, E->getType()); } LValue CodeGenFunction::EmitHLSLVectorElementExpr(const HLSLVectorElementExpr *E) { // Emit the base vector as an l-value. // Clone EmitExtVectorElementExpr for now // TODO: difference between ExtVector and HlslVector LValue Base; // ExtVectorElementExpr's base can either be a vector or pointer to vector. if (E->isArrow()) { // If it is a pointer to a vector, emit the address and form an lvalue with // it. 
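    // (HLSL has no pointer types, so the arrow form cannot occur; the assert
    // below documents that.)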
assert(!getLangOpts().HLSL && "this will not happen for hlsl"); } else if (E->getBase()->isGLValue()) { // Otherwise, if the base is an lvalue ( as in the case of foo.x.x), // emit the base as an lvalue. const Expr *base = E->getBase(); if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(base)) { if (ICE->getCastKind() == CastKind::CK_HLSLVectorSplat && E->getNumElements() == 1) { // For pattern like: // static bool t; // t.x = bool(a); // Just ignore the .x, treat it like t = bool(a); return EmitLValue(ICE->getSubExpr()); } } assert(hlsl::IsHLSLVecType(base->getType())); Base = EmitLValue(base); } else { // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such. assert(hlsl::IsHLSLVecType(E->getBase()->getType()) && "Result must be a vector"); llvm::Value *Vec = EmitScalarExpr(E->getBase()); Vec = EmitToMemory(Vec, E->getBase()->getType()); // Store the vector to memory (because LValue wants an address). llvm::Value *VecMemPtr = CreateMemTemp(E->getBase()->getType()); Builder.CreateStore(Vec, VecMemPtr); Base = MakeAddrLValue(VecMemPtr, E->getBase()->getType()); } QualType type = E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers()); // Encode the element access list into a vector of unsigned indices. SmallVector<unsigned, 4> Indices; E->getEncodedElementAccess(Indices); if (Base.isSimple()) { llvm::Constant *CV = GenerateConstantVector(Builder, Indices); return LValue::MakeExtVectorElt(Base.getAddress(), CV, type, Base.getAlignment()); } assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!"); llvm::Constant *BaseElts = Base.getExtVectorElts(); SmallVector<llvm::Constant *, 4> CElts; for (unsigned i = 0, e = Indices.size(); i != e; ++i) CElts.push_back(BaseElts->getAggregateElement(Indices[i])); llvm::Constant *CV = llvm::ConstantVector::get(CElts); return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type, Base.getAlignment()); } // HLSL Change Ends LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { Expr *BaseExpr = E->getBase(); // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. LValue BaseLV; if (E->isArrow()) { llvm::Value *Ptr = EmitScalarExpr(BaseExpr); QualType PtrTy = BaseExpr->getType()->getPointeeType(); EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Ptr, PtrTy); BaseLV = MakeNaturalAlignAddrLValue(Ptr, PtrTy); } else BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess); NamedDecl *ND = E->getMemberDecl(); if (auto *Field = dyn_cast<FieldDecl>(ND)) { LValue LV = EmitLValueForField(BaseLV, Field); setObjCGCLValueClass(getContext(), E, LV); return LV; } if (auto *VD = dyn_cast<VarDecl>(ND)) return EmitGlobalVarDeclLValue(*this, E, VD); if (const auto *FD = dyn_cast<FunctionDecl>(ND)) return EmitFunctionDeclLValue(*this, E, FD); llvm_unreachable("Unhandled member declaration!"); } /// Given that we are currently emitting a lambda, emit an l-value for /// one of its members. 
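/// For example, in `[x] { return x; }` the capture `x` is stored as a field
/// of the compiler-generated closure class, so inside the lambda's
/// operator() it is addressed as a field off the implicit this pointer
/// (CXXABIThisValue below).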
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) { assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda()); assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent()); QualType LambdaTagType = getContext().getTagDeclType(Field->getParent()); LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType); return EmitLValueForField(LambdaLV, Field); } LValue CodeGenFunction::EmitLValueForField(LValue base, const FieldDecl *field) { if (field->isBitField()) { const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(field->getParent()); const CGBitFieldInfo &Info = RL.getBitFieldInfo(field); llvm::Value *Addr = base.getAddress(); unsigned Idx = RL.getLLVMFieldNo(field); // For structs, we GEP to the field that the record layout suggests. Addr = Builder.CreateStructGEP(nullptr, Addr, Idx, field->getName()); // Get the access type. llvm::Type *PtrTy = llvm::Type::getIntNPtrTy( getLLVMContext(), Info.StorageSize, CGM.getContext().getTargetAddressSpace(base.getType())); if (Addr->getType() != PtrTy) Addr = Builder.CreateBitCast(Addr, PtrTy); QualType fieldType = field->getType().withCVRQualifiers(base.getVRQualifiers()); return LValue::MakeBitfield(Addr, Info, fieldType, base.getAlignment()); } const RecordDecl *rec = field->getParent(); QualType type = field->getType(); CharUnits alignment = getContext().getDeclAlign(field); // FIXME: It should be impossible to have an LValue without alignment for a // complete type. if (!base.getAlignment().isZero()) alignment = std::min(alignment, base.getAlignment()); bool mayAlias = rec->hasAttr<MayAliasAttr>(); llvm::Value *addr = base.getAddress(); unsigned cvr = base.getVRQualifiers(); bool TBAAPath = CGM.getCodeGenOpts().StructPathTBAA; if (rec->isUnion()) { // For unions, there is no pointer adjustment. assert(!type->isReferenceType() && "union has reference member"); // TODO: handle path-aware TBAA for union. TBAAPath = false; } else { // For structs, we GEP to the field that the record layout suggests. unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field); addr = Builder.CreateStructGEP(nullptr, addr, idx, field->getName()); // If this is a reference field, load the reference right now. if (const ReferenceType *refType = type->getAs<ReferenceType>()) { llvm::LoadInst *load = Builder.CreateLoad(addr, "ref"); if (cvr & Qualifiers::Volatile) load->setVolatile(true); load->setAlignment(alignment.getQuantity()); // Loading the reference will disable path-aware TBAA. TBAAPath = false; if (CGM.shouldUseTBAA()) { llvm::MDNode *tbaa; if (mayAlias) tbaa = CGM.getTBAAInfo(getContext().CharTy); else tbaa = CGM.getTBAAInfo(type); if (tbaa) CGM.DecorateInstruction(load, tbaa); } addr = load; mayAlias = false; type = refType->getPointeeType(); if (type->isIncompleteType()) alignment = CharUnits(); else alignment = getContext().getTypeAlignInChars(type); cvr = 0; // qualifiers don't recursively apply to referencee } } // Make sure that the address is pointing to the right type. This is critical // for both unions and structs. A union needs a bitcast, a struct element // will need a bitcast if the LLVM type laid out doesn't match the desired // type. 
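  // (Illustrative: given `union U { int i; float f; };`, LLVM lays %union.U
  // out around a single storage member, so forming an l-value for the other
  // member requires bitcasting the %union.U* to that member's pointer type;
  // a struct field only needs one when its converted type differs from the
  // laid-out element type.)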
addr = EmitBitCastOfLValueToProperType(*this, addr, CGM.getTypes().ConvertTypeForMem(type), field->getName()); if (field->hasAttr<AnnotateAttr>()) addr = EmitFieldAnnotations(field, addr); LValue LV = MakeAddrLValue(addr, type, alignment); LV.getQuals().addCVRQualifiers(cvr); if (TBAAPath) { const ASTRecordLayout &Layout = getContext().getASTRecordLayout(field->getParent()); // Set the base type to be the base type of the base LValue and // update offset to be relative to the base type. LV.setTBAABaseType(mayAlias ? getContext().CharTy : base.getTBAABaseType()); LV.setTBAAOffset(mayAlias ? 0 : base.getTBAAOffset() + Layout.getFieldOffset(field->getFieldIndex()) / getContext().getCharWidth()); } // __weak attribute on a field is ignored. if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak) LV.getQuals().removeObjCGCAttr(); // Fields of may_alias structs act like 'char' for TBAA purposes. // FIXME: this should get propagated down through anonymous structs // and unions. if (mayAlias && LV.getTBAAInfo()) LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy)); return LV; } LValue CodeGenFunction::EmitLValueForFieldInitialization(LValue Base, const FieldDecl *Field) { QualType FieldType = Field->getType(); if (!FieldType->isReferenceType()) return EmitLValueForField(Base, Field); const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(Field->getParent()); unsigned idx = RL.getLLVMFieldNo(Field); llvm::Value *V = Builder.CreateStructGEP(nullptr, Base.getAddress(), idx); assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs"); // Make sure that the address is pointing to the right type. This is critical // for both unions and structs. A union needs a bitcast, a struct element // will need a bitcast if the LLVM type laid out doesn't match the desired // type. llvm::Type *llvmType = ConvertTypeForMem(FieldType); V = EmitBitCastOfLValueToProperType(*this, V, llvmType, Field->getName()); CharUnits Alignment = getContext().getDeclAlign(Field); // FIXME: It should be impossible to have an LValue without alignment for a // complete type. if (!Base.getAlignment().isZero()) Alignment = std::min(Alignment, Base.getAlignment()); return MakeAddrLValue(V, FieldType, Alignment); } LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){ if (E->isFileScope()) { llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E); return MakeAddrLValue(GlobalPtr, E->getType()); } if (E->getType()->isVariablyModifiedType()) // make sure to emit the VLA size. EmitVariablyModifiedType(E->getType()); llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral"); const Expr *InitExpr = E->getInitializer(); LValue Result = MakeAddrLValue(DeclPtr, E->getType()); EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(), /*Init*/ true); return Result; } LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) { if (!E->isGLValue()) // Initializing an aggregate temporary in C++11: T{...}. return EmitAggExprToLValue(E); // An lvalue initializer list must be initializing a reference. assert(E->getNumInits() == 1 && "reference init with multiple values"); return EmitLValue(E->getInit(0)); } /// Emit the operand of a glvalue conditional operator. This is either a glvalue /// or a (possibly-parenthesized) throw-expression. If this is a throw, no /// LValue is returned and the current block has been terminated. 
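/// For example, in `ok ? obj : throw Err()`, the throw arm yields no value:
/// its block is terminated, and only the surviving arm's l-value reaches the
/// merge block below.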
static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF, const Expr *Operand) { if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Operand->IgnoreParens())) { CGF.EmitCXXThrowExpr(ThrowExpr, /*KeepInsertionPoint*/false); return None; } return CGF.EmitLValue(Operand); } LValue CodeGenFunction:: EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) { if (!expr->isGLValue()) { // ?: here should be an aggregate. assert(hasAggregateEvaluationKind(expr->getType()) && "Unexpected conditional operator!"); return EmitAggExprToLValue(expr); } OpaqueValueMapping binding(*this, expr); const Expr *condExpr = expr->getCond(); bool CondExprBool; if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) { const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr(); if (!CondExprBool) std::swap(live, dead); if (!ContainsLabel(dead)) { // If the true case is live, we need to track its region. if (CondExprBool) incrementProfileCounter(expr); return EmitLValue(live); } } llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true"); llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false"); llvm::BasicBlock *contBlock = createBasicBlock("cond.end"); ConditionalEvaluation eval(*this); EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr)); // Any temporaries created here are conditional. EmitBlock(lhsBlock); incrementProfileCounter(expr); eval.begin(*this); Optional<LValue> lhs = EmitLValueOrThrowExpression(*this, expr->getTrueExpr()); eval.end(*this); if (lhs && !lhs->isSimple()) return EmitUnsupportedLValue(expr, "conditional operator"); lhsBlock = Builder.GetInsertBlock(); if (lhs) Builder.CreateBr(contBlock); // Any temporaries created here are conditional. EmitBlock(rhsBlock); eval.begin(*this); Optional<LValue> rhs = EmitLValueOrThrowExpression(*this, expr->getFalseExpr()); eval.end(*this); if (rhs && !rhs->isSimple()) return EmitUnsupportedLValue(expr, "conditional operator"); rhsBlock = Builder.GetInsertBlock(); EmitBlock(contBlock); if (lhs && rhs) { llvm::PHINode *phi = Builder.CreatePHI(lhs->getAddress()->getType(), 2, "cond-lvalue"); phi->addIncoming(lhs->getAddress(), lhsBlock); phi->addIncoming(rhs->getAddress(), rhsBlock); return MakeAddrLValue(phi, expr->getType()); } else { assert((lhs || rhs) && "both operands of glvalue conditional are throw-expressions?"); return lhs ? *lhs : *rhs; } } /// EmitCastLValue - Casts are never lvalues unless that cast is to a reference /// type. If the cast is to a reference, we can have the usual lvalue result, /// otherwise if a cast is needed by the code generator in an lvalue context, /// then it must mean that we need the address of an aggregate in order to /// access one of its members. This can happen for all the reasons that casts /// are permitted with aggregate result, including noop aggregate casts, and /// cast from scalar to union. 
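/// A GNU cast-to-union such as `(union U)i` is one example: the result is an
/// aggregate, so the operand is evaluated into memory and the l-value is
/// formed from that temporary (see CK_ToUnion below).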
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { // HLSL Change Begins if (hlsl::IsHLSLMatType(E->getType()) || hlsl::IsHLSLMatType(E->getSubExpr()->getType())) { LValue LV = EmitLValue(E->getSubExpr()); QualType ToType = getContext().getLValueReferenceType(E->getType()); llvm::Value *FromValue = LV.getAddress(); llvm::Type *FromTy = FromValue->getType(); llvm::Type *RetTy = ConvertType(ToType); // type not changed, LValueToRValue, CStyleCast may go this path if (FromTy == RetTy) { return LV; // If only address space changed, add address space cast } if (FromTy->getPointerAddressSpace() != RetTy->getPointerAddressSpace()) { llvm::Type *ConvertedFromTy = llvm::PointerType::get( FromTy->getPointerElementType(), RetTy->getPointerAddressSpace()); assert(ConvertedFromTy == RetTy && "otherwise, more than just address space changing in one step"); llvm::Value *cast = Builder.CreateAddrSpaceCast(FromValue, ConvertedFromTy); return MakeAddrLValue(cast, ToType); } llvm::Value *cast = CGM.getHLSLRuntime().EmitHLSLMatrixOperationCall(*this, E, RetTy, { LV.getAddress() }); return MakeAddrLValue(cast, ToType); } // HLSL Change Ends switch (E->getCastKind()) { case CK_ToVoid: case CK_BitCast: case CK_ArrayToPointerDecay: case CK_FunctionToPointerDecay: case CK_NullToMemberPointer: case CK_NullToPointer: case CK_IntegralToPointer: case CK_PointerToIntegral: case CK_PointerToBoolean: case CK_VectorSplat: case CK_IntegralCast: case CK_IntegralToBoolean: case CK_IntegralToFloating: case CK_FloatingToIntegral: case CK_FloatingToBoolean: case CK_FloatingCast: case CK_FloatingRealToComplex: case CK_FloatingComplexToReal: case CK_FloatingComplexToBoolean: case CK_FloatingComplexCast: case CK_FloatingComplexToIntegralComplex: case CK_IntegralRealToComplex: case CK_IntegralComplexToReal: case CK_IntegralComplexToBoolean: case CK_IntegralComplexCast: case CK_IntegralComplexToFloatingComplex: case CK_DerivedToBaseMemberPointer: case CK_BaseToDerivedMemberPointer: case CK_MemberPointerToBoolean: case CK_ReinterpretMemberPointer: case CK_AnyPointerToBlockPointerCast: case CK_ARCProduceObject: case CK_ARCConsumeObject: case CK_ARCReclaimReturnedObject: case CK_ARCExtendBlockObject: case CK_CopyAndAutoreleaseBlockObject: case CK_AddressSpaceConversion: return EmitUnsupportedLValue(E, "unexpected cast lvalue"); case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!"); case CK_BuiltinFnToFnPtr: llvm_unreachable("builtin functions are handled elsewhere"); // These are never l-values; just use the aggregate emission code. 
case CK_NonAtomicToAtomic: case CK_AtomicToNonAtomic: return EmitAggExprToLValue(E); case CK_Dynamic: { LValue LV = EmitLValue(E->getSubExpr()); llvm::Value *V = LV.getAddress(); const auto *DCE = cast<CXXDynamicCastExpr>(E); return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType()); } case CK_ConstructorConversion: case CK_UserDefinedConversion: case CK_CPointerToObjCPointerCast: case CK_BlockPointerToObjCPointerCast: case CK_NoOp: case CK_LValueToRValue: return EmitLValue(E->getSubExpr()); case CK_UncheckedDerivedToBase: case CK_DerivedToBase: { const RecordType *DerivedClassTy = E->getSubExpr()->getType()->getAs<RecordType>(); auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); LValue LV = EmitLValue(E->getSubExpr()); llvm::Value *This = LV.getAddress(); // Perform the derived-to-base conversion llvm::Value *Base = GetAddressOfBaseClass( This, DerivedClassDecl, E->path_begin(), E->path_end(), /*NullCheckValue=*/false, E->getExprLoc()); return MakeAddrLValue(Base, E->getType()); } case CK_ToUnion: return EmitAggExprToLValue(E); case CK_BaseToDerived: { const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>(); auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl()); LValue LV = EmitLValue(E->getSubExpr()); // Perform the base-to-derived conversion llvm::Value *Derived = GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(), /*NullCheckValue=*/false); // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is // performed and the object is not of the derived type. if (sanitizePerformTypeCheck()) EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(), Derived, E->getType()); if (SanOpts.has(SanitizerKind::CFIDerivedCast)) EmitVTablePtrCheckForCast(E->getType(), Derived, /*MayBeNull=*/false, CFITCK_DerivedCast, E->getLocStart()); return MakeAddrLValue(Derived, E->getType()); } case CK_LValueBitCast: { // This must be a reinterpret_cast (or c-style equivalent). 
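    // e.g. `reinterpret_cast<T &>(obj)`: reuse the operand's address and
    // simply bitcast the pointer to the type as written, rather than
    // converting the value.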
const auto *CE = cast<ExplicitCastExpr>(E); LValue LV = EmitLValue(E->getSubExpr()); llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), ConvertType(CE->getTypeAsWritten())); if (SanOpts.has(SanitizerKind::CFIUnrelatedCast)) EmitVTablePtrCheckForCast(E->getType(), V, /*MayBeNull=*/false, CFITCK_UnrelatedCast, E->getLocStart()); return MakeAddrLValue(V, E->getType()); } case CK_ObjCObjectLValueCast: { LValue LV = EmitLValue(E->getSubExpr()); QualType ToType = getContext().getLValueReferenceType(E->getType()); llvm::Value *V = Builder.CreateBitCast(LV.getAddress(), ConvertType(ToType)); return MakeAddrLValue(V, E->getType()); } // HLSL Change Starts case CK_HLSLVectorSplat: { LValue LV = EmitLValue(E->getSubExpr()); llvm::Value *LVal = nullptr; if (LV.isSimple()) LVal = LV.getAddress(); else if (LV.isExtVectorElt()) { llvm::Constant *VecElts = LV.getExtVectorElts(); LVal = Builder.CreateGEP( LV.getExtVectorAddr(), {Builder.getInt32(0), VecElts->getAggregateElement((unsigned)0)}); } else assert(0 && "All other types should not be LValues at this point"); QualType ToType = getContext().getLValueReferenceType(E->getType()); // bitcast to target type llvm::Type *ResultType = ConvertType(ToType); llvm::Value *bitcast = Builder.CreateBitCast(LVal, ResultType); return MakeAddrLValue(bitcast, ToType); } case CK_HLSLVectorTruncationCast: { LValue LV = EmitLValue(E->getSubExpr()); QualType ToType = getContext().getLValueReferenceType(E->getType()); // bitcast to target type llvm::Type *ResultType = ConvertType(ToType); llvm::Value *bitcast = Builder.CreateBitCast(LV.getAddress(), ResultType); return MakeAddrLValue(bitcast, ToType); } case CK_HLSLVectorToScalarCast: { LValue LV = EmitLValue(E->getSubExpr()); QualType ToType = getContext().getLValueReferenceType(E->getType()); llvm::ConstantInt *idxZero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(getLLVMContext()), 0); llvm::Value *GEP = Builder.CreateInBoundsGEP(LV.getAddress(), {idxZero, idxZero}); return MakeAddrLValue(GEP, ToType); } break; case CK_HLSLCC_IntegralToFloating: case CK_HLSLCC_FloatingToIntegral: { LValue LV = EmitLValue(E->getSubExpr()); QualType ToType = getContext().getLValueReferenceType(E->getType()); // bitcast to target type llvm::Type *ResultType = ConvertType(ToType); llvm::Value *bitcast = Builder.CreateBitCast(LV.getAddress(), ResultType); return MakeAddrLValue(bitcast, ToType); } case CK_FlatConversion: { // Just bitcast. QualType ToType = getContext().getLValueReferenceType(E->getType()); LValue LV = EmitLValue(E->getSubExpr()); llvm::Value *This = LV.getAddress(); // bitcast to target type llvm::Type *ResultType = ConvertType(ToType); // Make sure generate Inst not Operator to make lowering easy. bool originAllowFolding = Builder.AllowFolding; Builder.AllowFolding = false; llvm::Value *bitcast = Builder.CreateBitCast(This, ResultType); Builder.AllowFolding = originAllowFolding; return MakeAddrLValue(bitcast, ToType); } case CK_HLSLDerivedToBase: { // HLSL only single inheritance. // Just GEP. QualType ToType = getContext().getLValueReferenceType(E->getType()); LValue LV = EmitLValue(E->getSubExpr()); llvm::Value *This = LV.getAddress(); // gep to target type llvm::Type *ResultType = ConvertType(ToType); unsigned level = 0; llvm::Type *ToTy = ResultType->getPointerElementType(); llvm::Type *FromTy = This->getType()->getPointerElementType(); // For empty struct, just bitcast. 
if (FromTy->getStructNumElements()== 0) { llvm::Value *bitcast = Builder.CreateBitCast(This, ResultType); return MakeAddrLValue(bitcast, ToType); } while (ToTy != FromTy) { FromTy = FromTy->getStructElementType(0); ++level; } llvm::Value *zeroIdx = Builder.getInt32(0); SmallVector<llvm::Value *, 2> IdxList(level + 1, zeroIdx); llvm::Value *GEP = Builder.CreateInBoundsGEP(This, IdxList); return MakeAddrLValue(GEP, ToType); } case CK_HLSLMatrixSplat: case CK_HLSLMatrixToScalarCast: case CK_HLSLMatrixTruncationCast: case CK_HLSLMatrixToVectorCast: // Matrices should be handled above. case CK_HLSLVectorToMatrixCast: case CK_HLSLCC_IntegralCast: case CK_HLSLCC_IntegralToBoolean: case CK_HLSLCC_FloatingToBoolean: case CK_HLSLCC_FloatingCast: llvm_unreachable("Unhandled HLSL lvalue cast"); // HLSL Change Ends case CK_ZeroToOCLEvent: llvm_unreachable("NULL to OpenCL event lvalue cast is not valid"); } llvm_unreachable("Unhandled lvalue cast kind?"); } LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) { assert(OpaqueValueMappingData::shouldBindAsLValue(e)); return getOpaqueLValueMapping(e); } RValue CodeGenFunction::EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc) { QualType FT = FD->getType(); LValue FieldLV = EmitLValueForField(LV, FD); switch (getEvaluationKind(FT)) { case TEK_Complex: return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc)); case TEK_Aggregate: return FieldLV.asAggregateRValue(); case TEK_Scalar: return EmitLoadOfLValue(FieldLV, Loc); } llvm_unreachable("bad evaluation kind"); } //===--------------------------------------------------------------------===// // Expression Emission //===--------------------------------------------------------------------===// RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue) { // Builtins never have block type. if (E->getCallee()->getType()->isBlockPointerType()) return EmitBlockCallExpr(E, ReturnValue); if (const auto *CE = dyn_cast<CXXMemberCallExpr>(E)) return EmitCXXMemberCallExpr(CE, ReturnValue); if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E)) return EmitCUDAKernelCallExpr(CE, ReturnValue); const Decl *TargetDecl = E->getCalleeDecl(); if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) { if (unsigned builtinID = FD->getBuiltinID()) return EmitBuiltinExpr(FD, builtinID, E, ReturnValue); // HLSL Change Starts if (getLangOpts().HLSL) { if (const NamespaceDecl *ns = dyn_cast<NamespaceDecl>(FD->getParent())) { if (ns->getName() == "hlsl") { // do hlsl intrinsic generation return EmitHLSLBuiltinCallExpr(FD, E, ReturnValue); } } } // HLSL Change End } if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E)) if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue); if (const auto *PseudoDtor = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) { QualType DestroyedType = PseudoDtor->getDestroyedType(); if (getLangOpts().ObjCAutoRefCount && DestroyedType->isObjCLifetimeType() && (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong || DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) { // Automatic Reference Counting: // If the pseudo-expression names a retainable object with weak or // strong lifetime, the object shall be released. Expr *BaseExpr = PseudoDtor->getBase(); llvm::Value *BaseValue = nullptr; Qualifiers BaseQuals; // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. 
if (PseudoDtor->isArrow()) { BaseValue = EmitScalarExpr(BaseExpr); const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); BaseQuals = PTy->getPointeeType().getQualifiers(); } else { LValue BaseLV = EmitLValue(BaseExpr); BaseValue = BaseLV.getAddress(); QualType BaseTy = BaseExpr->getType(); BaseQuals = BaseTy.getQualifiers(); } switch (PseudoDtor->getDestroyedType().getObjCLifetime()) { case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: break; case Qualifiers::OCL_Strong: EmitARCRelease(Builder.CreateLoad(BaseValue, PseudoDtor->getDestroyedType().isVolatileQualified()), ARCPreciseLifetime); break; case Qualifiers::OCL_Weak: EmitARCDestroyWeak(BaseValue); break; } } else { // C++ [expr.pseudo]p1: // The result shall only be used as the operand for the function call // operator (), and the result of such a call has type void. The only // effect is the evaluation of the postfix-expression before the dot or // arrow. EmitScalarExpr(E->getCallee()); } return RValue::get(nullptr); } llvm::Value *Callee = EmitScalarExpr(E->getCallee()); return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue, TargetDecl); } LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { // Comma expressions just emit their LHS then their RHS as an l-value. if (E->getOpcode() == BO_Comma) { EmitIgnoredExpr(E->getLHS()); EnsureInsertPoint(); return EmitLValue(E->getRHS()); } if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI) return EmitPointerToDataMemberBinaryExpr(E); assert(E->getOpcode() == BO_Assign && "unexpected binary l-value"); // Note that in all of these cases, __block variables need the RHS // evaluated first just in case the variable gets moved by the RHS. switch (getEvaluationKind(E->getType())) { case TEK_Scalar: { switch (E->getLHS()->getType().getObjCLifetime()) { case Qualifiers::OCL_Strong: return EmitARCStoreStrong(E, /*ignored*/ false).first; case Qualifiers::OCL_Autoreleasing: return EmitARCStoreAutoreleasing(E).first; // No reason to do any of these differently. case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Weak: break; } RValue RV = EmitAnyExpr(E->getRHS()); LValue LV = EmitCheckedLValue(E->getLHS(), TCK_Store); EmitStoreThroughLValue(RV, LV); return LV; } case TEK_Complex: return EmitComplexAssignmentLValue(E); case TEK_Aggregate: return EmitAggExprToLValue(E); } llvm_unreachable("bad evaluation kind"); } LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { RValue RV = EmitCallExpr(E); if (!RV.isScalar()) return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); assert(E->getCallReturnType(getContext())->isReferenceType() && "Can't have a scalar return unless the return type is a " "reference type!"); return MakeAddrLValue(RV.getScalarVal(), E->getType()); } LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) { // FIXME: This shouldn't require another copy. 
return EmitAggExprToLValue(E); } LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor() && "binding l-value to type which needs a temporary"); AggValueSlot Slot = CreateAggTemp(E->getType()); EmitCXXConstructExpr(E, Slot); return MakeAddrLValue(Slot.getAddr(), E->getType()); } LValue CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) { return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType()); } llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) { return Builder.CreateBitCast(CGM.GetAddrOfUuidDescriptor(E), ConvertType(E->getType())->getPointerTo()); } LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) { return MakeAddrLValue(EmitCXXUuidofExpr(E), E->getType()); } LValue CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); Slot.setExternallyDestructed(); EmitAggExpr(E->getSubExpr(), Slot); EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr()); return MakeAddrLValue(Slot.getAddr(), E->getType()); } LValue CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) { AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue"); EmitLambdaExpr(E, Slot); return MakeAddrLValue(Slot.getAddr(), E->getType()); } LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) { RValue RV = EmitObjCMessageExpr(E); if (!RV.isScalar()) return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); assert(E->getMethodDecl()->getReturnType()->isReferenceType() && "Can't have a scalar return unless the return type is a " "reference type!"); return MakeAddrLValue(RV.getScalarVal(), E->getType()); } LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) { llvm::Value *V = CGM.getObjCRuntime().GetSelector(*this, E->getSelector(), true); return MakeAddrLValue(V, E->getType()); } llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) { return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar); } LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) { return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue, Ivar, CVRQualifiers); } LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { // FIXME: A lot of the code below could be shared with EmitMemberExpr. llvm::Value *BaseValue = nullptr; const Expr *BaseExpr = E->getBase(); Qualifiers BaseQuals; QualType ObjectTy; if (E->isArrow()) { BaseValue = EmitScalarExpr(BaseExpr); ObjectTy = BaseExpr->getType()->getPointeeType(); BaseQuals = ObjectTy.getQualifiers(); } else { LValue BaseLV = EmitLValue(BaseExpr); // FIXME: this isn't right for bitfields. BaseValue = BaseLV.getAddress(); ObjectTy = BaseExpr->getType(); BaseQuals = ObjectTy.getQualifiers(); } LValue LV = EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), BaseQuals.getCVRQualifiers()); setObjCGCLValueClass(getContext(), E, LV); return LV; } LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { // Can only get l-value for message expression returning aggregate type RValue RV = EmitAnyExprToTemp(E); return MakeAddrLValue(RV.getAggregateAddr(), E->getType()); } RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee, const CallExpr *E, ReturnValueSlot ReturnValue, const Decl *TargetDecl, llvm::Value *Chain) { // Get the actual function type. 
  // The callee type will always be a pointer to
  // function type or a block pointer type.
  assert(CalleeType->isFunctionPointerType() &&
         "Call must have function pointer type!");

  CalleeType = getContext().getCanonicalType(CalleeType);

  const auto *FnType =
      cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());

  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
      (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
    if (llvm::Constant *PrefixSig =
            CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
      SanitizerScope SanScope(this);
      llvm::Constant *FTRTTIConst =
          CGM.GetAddrOfRTTIDescriptor(QualType(FnType, 0), /*ForEH=*/true);
      llvm::Type *PrefixStructTyElems[] = {
        PrefixSig->getType(),
        FTRTTIConst->getType()
      };
      llvm::StructType *PrefixStructTy = llvm::StructType::get(
          CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);

      llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
          Callee, llvm::PointerType::getUnqual(PrefixStructTy));
      llvm::Value *CalleeSigPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
      llvm::Value *CalleeSig = Builder.CreateLoad(CalleeSigPtr);
      llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);

      llvm::BasicBlock *Cont = createBasicBlock("cont");
      llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
      Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);

      EmitBlock(TypeCheck);
      llvm::Value *CalleeRTTIPtr =
          Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
      llvm::Value *CalleeRTTI = Builder.CreateLoad(CalleeRTTIPtr);
      llvm::Value *CalleeRTTIMatch =
          Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
      llvm::Constant *StaticData[] = {
        EmitCheckSourceLocation(E->getLocStart()),
        EmitCheckTypeDescriptor(CalleeType)
      };
      EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
                "function_type_mismatch", StaticData, Callee);

      Builder.CreateBr(Cont);
      EmitBlock(Cont);
    }
  }

  // HLSL Change Begins
  llvm::SmallVector<LValue, 8> castArgList;
  llvm::SmallVector<LValue, 8> lifetimeCleanupList;
  // The argList of the CallExpr; it may be updated for out parameters.
  llvm::SmallVector<const Stmt *, 8> argList(E->arg_begin(), E->arg_end());
  ConstExprIterator argBegin = argList.data();
  ConstExprIterator argEnd = argList.data() + E->getNumArgs();

  // out param conversion
  CodeGenFunction::HLSLOutParamScope OutParamScope(*this);
  auto MapTemp = [&](const VarDecl *LocalVD, llvm::Value *TmpArg) {
    OutParamScope.addTemp(LocalVD, TmpArg);
  };
  if (getLangOpts().HLSL) {
    if (const FunctionDecl *FD = E->getDirectCallee())
      CGM.getHLSLRuntime().EmitHLSLOutParamConversionInit(
          *this, FD, E, castArgList, argList, lifetimeCleanupList, MapTemp);
  }
  // HLSL Change Ends

  CallArgList Args;
  if (Chain)
    Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
             CGM.getContext().VoidPtrTy);
  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType),
               argBegin, argEnd, // HLSL Change - use updated argList
               E->getDirectCallee(), /*ParamsToSkip*/ 0);

  const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
      Args, FnType, /*isChainCall=*/Chain);

  // C99 6.5.2.2p6:
  //   If the expression that denotes the called function has a type
  //   that does not include a prototype, [the default argument
  //   promotions are performed]. If the number of arguments does not
  //   equal the number of parameters, the behavior is undefined. If
  //   the function is defined with a type that includes a prototype,
  //   and either the prototype ends with an ellipsis (, ...) or the
  //   types of the arguments after promotion are not compatible with
  //   the types of the parameters, the behavior is undefined. If the
  //   function is defined with a type that does not include a
  //   prototype, and the types of the arguments after promotion are
  //   not compatible with those of the parameters after promotion,
  //   the behavior is undefined [except in some trivial cases].
  // That is, in the general case, we should assume that a call
  // through an unprototyped function type works like a *non-variadic*
  // call. The way we make this work is to cast to the exact type
  // of the promoted arguments.
  //
  // Chain calls use this same code path to add the invisible chain parameter
  // to the function type.
  if (isa<FunctionNoProtoType>(FnType) || Chain) {
    llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
    CalleeTy = CalleeTy->getPointerTo();
    Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
  }

  RValue CallVal = EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);

  // HLSL Change Begins
  // out param conversion
  // conversion and copy back after the call
  if (getLangOpts().HLSL)
    CGM.getHLSLRuntime().EmitHLSLOutParamConversionCopyBack(
        *this, castArgList, lifetimeCleanupList);
  // HLSL Change Ends

  return CallVal;
}

LValue CodeGenFunction::
EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
  llvm::Value *BaseV;
  if (E->getOpcode() == BO_PtrMemI)
    BaseV = EmitScalarExpr(E->getLHS());
  else
    BaseV = EmitLValue(E->getLHS()).getAddress();

  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());

  const MemberPointerType *MPT =
      E->getRHS()->getType()->getAs<MemberPointerType>();

  llvm::Value *AddV = CGM.getCXXABI().EmitMemberDataPointerAddress(
      *this, E, BaseV, OffsetV, MPT);

  return MakeAddrLValue(AddV, MPT->getPointeeType());
}

/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(llvm::Value *addr,
                                            QualType type,
                                            SourceLocation loc) {
  LValue lvalue = MakeNaturalAlignAddrLValue(addr, type);
  switch (getEvaluationKind(type)) {
  case TEK_Complex:
    return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
  case TEK_Aggregate:
    return lvalue.asAggregateRValue();
  case TEK_Scalar:
    return RValue::get(EmitLoadOfScalar(lvalue, loc));
  }
  llvm_unreachable("bad evaluation kind");
}

void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
  assert(Val->getType()->isFPOrFPVectorTy());
  if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
    return;

  llvm::MDBuilder MDHelper(getLLVMContext());
  llvm::MDNode *Node = MDHelper.createFPMath(Accuracy);
  cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpmath, Node);
}

namespace {
  struct LValueOrRValue {
    LValue LV;
    RValue RV;
  };
}

static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
                                           const PseudoObjectExpr *E,
                                           bool forLValue,
                                           AggValueSlot slot) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression, if any.
  const Expr *resultExpr = E->getResultExpr();
  LValueOrRValue result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const auto *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      // If this is the result expression, we may need to evaluate
      // directly into the slot.
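      // (For instance, an ObjC property compound assignment like `o.p += 1`
      // is a PseudoObjectExpr whose semantic expressions invoke the getter
      // and setter, with OpaqueValueExprs binding the shared subvalues.)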
typedef CodeGenFunction::OpaqueValueMappingData OVMA; OVMA opaqueData; if (ov == resultExpr && ov->isRValue() && !forLValue && CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) { CGF.EmitAggExpr(ov->getSourceExpr(), slot); LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType()); opaqueData = OVMA::bind(CGF, ov, LV); result.RV = slot.asRValue(); // Otherwise, emit as normal. } else { opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); // If this is the result, also evaluate the result now. if (ov == resultExpr) { if (forLValue) result.LV = CGF.EmitLValue(ov); else result.RV = CGF.EmitAnyExpr(ov, slot); } } opaques.push_back(opaqueData); // Otherwise, if the expression is the result, evaluate it // and remember the result. } else if (semantic == resultExpr) { if (forLValue) result.LV = CGF.EmitLValue(semantic); else result.RV = CGF.EmitAnyExpr(semantic, slot); // Otherwise, evaluate the expression in an ignored context. } else { CGF.EmitIgnoredExpr(semantic); } } // Unbind all the opaques now. for (unsigned i = 0, e = opaques.size(); i != e; ++i) opaques[i].unbind(CGF); return result; } RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E, AggValueSlot slot) { return emitPseudoObjectExpr(*this, E, false, slot).RV; } LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) { return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV; }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenPGO.h
//===--- CodeGenPGO.h - PGO Instrumentation for LLVM CodeGen ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Instrumentation-based profile-guided optimization
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENPGO_H
#define LLVM_CLANG_LIB_CODEGEN_CODEGENPGO_H

#include "CGBuilder.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>

namespace clang {
namespace CodeGen {

/// Per-function PGO state.
class CodeGenPGO {
private:
  CodeGenModule &CGM;
  std::string FuncName;
  llvm::GlobalVariable *FuncNameVar;

  unsigned NumRegionCounters;
  uint64_t FunctionHash;
  std::unique_ptr<llvm::DenseMap<const Stmt *, unsigned>> RegionCounterMap;
  std::unique_ptr<llvm::DenseMap<const Stmt *, uint64_t>> StmtCountMap;
  std::vector<uint64_t> RegionCounts;
  uint64_t CurrentRegionCount;
  /// \brief A flag that is set to true when this function doesn't need
  /// to have coverage mapping data.
  bool SkipCoverageMapping;

public:
  CodeGenPGO(CodeGenModule &CGM)
      : CGM(CGM), NumRegionCounters(0), FunctionHash(0), CurrentRegionCount(0),
        SkipCoverageMapping(false) {}

  /// Whether or not we have PGO region data for the current function. This is
  /// false both when we have no data at all and when our data has been
  /// discarded.
  bool haveRegionCounts() const { return !RegionCounts.empty(); }

  /// Return the counter value of the current region.
  uint64_t getCurrentRegionCount() const { return CurrentRegionCount; }

  /// Set the counter value for the current region. This is used to keep track
  /// of changes to the most recent counter from control flow and non-local
  /// exits.
  void setCurrentRegionCount(uint64_t Count) { CurrentRegionCount = Count; }

  /// Check if an execution count is known for a given statement. If so,
  /// return the count; otherwise return None.
  Optional<uint64_t> getStmtCount(const Stmt *S) {
    if (!StmtCountMap)
      return None;
    auto I = StmtCountMap->find(S);
    if (I == StmtCountMap->end())
      return None;
    return I->second;
  }

  /// If the execution count for the current statement is known, record that
  /// as the current count.
  void setCurrentStmt(const Stmt *S) {
    if (auto Count = getStmtCount(S))
      setCurrentRegionCount(*Count);
  }

  /// Check if we need to emit coverage mapping for a given declaration.
  void checkGlobalDecl(GlobalDecl GD);
  /// Assign counters to regions and configure them for PGO of a given
  /// function. Does nothing if instrumentation is not enabled and either
  /// generates global variables or associates PGO data with each of the
  /// counters depending on whether we are generating or using instrumentation.
  void assignRegionCounters(const Decl *D, llvm::Function *Fn);
  /// Emit a coverage mapping range with a counter zero
  /// for an unused declaration.
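  /// (e.g. a function that was parsed but never emitted still gets a
  /// zero-count region, so coverage reports can show it as unexecuted).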
  void emitEmptyCounterMapping(const Decl *D, StringRef FuncName,
                               llvm::GlobalValue::LinkageTypes Linkage);

private:
  void setFuncName(llvm::Function *Fn);
  void setFuncName(StringRef Name, llvm::GlobalValue::LinkageTypes Linkage);
  void createFuncNameVar(llvm::GlobalValue::LinkageTypes Linkage);
  void mapRegionCounters(const Decl *D);
  void computeRegionCounts(const Decl *D);
  void applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
                               llvm::Function *Fn);
  void loadRegionCounts(llvm::IndexedInstrProfReader *PGOReader,
                        bool IsInMainFile);
  void emitCounterVariables();
  void emitCounterRegionMapping(const Decl *D);

public:
  void emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S);

  /// Return the region count for the counter associated with the given
  /// statement.
  uint64_t getRegionCount(const Stmt *S) {
    if (!RegionCounterMap)
      return 0;
    if (!haveRegionCounts())
      return 0;
    return RegionCounts[(*RegionCounterMap)[S]];
  }
};

}  // end namespace CodeGen
}  // end namespace clang

#endif
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/BackendUtil.cpp
//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "clang/CodeGen/BackendUtil.h" #include "dxc/HLSL/DxilGenerationPass.h" // HLSL Change #include "dxc/HLSL/HLMatrixLowerPass.h" // HLSL Change #include "dxc/Support/Global.h" // HLSL Change #include "dxc/config.h" // HLSL Change #include "clang/Basic/Diagnostic.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/TargetOptions.h" #include "clang/Frontend/CodeGenOptions.h" #include "clang/Frontend/FrontendDiagnostic.h" #include "clang/Frontend/Utils.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/Bitcode/BitcodeWriterPass.h" #include "llvm/CodeGen/RegAllocRegistry.h" #include "llvm/CodeGen/SchedulerRegistry.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/IRPrintingPasses.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/IR/Verifier.h" #include "llvm/MC/SubtargetFeature.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/TimeProfiler.h" // HLSL Change #include "llvm/Support/Timer.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetSubtargetInfo.h" #include "llvm/Transforms/IPO.h" #include "llvm/Transforms/IPO/PassManagerBuilder.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/ObjCARC.h" #include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Utils/SymbolRewriter.h" #include <cstdio> #include <memory> using namespace clang; using namespace llvm; namespace { class EmitAssemblyHelper { DiagnosticsEngine &Diags; const CodeGenOptions &CodeGenOpts; const clang::TargetOptions &TargetOpts; const LangOptions &LangOpts; Module *TheModule; // HLSL Changes Start std::string CodeGenPassesConfig; std::string PerModulePassesConfig; std::string PerFunctionPassesConfig; mutable llvm::raw_string_ostream CodeGenPassesConfigOS; mutable llvm::raw_string_ostream PerModulePassesConfigOS; mutable llvm::raw_string_ostream PerFunctionPassesConfigOS; // HLSL Changes End Timer CodeGenerationTime; mutable legacy::PassManager *CodeGenPasses; mutable legacy::PassManager *PerModulePasses; mutable legacy::FunctionPassManager *PerFunctionPasses; private: TargetIRAnalysis getTargetIRAnalysis() const { if (TM) return TM->getTargetIRAnalysis(); return TargetIRAnalysis(); } legacy::PassManager *getCodeGenPasses() const { if (!CodeGenPasses) { CodeGenPasses = new legacy::PassManager(); CodeGenPasses->TrackPassOS = &CodeGenPassesConfigOS; CodeGenPasses->add( createTargetTransformInfoWrapperPass(getTargetIRAnalysis())); } return CodeGenPasses; } legacy::PassManager *getPerModulePasses() const { if (!PerModulePasses) { PerModulePasses = new legacy::PassManager(); PerModulePasses->HLSLPrintBeforeAll = this->CodeGenOpts.HLSLPrintBeforeAll; PerModulePasses->HLSLPrintBefore = this->CodeGenOpts.HLSLPrintBefore; PerModulePasses->HLSLPrintAfterAll = this->CodeGenOpts.HLSLPrintAfterAll; PerModulePasses->HLSLPrintAfter = this->CodeGenOpts.HLSLPrintAfter; PerModulePasses->TrackPassOS = &PerModulePassesConfigOS; PerModulePasses->add( 
createTargetTransformInfoWrapperPass(getTargetIRAnalysis())); } return PerModulePasses; } legacy::FunctionPassManager *getPerFunctionPasses() const { if (!PerFunctionPasses) { PerFunctionPasses = new legacy::FunctionPassManager(TheModule); PerFunctionPasses->HLSLPrintBeforeAll = this->CodeGenOpts.HLSLPrintBeforeAll; PerFunctionPasses->HLSLPrintBefore = this->CodeGenOpts.HLSLPrintBefore; PerFunctionPasses->HLSLPrintAfterAll = this->CodeGenOpts.HLSLPrintAfterAll; PerFunctionPasses->HLSLPrintAfter = this->CodeGenOpts.HLSLPrintAfter; PerFunctionPasses->TrackPassOS = &PerFunctionPassesConfigOS; PerFunctionPasses->add( createTargetTransformInfoWrapperPass(getTargetIRAnalysis())); } return PerFunctionPasses; } void CreatePasses(); /// Generates the TargetMachine. /// Returns Null if it is unable to create the target machine. /// Some of our clang tests specify triples which are not built /// into clang. This is okay because these tests check the generated /// IR, and they require DataLayout which depends on the triple. /// In this case, we allow this method to fail and not report an error. /// When MustCreateTM is used, we print an error if we are unable to load /// the requested target. TargetMachine *CreateTargetMachine(bool MustCreateTM); /// Add passes necessary to emit assembly or LLVM IR. /// /// \return True on success. bool AddEmitPasses(BackendAction Action, raw_pwrite_stream &OS); public: EmitAssemblyHelper(DiagnosticsEngine &_Diags, const CodeGenOptions &CGOpts, const clang::TargetOptions &TOpts, const LangOptions &LOpts, Module *M) : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts), TheModule(M), // HLSL Changes Start CodeGenPassesConfigOS(CodeGenPassesConfig), PerModulePassesConfigOS(PerModulePassesConfig), PerFunctionPassesConfigOS(PerFunctionPassesConfig), // HLSL Changes End CodeGenerationTime("Code Generation Time"), CodeGenPasses(nullptr), PerModulePasses(nullptr), PerFunctionPasses(nullptr) {} ~EmitAssemblyHelper() { delete CodeGenPasses; delete PerModulePasses; delete PerFunctionPasses; if (CodeGenOpts.DisableFree) BuryPointer(std::move(TM)); } std::unique_ptr<TargetMachine> TM; void EmitAssembly(BackendAction Action, raw_pwrite_stream *OS); }; // We need this wrapper to access LangOpts and CGOpts from extension functions // that we add to the PassManagerBuilder. 
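// (The extension callbacks receive only a const PassManagerBuilder &, so they
// recover the options by downcasting to PassManagerBuilderWrapper with
// static_cast; see addSampleProfileLoaderPass below for the pattern.)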
class PassManagerBuilderWrapper : public PassManagerBuilder { public: PassManagerBuilderWrapper(const CodeGenOptions &CGOpts, const LangOptions &LangOpts) : PassManagerBuilder(), CGOpts(CGOpts), LangOpts(LangOpts) {} const CodeGenOptions &getCGOpts() const { return CGOpts; } const LangOptions &getLangOpts() const { return LangOpts; } private: const CodeGenOptions &CGOpts; const LangOptions &LangOpts; }; } #ifdef MS_ENABLE_OBJCARC // HLSL Change static void addObjCARCAPElimPass(const PassManagerBuilder &Builder, PassManagerBase &PM) { if (Builder.OptLevel > 0) PM.add(createObjCARCAPElimPass()); } static void addObjCARCExpandPass(const PassManagerBuilder &Builder, PassManagerBase &PM) { if (Builder.OptLevel > 0) PM.add(createObjCARCExpandPass()); } static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase &PM) { if (Builder.OptLevel > 0) PM.add(createObjCARCOptPass()); } #endif // MS_ENABLE_OBJCARC - HLSL Change static void addSampleProfileLoaderPass(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper &>(Builder); const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts(); PM.add(createSampleProfileLoaderPass(CGOpts.SampleProfileFile)); } static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { PM.add(createAddDiscriminatorsPass()); } #ifdef MS_ENABLE_OBJCARC // HLSL Change static void addBoundsCheckingPass(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { PM.add(createBoundsCheckingPass()); } #endif // MS_ENABLE_OBJCARC // HLSL Change #ifdef MS_ENABLE_INSTR // HLSL Change static void addSanitizerCoveragePass(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper&>(Builder); const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts(); SanitizerCoverageOptions Opts; Opts.CoverageType = static_cast<SanitizerCoverageOptions::Type>(CGOpts.SanitizeCoverageType); Opts.IndirectCalls = CGOpts.SanitizeCoverageIndirectCalls; Opts.TraceBB = CGOpts.SanitizeCoverageTraceBB; Opts.TraceCmp = CGOpts.SanitizeCoverageTraceCmp; Opts.Use8bitCounters = CGOpts.SanitizeCoverage8bitCounters; PM.add(createSanitizerCoverageModulePass(Opts)); } static void addAddressSanitizerPasses(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/false)); PM.add(createAddressSanitizerModulePass(/*CompileKernel*/false)); } static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/true)); PM.add(createAddressSanitizerModulePass(/*CompileKernel*/true)); } static void addMemorySanitizerPass(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper&>(Builder); const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts(); PM.add(createMemorySanitizerPass(CGOpts.SanitizeMemoryTrackOrigins)); // MemorySanitizer inserts complex instrumentation that mostly follows // the logic of the original code, but operates on "shadow" values. // It can benefit from re-running some general purpose optimization passes. 
if (Builder.OptLevel > 0) { PM.add(createEarlyCSEPass()); PM.add(createReassociatePass()); PM.add(createLICMPass()); PM.add(createGVNPass()); PM.add(createInstructionCombiningPass()); PM.add(createDeadStoreEliminationPass()); } } static void addThreadSanitizerPass(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { PM.add(createThreadSanitizerPass()); } static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) { const PassManagerBuilderWrapper &BuilderWrapper = static_cast<const PassManagerBuilderWrapper&>(Builder); const LangOptions &LangOpts = BuilderWrapper.getLangOpts(); PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles)); } #endif // MS_ENABLE_INSTR - HLSL Change static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple, const CodeGenOptions &CodeGenOpts) { TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple); if (!CodeGenOpts.SimplifyLibCalls) TLII->disableAllFunctions(); switch (CodeGenOpts.getVecLib()) { case CodeGenOptions::Accelerate: TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate); break; default: break; } return TLII; } static void addSymbolRewriterPass(const CodeGenOptions &Opts, legacy::PassManager *MPM) { llvm::SymbolRewriter::RewriteDescriptorList DL; llvm::SymbolRewriter::RewriteMapParser MapParser; for (const auto &MapFile : Opts.RewriteMapFiles) MapParser.parse(MapFile, &DL); MPM->add(createRewriteSymbolsPass(DL)); } void EmitAssemblyHelper::CreatePasses() { unsigned OptLevel = CodeGenOpts.OptimizationLevel; CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining(); // Handle disabling of LLVM optimization, where we want to preserve the // internal module before any optimization. if (CodeGenOpts.DisableLLVMOpts || CodeGenOpts.HLSLHighLevel) { // HLSL Change OptLevel = 0; // HLSL Change Begins. // HLSL always inline. if (!LangOpts.HLSL || CodeGenOpts.HLSLHighLevel) Inlining = CodeGenOpts.NoInlining; // HLSL Change Ends. 
} PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts); PMBuilder.OptLevel = OptLevel; PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize; PMBuilder.BBVectorize = CodeGenOpts.VectorizeBB; PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP; PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop; // HLSL Change - begin PMBuilder.HLSLHighLevel = CodeGenOpts.HLSLHighLevel; PMBuilder.HLSLAllowPreserveValues = CodeGenOpts.HLSLAllowPreserveValues; PMBuilder.HLSLOnlyWarnOnUnrollFail = CodeGenOpts.HLSLOnlyWarnOnUnrollFail; PMBuilder.HLSLExtensionsCodeGen = CodeGenOpts.HLSLExtensionsCodegen.get(); PMBuilder.HLSLResMayAlias = CodeGenOpts.HLSLResMayAlias; PMBuilder.ScanLimit = CodeGenOpts.ScanLimit; // Opt toggles const hlsl::options::OptimizationToggles &OptToggles = CodeGenOpts.HLSLOptimizationToggles; PMBuilder.EnableGVN = OptToggles.IsEnabled(hlsl::options::TOGGLE_GVN); PMBuilder.HLSLNoSink = !OptToggles.IsEnabled(hlsl::options::TOGGLE_SINK); PMBuilder.StructurizeLoopExitsForUnroll = OptToggles.IsEnabled( hlsl::options::TOGGLE_STRUCTURIZE_LOOP_EXITS_FOR_UNROLL); PMBuilder.HLSLEnableDebugNops = OptToggles.IsEnabled(hlsl::options::TOGGLE_DEBUG_NOPS); PMBuilder.HLSLEnableLifetimeMarkers = CodeGenOpts.HLSLEnableLifetimeMarkers && OptToggles.IsEnabled(hlsl::options::TOGGLE_LIFETIME_MARKERS); PMBuilder.HLSLEnablePartialLifetimeMarkers = OptToggles.IsEnabled(hlsl::options::TOGGLE_PARTIAL_LIFETIME_MARKERS); PMBuilder.HLSLEnableAggressiveReassociation = OptToggles.IsEnabled( hlsl::options::TOGGLE_ENABLE_AGGRESSIVE_REASSOCIATION); // HLSL Change - end PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime; PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops; PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions; PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO; PMBuilder.RerollLoops = CodeGenOpts.RerollLoops; PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible, addAddDiscriminatorsPass); if (!CodeGenOpts.SampleProfileFile.empty()) PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible, addSampleProfileLoaderPass); // In ObjC ARC mode, add the main ARC optimization passes. 
#ifdef MS_ENABLE_OBJCARC // HLSL Change if (LangOpts.ObjCAutoRefCount) { PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible, addObjCARCExpandPass); PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly, addObjCARCAPElimPass); PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate, addObjCARCOptPass); } if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) { PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate, addBoundsCheckingPass); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, addBoundsCheckingPass); } #endif // MS_ENABLE_OBJCARC - HLSL Change #ifdef MS_ENABLE_INSTR // HLSL Change if (CodeGenOpts.SanitizeCoverageType || CodeGenOpts.SanitizeCoverageIndirectCalls || CodeGenOpts.SanitizeCoverageTraceCmp) { PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, addSanitizerCoveragePass); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, addSanitizerCoveragePass); } if (LangOpts.Sanitize.has(SanitizerKind::Address)) { PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, addAddressSanitizerPasses); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, addAddressSanitizerPasses); } if (LangOpts.Sanitize.has(SanitizerKind::KernelAddress)) { PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, addKernelAddressSanitizerPasses); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, addKernelAddressSanitizerPasses); } if (LangOpts.Sanitize.has(SanitizerKind::Memory)) { PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, addMemorySanitizerPass); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, addMemorySanitizerPass); } if (LangOpts.Sanitize.has(SanitizerKind::Thread)) { PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, addThreadSanitizerPass); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, addThreadSanitizerPass); } if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) { PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast, addDataFlowSanitizerPass); PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0, addDataFlowSanitizerPass); } #endif // MS_ENABLE_INSTR - HLSL Change // Figure out TargetLibraryInfo. Triple TargetTriple(TheModule->getTargetTriple()); PMBuilder.LibraryInfo = createTLII(TargetTriple, CodeGenOpts); switch (Inlining) { case CodeGenOptions::NoInlining: break; case CodeGenOptions::NormalInlining: { PMBuilder.HLSLEarlyInlining = false; // HLSL Change PMBuilder.Inliner = createFunctionInliningPass(OptLevel, CodeGenOpts.OptimizeSize); break; } case CodeGenOptions::OnlyAlwaysInlining: // Respect always_inline. if (OptLevel == 0) // Do not insert lifetime intrinsics at -O0. PMBuilder.Inliner = createAlwaysInlinerPass(false); else PMBuilder.Inliner = createAlwaysInlinerPass(PMBuilder.HLSLEnableLifetimeMarkers); // HLSL Change break; } // Set up the per-function pass manager. legacy::FunctionPassManager *FPM = getPerFunctionPasses(); if (CodeGenOpts.VerifyModule) FPM->add(createVerifierPass()); PMBuilder.populateFunctionPassManager(*FPM); // Set up the per-module pass manager. legacy::PassManager *MPM = getPerModulePasses(); if (!CodeGenOpts.RewriteMapFiles.empty()) addSymbolRewriterPass(CodeGenOpts, MPM); #ifdef MS_ENABLE_INSTR // HLSL Change if (!CodeGenOpts.DisableGCov && (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes)) { // Not using 'GCOVOptions::getDefault' allows us to avoid exiting if // LLVM's -default-gcov-version flag is set to something invalid. 
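    // (Each field is filled in directly from CodeGenOpts below, so the
    // cl::opt-backed defaults behind GCOVOptions::getDefault are never
    // consulted.)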
GCOVOptions Options; Options.EmitNotes = CodeGenOpts.EmitGcovNotes; Options.EmitData = CodeGenOpts.EmitGcovArcs; memcpy(Options.Version, CodeGenOpts.CoverageVersion, 4); Options.UseCfgChecksum = CodeGenOpts.CoverageExtraChecksum; Options.NoRedZone = CodeGenOpts.DisableRedZone; Options.FunctionNamesInData = !CodeGenOpts.CoverageNoFunctionNamesInData; Options.ExitBlockBeforeBody = CodeGenOpts.CoverageExitBlockBeforeBody; MPM->add(createGCOVProfilerPass(Options)); if (CodeGenOpts.getDebugInfo() == CodeGenOptions::NoDebugInfo) MPM->add(createStripSymbolsPass(true)); } if (CodeGenOpts.ProfileInstrGenerate) { InstrProfOptions Options; Options.NoRedZone = CodeGenOpts.DisableRedZone; Options.InstrProfileOutput = CodeGenOpts.InstrProfileOutput; MPM->add(createInstrProfilingPass(Options)); } #endif PMBuilder.populateModulePassManager(*MPM); } TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) { // Create the TargetMachine for generating code. std::string Error; std::string Triple = TheModule->getTargetTriple(); const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error); if (!TheTarget) { if (MustCreateTM) Diags.Report(diag::err_fe_unable_to_create_target) << Error; return nullptr; } unsigned CodeModel = llvm::StringSwitch<unsigned>(CodeGenOpts.CodeModel) .Case("small", llvm::CodeModel::Small) .Case("kernel", llvm::CodeModel::Kernel) .Case("medium", llvm::CodeModel::Medium) .Case("large", llvm::CodeModel::Large) .Case("default", llvm::CodeModel::Default) .Default(~0u); assert(CodeModel != ~0u && "invalid code model!"); llvm::CodeModel::Model CM = static_cast<llvm::CodeModel::Model>(CodeModel); #if 0 // HLSL Change - no global state mutation on per-instance work SmallVector<const char *, 16> BackendArgs; BackendArgs.push_back("clang"); // Fake program name. 
if (!CodeGenOpts.DebugPass.empty()) { BackendArgs.push_back("-debug-pass"); BackendArgs.push_back(CodeGenOpts.DebugPass.c_str()); } if (!CodeGenOpts.LimitFloatPrecision.empty()) { BackendArgs.push_back("-limit-float-precision"); BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str()); } for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i) BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str()); BackendArgs.push_back(nullptr); llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1, BackendArgs.data()); #endif // HLSL Change std::string FeaturesStr; if (!TargetOpts.Features.empty()) { #if 0 // HLSL Change SubtargetFeatures Features; for (const std::string &Feature : TargetOpts.Features) Features.AddFeature(Feature); FeaturesStr = Features.getString(); #endif // HLSL Change } llvm::Reloc::Model RM = llvm::Reloc::Default; if (CodeGenOpts.RelocationModel == "static") { RM = llvm::Reloc::Static; } else if (CodeGenOpts.RelocationModel == "pic") { RM = llvm::Reloc::PIC_; } else { assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" && "Invalid PIC model!"); RM = llvm::Reloc::DynamicNoPIC; } CodeGenOpt::Level OptLevel = CodeGenOpt::Default; switch (CodeGenOpts.OptimizationLevel) { default: break; case 0: OptLevel = CodeGenOpt::None; break; case 3: OptLevel = CodeGenOpt::Aggressive; break; } llvm::TargetOptions Options; if (!TargetOpts.Reciprocals.empty()) Options.Reciprocals = TargetRecip(TargetOpts.Reciprocals); Options.ThreadModel = llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel) .Case("posix", llvm::ThreadModel::POSIX) .Case("single", llvm::ThreadModel::Single); if (CodeGenOpts.DisableIntegratedAS) Options.DisableIntegratedAS = true; if (CodeGenOpts.CompressDebugSections) Options.CompressDebugSections = true; if (CodeGenOpts.UseInitArray) Options.UseInitArray = true; // Set float ABI type. if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp") Options.FloatABIType = llvm::FloatABI::Soft; else if (CodeGenOpts.FloatABI == "hard") Options.FloatABIType = llvm::FloatABI::Hard; else { assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!"); Options.FloatABIType = llvm::FloatABI::Default; } // Set FP fusion mode. 
switch (CodeGenOpts.getFPContractMode()) { case CodeGenOptions::FPC_Off: Options.AllowFPOpFusion = llvm::FPOpFusion::Strict; break; case CodeGenOptions::FPC_On: Options.AllowFPOpFusion = llvm::FPOpFusion::Standard; break; case CodeGenOptions::FPC_Fast: Options.AllowFPOpFusion = llvm::FPOpFusion::Fast; break; } Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD; Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath; Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath; Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS; Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath; Options.StackAlignmentOverride = CodeGenOpts.StackAlignment; Options.PositionIndependentExecutable = LangOpts.PIELevel != 0; Options.FunctionSections = CodeGenOpts.FunctionSections; Options.DataSections = CodeGenOpts.DataSections; Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames; Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll; Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels; Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm; Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack; Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings; Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose; Options.MCOptions.ABIName = TargetOpts.ABI; TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr, Options, RM, CM, OptLevel); return TM; } bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action, raw_pwrite_stream &OS) { // Create the code generator passes. legacy::PassManager *PM = getCodeGenPasses(); // Add LibraryInfo. llvm::Triple TargetTriple(TheModule->getTargetTriple()); std::unique_ptr<TargetLibraryInfoImpl> TLII( createTLII(TargetTriple, CodeGenOpts)); PM->add(new TargetLibraryInfoWrapperPass(*TLII)); // Normal mode, emit a .s or .o file by running the code generator. Note, // this also adds codegenerator level optimization passes. TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile; if (Action == Backend_EmitObj) CGFT = TargetMachine::CGFT_ObjectFile; else if (Action == Backend_EmitMCNull) CGFT = TargetMachine::CGFT_Null; else assert(Action == Backend_EmitAssembly && "Invalid action!"); // Add ObjC ARC final-cleanup optimizations. This is done as part of the // "codegen" passes so that it isn't run multiple times when there is // inlining happening. #ifdef MS_ENABLE_OBJCARC // HLSL Change if (CodeGenOpts.OptimizationLevel > 0) PM->add(createObjCARCContractPass()); #endif // HLSL Change if (TM->addPassesToEmitFile(*PM, OS, CGFT, /*DisableVerify=*/!CodeGenOpts.VerifyModule)) { Diags.Report(diag::err_fe_unable_to_interface_with_target); return false; } return true; } void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_pwrite_stream *OS) { TimeRegion Region(llvm::TimePassesIsEnabled ? 
&CodeGenerationTime : nullptr); bool UsesCodeGen = (Action != Backend_EmitNothing && Action != Backend_EmitBC && Action != Backend_EmitPasses && Action != Backend_EmitLL); if (!TM) TM.reset(CreateTargetMachine(UsesCodeGen)); if (UsesCodeGen && !TM) return; if (TM) TheModule->setDataLayout(*TM->getDataLayout()); CreatePasses(); switch (Action) { case Backend_EmitNothing: case Backend_EmitPasses: // HLSL Change break; case Backend_EmitBC: getPerModulePasses()->add( createBitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists)); break; case Backend_EmitLL: getPerModulePasses()->add( createPrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists)); break; default: if (!AddEmitPasses(Action, *OS)) return; } // Before executing passes, print the final values of the LLVM options. cl::PrintOptionValues(); // HLSL Change Starts if (Action == Backend_EmitPasses) { if (PerFunctionPasses) { *OS << "# Per-function passes\n" "-opt-fn-passes\n"; *OS << PerFunctionPassesConfigOS.str(); } if (PerModulePasses) { *OS << "# Per-module passes\n" "-opt-mod-passes\n"; *OS << PerModulePassesConfigOS.str(); } if (CodeGenPasses) { *OS << "# Code generation passes\n" "-opt-mod-passes\n"; *OS << CodeGenPassesConfigOS.str(); } return; } // HLSL Change Ends // Run passes. For now we do all passes at once, but eventually we // would like to have the option of streaming code generation. if (PerFunctionPasses) { PrettyStackTraceString CrashInfo("Per-function optimization"); PerFunctionPasses->doInitialization(); for (Function &F : *TheModule) if (!F.isDeclaration()) PerFunctionPasses->run(F); PerFunctionPasses->doFinalization(); } if (PerModulePasses) { PrettyStackTraceString CrashInfo("Per-module optimization passes"); PerModulePasses->run(*TheModule); } if (CodeGenPasses) { PrettyStackTraceString CrashInfo("Code generation"); CodeGenPasses->run(*TheModule); } } void clang::EmitBackendOutput(DiagnosticsEngine &Diags, const CodeGenOptions &CGOpts, const clang::TargetOptions &TOpts, const LangOptions &LOpts, StringRef TDesc, Module *M, BackendAction Action, raw_pwrite_stream *OS) { EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M); // HLSL Change - Support hierarchical time tracing. TimeTraceScope TimeScope("Backend", StringRef("")); try { // HLSL Change Starts // Catch any fatal errors during optimization passes here // so that future passes can be skipped. AsmHelper.EmitAssembly(Action, OS); } catch (const ::hlsl::Exception &hlslException) { Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, "%0\n")) << StringRef(hlslException.what()); #if defined(DXC_CODEGEN_EXCEPTIONS_TRAP) // llvm::errs() doesn't work in release builds on Linux. // Use C-style fprintf because it works everywhere. fprintf(stderr, "internal codegen error: %s\n", hlslException.what()); fflush(stderr); LLVM_BUILTIN_TRAP; #endif } // HLSL Change Ends // If an optional clang TargetInfo description string was passed in, use it to // verify the LLVM TargetMachine's DataLayout. if (AsmHelper.TM && !TDesc.empty()) { std::string DLDesc = AsmHelper.TM->getDataLayout()->getStringRepresentation(); if (DLDesc != TDesc) { unsigned DiagID = Diags.getCustomDiagID( DiagnosticsEngine::Error, "backend data layout '%0' does not match " "expected target description '%1'"); Diags.Report(DiagID) << DLDesc << TDesc; } } }
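// --- Illustrative sketch (editorial note, not part of the file above) ---
// CreatePasses drives everything through llvm::PassManagerBuilder: it
// configures the builder, populates a per-function and a per-module pass
// manager, and EmitAssembly later runs them in that order. A minimal
// standalone version of that flow, assuming the LLVM 3.7-era legacy pass
// manager API this tree uses; `runOptimizations` is a hypothetical helper.
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"

void runOptimizations(llvm::Module &M, unsigned OptLevel) {
  llvm::PassManagerBuilder PMBuilder;
  PMBuilder.OptLevel = OptLevel; // 0..3, as mapped from the -O flags
  PMBuilder.SizeLevel = 0;

  // Per-function pipeline (cheap cleanups), mirroring getPerFunctionPasses.
  llvm::legacy::FunctionPassManager FPM(&M);
  FPM.add(llvm::createVerifierPass()); // same spot as CodeGenOpts.VerifyModule
  PMBuilder.populateFunctionPassManager(FPM);

  // Per-module pipeline (inlining, IPO), mirroring getPerModulePasses.
  llvm::legacy::PassManager MPM;
  PMBuilder.populateModulePassManager(MPM);

  // Run order matches EmitAssembly: function passes first, then module passes.
  FPM.doInitialization();
  for (llvm::Function &F : M)
    if (!F.isDeclaration())
      FPM.run(F);
  FPM.doFinalization();
  MPM.run(M);
}
// -------------------------------------------------------------------------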
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
//===--- ObjectFilePCHContainerOperations.cpp -----------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "clang/CodeGen/ObjectFilePCHContainerOperations.h" #include "CGDebugInfo.h" #include "CodeGenModule.h" #include "clang/AST/ASTContext.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/Expr.h" #include "clang/AST/RecursiveASTVisitor.h" #include "clang/Basic/Diagnostic.h" #include "clang/Basic/TargetInfo.h" #include "clang/CodeGen/BackendUtil.h" #include "clang/Frontend/CodeGenOptions.h" #include "clang/Serialization/ASTWriter.h" #include "llvm/ADT/StringRef.h" #include "llvm/Bitcode/BitstreamReader.h" #include "llvm/DebugInfo/DWARF/DWARFContext.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Object/COFF.h" #include "llvm/Object/ObjectFile.h" #include "llvm/Support/TargetRegistry.h" #include <memory> using namespace clang; #define DEBUG_TYPE "pchcontainer" namespace { class PCHContainerGenerator : public ASTConsumer { DiagnosticsEngine &Diags; const std::string MainFileName; ASTContext *Ctx; const HeaderSearchOptions &HeaderSearchOpts; const PreprocessorOptions &PreprocessorOpts; CodeGenOptions CodeGenOpts; const TargetOptions TargetOpts; const LangOptions LangOpts; std::unique_ptr<llvm::LLVMContext> VMContext; std::unique_ptr<llvm::Module> M; std::unique_ptr<CodeGen::CodeGenModule> Builder; raw_pwrite_stream *OS; std::shared_ptr<PCHBuffer> Buffer; public: PCHContainerGenerator(DiagnosticsEngine &diags, const HeaderSearchOptions &HSO, const PreprocessorOptions &PPO, const TargetOptions &TO, const LangOptions &LO, const std::string &MainFileName, const std::string &OutputFileName, raw_pwrite_stream *OS, std::shared_ptr<PCHBuffer> Buffer) : Diags(diags), HeaderSearchOpts(HSO), PreprocessorOpts(PPO), TargetOpts(TO), LangOpts(LO), OS(OS), Buffer(Buffer) { // The debug info output isn't affected by CodeModel and // ThreadModel, but the backend expects them to be nonempty. CodeGenOpts.CodeModel = "default"; CodeGenOpts.ThreadModel = "single"; CodeGenOpts.setDebugInfo(CodeGenOptions::FullDebugInfo); CodeGenOpts.SplitDwarfFile = OutputFileName; } virtual ~PCHContainerGenerator() {} void Initialize(ASTContext &Context) override { Ctx = &Context; VMContext.reset(new llvm::LLVMContext()); M.reset(new llvm::Module(MainFileName, *VMContext)); M->setDataLayout(Ctx->getTargetInfo().getTargetDescription()); Builder.reset(new CodeGen::CodeGenModule(*Ctx, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, M->getDataLayout(), Diags)); } /// Emit a container holding the serialized AST. void HandleTranslationUnit(ASTContext &Ctx) override { assert(M && VMContext && Builder); // Delete these on function exit. std::unique_ptr<llvm::LLVMContext> VMContext = std::move(this->VMContext); std::unique_ptr<llvm::Module> M = std::move(this->M); std::unique_ptr<CodeGen::CodeGenModule> Builder = std::move(this->Builder); if (Diags.hasErrorOccurred()) return; M->setTargetTriple(Ctx.getTargetInfo().getTriple().getTriple()); M->setDataLayout(Ctx.getTargetInfo().getTargetDescription()); // Finalize the Builder. if (Builder) Builder->Release(); // Ensure the target exists. 
std::string Error; auto Triple = Ctx.getTargetInfo().getTriple(); if (!llvm::TargetRegistry::lookupTarget(Triple.getTriple(), Error)) llvm::report_fatal_error(Error); // Emit the serialized Clang AST into its own section. assert(Buffer->IsComplete && "serialization did not complete"); auto &SerializedAST = Buffer->Data; auto Size = SerializedAST.size(); auto Int8Ty = llvm::Type::getInt8Ty(*VMContext); auto *Ty = llvm::ArrayType::get(Int8Ty, Size); auto *Data = llvm::ConstantDataArray::getString( *VMContext, StringRef(SerializedAST.data(), Size), /*AddNull=*/false); auto *ASTSym = new llvm::GlobalVariable( *M, Ty, /*constant*/ true, llvm::GlobalVariable::InternalLinkage, Data, "__clang_ast"); // The on-disk hashtable needs to be aligned. ASTSym->setAlignment(8); // Mach-O also needs a segment name. if (Triple.isOSBinFormatMachO()) ASTSym->setSection("__CLANG,__clangast"); // COFF has an eight character length limit. else if (Triple.isOSBinFormatCOFF()) ASTSym->setSection("clangast"); else ASTSym->setSection("__clangast"); DEBUG({ // Print the IR for the PCH container to the debug output. llvm::SmallString<0> Buffer; llvm::raw_svector_ostream OS(Buffer); clang::EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts, Ctx.getTargetInfo().getTargetDescription(), M.get(), BackendAction::Backend_EmitLL, &OS); OS.flush(); llvm::dbgs() << Buffer; }); // Use the LLVM backend to emit the pch container. clang::EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts, Ctx.getTargetInfo().getTargetDescription(), M.get(), BackendAction::Backend_EmitObj, OS); // Make sure the pch container hits disk. OS->flush(); // Free the memory for the temporary buffer. llvm::SmallVector<char, 0> Empty; SerializedAST = std::move(Empty); } }; } // namespace std::unique_ptr<ASTConsumer> ObjectFilePCHContainerWriter::CreatePCHContainerGenerator( DiagnosticsEngine &Diags, const HeaderSearchOptions &HSO, const PreprocessorOptions &PPO, const TargetOptions &TO, const LangOptions &LO, const std::string &MainFileName, const std::string &OutputFileName, llvm::raw_pwrite_stream *OS, std::shared_ptr<PCHBuffer> Buffer) const { return llvm::make_unique<PCHContainerGenerator>( Diags, HSO, PPO, TO, LO, MainFileName, OutputFileName, OS, Buffer); } void ObjectFilePCHContainerReader::ExtractPCH( llvm::MemoryBufferRef Buffer, llvm::BitstreamReader &StreamFile) const { if (auto OF = llvm::object::ObjectFile::createObjectFile(Buffer)) { auto *Obj = OF.get().get(); bool IsCOFF = isa<llvm::object::COFFObjectFile>(Obj); // Find the clang AST section in the container. for (auto &Section : OF->get()->sections()) { StringRef Name; Section.getName(Name); if ((!IsCOFF && Name == "__clangast") || ( IsCOFF && Name == "clangast")) { StringRef Buf; Section.getContents(Buf); StreamFile.init((const unsigned char *)Buf.begin(), (const unsigned char *)Buf.end()); return; } } } // As a fallback, treat the buffer as a raw AST. StreamFile.init((const unsigned char *)Buffer.getBufferStart(), (const unsigned char *)Buffer.getBufferEnd()); return; }
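// --- Illustrative sketch (editorial note, not part of the file above) ---
// The core trick in HandleTranslationUnit is embedding an opaque byte blob
// as a constant global placed in a named object-file section. The same
// pattern in isolation; `embedBlob` is a hypothetical helper and "__blob"
// a placeholder symbol name.
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"

llvm::GlobalVariable *embedBlob(llvm::Module &M, llvm::StringRef Bytes,
                                llvm::StringRef Section) {
  // Lower the bytes to an [N x i8] constant without a trailing NUL.
  auto *Data = llvm::ConstantDataArray::getString(M.getContext(), Bytes,
                                                  /*AddNull=*/false);
  auto *GV = new llvm::GlobalVariable(M, Data->getType(), /*isConstant=*/true,
                                      llvm::GlobalValue::InternalLinkage,
                                      Data, "__blob");
  GV->setAlignment(8);     // the serialized AST's hash tables want 8 bytes
  GV->setSection(Section); // e.g. "__clangast"; COFF caps names at 8 chars
  return GV;
}
// -------------------------------------------------------------------------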
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCall.h
//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // These classes wrap the information about a call or function // definition used to handle ABI compliancy. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H #define LLVM_CLANG_LIB_CODEGEN_CGCALL_H #include "CGValue.h" #include "EHScopeStack.h" #include "clang/AST/CanonicalType.h" #include "clang/AST/Type.h" #include "llvm/ADT/FoldingSet.h" #include "llvm/IR/Value.h" // FIXME: Restructure so we don't have to expose so much stuff. #include "ABIInfo.h" namespace llvm { class AttributeSet; class Function; class Type; class Value; } namespace clang { class ASTContext; class Decl; class FunctionDecl; class ObjCMethodDecl; class VarDecl; namespace CodeGen { typedef SmallVector<llvm::AttributeSet, 8> AttributeListType; struct CallArg { RValue RV; QualType Ty; bool NeedsCopy; CallArg(RValue rv, QualType ty, bool needscopy) : RV(rv), Ty(ty), NeedsCopy(needscopy) { } }; /// CallArgList - Type for representing both the value and type of /// arguments in a call. class CallArgList : public SmallVector<CallArg, 16> { public: CallArgList() : StackBase(nullptr), StackBaseMem(nullptr) {} struct Writeback { /// The original argument. Note that the argument l-value /// is potentially null. LValue Source; /// The temporary alloca. llvm::Value *Temporary; /// A value to "use" after the writeback, or null. llvm::Value *ToUse; }; struct CallArgCleanup { EHScopeStack::stable_iterator Cleanup; /// The "is active" insertion point. This instruction is temporary and /// will be removed after insertion. 
llvm::Instruction *IsActiveIP; }; void add(RValue rvalue, QualType type, bool needscopy = false) { push_back(CallArg(rvalue, type, needscopy)); } void addFrom(const CallArgList &other) { insert(end(), other.begin(), other.end()); #if 0 // HLSL Change - no ObjC support Writebacks.insert(Writebacks.end(), other.Writebacks.begin(), other.Writebacks.end()); #else assert(!other.hasWritebacks() && "writeback is unreachable in HLSL"); #endif // HLSL Change - no ObjC support } void addWriteback(LValue srcLV, llvm::Value *temporary, llvm::Value *toUse) { #if 0 // HLSL Change - no ObjC support Writeback writeback; writeback.Source = srcLV; writeback.Temporary = temporary; writeback.ToUse = toUse; Writebacks.push_back(writeback); #else llvm_unreachable("addWriteback is unreachable in HLSL"); #endif // HLSL Change - no ObjC support } bool hasWritebacks() const { return !Writebacks.empty(); } typedef llvm::iterator_range<SmallVectorImpl<Writeback>::const_iterator> writeback_const_range; writeback_const_range writebacks() const { return writeback_const_range(Writebacks.begin(), Writebacks.end()); } void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup, llvm::Instruction *IsActiveIP) { CallArgCleanup ArgCleanup; ArgCleanup.Cleanup = Cleanup; ArgCleanup.IsActiveIP = IsActiveIP; CleanupsToDeactivate.push_back(ArgCleanup); } ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const { return CleanupsToDeactivate; } void allocateArgumentMemory(CodeGenFunction &CGF); llvm::Instruction *getStackBase() const { return StackBase; } void freeArgumentMemory(CodeGenFunction &CGF) const; /// \brief Returns if we're using an inalloca struct to pass arguments in /// memory. bool isUsingInAlloca() const { return StackBase; } private: SmallVector<Writeback, 1> Writebacks; /// Deactivate these cleanups immediately before making the call. This /// is used to cleanup objects that are owned by the callee once the call /// occurs. SmallVector<CallArgCleanup, 1> CleanupsToDeactivate; /// The stacksave call. It dominates all of the argument evaluation. llvm::CallInst *StackBase; /// The alloca holding the stackbase. We need it to maintain SSA form. llvm::AllocaInst *StackBaseMem; /// The iterator pointing to the stack restore cleanup. We manually run and /// deactivate this cleanup after the call in the unexceptional case because /// it doesn't run in the normal order. EHScopeStack::stable_iterator StackCleanup; }; /// FunctionArgList - Type for representing both the decl and type /// of parameters to a function. The decl must be either a /// ParmVarDecl or ImplicitParamDecl. class FunctionArgList : public SmallVector<const VarDecl*, 16> { }; /// ReturnValueSlot - Contains the address where the return value of a /// function can be stored, and whether the address is volatile or not. class ReturnValueSlot { llvm::PointerIntPair<llvm::Value *, 2, unsigned int> Value; // Return value slot flags enum Flags { IS_VOLATILE = 0x1, IS_UNUSED = 0x2, }; public: ReturnValueSlot() {} ReturnValueSlot(llvm::Value *Value, bool IsVolatile, bool IsUnused = false) : Value(Value, (IsVolatile ? IS_VOLATILE : 0) | (IsUnused ? IS_UNUSED : 0)) {} bool isNull() const { return !getValue(); } bool isVolatile() const { return Value.getInt() & IS_VOLATILE; } llvm::Value *getValue() const { return Value.getPointer(); } bool isUnused() const { return Value.getInt() & IS_UNUSED; } }; } // end namespace CodeGen } // end namespace clang #endif
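// --- Illustrative sketch (editorial note, not part of the header above) ---
// ReturnValueSlot packs its two flags into the spare low bits of the slot
// pointer via llvm::PointerIntPair, so the whole slot stays one word wide.
// The idiom in isolation; `Slot` is a hypothetical stand-in type.
#include "llvm/ADT/PointerIntPair.h"

struct Slot {
  enum Flags { IsVolatile = 0x1, IsUnused = 0x2 };
  // Two spare low bits of an aligned pointer hold the flag mask.
  llvm::PointerIntPair<int *, 2, unsigned> Value;

  Slot(int *P, bool Volatile, bool Unused = false)
      : Value(P, (Volatile ? IsVolatile : 0) | (Unused ? IsUnused : 0)) {}

  bool isNull() const { return !Value.getPointer(); }
  bool isVolatile() const { return Value.getInt() & IsVolatile; }
  bool isUnused() const { return Value.getInt() & IsUnused; }
};
// --------------------------------------------------------------------------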
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGVTables.cpp
//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code dealing with C++ code generation of virtual tables. // //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" #include "CGCXXABI.h" #include "CodeGenModule.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/RecordLayout.h" #include "clang/CodeGen/CGFunctionInfo.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Format.h" #include "llvm/Transforms/Utils/Cloning.h" #include <algorithm> #include <cstdio> using namespace clang; using namespace CodeGen; CodeGenVTables::CodeGenVTables(CodeGenModule &CGM) : CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {} llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk) { const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); // Compute the mangled name. SmallString<256> Name; llvm::raw_svector_ostream Out(Name); if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD)) getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), Thunk.This, Out); else getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out); Out.flush(); llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD); return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true, /*DontDefer=*/true, /*IsThunk=*/true); } static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD, const ThunkInfo &Thunk, llvm::Function *Fn) { CGM.setGlobalVisibility(Fn, MD); } static void setThunkProperties(CodeGenModule &CGM, const ThunkInfo &Thunk, llvm::Function *ThunkFn, bool ForVTable, GlobalDecl GD) { CGM.setFunctionLinkage(GD, ThunkFn); CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable, GD, !Thunk.Return.isEmpty()); // Set the right visibility. const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); setThunkVisibility(CGM, MD, Thunk, ThunkFn); if (CGM.supportsCOMDAT() && ThunkFn->isWeakForLinker()) ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName())); } #ifndef NDEBUG static bool similar(const ABIArgInfo &infoL, CanQualType typeL, const ABIArgInfo &infoR, CanQualType typeR) { return (infoL.getKind() == infoR.getKind() && (typeL == typeR || (isa<PointerType>(typeL) && isa<PointerType>(typeR)) || (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR)))); } #endif static RValue PerformReturnAdjustment(CodeGenFunction &CGF, QualType ResultType, RValue RV, const ThunkInfo &Thunk) { // Emit the return adjustment. 
bool NullCheckValue = !ResultType->isReferenceType(); llvm::BasicBlock *AdjustNull = nullptr; llvm::BasicBlock *AdjustNotNull = nullptr; llvm::BasicBlock *AdjustEnd = nullptr; llvm::Value *ReturnValue = RV.getScalarVal(); if (NullCheckValue) { AdjustNull = CGF.createBasicBlock("adjust.null"); AdjustNotNull = CGF.createBasicBlock("adjust.notnull"); AdjustEnd = CGF.createBasicBlock("adjust.end"); llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue); CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull); CGF.EmitBlock(AdjustNotNull); } ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, ReturnValue, Thunk.Return); if (NullCheckValue) { CGF.Builder.CreateBr(AdjustEnd); CGF.EmitBlock(AdjustNull); CGF.Builder.CreateBr(AdjustEnd); CGF.EmitBlock(AdjustEnd); llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2); PHI->addIncoming(ReturnValue, AdjustNotNull); PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()), AdjustNull); ReturnValue = PHI; } return RValue::get(ReturnValue); } // This function does roughly the same thing as GenerateThunk, but in a // very different way, so that va_start and va_end work correctly. // FIXME: This function assumes "this" is the first non-sret LLVM argument of // a function, and that there is an alloca built in the entry block // for all accesses to "this". // FIXME: This function assumes there is only one "ret" statement per function. // FIXME: Cloning isn't correct in the presence of indirect goto! // FIXME: This implementation of thunks bloats codesize by duplicating the // function definition. There are alternatives: // 1. Add some sort of stub support to LLVM for cases where we can // do a this adjustment, then a sibcall. // 2. We could transform the definition to take a va_list instead of an // actual variable argument list, then have the thunks (including a // no-op thunk for the regular definition) call va_start/va_end. // There's a bit of per-call overhead for this solution, but it's // better for codesize if the definition is long. llvm::Function * CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk) { const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); QualType ResultType = FPT->getReturnType(); // Get the original function assert(FnInfo.isVariadic()); llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo); llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true); llvm::Function *BaseFn = cast<llvm::Function>(Callee); // Clone to thunk. llvm::ValueToValueMapTy VMap; llvm::Function *NewFn = llvm::CloneFunction(BaseFn, VMap, /*ModuleLevelChanges=*/false); CGM.getModule().getFunctionList().push_back(NewFn); Fn->replaceAllUsesWith(NewFn); NewFn->takeName(Fn); Fn->eraseFromParent(); Fn = NewFn; // "Initialize" CGF (minimally). CurFn = Fn; // Get the "this" value llvm::Function::arg_iterator AI = Fn->arg_begin(); if (CGM.ReturnTypeUsesSRet(FnInfo)) ++AI; // Find the first store of "this", which will be to the alloca associated // with "this". llvm::Value *ThisPtr = &*AI; llvm::BasicBlock *EntryBB = Fn->begin(); llvm::Instruction *ThisStore = std::find_if(EntryBB->begin(), EntryBB->end(), [&](llvm::Instruction &I) { return isa<llvm::StoreInst>(I) && I.getOperand(0) == ThisPtr; }); assert(ThisStore && "Store of this should be in entry block?"); // Adjust "this", if necessary. 
Builder.SetInsertPoint(ThisStore); llvm::Value *AdjustedThisPtr = CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This); ThisStore->setOperand(0, AdjustedThisPtr); if (!Thunk.Return.isEmpty()) { // Fix up the returned value, if necessary. for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) { llvm::Instruction *T = I->getTerminator(); if (isa<llvm::ReturnInst>(T)) { RValue RV = RValue::get(T->getOperand(0)); T->eraseFromParent(); Builder.SetInsertPoint(&*I); RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk); Builder.CreateRet(RV.getScalarVal()); break; } } } return Fn; } void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo) { assert(!CurGD.getDecl() && "CurGD was already set!"); CurGD = GD; CurFuncIsThunk = true; // Build FunctionArgs. const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); QualType ThisType = MD->getThisType(getContext()); const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); QualType ResultType = CGM.getCXXABI().HasThisReturn(GD) ? ThisType : CGM.getCXXABI().hasMostDerivedReturn(GD) ? CGM.getContext().VoidPtrTy : FPT->getReturnType(); FunctionArgList FunctionArgs; // Create the implicit 'this' parameter declaration. CGM.getCXXABI().buildThisParam(*this, FunctionArgs); // Add the rest of the parameters. FunctionArgs.append(MD->param_begin(), MD->param_end()); if (isa<CXXDestructorDecl>(MD)) CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs); // Start defining the function. StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs, MD->getLocation(), MD->getLocation()); // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves. CGM.getCXXABI().EmitInstanceFunctionProlog(*this); CXXThisValue = CXXABIThisValue; } void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk) { assert(isa<CXXMethodDecl>(CurGD.getDecl()) && "Please use a new CGF for this thunk"); const CXXMethodDecl *MD = cast<CXXMethodDecl>(CurGD.getDecl()); // Adjust the 'this' pointer if necessary llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment( *this, LoadCXXThis(), Thunk->This) : LoadCXXThis(); if (CurFnInfo->usesInAlloca()) { // We don't handle return adjusting thunks, because they require us to call // the copy constructor. For now, fall through and pretend the return // adjustment was empty so we don't crash. if (Thunk && !Thunk->Return.isEmpty()) { CGM.ErrorUnsupported( MD, "non-trivial argument copy for return-adjusting thunk"); } EmitMustTailThunk(MD, AdjustedThisPtr, Callee); return; } // Start building CallArgs. CallArgList CallArgs; QualType ThisType = MD->getThisType(getContext()); CallArgs.add(RValue::get(AdjustedThisPtr), ThisType); if (isa<CXXDestructorDecl>(MD)) CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, CurGD, CallArgs); // Add the rest of the arguments. 
for (const ParmVarDecl *PD : MD->params()) EmitDelegateCallArg(CallArgs, PD, PD->getLocStart()); const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); #ifndef NDEBUG const CGFunctionInfo &CallFnInfo = CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, RequiredArgs::forPrototypePlus(FPT, 1)); assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() && CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() && CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention()); assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(), CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType())); assert(CallFnInfo.arg_size() == CurFnInfo->arg_size()); for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i) assert(similar(CallFnInfo.arg_begin()[i].info, CallFnInfo.arg_begin()[i].type, CurFnInfo->arg_begin()[i].info, CurFnInfo->arg_begin()[i].type)); #endif // Determine whether we have a return value slot to use. QualType ResultType = CGM.getCXXABI().HasThisReturn(CurGD) ? ThisType : CGM.getCXXABI().hasMostDerivedReturn(CurGD) ? CGM.getContext().VoidPtrTy : FPT->getReturnType(); ReturnValueSlot Slot; if (!ResultType->isVoidType() && CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect && !hasScalarEvaluationKind(CurFnInfo->getReturnType())) Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified()); // Now emit our call. llvm::Instruction *CallOrInvoke; RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD, &CallOrInvoke); // Consider return adjustment if we have ThunkInfo. if (Thunk && !Thunk->Return.isEmpty()) RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk); // Emit return. if (!ResultType->isVoidType() && Slot.isNull()) CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType); // Disable the final ARC autorelease. AutoreleaseResult = false; FinishFunction(); } void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr, llvm::Value *Callee) { // Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery // to translate AST arguments into LLVM IR arguments. For thunks, we know // that the caller prototype more or less matches the callee prototype with // the exception of 'this'. SmallVector<llvm::Value *, 8> Args; for (llvm::Argument &A : CurFn->args()) Args.push_back(&A); // Set the adjusted 'this' pointer. const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info; if (ThisAI.isDirect()) { const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo(); int ThisArgNo = RetAI.isIndirect() && !RetAI.isSRetAfterThis() ? 1 : 0; llvm::Type *ThisType = Args[ThisArgNo]->getType(); if (ThisType != AdjustedThisPtr->getType()) AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType); Args[ThisArgNo] = AdjustedThisPtr; } else { assert(ThisAI.isInAlloca() && "this is passed directly or inalloca"); llvm::Value *ThisAddr = GetAddrOfLocalVar(CXXABIThisDecl); llvm::Type *ThisType = cast<llvm::PointerType>(ThisAddr->getType())->getElementType(); if (ThisType != AdjustedThisPtr->getType()) AdjustedThisPtr = Builder.CreateBitCast(AdjustedThisPtr, ThisType); Builder.CreateStore(AdjustedThisPtr, ThisAddr); } // Emit the musttail call manually. Even if the prologue pushed cleanups, we // don't actually want to run them. llvm::CallInst *Call = Builder.CreateCall(Callee, Args); Call->setTailCallKind(llvm::CallInst::TCK_MustTail); // Apply the standard set of call attributes. 
unsigned CallingConv; CodeGen::AttributeListType AttributeList; CGM.ConstructAttributeList(*CurFnInfo, MD, AttributeList, CallingConv, /*AttrOnCallSite=*/true); llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(), AttributeList); Call->setAttributes(Attrs); Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); if (Call->getType()->isVoidTy()) Builder.CreateRetVoid(); else Builder.CreateRet(Call); // Finish the function to maintain CodeGenFunction invariants. // FIXME: Don't emit unreachable code. EmitBlock(createBasicBlock()); FinishFunction(); } void CodeGenFunction::generateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo, GlobalDecl GD, const ThunkInfo &Thunk) { StartThunk(Fn, GD, FnInfo); // Get our callee. llvm::Type *Ty = CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD)); llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true); // Make the call and return the result. EmitCallAndReturnForThunk(Callee, &Thunk); } void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk, bool ForVTable) { const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD); // FIXME: re-use FnInfo in this computation. llvm::Constant *C = CGM.GetAddrOfThunk(GD, Thunk); llvm::GlobalValue *Entry; // Strip off a bitcast if we got one back. if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(C)) { assert(CE->getOpcode() == llvm::Instruction::BitCast); Entry = cast<llvm::GlobalValue>(CE->getOperand(0)); } else { Entry = cast<llvm::GlobalValue>(C); } // There's already a declaration with the same name, check if it has the same // type or if we need to replace it. if (Entry->getType()->getElementType() != CGM.getTypes().GetFunctionTypeForVTable(GD)) { llvm::GlobalValue *OldThunkFn = Entry; // If the types mismatch then we have to rewrite the definition. assert(OldThunkFn->isDeclaration() && "Shouldn't replace non-declaration"); // Remove the name from the old thunk function and get a new thunk. OldThunkFn->setName(StringRef()); Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk)); // If needed, replace the old thunk with a bitcast. if (!OldThunkFn->use_empty()) { llvm::Constant *NewPtrForOldDecl = llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType()); OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl); } // Remove the old thunk. OldThunkFn->eraseFromParent(); } llvm::Function *ThunkFn = cast<llvm::Function>(Entry); bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions(); bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions; if (!ThunkFn->isDeclaration()) { if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) { // There is already a thunk emitted for this function, do nothing. return; } setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD); return; } CGM.SetLLVMFunctionAttributesForDefinition(GD.getDecl(), ThunkFn); if (ThunkFn->isVarArg()) { // Varargs thunks are special; we can't just generate a call because // we can't copy the varargs. Our implementation is rather // expensive/sucky at the moment, so don't generate the thunk unless // we have to. // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly. if (UseAvailableExternallyLinkage) return; ThunkFn = CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk); } else { // Normal thunk body generation. 
CodeGenFunction(CGM).generateThunk(ThunkFn, FnInfo, GD, Thunk); } setThunkProperties(CGM, Thunk, ThunkFn, ForVTable, GD); } void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD, const ThunkInfo &Thunk) { // If the ABI has key functions, only the TU with the key function should emit // the thunk. However, we can allow inlining of thunks if we emit them with // available_externally linkage together with vtables when optimizations are // enabled. if (CGM.getTarget().getCXXABI().hasKeyFunctions() && !CGM.getCodeGenOpts().OptimizationLevel) return; // We can't emit thunks for member functions with incomplete types. const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); if (!CGM.getTypes().isFuncTypeConvertible( MD->getType()->castAs<FunctionType>())) return; emitThunk(GD, Thunk, /*ForVTable=*/true); } void CodeGenVTables::EmitThunks(GlobalDecl GD) { const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl(); // We don't need to generate thunks for the base destructor. if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base) return; const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector = VTContext->getThunkInfo(GD); if (!ThunkInfoVector) return; for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I) emitThunk(GD, (*ThunkInfoVector)[I], /*ForVTable=*/false); } llvm::Constant *CodeGenVTables::CreateVTableInitializer( const CXXRecordDecl *RD, const VTableComponent *Components, unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks, unsigned NumVTableThunks, llvm::Constant *RTTI) { SmallVector<llvm::Constant *, 64> Inits; llvm::Type *Int8PtrTy = CGM.Int8PtrTy; llvm::Type *PtrDiffTy = CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()); unsigned NextVTableThunkIndex = 0; llvm::Constant *PureVirtualFn = nullptr, *DeletedVirtualFn = nullptr; for (unsigned I = 0; I != NumComponents; ++I) { VTableComponent Component = Components[I]; llvm::Constant *Init = nullptr; switch (Component.getKind()) { case VTableComponent::CK_VCallOffset: Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVCallOffset().getQuantity()); Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy); break; case VTableComponent::CK_VBaseOffset: Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVBaseOffset().getQuantity()); Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy); break; case VTableComponent::CK_OffsetToTop: Init = llvm::ConstantInt::get(PtrDiffTy, Component.getOffsetToTop().getQuantity()); Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy); break; case VTableComponent::CK_RTTI: Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy); break; case VTableComponent::CK_FunctionPointer: case VTableComponent::CK_CompleteDtorPointer: case VTableComponent::CK_DeletingDtorPointer: { GlobalDecl GD; // Get the right global decl. switch (Component.getKind()) { default: llvm_unreachable("Unexpected vtable component kind"); case VTableComponent::CK_FunctionPointer: GD = Component.getFunctionDecl(); break; case VTableComponent::CK_CompleteDtorPointer: GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete); break; case VTableComponent::CK_DeletingDtorPointer: GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting); break; } if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) { // We have a pure virtual member function. 
if (!PureVirtualFn) { llvm::FunctionType *Ty = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName(); PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName); PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn, CGM.Int8PtrTy); } Init = PureVirtualFn; } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) { if (!DeletedVirtualFn) { llvm::FunctionType *Ty = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false); StringRef DeletedCallName = CGM.getCXXABI().GetDeletedVirtualCallName(); DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName); DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn, CGM.Int8PtrTy); } Init = DeletedVirtualFn; } else { // Check if we should use a thunk. if (NextVTableThunkIndex < NumVTableThunks && VTableThunks[NextVTableThunkIndex].first == I) { const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second; maybeEmitThunkForVTable(GD, Thunk); Init = CGM.GetAddrOfThunk(GD, Thunk); NextVTableThunkIndex++; } else { llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD); Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true); } Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy); } break; } case VTableComponent::CK_UnusedFunctionPointer: Init = llvm::ConstantExpr::getNullValue(Int8PtrTy); break; }; Inits.push_back(Init); } llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents); return llvm::ConstantArray::get(ArrayType, Inits); } llvm::GlobalVariable * CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base, bool BaseIsVirtual, llvm::GlobalVariable::LinkageTypes Linkage, VTableAddressPointsMapTy& AddressPoints) { if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) DI->completeClassData(Base.getBase()); std::unique_ptr<VTableLayout> VTLayout( getItaniumVTableContext().createConstructionVTableLayout( Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD)); // Add the address points. AddressPoints = VTLayout->getAddressPoints(); // Get the mangled construction vtable name. SmallString<256> OutName; llvm::raw_svector_ostream Out(OutName); cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext()) .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(), Out); Out.flush(); StringRef Name = OutName.str(); llvm::ArrayType *ArrayType = llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents()); // Construction vtable symbols are not part of the Itanium ABI, so we cannot // guarantee that they actually will be available externally. Instead, when // emitting an available_externally VTT, we provide references to an internal // linkage construction vtable. The ABI only requires complete-object vtables // to be the same for all instances of a type, not construction vtables. if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage) Linkage = llvm::GlobalVariable::InternalLinkage; // Create the variable that will hold the construction vtable. llvm::GlobalVariable *VTable = CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage); CGM.setGlobalVisibility(VTable, RD); // V-tables are always unnamed_addr. VTable->setUnnamedAddr(true); llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor( CGM.getContext().getTagDeclType(Base.getBase())); // Create and set the initializer. 
llvm::Constant *Init = CreateVTableInitializer( Base.getBase(), VTLayout->vtable_component_begin(), VTLayout->getNumVTableComponents(), VTLayout->vtable_thunk_begin(), VTLayout->getNumVTableThunks(), RTTI); VTable->setInitializer(Init); CGM.EmitVTableBitSetEntries(VTable, *VTLayout.get()); return VTable; } /// Compute the required linkage of the v-table for the given class. /// /// Note that we only call this at the end of the translation unit. llvm::GlobalVariable::LinkageTypes CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) { if (!RD->isExternallyVisible()) return llvm::GlobalVariable::InternalLinkage; // We're at the end of the translation unit, so the current key // function is fully correct. const CXXMethodDecl *keyFunction = Context.getCurrentKeyFunction(RD); if (keyFunction && !RD->hasAttr<DLLImportAttr>()) { // If this class has a key function, use that to determine the // linkage of the vtable. const FunctionDecl *def = nullptr; if (keyFunction->hasBody(def)) keyFunction = cast<CXXMethodDecl>(def); switch (keyFunction->getTemplateSpecializationKind()) { case TSK_Undeclared: case TSK_ExplicitSpecialization: assert(def && "Should not have been asked to emit this"); if (keyFunction->isInlined()) return !Context.getLangOpts().AppleKext ? llvm::GlobalVariable::LinkOnceODRLinkage : llvm::Function::InternalLinkage; return llvm::GlobalVariable::ExternalLinkage; case TSK_ImplicitInstantiation: return !Context.getLangOpts().AppleKext ? llvm::GlobalVariable::LinkOnceODRLinkage : llvm::Function::InternalLinkage; case TSK_ExplicitInstantiationDefinition: return !Context.getLangOpts().AppleKext ? llvm::GlobalVariable::WeakODRLinkage : llvm::Function::InternalLinkage; case TSK_ExplicitInstantiationDeclaration: llvm_unreachable("Should not have been asked to emit this"); } } // -fapple-kext mode does not support weak linkage, so we must use // internal linkage. if (Context.getLangOpts().AppleKext) return llvm::Function::InternalLinkage; llvm::GlobalVariable::LinkageTypes DiscardableODRLinkage = llvm::GlobalValue::LinkOnceODRLinkage; llvm::GlobalVariable::LinkageTypes NonDiscardableODRLinkage = llvm::GlobalValue::WeakODRLinkage; if (RD->hasAttr<DLLExportAttr>()) { // Cannot discard exported vtables. DiscardableODRLinkage = NonDiscardableODRLinkage; } else if (RD->hasAttr<DLLImportAttr>()) { // Imported vtables are available externally. DiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage; NonDiscardableODRLinkage = llvm::GlobalVariable::AvailableExternallyLinkage; } switch (RD->getTemplateSpecializationKind()) { case TSK_Undeclared: case TSK_ExplicitSpecialization: case TSK_ImplicitInstantiation: return DiscardableODRLinkage; case TSK_ExplicitInstantiationDeclaration: return llvm::GlobalVariable::ExternalLinkage; case TSK_ExplicitInstantiationDefinition: return NonDiscardableODRLinkage; } llvm_unreachable("Invalid TemplateSpecializationKind!"); } /// This is a callback from Sema to tell us that a particular v-table is /// required to be emitted in this translation unit. /// /// This is only called for vtables that _must_ be emitted (mainly due to key /// functions). For weak vtables, CodeGen tracks when they are needed and /// emits them as-needed. 
void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) { VTables.GenerateClassData(theClass); } void CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) { if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) DI->completeClassData(RD); if (RD->getNumVBases()) CGM.getCXXABI().emitVirtualInheritanceTables(RD); CGM.getCXXABI().emitVTableDefinitions(*this, RD); } /// At this point in the translation unit, does it appear that we can /// rely on the vtable being defined elsewhere in the program? /// /// The response is really only definitive when called at the end of /// the translation unit. /// /// The only semantic restriction here is that the object file should /// not contain a v-table definition when that v-table is defined /// strongly elsewhere. Otherwise, we'd just like to avoid emitting /// v-tables when unnecessary. bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) { assert(RD->isDynamicClass() && "Non-dynamic classes have no VTable."); // If we have an explicit instantiation declaration (and not a // definition), the v-table is defined elsewhere. TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind(); if (TSK == TSK_ExplicitInstantiationDeclaration) return true; // Otherwise, if the class is an instantiated template, the // v-table must be defined here. if (TSK == TSK_ImplicitInstantiation || TSK == TSK_ExplicitInstantiationDefinition) return false; // Otherwise, if the class doesn't have a key function (possibly // anymore), the v-table must be defined here. const CXXMethodDecl *keyFunction = CGM.getContext().getCurrentKeyFunction(RD); if (!keyFunction) return false; // Otherwise, if we don't have a definition of the key function, the // v-table must be defined somewhere else. return !keyFunction->hasBody(); } /// Given that we're currently at the end of the translation unit, and /// we've emitted a reference to the v-table for this class, should /// we define that v-table? static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM, const CXXRecordDecl *RD) { return !CGM.getVTables().isVTableExternal(RD); } /// Given that at some point we emitted a reference to one or more /// v-tables, and that we are now at the end of the translation unit, /// decide whether we should emit them. void CodeGenModule::EmitDeferredVTables() { #ifndef NDEBUG // Remember the size of DeferredVTables, because we're going to assume // that this entire operation doesn't modify it. size_t savedSize = DeferredVTables.size(); #endif typedef std::vector<const CXXRecordDecl *>::const_iterator const_iterator; for (const_iterator i = DeferredVTables.begin(), e = DeferredVTables.end(); i != e; ++i) { const CXXRecordDecl *RD = *i; if (shouldEmitVTableAtEndOfTranslationUnit(*this, RD)) VTables.GenerateClassData(RD); } assert(savedSize == DeferredVTables.size() && "deferred extra v-tables during v-table emission?"); DeferredVTables.clear(); } bool CodeGenModule::IsCFIBlacklistedRecord(const CXXRecordDecl *RD) { // FIXME: Make this user configurable. 
return RD->isInStdNamespace(); } void CodeGenModule::EmitVTableBitSetEntries(llvm::GlobalVariable *VTable, const VTableLayout &VTLayout) { if (!LangOpts.Sanitize.has(SanitizerKind::CFIVCall) && !LangOpts.Sanitize.has(SanitizerKind::CFINVCall) && !LangOpts.Sanitize.has(SanitizerKind::CFIDerivedCast) && !LangOpts.Sanitize.has(SanitizerKind::CFIUnrelatedCast)) return; CharUnits PointerWidth = Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0)); std::vector<llvm::MDTuple *> BitsetEntries; // Create a bit set entry for each address point. for (auto &&AP : VTLayout.getAddressPoints()) { if (IsCFIBlacklistedRecord(AP.first.getBase())) continue; BitsetEntries.push_back(CreateVTableBitSetEntry( VTable, PointerWidth * AP.second, AP.first.getBase())); } // Sort the bit set entries for determinism. std::sort(BitsetEntries.begin(), BitsetEntries.end(), [](llvm::MDTuple *T1, llvm::MDTuple *T2) { if (T1 == T2) return false; StringRef S1 = cast<llvm::MDString>(T1->getOperand(0))->getString(); StringRef S2 = cast<llvm::MDString>(T2->getOperand(0))->getString(); if (S1 < S2) return true; if (S1 != S2) return false; uint64_t Offset1 = cast<llvm::ConstantInt>( cast<llvm::ConstantAsMetadata>(T1->getOperand(2)) ->getValue())->getZExtValue(); uint64_t Offset2 = cast<llvm::ConstantInt>( cast<llvm::ConstantAsMetadata>(T2->getOperand(2)) ->getValue())->getZExtValue(); assert(Offset1 != Offset2); return Offset1 < Offset2; }); llvm::NamedMDNode *BitsetsMD = getModule().getOrInsertNamedMetadata("llvm.bitsets"); for (auto BitsetEntry : BitsetEntries) BitsetsMD->addOperand(BitsetEntry); }
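// --- Illustrative sketch (editorial note, not part of the file above) ---
// EmitVTableBitSetEntries sorts its bitset metadata with a two-key
// comparator (type-name string, then byte offset) so the emitted
// llvm.bitsets operands are deterministic across runs. The same ordering
// in plain C++; `BitsetEntry` is a hypothetical stand-in for the MDTuple
// operands the real code compares.
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

struct BitsetEntry {
  std::string TypeName; // operand 0 of the MDTuple
  uint64_t Offset;      // operand 2: address-point byte offset
};

void sortEntriesForDeterminism(std::vector<BitsetEntry> &Entries) {
  std::sort(Entries.begin(), Entries.end(),
            [](const BitsetEntry &A, const BitsetEntry &B) {
              if (A.TypeName != B.TypeName)
                return A.TypeName < B.TypeName; // primary key: name
              return A.Offset < B.Offset;       // tie-break: offset
            });
}
// -------------------------------------------------------------------------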
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenModule.h
//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This is the internal per-translation-unit state used for llvm translation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H #define LLVM_CLANG_LIB_CODEGEN_CODEGENMODULE_H #include "CGVTables.h" #include "CodeGenTypes.h" #include "SanitizerMetadata.h" #include "clang/AST/Attr.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Mangle.h" #include "clang/Basic/ABI.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/SanitizerBlacklist.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/Module.h" #include "llvm/IR/ValueHandle.h" namespace llvm { class Module; class Constant; class ConstantInt; class Function; class GlobalValue; class DataLayout; class FunctionType; class LLVMContext; class IndexedInstrProfReader; } namespace clang { class TargetCodeGenInfo; class ASTContext; class AtomicType; class FunctionDecl; class IdentifierInfo; class ObjCMethodDecl; class ObjCImplementationDecl; class ObjCCategoryImplDecl; class ObjCProtocolDecl; class ObjCEncodeExpr; class BlockExpr; class CharUnits; class Decl; class Expr; class Stmt; class InitListExpr; class StringLiteral; class NamedDecl; class ValueDecl; class VarDecl; class LangOptions; class CodeGenOptions; class HeaderSearchOptions; class PreprocessorOptions; class DiagnosticsEngine; class AnnotateAttr; class CXXDestructorDecl; class Module; class CoverageSourceInfo; namespace CodeGen { class CallArgList; class CodeGenFunction; class CodeGenTBAA; class CGCXXABI; class CGDebugInfo; class CGObjCRuntime; class CGOpenCLRuntime; class CGOpenMPRuntime; class CGCUDARuntime; class CGHLSLRuntime; // HLSL Change class BlockFieldFlags; class FunctionArgList; class CoverageMappingModuleGen; struct OrderGlobalInits { unsigned int priority; unsigned int lex_order; OrderGlobalInits(unsigned int p, unsigned int l) : priority(p), lex_order(l) {} bool operator==(const OrderGlobalInits &RHS) const { return priority == RHS.priority && lex_order == RHS.lex_order; } bool operator<(const OrderGlobalInits &RHS) const { return std::tie(priority, lex_order) < std::tie(RHS.priority, RHS.lex_order); } }; struct CodeGenTypeCache { /// void llvm::Type *VoidTy; /// i8, i16, i32, and i64 llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty; /// float, double llvm::Type *FloatTy, *DoubleTy; /// int llvm::IntegerType *IntTy; /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size. union { llvm::IntegerType *IntPtrTy; llvm::IntegerType *SizeTy; llvm::IntegerType *PtrDiffTy; }; /// void* in address space 0 union { llvm::PointerType *VoidPtrTy; llvm::PointerType *Int8PtrTy; }; /// void** in address space 0 union { llvm::PointerType *VoidPtrPtrTy; llvm::PointerType *Int8PtrPtrTy; }; /// The width of a pointer into the generic address space. unsigned char PointerWidthInBits; /// The size and alignment of a pointer into the generic address /// space. 
union { unsigned char PointerAlignInBytes; unsigned char PointerSizeInBytes; unsigned char SizeSizeInBytes; // sizeof(size_t) }; llvm::CallingConv::ID RuntimeCC; llvm::CallingConv::ID getRuntimeCC() const { return RuntimeCC; } llvm::CallingConv::ID BuiltinCC; llvm::CallingConv::ID getBuiltinCC() const { return BuiltinCC; } }; struct RREntrypoints { RREntrypoints() { memset(this, 0, sizeof(*this)); } /// void objc_autoreleasePoolPop(void*); llvm::Constant *objc_autoreleasePoolPop; /// void *objc_autoreleasePoolPush(void); llvm::Constant *objc_autoreleasePoolPush; }; struct ARCEntrypoints { ARCEntrypoints() { memset(this, 0, sizeof(*this)); } /// id objc_autorelease(id); llvm::Constant *objc_autorelease; /// id objc_autoreleaseReturnValue(id); llvm::Constant *objc_autoreleaseReturnValue; /// void objc_copyWeak(id *dest, id *src); llvm::Constant *objc_copyWeak; /// void objc_destroyWeak(id*); llvm::Constant *objc_destroyWeak; /// id objc_initWeak(id*, id); llvm::Constant *objc_initWeak; /// id objc_loadWeak(id*); llvm::Constant *objc_loadWeak; /// id objc_loadWeakRetained(id*); llvm::Constant *objc_loadWeakRetained; /// void objc_moveWeak(id *dest, id *src); llvm::Constant *objc_moveWeak; /// id objc_retain(id); llvm::Constant *objc_retain; /// id objc_retainAutorelease(id); llvm::Constant *objc_retainAutorelease; /// id objc_retainAutoreleaseReturnValue(id); llvm::Constant *objc_retainAutoreleaseReturnValue; /// id objc_retainAutoreleasedReturnValue(id); llvm::Constant *objc_retainAutoreleasedReturnValue; /// id objc_retainBlock(id); llvm::Constant *objc_retainBlock; /// void objc_release(id); llvm::Constant *objc_release; /// id objc_storeStrong(id*, id); llvm::Constant *objc_storeStrong; /// id objc_storeWeak(id*, id); llvm::Constant *objc_storeWeak; /// A void(void) inline asm to use to mark that the return value of /// a call will be immediately retained. llvm::InlineAsm *retainAutoreleasedReturnValueMarker; /// void clang.arc.use(...); llvm::Constant *clang_arc_use; }; /// This class records statistics on instrumentation-based profiling. class InstrProfStats { uint32_t VisitedInMainFile; uint32_t MissingInMainFile; uint32_t Visited; uint32_t Missing; uint32_t Mismatched; public: InstrProfStats() : VisitedInMainFile(0), MissingInMainFile(0), Visited(0), Missing(0), Mismatched(0) {} /// Record that we've visited a function and whether or not that function was /// in the main source file. void addVisited(bool MainFile) { if (MainFile) ++VisitedInMainFile; ++Visited; } /// Record that a function we've visited has no profile data. void addMissing(bool MainFile) { if (MainFile) ++MissingInMainFile; ++Missing; } /// Record that a function we've visited has mismatched profile data. void addMismatched(bool MainFile) { ++Mismatched; } /// Whether or not the stats we've gathered indicate any potential problems. bool hasDiagnostics() { return Missing || Mismatched; } /// Report potential problems we've found to \c Diags. void reportDiagnostics(DiagnosticsEngine &Diags, StringRef MainFile); }; /// This class organizes the cross-function state that is used while generating /// LLVM code. 
class CodeGenModule : public CodeGenTypeCache { CodeGenModule(const CodeGenModule &) = delete; void operator=(const CodeGenModule &) = delete; public: struct Structor { Structor() : Priority(0), Initializer(nullptr), AssociatedData(nullptr) {} Structor(int Priority, llvm::Constant *Initializer, llvm::Constant *AssociatedData) : Priority(Priority), Initializer(Initializer), AssociatedData(AssociatedData) {} int Priority; llvm::Constant *Initializer; llvm::Constant *AssociatedData; }; typedef std::vector<Structor> CtorList; private: ASTContext &Context; const LangOptions &LangOpts; const HeaderSearchOptions &HeaderSearchOpts; // Only used for debug info. const PreprocessorOptions &PreprocessorOpts; // Only used for debug info. const CodeGenOptions &CodeGenOpts; llvm::Module &TheModule; DiagnosticsEngine &Diags; const llvm::DataLayout &TheDataLayout; const TargetInfo &Target; std::unique_ptr<CGCXXABI> ABI; llvm::LLVMContext &VMContext; CodeGenTBAA *TBAA; mutable std::unique_ptr<TargetCodeGenInfo> TheTargetCodeGenInfo; // HLSL Change - unique_ptr rather than const * // This should not be moved earlier, since its initialization depends on some // of the previous reference members being already initialized and also checks // if TheTargetCodeGenInfo is NULL CodeGenTypes Types; /// Holds information about C++ vtables. CodeGenVTables VTables; CGObjCRuntime* ObjCRuntime; CGOpenCLRuntime* OpenCLRuntime; CGOpenMPRuntime* OpenMPRuntime; CGCUDARuntime* CUDARuntime; CGHLSLRuntime* HLSLRuntime; // HLSL Change CGDebugInfo* DebugInfo; ARCEntrypoints *ARCData; llvm::MDNode *NoObjCARCExceptionsMetadata; RREntrypoints *RRData; std::unique_ptr<llvm::IndexedInstrProfReader> PGOReader; InstrProfStats PGOStats; // A set of references that have only been seen via a weakref so far. This is // used to remove the weakref if we ever see a direct reference // or a definition. llvm::SmallPtrSet<llvm::GlobalValue*, 10> WeakRefReferences; /// This contains all the decls which have definitions but which are deferred /// for emission and therefore should only be output if they are actually /// used. If a decl is in this, then it is known to have not been referenced /// yet. std::map<StringRef, GlobalDecl> DeferredDecls; /// This is a list of deferred decls which we have seen that *are* actually /// referenced. These get code generated when the module is done. struct DeferredGlobal { DeferredGlobal(llvm::GlobalValue *GV, GlobalDecl GD) : GV(GV), GD(GD) {} llvm::TrackingVH<llvm::GlobalValue> GV; GlobalDecl GD; }; std::vector<DeferredGlobal> DeferredDeclsToEmit; void addDeferredDeclToEmit(llvm::GlobalValue *GV, GlobalDecl GD) { DeferredDeclsToEmit.emplace_back(GV, GD); } /// List of aliases we have emitted. Used to make sure that what they point to /// is defined once we get to the end of the translation unit. std::vector<GlobalDecl> Aliases; typedef llvm::StringMap<llvm::TrackingVH<llvm::Constant> > ReplacementsTy; ReplacementsTy Replacements; /// A queue of (optional) vtables to consider emitting. std::vector<const CXXRecordDecl*> DeferredVTables; /// List of global values which are required to be present in the object file, /// bitcast to i8*. This is used for forcing visibility of symbols which may /// otherwise be optimized out. std::vector<llvm::WeakTrackingVH> LLVMUsed; std::vector<llvm::WeakTrackingVH> LLVMCompilerUsed; /// Store the list of global constructors and their respective priorities to /// be emitted when the translation unit is complete. 
CtorList GlobalCtors; /// Store the list of global destructors and their respective priorities to be /// emitted when the translation unit is complete. CtorList GlobalDtors; /// An ordered map of canonical GlobalDecls to their mangled names. llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames; llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings; /// Global annotations. std::vector<llvm::Constant*> Annotations; /// Map used to get unique annotation strings. llvm::StringMap<llvm::Constant*> AnnotationStrings; llvm::StringMap<llvm::GlobalVariable *> CFConstantStringMap; llvm::DenseMap<llvm::Constant *, llvm::GlobalVariable *> ConstantStringMap; llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap; llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap; llvm::DenseMap<const Expr*, llvm::Constant *> MaterializedGlobalTemporaryMap; llvm::DenseMap<QualType, llvm::Constant *> AtomicSetterHelperFnMap; llvm::DenseMap<QualType, llvm::Constant *> AtomicGetterHelperFnMap; /// Map used to get unique type descriptor constants for sanitizers. llvm::DenseMap<QualType, llvm::Constant *> TypeDescriptorMap; /// Map used to track internal linkage functions declared within /// extern "C" regions. typedef llvm::MapVector<IdentifierInfo *, llvm::GlobalValue *> StaticExternCMap; StaticExternCMap StaticExternCValues; /// \brief thread_local variables defined or used in this TU. std::vector<std::pair<const VarDecl *, llvm::GlobalVariable *> > CXXThreadLocals; /// \brief thread_local variables with initializers that need to run /// before any thread_local variable in this TU is odr-used. std::vector<llvm::Function *> CXXThreadLocalInits; std::vector<llvm::GlobalVariable *> CXXThreadLocalInitVars; /// Global variables with initializers that need to run before main. std::vector<llvm::Function *> CXXGlobalInits; /// When a C++ decl with an initializer is deferred, null is /// appended to CXXGlobalInits, and the index of that null is placed /// here so that the initializer will be performed in the correct /// order. Once the decl is emitted, the index is replaced with ~0U to ensure /// that we don't re-emit the initializer. llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition; typedef std::pair<OrderGlobalInits, llvm::Function*> GlobalInitData; struct GlobalInitPriorityCmp { bool operator()(const GlobalInitData &LHS, const GlobalInitData &RHS) const { return LHS.first.priority < RHS.first.priority; } }; /// Global variables with initializers whose order of initialization is set by /// init_priority attribute. SmallVector<GlobalInitData, 8> PrioritizedCXXGlobalInits; /// Global destructor functions and arguments that need to run on termination. std::vector<std::pair<llvm::WeakTrackingVH, llvm::Constant *>> CXXGlobalDtors; /// \brief The complete set of modules that has been imported. llvm::SetVector<clang::Module *> ImportedModules; /// \brief A vector of metadata strings. SmallVector<llvm::Metadata *, 16> LinkerOptionsMetadata; /// @name Cache for Objective-C runtime types /// @{ /// Cached reference to the class for constant strings. This value has type /// int * but is actually an Obj-C class pointer. llvm::WeakTrackingVH CFConstantStringClassRef; /// Cached reference to the class for constant strings. This value has type /// int * but is actually an Obj-C class pointer. llvm::WeakTrackingVH ConstantStringClassRef; /// \brief The LLVM type corresponding to NSConstantString. 
llvm::StructType *NSConstantStringType; /// \brief The type used to describe the state of a fast enumeration in /// Objective-C's for..in loop. QualType ObjCFastEnumerationStateType; /// @} /// Lazily create the Objective-C runtime void createObjCRuntime(); void createOpenCLRuntime(); void createOpenMPRuntime(); void createCUDARuntime(); void createHLSLRuntime(); // HLSL Change bool isTriviallyRecursive(const FunctionDecl *F); bool shouldEmitFunction(GlobalDecl GD); /// @name Cache for Blocks Runtime Globals /// @{ llvm::Constant *NSConcreteGlobalBlock; llvm::Constant *NSConcreteStackBlock; llvm::Constant *BlockObjectAssign; llvm::Constant *BlockObjectDispose; llvm::Type *BlockDescriptorType; llvm::Type *GenericBlockLiteralType; struct { int GlobalUniqueCount; } Block; /// void @llvm.lifetime.start(i64 %size, i8* nocapture <ptr>) llvm::Constant *LifetimeStartFn; /// void @llvm.lifetime.end(i64 %size, i8* nocapture <ptr>) llvm::Constant *LifetimeEndFn; GlobalDecl initializedGlobalDecl; std::unique_ptr<SanitizerMetadata> SanitizerMD; /// @} llvm::DenseMap<const Decl *, bool> DeferredEmptyCoverageMappingDecls; std::unique_ptr<CoverageMappingModuleGen> CoverageMapping; public: CodeGenModule(ASTContext &C, const HeaderSearchOptions &headersearchopts, const PreprocessorOptions &ppopts, const CodeGenOptions &CodeGenOpts, llvm::Module &M, const llvm::DataLayout &TD, DiagnosticsEngine &Diags, CoverageSourceInfo *CoverageInfo = nullptr); ~CodeGenModule(); void clear(); /// Finalize LLVM code generation. void Release(); /// Return a reference to the configured Objective-C runtime. CGObjCRuntime &getObjCRuntime() { if (!ObjCRuntime) createObjCRuntime(); return *ObjCRuntime; } /// Return true iff an Objective-C runtime has been configured. bool hasObjCRuntime() { return !!ObjCRuntime; } /// Return a reference to the configured OpenCL runtime. CGOpenCLRuntime &getOpenCLRuntime() { assert(OpenCLRuntime != nullptr); return *OpenCLRuntime; } /// Return a reference to the configured OpenMP runtime. CGOpenMPRuntime &getOpenMPRuntime() { assert(OpenMPRuntime != nullptr); return *OpenMPRuntime; } /// Return a reference to the configured CUDA runtime. CGCUDARuntime &getCUDARuntime() { assert(CUDARuntime != nullptr); return *CUDARuntime; } // HLSL Change Starts /// Return a reference to the configured HLSL runtime. 
CGHLSLRuntime &getHLSLRuntime() { assert(HLSLRuntime != nullptr); return *HLSLRuntime; } void FinishCodeGen(); // HLSL Change Ends ARCEntrypoints &getARCEntrypoints() const { assert(getLangOpts().ObjCAutoRefCount && ARCData != nullptr); return *ARCData; } RREntrypoints &getRREntrypoints() const { assert(RRData != nullptr); return *RRData; } InstrProfStats &getPGOStats() { return PGOStats; } llvm::IndexedInstrProfReader *getPGOReader() const { return PGOReader.get(); } CoverageMappingModuleGen *getCoverageMapping() const { return CoverageMapping.get(); } llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) { return StaticLocalDeclMap[D]; } void setStaticLocalDeclAddress(const VarDecl *D, llvm::Constant *C) { StaticLocalDeclMap[D] = C; } llvm::Constant * getOrCreateStaticVarDecl(const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage); llvm::GlobalVariable *getStaticLocalDeclGuardAddress(const VarDecl *D) { return StaticLocalDeclGuardMap[D]; } void setStaticLocalDeclGuardAddress(const VarDecl *D, llvm::GlobalVariable *C) { StaticLocalDeclGuardMap[D] = C; } bool lookupRepresentativeDecl(StringRef MangledName, GlobalDecl &Result) const; llvm::Constant *getAtomicSetterHelperFnMap(QualType Ty) { return AtomicSetterHelperFnMap[Ty]; } void setAtomicSetterHelperFnMap(QualType Ty, llvm::Constant *Fn) { AtomicSetterHelperFnMap[Ty] = Fn; } llvm::Constant *getAtomicGetterHelperFnMap(QualType Ty) { return AtomicGetterHelperFnMap[Ty]; } void setAtomicGetterHelperFnMap(QualType Ty, llvm::Constant *Fn) { AtomicGetterHelperFnMap[Ty] = Fn; } llvm::Constant *getTypeDescriptorFromMap(QualType Ty) { return TypeDescriptorMap[Ty]; } void setTypeDescriptorInMap(QualType Ty, llvm::Constant *C) { TypeDescriptorMap[Ty] = C; } CGDebugInfo *getModuleDebugInfo() { return DebugInfo; } llvm::MDNode *getNoObjCARCExceptionsMetadata() { if (!NoObjCARCExceptionsMetadata) NoObjCARCExceptionsMetadata = llvm::MDNode::get(getLLVMContext(), None); return NoObjCARCExceptionsMetadata; } ASTContext &getContext() const { return Context; } const LangOptions &getLangOpts() const { return LangOpts; } const HeaderSearchOptions &getHeaderSearchOpts() const { return HeaderSearchOpts; } const PreprocessorOptions &getPreprocessorOpts() const { return PreprocessorOpts; } const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; } llvm::Module &getModule() const { return TheModule; } DiagnosticsEngine &getDiags() const { return Diags; } const llvm::DataLayout &getDataLayout() const { return TheDataLayout; } const TargetInfo &getTarget() const { return Target; } const llvm::Triple &getTriple() const; bool supportsCOMDAT() const; void maybeSetTrivialComdat(const Decl &D, llvm::GlobalObject &GO); CGCXXABI &getCXXABI() const { return *ABI; } llvm::LLVMContext &getLLVMContext() { return VMContext; } bool shouldUseTBAA() const { return TBAA != nullptr; } const TargetCodeGenInfo &getTargetCodeGenInfo(); CodeGenTypes &getTypes() { return Types; } CodeGenVTables &getVTables() { return VTables; } ItaniumVTableContext &getItaniumVTableContext() { return VTables.getItaniumVTableContext(); } MicrosoftVTableContext &getMicrosoftVTableContext() { return VTables.getMicrosoftVTableContext(); } CtorList &getGlobalCtors() { return GlobalCtors; } CtorList &getGlobalDtors() { return GlobalDtors; } llvm::MDNode *getTBAAInfo(QualType QTy); llvm::MDNode *getTBAAInfoForVTablePtr(); llvm::MDNode *getTBAAStructInfo(QualType QTy); /// Return the MDNode in the type DAG for the given struct type. 
llvm::MDNode *getTBAAStructTypeInfo(QualType QTy); /// Return the path-aware tag for the given base type, access node and offset. llvm::MDNode *getTBAAStructTagInfo(QualType BaseTy, llvm::MDNode *AccessN, uint64_t O); bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor); bool isPaddedAtomicType(QualType type); bool isPaddedAtomicType(const AtomicType *type); /// Decorate the instruction with a TBAA tag. For scalar TBAA, the tag /// is the same as the type. For struct-path aware TBAA, the tag /// is different from the type: base type, access type and offset. /// When ConvertTypeToTag is true, we create a tag based on the scalar type. void DecorateInstruction(llvm::Instruction *Inst, llvm::MDNode *TBAAInfo, bool ConvertTypeToTag = true); /// Emit the given number of characters as a value of type size_t. llvm::ConstantInt *getSize(CharUnits numChars); /// Set the visibility for the given LLVM GlobalValue. void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const; /// Set the TLS mode for the given LLVM GlobalValue for the thread-local /// variable declaration D. void setTLSMode(llvm::GlobalValue *GV, const VarDecl &D) const; static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) { switch (V) { case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility; case HiddenVisibility: return llvm::GlobalValue::HiddenVisibility; case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility; } llvm_unreachable("unknown visibility!"); } llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) { if (isa<CXXConstructorDecl>(GD.getDecl())) return getAddrOfCXXStructor(cast<CXXConstructorDecl>(GD.getDecl()), getFromCtorType(GD.getCtorType())); else if (isa<CXXDestructorDecl>(GD.getDecl())) return getAddrOfCXXStructor(cast<CXXDestructorDecl>(GD.getDecl()), getFromDtorType(GD.getDtorType())); else if (isa<FunctionDecl>(GD.getDecl())) return GetAddrOfFunction(GD); else return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl())); } /// Will return a global variable of the given type. If a variable with a /// different type already exists then a new variable with the right type /// will be created and all uses of the old variable will be replaced with a /// bitcast to the new variable. llvm::GlobalVariable * CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty, llvm::GlobalValue::LinkageTypes Linkage); llvm::Function * CreateGlobalInitOrDestructFunction(llvm::FunctionType *ty, const Twine &name, SourceLocation Loc = SourceLocation(), bool TLS = false); /// Return the address space of the underlying global variable for D, as /// determined by its declaration. Normally this is the same as the address /// space of D's type, but in CUDA, address spaces are associated with /// declarations, not types. unsigned GetGlobalVarAddressSpace(const VarDecl *D, unsigned AddrSpace); /// Return the llvm::Constant for the address of the given global variable. /// If Ty is non-null and if the global doesn't exist, then it will be created /// with the specified type instead of whatever the normal requested type /// would be. llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D, llvm::Type *Ty = nullptr); /// Return the address of the given function. If Ty is non-null, then this /// function will use the specified type if it has to create it. llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = 0, bool ForVTable = false, bool DontDefer = false); /// Get the address of the RTTI descriptor for the given type. 
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false); llvm::Constant *getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType); /// Get the address of a uuid descriptor. llvm::Constant *GetAddrOfUuidDescriptor(const CXXUuidofExpr* E); /// Get the address of the thunk for the given global decl. llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk); /// Get a reference to the target of VD. llvm::Constant *GetWeakRefReference(const ValueDecl *VD); CharUnits computeNonVirtualBaseClassOffset(const CXXRecordDecl *DerivedClass, CastExpr::path_const_iterator Start, CastExpr::path_const_iterator End); /// Returns the offset from a derived class to a class. Returns null if the /// offset is 0. llvm::Constant * GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd); /// A pair of helper functions for a __block variable. class ByrefHelpers : public llvm::FoldingSetNode { public: llvm::Constant *CopyHelper; llvm::Constant *DisposeHelper; /// The alignment of the field. This is important because /// different offsets to the field within the byref struct need to /// have different helper functions. CharUnits Alignment; ByrefHelpers(CharUnits alignment) : Alignment(alignment) {} virtual ~ByrefHelpers(); void Profile(llvm::FoldingSetNodeID &id) const { id.AddInteger(Alignment.getQuantity()); profileImpl(id); } virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0; virtual bool needsCopy() const { return true; } virtual void emitCopy(CodeGenFunction &CGF, llvm::Value *dest, llvm::Value *src) = 0; virtual bool needsDispose() const { return true; } virtual void emitDispose(CodeGenFunction &CGF, llvm::Value *field) = 0; }; llvm::FoldingSet<ByrefHelpers> ByrefHelpersCache; /// Fetches the global unique block count. int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; } /// Fetches the type of a generic block descriptor. llvm::Type *getBlockDescriptorType(); /// The type of a generic block literal. llvm::Type *getGenericBlockLiteralType(); /// Gets the address of a block which requires no captures. llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *); /// Return a pointer to a constant CFString object for the given string. llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal); /// Return a pointer to a constant NSString object for the given string, or a /// user-defined String object as defined via the /// -fconstant-string-class=class_name option. llvm::GlobalVariable *GetAddrOfConstantString(const StringLiteral *Literal); /// Return a constant array for the given string. llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E); /// Return a pointer to a constant array for the given string literal. llvm::GlobalVariable * GetAddrOfConstantStringFromLiteral(const StringLiteral *S, StringRef Name = ".str"); /// Return a pointer to a constant array for the given ObjCEncodeExpr node. llvm::GlobalVariable * GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *); /// Returns a pointer to a character array containing the literal and a /// terminating '\0' character. The result has pointer to array type. /// /// \param GlobalName If provided, the name to use for the global (if one is /// created). 
llvm::GlobalVariable * GetAddrOfConstantCString(const std::string &Str, const char *GlobalName = nullptr, unsigned Alignment = 0); /// Returns a pointer to a constant global variable for the given file-scope /// compound literal expression. llvm::Constant *GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E); /// \brief Returns a pointer to a global variable representing a temporary /// with static or thread storage duration. llvm::Constant *GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E, const Expr *Inner); /// \brief Retrieve the record type that describes the state of an /// Objective-C fast enumeration loop (for..in). QualType getObjCFastEnumerationStateType(); // Produce code for this constructor/destructor. This method doesn't try // to apply any ABI rules about which other constructors/destructors // are needed or whether they are aliases of each other. llvm::Function *codegenCXXStructor(const CXXMethodDecl *MD, StructorType Type); /// Return the address of the constructor/destructor of the given type. llvm::GlobalValue * getAddrOfCXXStructor(const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo = nullptr, llvm::FunctionType *FnType = nullptr, bool DontDefer = false); /// Given a builtin id for a function like "__builtin_fabsf", return a /// Function* for "fabsf". llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID); llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys = None); /// Emit code for a single top level declaration. void EmitTopLevelDecl(Decl *D); /// \brief Store a deferred empty coverage mapping for an unused /// and thus uninstrumented top level declaration. void AddDeferredUnusedCoverageMapping(Decl *D); /// \brief Remove the deferred empty coverage mapping as this /// declaration is actually instrumented. void ClearUnusedCoverageMapping(const Decl *D); /// \brief Emit all the deferred coverage mappings /// for the uninstrumented functions. void EmitDeferredUnusedCoverageMappings(); /// Tell the consumer that this variable has been instantiated. void HandleCXXStaticMemberVarInstantiation(VarDecl *VD); /// \brief If the declaration has internal linkage but is inside an /// extern "C" linkage specification, prepare to emit an alias for it /// to the expected name. template<typename SomeDecl> void MaybeHandleStaticInExternC(const SomeDecl *D, llvm::GlobalValue *GV); /// Add a global to a list to be added to the llvm.used metadata. void addUsedGlobal(llvm::GlobalValue *GV); /// Add a global to a list to be added to the llvm.compiler.used metadata. void addCompilerUsedGlobal(llvm::GlobalValue *GV); /// Add a destructor and object to add to the C++ global destructor function. void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) { CXXGlobalDtors.emplace_back(DtorFn, Object); } /// Create a new runtime function with the specified type and name. llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeSet ExtraAttrs = llvm::AttributeSet()); /// Create a new compiler builtin function with the specified type and name. llvm::Constant *CreateBuiltinFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeSet ExtraAttrs = llvm::AttributeSet()); /// Create a new runtime global variable with the specified type and name. 
llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty, StringRef Name); ///@name Custom Blocks Runtime Interfaces ///@{ llvm::Constant *getNSConcreteGlobalBlock(); llvm::Constant *getNSConcreteStackBlock(); llvm::Constant *getBlockObjectAssign(); llvm::Constant *getBlockObjectDispose(); ///@} llvm::Constant *getLLVMLifetimeStartFn(); llvm::Constant *getLLVMLifetimeEndFn(); // Make sure that this type is translated. void UpdateCompletedType(const TagDecl *TD); llvm::Constant *getMemberPointerConstant(const UnaryOperator *e); /// Try to emit the initializer for the given declaration as a constant; /// returns 0 if the expression cannot be emitted as a constant. llvm::Constant *EmitConstantInit(const VarDecl &D, CodeGenFunction *CGF = nullptr); /// Try to emit the given expression as a constant; returns 0 if the /// expression cannot be emitted as a constant. llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType, CodeGenFunction *CGF = nullptr); /// Emit the given constant value as a constant, in the type's scalar /// representation. llvm::Constant *EmitConstantValue(const APValue &Value, QualType DestType, CodeGenFunction *CGF = nullptr); /// Emit the given constant value as a constant, in the type's memory /// representation. llvm::Constant *EmitConstantValueForMemory(const APValue &Value, QualType DestType, CodeGenFunction *CGF = nullptr); /// Return the result of value-initializing the given type, i.e. a null /// expression of the given type. This is usually, but not always, an LLVM /// null constant. llvm::Constant *EmitNullConstant(QualType T); /// Return a null constant appropriate for zero-initializing a base class with /// the given type. This is usually, but not always, an LLVM null constant. llvm::Constant *EmitNullConstantForBase(const CXXRecordDecl *Record); /// Emit a general error that something can't be done. void Error(SourceLocation loc, StringRef error); /// Print out an error that codegen doesn't support the specified stmt yet. void ErrorUnsupported(const Stmt *S, const char *Type); /// Print out an error that codegen doesn't support the specified decl yet. void ErrorUnsupported(const Decl *D, const char *Type); /// Set the attributes on the LLVM function for the given decl and function /// info. This applies attributes necessary for handling the ABI as well as /// user specified attributes like section. void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F, const CGFunctionInfo &FI); /// Set the LLVM function attributes (sext, zext, etc). void SetLLVMFunctionAttributes(const Decl *D, const CGFunctionInfo &Info, llvm::Function *F); /// Set the LLVM function attributes which only apply to a function /// definition. void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F); /// Return true iff the given type uses 'sret' when used as a return type. bool ReturnTypeUsesSRet(const CGFunctionInfo &FI); /// Return true iff the given type uses an argument slot when 'sret' is used /// as a return type. bool ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI); /// Return true iff the given type uses 'fpret' when used as a return type. bool ReturnTypeUsesFPRet(QualType ResultType); /// Return true iff the given type uses 'fp2ret' when used as a return type. bool ReturnTypeUsesFP2Ret(QualType ResultType); /// Get the LLVM attributes and calling convention to use for a particular /// function type. /// /// \param Info - The function type information. /// \param TargetDecl - The decl these attributes are being constructed /// for. 
If supplied, the attributes applied to this decl may contribute to the /// function attributes and calling convention. /// \param PAL [out] - On return, the attribute list to use. /// \param CallingConv [out] - On return, the LLVM calling convention to use. void ConstructAttributeList(const CGFunctionInfo &Info, const Decl *TargetDecl, AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite); StringRef getMangledName(GlobalDecl GD); StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD); void EmitTentativeDefinition(const VarDecl *D); void EmitVTable(CXXRecordDecl *Class); /// Emit the RTTI descriptors for the builtin types. void EmitFundamentalRTTIDescriptors(); /// \brief Appends Opts to the "Linker Options" metadata value. void AppendLinkerOptions(StringRef Opts); /// \brief Appends a detect mismatch command to the linker options. void AddDetectMismatch(StringRef Name, StringRef Value); /// \brief Appends a dependent lib to the "Linker Options" metadata value. void AddDependentLib(StringRef Lib); llvm::GlobalVariable::LinkageTypes getFunctionLinkage(GlobalDecl GD); void setFunctionLinkage(GlobalDecl GD, llvm::Function *F) { F->setLinkage(getFunctionLinkage(GD)); } /// Set the DLL storage class on F. void setFunctionDLLStorageClass(GlobalDecl GD, llvm::Function *F); /// Return the appropriate linkage for the vtable, VTT, and type information /// of the given class. llvm::GlobalVariable::LinkageTypes getVTableLinkage(const CXXRecordDecl *RD); /// Return the store size, in character units, of the given LLVM type. CharUnits GetTargetTypeStoreSize(llvm::Type *Ty) const; /// Returns LLVM linkage for a declarator. llvm::GlobalValue::LinkageTypes getLLVMLinkageForDeclarator(const DeclaratorDecl *D, GVALinkage Linkage, bool IsConstantVariable); /// Returns LLVM linkage for a declarator. llvm::GlobalValue::LinkageTypes getLLVMLinkageVarDefinition(const VarDecl *VD, bool IsConstant); /// Emit all the global annotations. void EmitGlobalAnnotations(); /// Emit an annotation string. llvm::Constant *EmitAnnotationString(StringRef Str); /// Emit the annotation's translation unit. llvm::Constant *EmitAnnotationUnit(SourceLocation Loc); /// Emit the annotation line number. llvm::Constant *EmitAnnotationLineNo(SourceLocation L); /// Generate the llvm::ConstantStruct which contains the annotation /// information for a given GlobalValue. The annotation struct is /// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the /// GlobalValue being annotated. The second field is the constant string /// created from the AnnotateAttr's annotation. The third field is a constant /// string containing the name of the translation unit. The fourth field is /// the line number in the file of the annotated value declaration. llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV, const AnnotateAttr *AA, SourceLocation L); /// Add global annotations that are set on D, for the global GV. Those /// annotations are emitted during finalization of the LLVM code. void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV); bool isInSanitizerBlacklist(llvm::Function *Fn, SourceLocation Loc) const; bool isInSanitizerBlacklist(llvm::GlobalVariable *GV, SourceLocation Loc, QualType Ty, StringRef Category = StringRef()) const; SanitizerMetadata *getSanitizerMetadata() { return SanitizerMD.get(); } void addDeferredVTable(const CXXRecordDecl *RD) { DeferredVTables.push_back(RD); } /// Emit code for a single global function or var decl. Forward declarations /// are emitted lazily. 
void EmitGlobal(GlobalDecl D); bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target, bool InEveryTU); bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D); /// Set attributes for a global definition. void setFunctionDefinitionAttributes(const FunctionDecl *D, llvm::Function *F); llvm::GlobalValue *GetGlobalValue(StringRef Ref); /// Set attributes which are common to any form of a global definition (alias, /// Objective-C method, function, global variable). /// /// NOTE: This should only be called for definitions. void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV); /// Set attributes which must be preserved by an alias. This includes common /// attributes (i.e. it includes a call to SetCommonAttributes). /// /// NOTE: This should only be called for definitions. void setAliasAttributes(const Decl *D, llvm::GlobalValue *GV); void addReplacement(StringRef Name, llvm::Constant *C); /// \brief Emit code for a threadprivate directive. /// \param D Threadprivate declaration. void EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D); /// Returns whether the given record is blacklisted from control flow /// integrity checks. bool IsCFIBlacklistedRecord(const CXXRecordDecl *RD); /// Emit bit set entries for the given vtable using the given layout if /// vptr CFI is enabled. void EmitVTableBitSetEntries(llvm::GlobalVariable *VTable, const VTableLayout &VTLayout); /// Create a bitset entry for the given vtable. llvm::MDTuple *CreateVTableBitSetEntry(llvm::GlobalVariable *VTable, CharUnits Offset, const CXXRecordDecl *RD); /// \brief Get the declaration of std::terminate for the platform. llvm::Constant *getTerminateFn(); private: llvm::Constant * GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl D, bool ForVTable, bool DontDefer = false, bool IsThunk = false, llvm::AttributeSet ExtraAttrs = llvm::AttributeSet()); llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName, llvm::PointerType *PTy, const VarDecl *D); void setNonAliasAttributes(const Decl *D, llvm::GlobalObject *GO); /// Set function attributes for a function declaration. void SetFunctionAttributes(GlobalDecl GD, llvm::Function *F, bool IsIncompleteFunction, bool IsThunk); void EmitGlobalDefinition(GlobalDecl D, llvm::GlobalValue *GV = nullptr); void EmitGlobalFunctionDefinition(GlobalDecl GD, llvm::GlobalValue *GV); void EmitGlobalVarDefinition(const VarDecl *D); void EmitAliasDefinition(GlobalDecl GD); void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D); void EmitObjCIvarInitializations(ObjCImplementationDecl *D); // C++ related functions. void EmitNamespace(const NamespaceDecl *D); void EmitLinkageSpec(const LinkageSpecDecl *D); void CompleteDIClassType(const CXXMethodDecl* D); /// \brief Emit the function that initializes C++ thread_local variables. void EmitCXXThreadLocalInitFunc(); /// Emit the function that initializes C++ globals. void EmitCXXGlobalInitFunc(); /// Emit the function that destroys C++ globals. void EmitCXXGlobalDtorFunc(); /// Emit the function that initializes the specified global (if PerformInit is /// true) and registers its destructor. void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D, llvm::GlobalVariable *Addr, bool PerformInit); void EmitPointerToInitFunc(const VarDecl *VD, llvm::GlobalVariable *Addr, llvm::Function *InitFunc, InitSegAttr *ISA); // FIXME: Hardcoding priority here is gross. 
void AddGlobalCtor(llvm::Function *Ctor, int Priority = 65535, llvm::Constant *AssociatedData = 0); void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535); /// Generates a global array of functions and priorities using the given list /// and name. This array will have appending linkage and is suitable for use /// as an LLVM constructor or destructor array. void EmitCtorList(const CtorList &Fns, const char *GlobalName); /// Emit the RTTI descriptors for the given type. void EmitFundamentalRTTIDescriptor(QualType Type); /// Emit any needed decls for which code generation was deferred. void EmitDeferred(); /// Call replaceAllUsesWith on all pairs in Replacements. void applyReplacements(); void checkAliases(); /// Emit any vtables which we deferred and still have a use for. void EmitDeferredVTables(); /// Emit the llvm.used and llvm.compiler.used metadata. void emitLLVMUsed(); /// \brief Emit the link options introduced by imported modules. void EmitModuleLinkOptions(); /// \brief Emit aliases for internal-linkage declarations inside "C" language /// linkage specifications, giving them the "expected" name where possible. void EmitStaticExternCAliases(); void EmitDeclMetadata(); /// \brief Emit the Clang version as llvm.ident metadata. void EmitVersionIdentMetadata(); /// Emits target-specific metadata for global declarations. void EmitTargetMetadata(); /// Emit the llvm.gcov metadata used to tell LLVM where to emit the .gcno and /// .gcda files in a way that persists in .bc files. void EmitCoverageFile(); /// Emits the initializer for a uuidof string. llvm::Constant *EmitUuidofInitializer(StringRef uuidstr); /// Determine whether the definition must be emitted; if this returns \c /// false, the definition can be emitted lazily if it's used. bool MustBeEmitted(const ValueDecl *D); /// Determine whether the definition can be emitted eagerly, or should be /// delayed until the end of the translation unit. This is relevant for /// definitions whose linkage can change, e.g. implicit function instantiations /// which may later be explicitly instantiated. bool MayBeEmittedEagerly(const ValueDecl *D); /// Check whether we can use a "simpler", more core exceptions personality /// function. void SimplifyPersonality(); }; } // end namespace CodeGen } // end namespace clang #endif
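// A minimal sketch of the lifecycle implied by the interface above, kept as a
// comment because this header is internal to lib/CodeGen. The names emitTU,
// Ctx, M, Diags, HSOpts, PPOpts, CGOpts, and TopLevelDecls are hypothetical
// placeholders; in practice the CodeGenerator ASTConsumer is what drives
// CodeGenModule.
//
//   void emitTU(clang::ASTContext &Ctx, llvm::Module &M,
//               clang::DiagnosticsEngine &Diags,
//               const clang::HeaderSearchOptions &HSOpts,
//               const clang::PreprocessorOptions &PPOpts,
//               const clang::CodeGenOptions &CGOpts,
//               llvm::ArrayRef<clang::Decl *> TopLevelDecls) {
//     clang::CodeGen::CodeGenModule CGM(Ctx, HSOpts, PPOpts, CGOpts, M,
//                                       M.getDataLayout(), Diags);
//     for (clang::Decl *D : TopLevelDecls)
//       CGM.EmitTopLevelDecl(D); // definitions may be deferred until used
//     CGM.Release();             // flush deferred decls, ctors/dtors, metadata
//   }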
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This provides C++ code generation targeting the Itanium C++ ABI. The class // in this file generates structures that follow the Itanium C++ ABI, which is // documented at: // http://www.codesourcery.com/public/cxx-abi/abi.html // http://www.codesourcery.com/public/cxx-abi/abi-eh.html // // It also supports the closely-related ARM ABI, documented at: // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf // //===----------------------------------------------------------------------===// #include "CGCXXABI.h" #include "CGCleanup.h" #include "CGRecordLayout.h" #include "CGVTables.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "TargetInfo.h" #include "clang/AST/Mangle.h" #include "clang/AST/Type.h" #include "clang/AST/StmtCXX.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Value.h" using namespace clang; using namespace CodeGen; namespace { class ItaniumCXXABI : public CodeGen::CGCXXABI { /// VTables - All the vtables which have been defined. llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables; protected: bool UseARMMethodPtrABI; bool UseARMGuardVarABI; ItaniumMangleContext &getMangleContext() { return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext()); } public: ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool UseARMMethodPtrABI = false, bool UseARMGuardVarABI = false) : CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI), UseARMGuardVarABI(UseARMGuardVarABI) { } bool classifyReturnType(CGFunctionInfo &FI) const override; RecordArgABI getRecordArgABI(const CXXRecordDecl *RD) const override { // Structures with either a non-trivial destructor or a non-trivial // copy constructor are always indirect. // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared // special members. 
if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) return RAA_Indirect; return RAA_Default; } bool isZeroInitializable(const MemberPointerType *MPT) override; llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override; llvm::Value * EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E, llvm::Value *&This, llvm::Value *MemFnPtr, const MemberPointerType *MPT) override; llvm::Value * EmitMemberDataPointerAddress(CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr, const MemberPointerType *MPT) override; llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src) override; llvm::Constant *EmitMemberPointerConversion(const CastExpr *E, llvm::Constant *Src) override; llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT) override; llvm::Constant *EmitMemberFunctionPointer(const CXXMethodDecl *MD) override; llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT, CharUnits offset) override; llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT) override; llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD, CharUnits ThisAdjustment); llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality) override; llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *Addr, const MemberPointerType *MPT) override; void emitVirtualObjectDelete(CodeGenFunction &CGF, const CXXDeleteExpr *DE, llvm::Value *Ptr, QualType ElementType, const CXXDestructorDecl *Dtor) override; void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) override; void emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) override; void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override; llvm::CallInst * emitTerminateForUnexpectedException(CodeGenFunction &CGF, llvm::Value *Exn) override; void EmitFundamentalRTTIDescriptor(QualType Type); void EmitFundamentalRTTIDescriptors(); llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override; llvm::Constant * getAddrOfCXXCatchHandlerType(QualType Ty, QualType CatchHandlerType) override { return getAddrOfRTTIDescriptor(Ty); } bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override; void EmitBadTypeidCall(CodeGenFunction &CGF) override; llvm::Value *EmitTypeid(CodeGenFunction &CGF, QualType SrcRecordTy, llvm::Value *ThisPtr, llvm::Type *StdTypeInfoPtrTy) override; bool shouldDynamicCastCallBeNullChecked(bool SrcIsPtr, QualType SrcRecordTy) override; llvm::Value *EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy, QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) override; llvm::Value *EmitDynamicCastToVoid(CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy, QualType DestTy) override; bool EmitBadCastCall(CodeGenFunction &CGF) override; llvm::Value * GetVirtualBaseClassOffset(CodeGenFunction &CGF, llvm::Value *This, const CXXRecordDecl *ClassDecl, const CXXRecordDecl *BaseClassDecl) override; void EmitCXXConstructors(const CXXConstructorDecl *D) override; void buildStructorSignature(const CXXMethodDecl *MD, StructorType T, SmallVectorImpl<CanQualType> &ArgTys) override; bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor, CXXDtorType DT) const override { // Itanium does not emit any destructor variant as an inline thunk. 
// Delegating may occur as an optimization, but all variants are either // emitted with external linkage or as linkonce if they are inline and used. return false; } void EmitCXXDestructors(const CXXDestructorDecl *D) override; void addImplicitStructorParams(CodeGenFunction &CGF, QualType &ResTy, FunctionArgList &Params) override; void EmitInstanceFunctionProlog(CodeGenFunction &CGF) override; unsigned addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type, bool ForVirtualBase, bool Delegating, CallArgList &Args) override; void EmitDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *DD, CXXDtorType Type, bool ForVirtualBase, bool Delegating, llvm::Value *This) override; void emitVTableDefinitions(CodeGenVTables &CGVT, const CXXRecordDecl *RD) override; llvm::Value *getVTableAddressPointInStructor( CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base, const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) override; llvm::Constant * getVTableAddressPointForConstExpr(BaseSubobject Base, const CXXRecordDecl *VTableClass) override; llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD, CharUnits VPtrOffset) override; llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This, llvm::Type *Ty, SourceLocation Loc) override; llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType, llvm::Value *This, const CXXMemberCallExpr *CE) override; void emitVirtualInheritanceTables(const CXXRecordDecl *RD) override; void setThunkLinkage(llvm::Function *Thunk, bool ForVTable, GlobalDecl GD, bool ReturnAdjustment) override { // Allow inlining of thunks by emitting them with available_externally // linkage together with vtables when needed. 
if (ForVTable && !Thunk->hasLocalLinkage()) Thunk->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage); } llvm::Value *performThisAdjustment(CodeGenFunction &CGF, llvm::Value *This, const ThisAdjustment &TA) override; llvm::Value *performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret, const ReturnAdjustment &RA) override; size_t getSrcArgforCopyCtor(const CXXConstructorDecl *, FunctionArgList &Args) const override { assert(!Args.empty() && "expected the arglist to not be empty!"); return Args.size() - 1; } StringRef GetPureVirtualCallName() override { return "__cxa_pure_virtual"; } StringRef GetDeletedVirtualCallName() override { return "__cxa_deleted_virtual"; } CharUnits getArrayCookieSizeImpl(QualType elementType) override; llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF, llvm::Value *NewPtr, llvm::Value *NumElements, const CXXNewExpr *expr, QualType ElementType) override; llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr, CharUnits cookieSize) override; void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D, llvm::GlobalVariable *DeclPtr, bool PerformInit) override; void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D, llvm::Constant *dtor, llvm::Constant *addr) override; llvm::Function *getOrCreateThreadLocalWrapper(const VarDecl *VD, llvm::Value *Val); void EmitThreadLocalInitFuncs( CodeGenModule &CGM, ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>> CXXThreadLocals, ArrayRef<llvm::Function *> CXXThreadLocalInits, ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) override; bool usesThreadWrapperFunction() const override { return true; } LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF, const VarDecl *VD, QualType LValType) override; bool NeedsVTTParameter(GlobalDecl GD) override; /**************************** RTTI Uniqueness ******************************/ protected: /// Returns true if the ABI requires RTTI type_info objects to be unique /// across a program. virtual bool shouldRTTIBeUnique() const { return true; } public: /// What sort of unique-RTTI behavior should we use? enum RTTIUniquenessKind { /// We are guaranteeing, or need to guarantee, that the RTTI string /// is unique. RUK_Unique, /// We are not guaranteeing uniqueness for the RTTI string, so we /// can demote to hidden visibility but must use string comparisons. RUK_NonUniqueHidden, /// We are not guaranteeing uniqueness for the RTTI string, so we /// have to use string comparisons, but we also have to emit it with /// non-hidden visibility. RUK_NonUniqueVisible }; /// Return the required visibility status for the given type and linkage in /// the current ABI. 
RTTIUniquenessKind classifyRTTIUniqueness(QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const; friend class ItaniumRTTIBuilder; void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override; }; class ARMCXXABI : public ItaniumCXXABI { public: ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, /* UseARMGuardVarABI = */ true) {} bool HasThisReturn(GlobalDecl GD) const override { return (isa<CXXConstructorDecl>(GD.getDecl()) || ( isa<CXXDestructorDecl>(GD.getDecl()) && GD.getDtorType() != Dtor_Deleting)); } void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy) override; CharUnits getArrayCookieSizeImpl(QualType elementType) override; llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF, llvm::Value *NewPtr, llvm::Value *NumElements, const CXXNewExpr *expr, QualType ElementType) override; llvm::Value *readArrayCookieImpl(CodeGenFunction &CGF, llvm::Value *allocPtr, CharUnits cookieSize) override; }; class iOS64CXXABI : public ARMCXXABI { public: iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {} // ARM64 libraries are prepared for non-unique RTTI. bool shouldRTTIBeUnique() const override { return false; } }; } CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) { switch (CGM.getTarget().getCXXABI().getKind()) { // For IR-generation purposes, there's no significant difference // between the ARM and iOS ABIs. case TargetCXXABI::GenericARM: case TargetCXXABI::iOS: return new ARMCXXABI(CGM); case TargetCXXABI::iOS64: return new iOS64CXXABI(CGM); // Note that AArch64 uses the generic ItaniumCXXABI class since it doesn't // include the other 32-bit ARM oddities: constructor/destructor return values // and array cookies. case TargetCXXABI::GenericAArch64: return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, /* UseARMGuardVarABI = */ true); case TargetCXXABI::GenericMIPS: return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true); case TargetCXXABI::GenericItanium: if (CGM.getContext().getTargetInfo().getTriple().getArch() == llvm::Triple::le32) { // For PNaCl, use ARM-style method pointers so that PNaCl code // does not assume anything about the alignment of function // pointers. return new ItaniumCXXABI(CGM, /* UseARMMethodPtrABI = */ true, /* UseARMGuardVarABI = */ false); } return new ItaniumCXXABI(CGM); case TargetCXXABI::Microsoft: llvm_unreachable("Microsoft ABI is not Itanium-based"); } llvm_unreachable("bad ABI kind"); } llvm::Type * ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) { if (MPT->isMemberDataPointer()) return CGM.PtrDiffTy; return llvm::StructType::get(CGM.PtrDiffTy, CGM.PtrDiffTy, nullptr); } /// In the Itanium and ARM ABIs, method pointers have the form: /// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr; /// /// In the Itanium ABI: /// - method pointers are virtual if (memptr.ptr & 1) is nonzero /// - the this-adjustment is (memptr.adj) /// - the virtual offset is (memptr.ptr - 1) /// /// In the ARM ABI: /// - method pointers are virtual if (memptr.adj & 1) is nonzero /// - the this-adjustment is (memptr.adj >> 1) /// - the virtual offset is (memptr.ptr) /// ARM uses 'adj' for the virtual flag because Thumb functions /// may be only single-byte aligned. /// /// If the member is virtual, the adjusted 'this' pointer points /// to a vtable pointer from which the virtual offset is applied. /// /// If the member is non-virtual, memptr.ptr is the address of /// the function to call. 
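/// A worked example of the two encodings described above, assuming 8-byte
/// vtable slots (the concrete numbers are illustrative only, derived from
/// the stated rules):
///
///   A virtual function in vtable slot 2 with no this-adjustment:
///     Itanium: memptr = { 2*8 + 1, 0 } = { 17, 0 }  (low bit of ptr set)
///     ARM:     memptr = { 16, 2*0 + 1 } = { 16, 1 } (low bit of adj set)
///
///   A non-virtual function &X::f needing a 4-byte this-adjustment:
///     Itanium: memptr = { (ptrdiff_t)&X::f, 4 }
///     ARM:     memptr = { (ptrdiff_t)&X::f, 8 }  (adjustment doubled,
///                                                 virtual bit clear)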
llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( CodeGenFunction &CGF, const Expr *E, llvm::Value *&This, llvm::Value *MemFnPtr, const MemberPointerType *MPT) { CGBuilderTy &Builder = CGF.Builder; const FunctionProtoType *FPT = MPT->getPointeeType()->getAs<FunctionProtoType>(); const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl()); llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType( CGM.getTypes().arrangeCXXMethodType(RD, FPT)); llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual"); llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual"); llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end"); // Extract memptr.adj, which is in the second field. llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj"); // Compute the true adjustment. llvm::Value *Adj = RawAdj; if (UseARMMethodPtrABI) Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted"); // Apply the adjustment and cast back to the original struct type // for consistency. llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy()); Ptr = Builder.CreateInBoundsGEP(Ptr, Adj); This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted"); // Load the function pointer. llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr"); // If the LSB in the function pointer is 1, the function pointer points to // a virtual function. llvm::Value *IsVirtual; if (UseARMMethodPtrABI) IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1); else IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1); IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual"); Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual); // In the virtual path, the adjustment left 'This' pointing to the // vtable of the correct base subobject. The "function pointer" is an // offset within the vtable (+1 for the virtual flag on non-ARM). CGF.EmitBlock(FnVirtual); // Cast the adjusted this to a pointer to vtable pointer and load. llvm::Type *VTableTy = Builder.getInt8PtrTy(); llvm::Value *VTable = CGF.GetVTablePtr(This, VTableTy); // Apply the offset. llvm::Value *VTableOffset = FnAsInt; if (!UseARMMethodPtrABI) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1); VTable = Builder.CreateGEP(VTable, VTableOffset); // Load the virtual function to call. VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo()); llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn"); CGF.EmitBranch(FnEnd); // In the non-virtual path, the function pointer is actually a // function pointer. CGF.EmitBlock(FnNonVirtual); llvm::Value *NonVirtualFn = Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn"); // We're done. CGF.EmitBlock(FnEnd); llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2); Callee->addIncoming(VirtualFn, FnVirtual); Callee->addIncoming(NonVirtualFn, FnNonVirtual); return Callee; } /// Compute an l-value by applying the given pointer-to-member to a /// base object. llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( CodeGenFunction &CGF, const Expr *E, llvm::Value *Base, llvm::Value *MemPtr, const MemberPointerType *MPT) { assert(MemPtr->getType() == CGM.PtrDiffTy); CGBuilderTy &Builder = CGF.Builder; unsigned AS = Base->getType()->getPointerAddressSpace(); // Cast to char*. 
Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS)); // Apply the offset, which we assume is non-null. llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset"); // Cast the address to the appropriate pointer type, adopting the // address space of the base pointer. llvm::Type *PType = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS); return Builder.CreateBitCast(Addr, PType); } /// Perform a bitcast, derived-to-base, or base-to-derived member pointer /// conversion. /// /// Bitcast conversions are always a no-op under Itanium. /// /// Obligatory offset/adjustment diagram: /// <-- offset --> <-- adjustment --> /// |--------------------------|----------------------|--------------------| /// ^Derived address point ^Base address point ^Member address point /// /// So when converting a base member pointer to a derived member pointer, /// we add the offset to the adjustment because the address point has /// decreased; and conversely, when converting a derived MP to a base MP /// we subtract the offset from the adjustment because the address point /// has increased. /// /// The standard forbids (at compile time) conversion to and from /// virtual bases, which is why we don't have to consider them here. /// /// The standard forbids (at run time) casting a derived MP to a base /// MP when the derived MP does not point to a member of the base. /// This is why -1 is a reasonable choice for null data member /// pointers. llvm::Value * ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *src) { assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || E->getCastKind() == CK_BaseToDerivedMemberPointer || E->getCastKind() == CK_ReinterpretMemberPointer); // Under Itanium, reinterprets don't require any additional processing. if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; // Use constant emission if we can. if (isa<llvm::Constant>(src)) return EmitMemberPointerConversion(E, cast<llvm::Constant>(src)); llvm::Constant *adj = getMemberPointerAdjustment(E); if (!adj) return src; CGBuilderTy &Builder = CGF.Builder; bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); const MemberPointerType *destTy = E->getType()->castAs<MemberPointerType>(); // For member data pointers, this is just a matter of adding the // offset if the source is non-null. if (destTy->isMemberDataPointer()) { llvm::Value *dst; if (isDerivedToBase) dst = Builder.CreateNSWSub(src, adj, "adj"); else dst = Builder.CreateNSWAdd(src, adj, "adj"); // Null check. llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType()); llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull"); return Builder.CreateSelect(isNull, src, dst); } // The this-adjustment is left-shifted by 1 on ARM. 
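// (Doubling the delta keeps the ARM encoding consistent: memptr.adj stores
// twice the this-adjustment so its low bit can carry the virtual flag, and a
// doubled delta has a zero low bit, leaving that flag intact. For example,
// converting a base member pointer to a derived member pointer whose base
// lies at offset 4 adds 8 to memptr.adj on ARM where Itanium would add 4.)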
if (UseARMMethodPtrABI) { uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); offset <<= 1; adj = llvm::ConstantInt::get(adj->getType(), offset); } llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj"); llvm::Value *dstAdj; if (isDerivedToBase) dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj"); else dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj"); return Builder.CreateInsertValue(src, dstAdj, 1); } llvm::Constant * ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, llvm::Constant *src) { assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || E->getCastKind() == CK_BaseToDerivedMemberPointer || E->getCastKind() == CK_ReinterpretMemberPointer); // Under Itanium, reinterprets don't require any additional processing. if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; // If the adjustment is trivial, we don't need to do anything. llvm::Constant *adj = getMemberPointerAdjustment(E); if (!adj) return src; bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); const MemberPointerType *destTy = E->getType()->castAs<MemberPointerType>(); // For member data pointers, this is just a matter of adding the // offset if the source is non-null. if (destTy->isMemberDataPointer()) { // null maps to null. if (src->isAllOnesValue()) return src; if (isDerivedToBase) return llvm::ConstantExpr::getNSWSub(src, adj); else return llvm::ConstantExpr::getNSWAdd(src, adj); } // The this-adjustment is left-shifted by 1 on ARM. if (UseARMMethodPtrABI) { uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue(); offset <<= 1; adj = llvm::ConstantInt::get(adj->getType(), offset); } llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1); llvm::Constant *dstAdj; if (isDerivedToBase) dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj); else dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj); return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1); } llvm::Constant * ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) { // Itanium C++ ABI 2.3: // A NULL pointer is represented as -1. if (MPT->isMemberDataPointer()) return llvm::ConstantInt::get(CGM.PtrDiffTy, -1ULL, /*isSigned=*/true); llvm::Constant *Zero = llvm::ConstantInt::get(CGM.PtrDiffTy, 0); llvm::Constant *Values[2] = { Zero, Zero }; return llvm::ConstantStruct::getAnon(Values); } llvm::Constant * ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT, CharUnits offset) { // Itanium C++ ABI 2.3: // A pointer to data member is an offset from the base address of // the class object containing it, represented as a ptrdiff_t return llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()); } llvm::Constant * ItaniumCXXABI::EmitMemberFunctionPointer(const CXXMethodDecl *MD) { return BuildMemberPointer(MD, CharUnits::Zero()); } llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, CharUnits ThisAdjustment) { assert(MD->isInstance() && "Member function must not be static!"); MD = MD->getCanonicalDecl(); CodeGenTypes &Types = CGM.getTypes(); // Get the function pointer (or index if this is a virtual function). 
  llvm::Constant *MemPtr[2];
  if (MD->isVirtual()) {
    uint64_t Index = CGM.getItaniumVTableContext().getMethodVTableIndex(MD);

    const ASTContext &Context = getContext();
    CharUnits PointerWidth =
      Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
    uint64_t VTableOffset = (Index * PointerWidth.getQuantity());

    if (UseARMMethodPtrABI) {
      // ARM C++ ABI 3.2.1:
      //   This ABI specifies that adj contains twice the this
      //   adjustment, plus 1 if the member function is virtual. The
      //   least significant bit of adj then makes exactly the same
      //   discrimination as the least significant bit of ptr does for
      //   Itanium.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         2 * ThisAdjustment.getQuantity() + 1);
    } else {
      // Itanium C++ ABI 2.3:
      //   For a virtual function, [the pointer field] is 1 plus the
      //   virtual table offset (in bytes) of the function,
      //   represented as a ptrdiff_t.
      MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset + 1);
      MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                         ThisAdjustment.getQuantity());
    }
  } else {
    const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
    llvm::Type *Ty;
    // Check whether the function has a computable LLVM signature.
    if (Types.isFuncTypeConvertible(FPT)) {
      // The function has a computable LLVM signature; use the correct type.
      Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
    } else {
      // Use an arbitrary non-function type to tell GetAddrOfFunction that the
      // function type is incomplete.
      Ty = CGM.PtrDiffTy;
    }
    llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);

    MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy);
    MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy,
                                       (UseARMMethodPtrABI ? 2 : 1) *
                                       ThisAdjustment.getQuantity());
  }

  return llvm::ConstantStruct::getAnon(MemPtr);
}

llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
                                                 QualType MPType) {
  const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
  const ValueDecl *MPD = MP.getMemberPointerDecl();
  if (!MPD)
    return EmitNullMemberPointer(MPT);

  CharUnits ThisAdjustment = getMemberPointerPathAdjustment(MP);

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
    return BuildMemberPointer(MD, ThisAdjustment);

  CharUnits FieldOffset =
    getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
  return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
}

/// The comparison algorithm is pretty easy: the member pointers are
/// the same if they're either bitwise identical *or* both null.
///
/// ARM is different here only because null-ness is more complicated.
llvm::Value *
ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
                                           llvm::Value *L,
                                           llvm::Value *R,
                                           const MemberPointerType *MPT,
                                           bool Inequality) {
  CGBuilderTy &Builder = CGF.Builder;

  llvm::ICmpInst::Predicate Eq;
  llvm::Instruction::BinaryOps And, Or;
  if (Inequality) {
    Eq = llvm::ICmpInst::ICMP_NE;
    And = llvm::Instruction::Or;
    Or = llvm::Instruction::And;
  } else {
    Eq = llvm::ICmpInst::ICMP_EQ;
    And = llvm::Instruction::And;
    Or = llvm::Instruction::Or;
  }

  // Member data pointers are easy because there's a unique null
  // value, so it just comes down to bitwise equality.
  if (MPT->isMemberDataPointer())
    return Builder.CreateICmp(Eq, L, R);

  // For member function pointers, the tautologies are more complex.
  // The Itanium tautology is:
  //   (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
  // The ARM tautology is:
  //   (L == R) <==> (L.ptr == R.ptr &&
  //                  (L.adj == R.adj ||
  //                   (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
  // The inequality tautologies have exactly the same structure, except
  // applying De Morgan's laws.

  llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
  llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");

  // This condition tests whether L.ptr == R.ptr. This must always be
  // true for equality to hold.
  llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");

  // This condition, together with the assumption that L.ptr == R.ptr,
  // tests whether the pointers are both null. ARM imposes an extra
  // condition.
  llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
  llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");

  // This condition tests whether L.adj == R.adj. If this isn't
  // true, the pointers are unequal unless they're both null.
  llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
  llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
  llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");

  // Null member function pointers on ARM clear the low bit of Adj,
  // so the zero condition has to check that neither low bit is set.
  if (UseARMMethodPtrABI) {
    llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);

    // Compute (l.adj | r.adj) & 1 and test it against zero.
    llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
    llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
    llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
                                                      "cmp.or.adj");
    EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
  }

  // Tie together all our conditions.
  llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
  Result = Builder.CreateBinOp(And, PtrEq, Result,
                               Inequality ? "memptr.ne" : "memptr.eq");
  return Result;
}

llvm::Value *
ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
                                          llvm::Value *MemPtr,
                                          const MemberPointerType *MPT) {
  CGBuilderTy &Builder = CGF.Builder;

  /// For member data pointers, this is just a check against -1.
  if (MPT->isMemberDataPointer()) {
    assert(MemPtr->getType() == CGM.PtrDiffTy);
    llvm::Value *NegativeOne =
      llvm::Constant::getAllOnesValue(MemPtr->getType());
    return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
  }

  // In Itanium, a member function pointer is not null if 'ptr' is not null.
  llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");

  llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
  llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");

  // On ARM, a member function pointer is also non-null if the low bit of 'adj'
  // (the virtual bit) is set.
  if (UseARMMethodPtrABI) {
    llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
    llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
    llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
    llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
                                                  "memptr.isvirtual");
    Result = Builder.CreateOr(Result, IsVirtual);
  }

  return Result;
}

bool ItaniumCXXABI::classifyReturnType(CGFunctionInfo &FI) const {
  const CXXRecordDecl *RD = FI.getReturnType()->getAsCXXRecordDecl();
  if (!RD)
    return false;

  // Return indirectly if we have a non-trivial copy ctor or non-trivial dtor.
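  // For example (illustrative): given 'struct S { ~S(); }; S f();', the
  // result of f() is returned through a hidden sret pointer rather than in
  // registers, so the caller controls the address of the temporary.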
  // FIXME: Use canCopyArgument() when it is fixed to handle lazily declared
  // special members.
  if (RD->hasNonTrivialDestructor() || RD->hasNonTrivialCopyConstructor()) {
    FI.getReturnInfo() = ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    return true;
  }
  return false;
}

/// The Itanium ABI requires non-zero initialization only for data
/// member pointers, for which '0' is a valid offset.
bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
  return MPT->isMemberFunctionPointer();
}

/// The Itanium ABI always places an offset to the complete object
/// at entry -2 in the vtable.
void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
                                            const CXXDeleteExpr *DE,
                                            llvm::Value *Ptr,
                                            QualType ElementType,
                                            const CXXDestructorDecl *Dtor) {
  bool UseGlobalDelete = DE->isGlobalDelete();
  if (UseGlobalDelete) {
    // Derive the complete-object pointer, which is what we need
    // to pass to the deallocation function.

    // Grab the vtable pointer as an intptr_t*.
    llvm::Value *VTable = CGF.GetVTablePtr(Ptr, CGF.IntPtrTy->getPointerTo());

    // Track back to entry -2 and pull out the offset there.
    llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
        VTable, -2, "complete-offset.ptr");
    llvm::LoadInst *Offset = CGF.Builder.CreateLoad(OffsetPtr);
    Offset->setAlignment(CGF.PointerAlignInBytes);

    // Apply the offset.
    llvm::Value *CompletePtr = CGF.Builder.CreateBitCast(Ptr, CGF.Int8PtrTy);
    CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);

    // If we're supposed to call the global delete, make sure we do so
    // even if the destructor throws.
    CGF.pushCallObjectDeleteCleanup(DE->getOperatorDelete(), CompletePtr,
                                    ElementType);
  }

  // FIXME: Provide a source location here even though there's no
  // CXXMemberCallExpr for dtor call.
  CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
  EmitVirtualDestructorCall(CGF, Dtor, DtorType, Ptr, /*CE=*/nullptr);

  if (UseGlobalDelete)
    CGF.PopCleanupBlock();
}

void ItaniumCXXABI::emitRethrow(CodeGenFunction &CGF, bool isNoReturn) {
  // void __cxa_rethrow();

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);

  llvm::Constant *Fn = CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");

  if (isNoReturn)
    CGF.EmitNoreturnRuntimeCallOrInvoke(Fn, None);
  else
    CGF.EmitRuntimeCallOrInvoke(Fn);
}

#if 0 // HLSL Change Starts
static llvm::Constant *getAllocateExceptionFn(CodeGenModule &CGM) {
  // void *__cxa_allocate_exception(size_t thrown_size);

  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.Int8PtrTy, CGM.SizeTy, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
}

static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
  // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
  //                  void (*dest) (void *));

  llvm::Type *Args[3] = { CGM.Int8PtrTy, CGM.Int8PtrTy, CGM.Int8PtrTy };
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, Args, /*IsVarArgs=*/false);

  return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
#endif

void ItaniumCXXABI::emitThrow(CodeGenFunction &CGF, const CXXThrowExpr *E) {
#if 1 // HLSL Change Starts
  llvm_unreachable("HLSL does not support throw.");
#else // HLSL Change Ends
  QualType ThrowType = E->getSubExpr()->getType();
  // Now allocate the exception object.
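  // Illustrative sketch of what this (non-HLSL) path emits for 'throw 42;':
  //   %e = call i8* @__cxa_allocate_exception(i64 4)
  //   ... store 42 into %e ...
  //   call void @__cxa_throw(i8* %e, i8* @_ZTIi, i8* null)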
  llvm::Type *SizeTy = CGF.ConvertType(getContext().getSizeType());
  uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();

  llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(CGM);
  llvm::CallInst *ExceptionPtr = CGF.EmitNounwindRuntimeCall(
      AllocExceptionFn, llvm::ConstantInt::get(SizeTy, TypeSize), "exception");

  CGF.EmitAnyExprToExn(E->getSubExpr(), ExceptionPtr);

  // Now throw the exception.
  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
                                                         /*ForEH=*/true);

  // The address of the destructor. If the exception type has a
  // trivial destructor (or isn't a record), we just pass null.
  llvm::Constant *Dtor = nullptr;
  if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
    CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
    if (!Record->hasTrivialDestructor()) {
      CXXDestructorDecl *DtorD = Record->getDestructor();
      Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
      Dtor = llvm::ConstantExpr::getBitCast(Dtor, CGM.Int8PtrTy);
    }
  }
  if (!Dtor) Dtor = llvm::Constant::getNullValue(CGM.Int8PtrTy);

  llvm::Value *args[] = { ExceptionPtr, TypeInfo, Dtor };
  CGF.EmitNoreturnRuntimeCallOrInvoke(getThrowFn(CGM), args);
#endif // HLSL Change
}

static llvm::Constant *getItaniumDynamicCastFn(CodeGenFunction &CGF) {
  // void *__dynamic_cast(const void *sub,
  //                      const abi::__class_type_info *src,
  //                      const abi::__class_type_info *dst,
  //                      std::ptrdiff_t src2dst_offset);

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Type *PtrDiffTy =
    CGF.ConvertType(CGF.getContext().getPointerDiffType());

  llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };

  llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);

  // Mark the function as nounwind readonly.
  llvm::Attribute::AttrKind FuncAttrs[] = { llvm::Attribute::NoUnwind,
                                            llvm::Attribute::ReadOnly };
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(
      CGF.getLLVMContext(), llvm::AttributeSet::FunctionIndex, FuncAttrs);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast", Attrs);
}

static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
  // void __cxa_bad_cast();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
}

/// \brief Compute the src2dst_offset hint as described in the
/// Itanium C++ ABI [2.9.7]
static CharUnits computeOffsetHint(ASTContext &Context,
                                   const CXXRecordDecl *Src,
                                   const CXXRecordDecl *Dst) {
  CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                     /*DetectVirtual=*/false);

  // If Dst is not derived from Src we can skip the whole computation below and
  // return that Src is not a public base of Dst. Record all inheritance paths.
  if (!Dst->isDerivedFrom(Src, Paths))
    return CharUnits::fromQuantity(-2ULL);

  unsigned NumPublicPaths = 0;
  CharUnits Offset;

  // Now walk all possible inheritance paths.
  for (CXXBasePaths::paths_iterator I = Paths.begin(), E = Paths.end(); I != E;
       ++I) {
    if (I->Access != AS_public) // Ignore non-public inheritance.
      continue;

    ++NumPublicPaths;

    for (CXXBasePath::iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      // If the path contains a virtual base class we can't give any hint.
      // -1: no hint.
      if (J->Base->isVirtual())
        return CharUnits::fromQuantity(-1ULL);

      if (NumPublicPaths > 1) // Won't use offsets, skip computation.
        continue;

      // Accumulate the base class offsets.
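      // Illustrative example: for 'struct A { int a; }; struct B { int b; };
      // struct C : A, B {};', the unique public path from B to C contributes
      // B's offset within C, and that sum becomes the src2dst hint passed to
      // __dynamic_cast.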
      const ASTRecordLayout &L = Context.getASTRecordLayout(J->Class);
      Offset += L.getBaseClassOffset(J->Base->getType()->getAsCXXRecordDecl());
    }
  }

  // -2: Src is not a public base of Dst.
  if (NumPublicPaths == 0)
    return CharUnits::fromQuantity(-2ULL);

  // -3: Src is a multiple public base type but never a virtual base type.
  if (NumPublicPaths > 1)
    return CharUnits::fromQuantity(-3ULL);

  // Otherwise, the Src type is a unique public nonvirtual base type of Dst.
  // Return the offset of Src from the origin of Dst.
  return Offset;
}

static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
  // void __cxa_bad_typeid();
  llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);

  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
}

bool ItaniumCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
                                              QualType SrcRecordTy) {
  return IsDeref;
}

void ItaniumCXXABI::EmitBadTypeidCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadTypeidFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
}

llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
                                       QualType SrcRecordTy,
                                       llvm::Value *ThisPtr,
                                       llvm::Type *StdTypeInfoPtrTy) {
  llvm::Value *Value =
      CGF.GetVTablePtr(ThisPtr, StdTypeInfoPtrTy->getPointerTo());

  // Load the type info.
  Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
  return CGF.Builder.CreateLoad(Value);
}

bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
                                                       QualType SrcRecordTy) {
  return SrcIsPtr;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastCall(
    CodeGenFunction &CGF, llvm::Value *Value, QualType SrcRecordTy,
    QualType DestTy, QualType DestRecordTy, llvm::BasicBlock *CastEnd) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  llvm::Value *SrcRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
  llvm::Value *DestRTTI =
      CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());

  // Compute the offset hint.
  const CXXRecordDecl *SrcDecl = SrcRecordTy->getAsCXXRecordDecl();
  const CXXRecordDecl *DestDecl = DestRecordTy->getAsCXXRecordDecl();
  llvm::Value *OffsetHint = llvm::ConstantInt::get(
      PtrDiffLTy,
      computeOffsetHint(CGF.getContext(), SrcDecl, DestDecl).getQuantity());

  // Emit the call to __dynamic_cast.
  Value = CGF.EmitCastToVoidPtr(Value);

  llvm::Value *args[] = {Value, SrcRTTI, DestRTTI, OffsetHint};
  Value = CGF.EmitNounwindRuntimeCall(getItaniumDynamicCastFn(CGF), args);
  Value = CGF.Builder.CreateBitCast(Value, DestLTy);

  /// C++ [expr.dynamic.cast]p9:
  ///   A failed cast to reference type throws std::bad_cast
  if (DestTy->isReferenceType()) {
    llvm::BasicBlock *BadCastBlock =
        CGF.createBasicBlock("dynamic_cast.bad_cast");

    llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
    CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);

    CGF.EmitBlock(BadCastBlock);
    EmitBadCastCall(CGF);
  }

  return Value;
}

llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
                                                  llvm::Value *Value,
                                                  QualType SrcRecordTy,
                                                  QualType DestTy) {
  llvm::Type *PtrDiffLTy =
      CGF.ConvertType(CGF.getContext().getPointerDiffType());
  llvm::Type *DestLTy = CGF.ConvertType(DestTy);

  // Get the vtable pointer.
  llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());

  // Get the offset-to-top from the vtable.
  llvm::Value *OffsetToTop =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
  OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");

  // Finally, add the offset to the pointer.
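  // For example (illustrative): casting to void* from a secondary base
  // subobject that lives 8 bytes into the complete object loads an
  // offset-to-top of -8 and rewinds the pointer to the start of the object.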
  Value = CGF.EmitCastToVoidPtr(Value);
  Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);

  return CGF.Builder.CreateBitCast(Value, DestLTy);
}

bool ItaniumCXXABI::EmitBadCastCall(CodeGenFunction &CGF) {
  llvm::Value *Fn = getBadCastFn(CGF);
  CGF.EmitRuntimeCallOrInvoke(Fn).setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  return true;
}

llvm::Value *
ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
                                         llvm::Value *This,
                                         const CXXRecordDecl *ClassDecl,
                                         const CXXRecordDecl *BaseClassDecl) {
  llvm::Value *VTablePtr = CGF.GetVTablePtr(This, CGM.Int8PtrTy);
  CharUnits VBaseOffsetOffset =
      CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
                                                               BaseClassDecl);

  llvm::Value *VBaseOffsetPtr =
    CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
                                   "vbase.offset.ptr");
  VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
                                             CGM.PtrDiffTy->getPointerTo());

  llvm::Value *VBaseOffset =
    CGF.Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");

  return VBaseOffset;
}

void ItaniumCXXABI::EmitCXXConstructors(const CXXConstructorDecl *D) {
  // Just make sure we're in sync with TargetCXXABI.
  assert(CGM.getTarget().getCXXABI().hasConstructorVariants());

  // The constructor used for constructing this as a base class;
  // ignores virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Ctor_Base));

  // The constructor used for constructing this as a complete class;
  // constructs the virtual bases, then calls the base constructor.
  if (!D->getParent()->isAbstract()) {
    // We don't need to emit the complete ctor if the class is abstract.
    CGM.EmitGlobal(GlobalDecl(D, Ctor_Complete));
  }
}

void
ItaniumCXXABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
                                      SmallVectorImpl<CanQualType> &ArgTys) {
  ASTContext &Context = getContext();

  // All parameters are already in place except VTT, which goes after 'this'.
  // These are Clang types, so we don't need to worry about sret yet.

  // Check if we need to add a VTT parameter (which has type void **).
  if (T == StructorType::Base && MD->getParent()->getNumVBases() != 0)
    ArgTys.insert(ArgTys.begin() + 1,
                  Context.getPointerType(Context.VoidPtrTy));
}

void ItaniumCXXABI::EmitCXXDestructors(const CXXDestructorDecl *D) {
  // The destructor used for destructing this as a base class; ignores
  // virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Base));

  // The destructor used for destructing this as a most-derived class;
  // calls the base destructor and then destroys any virtual bases.
  CGM.EmitGlobal(GlobalDecl(D, Dtor_Complete));

  // The destructor in a virtual table is always a 'deleting'
  // destructor, which calls the complete destructor and then uses the
  // appropriate operator delete.
  if (D->isVirtual())
    CGM.EmitGlobal(GlobalDecl(D, Dtor_Deleting));
}

void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
                                              QualType &ResTy,
                                              FunctionArgList &Params) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
  assert(isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD));

  // Check if we need a VTT parameter as well.
  if (NeedsVTTParameter(CGF.CurGD)) {
    ASTContext &Context = getContext();

    // FIXME: avoid the fake decl
    QualType T = Context.getPointerType(Context.VoidPtrTy);
    ImplicitParamDecl *VTTDecl
      = ImplicitParamDecl::Create(Context, nullptr, MD->getLocation(),
                                  &Context.Idents.get("vtt"), T);
    Params.insert(Params.begin() + 1, VTTDecl);
    getStructorImplicitParamDecl(CGF) = VTTDecl;
  }
}

void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
  /// Initialize the 'this' slot.
  EmitThisParam(CGF);

  /// Initialize the 'vtt' slot if needed.
  if (getStructorImplicitParamDecl(CGF)) {
    getStructorImplicitParamValue(CGF) = CGF.Builder.CreateLoad(
        CGF.GetAddrOfLocalVar(getStructorImplicitParamDecl(CGF)), "vtt");
  }

  /// If this is a function that the ABI specifies returns 'this', initialize
  /// the return slot to 'this' at the start of the function.
  ///
  /// Unlike the setting of return types, this is done within the ABI
  /// implementation instead of by clients of CGCXXABI because:
  /// 1) getThisValue is currently protected
  /// 2) in theory, an ABI could implement 'this' returns some other way;
  ///    HasThisReturn only specifies a contract, not the implementation
  if (HasThisReturn(CGF.CurGD))
    CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
}

unsigned ItaniumCXXABI::addImplicitConstructorArgs(
    CodeGenFunction &CGF, const CXXConstructorDecl *D, CXXCtorType Type,
    bool ForVirtualBase, bool Delegating, CallArgList &Args) {
  if (!NeedsVTTParameter(GlobalDecl(D, Type)))
    return 0;

  // Insert the implicit 'vtt' argument as the second argument.
  llvm::Value *VTT =
      CGF.GetVTTParameter(GlobalDecl(D, Type), ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
  Args.insert(Args.begin() + 1,
              CallArg(RValue::get(VTT), VTTTy, /*needscopy=*/false));
  return 1;  // Added one arg.
}

void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
                                       const CXXDestructorDecl *DD,
                                       CXXDtorType Type, bool ForVirtualBase,
                                       bool Delegating, llvm::Value *This) {
  GlobalDecl GD(DD, Type);
  llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
  QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);

  llvm::Value *Callee = nullptr;
  if (getContext().getLangOpts().AppleKext)
    Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());

  if (!Callee)
    Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));

  CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(), This, VTT,
                                  VTTTy, nullptr);
}

void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
                                          const CXXRecordDecl *RD) {
  llvm::GlobalVariable *VTable = getAddrOfVTable(RD, CharUnits());
  if (VTable->hasInitializer())
    return;

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
  llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
  llvm::Constant *RTTI =
      CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));

  // Create and set the initializer.
  llvm::Constant *Init = CGVT.CreateVTableInitializer(
      RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(),
      VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks(), RTTI);
  VTable->setInitializer(Init);

  // Set the correct linkage.
  VTable->setLinkage(Linkage);

  if (CGM.supportsCOMDAT() && VTable->isWeakForLinker())
    VTable->setComdat(CGM.getModule().getOrInsertComdat(VTable->getName()));

  // Set the right visibility.
  CGM.setGlobalVisibility(VTable, RD);

  // Use pointer alignment for the vtable. Otherwise we would align them based
  // on the size of the initializer which doesn't make sense as only single
  // values are read.
  unsigned PAlign = CGM.getTarget().getPointerAlign(0);
  VTable->setAlignment(getContext().toCharUnitsFromBits(PAlign).getQuantity());

  // If this is the magic class __cxxabiv1::__fundamental_type_info,
  // we will emit the typeinfo for the fundamental types. This is the
  // same behaviour as GCC.
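  // (Illustrative: the class matched below is the one a C++ runtime declares
  // roughly as 'namespace __cxxabiv1 { struct __fundamental_type_info; }';
  // emitting its vtable in a TU is the runtime's cue to also emit type_info
  // objects for all the fundamental types there.)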
  const DeclContext *DC = RD->getDeclContext();
  if (RD->getIdentifier() &&
      RD->getIdentifier()->isStr("__fundamental_type_info") &&
      isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
      cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
      DC->getParent()->isTranslationUnit())
    EmitFundamentalRTTIDescriptors();

  CGM.EmitVTableBitSetEntries(VTable, VTLayout);
}

llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructor(
    CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
    const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) {
  bool NeedsVTTParam = CGM.getCXXABI().NeedsVTTParameter(CGF.CurGD);
  NeedsVirtualOffset = (NeedsVTTParam && NearestVBase);

  llvm::Value *VTableAddressPoint;
  if (NeedsVTTParam && (Base.getBase()->getNumVBases() || NearestVBase)) {
    // Get the secondary vpointer index.
    uint64_t VirtualPointerIndex =
        CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);

    /// Load the VTT.
    llvm::Value *VTT = CGF.LoadCXXVTT();
    if (VirtualPointerIndex)
      VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);

    // And load the address point from the VTT.
    VTableAddressPoint = CGF.Builder.CreateLoad(VTT);
  } else {
    llvm::Constant *VTable =
        CGM.getCXXABI().getAddrOfVTable(VTableClass, CharUnits());
    uint64_t AddressPoint = CGM.getItaniumVTableContext()
                                .getVTableLayout(VTableClass)
                                .getAddressPoint(Base);
    VTableAddressPoint =
        CGF.Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
  }

  return VTableAddressPoint;
}

llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
    BaseSubobject Base, const CXXRecordDecl *VTableClass) {
  auto *VTable = getAddrOfVTable(VTableClass, CharUnits());

  // Find the appropriate vtable within the vtable group.
  uint64_t AddressPoint = CGM.getItaniumVTableContext()
                              .getVTableLayout(VTableClass)
                              .getAddressPoint(Base);
  llvm::Value *Indices[] = {
    llvm::ConstantInt::get(CGM.Int64Ty, 0),
    llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
  };

  return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable->getValueType(),
                                                      VTable, Indices);
}

llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
                                                     CharUnits VPtrOffset) {
  assert(VPtrOffset.isZero() && "Itanium ABI only supports zero vptr offsets");

  llvm::GlobalVariable *&VTable = VTables[RD];
  if (VTable)
    return VTable;

  // Queue up this v-table for possible deferred emission.
  CGM.addDeferredVTable(RD);

  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  getMangleContext().mangleCXXVTable(RD, Out);
  Out.flush();
  StringRef Name = OutName.str();

  ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
  llvm::ArrayType *ArrayType = llvm::ArrayType::get(
      CGM.Int8PtrTy, VTContext.getVTableLayout(RD).getNumVTableComponents());

  VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
      Name, ArrayType, llvm::GlobalValue::ExternalLinkage);
  VTable->setUnnamedAddr(true);

  if (RD->hasAttr<DLLImportAttr>())
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
  else if (RD->hasAttr<DLLExportAttr>())
    VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);

  return VTable;
}

llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
                                                      GlobalDecl GD,
                                                      llvm::Value *This,
                                                      llvm::Type *Ty,
                                                      SourceLocation Loc) {
  GD = GD.getCanonicalDecl();
  Ty = Ty->getPointerTo()->getPointerTo();
  llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);

  if (CGF.SanOpts.has(SanitizerKind::CFIVCall))
    CGF.EmitVTablePtrCheckForCall(cast<CXXMethodDecl>(GD.getDecl()), VTable,
                                  CodeGenFunction::CFITCK_VCall, Loc);

  uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
  llvm::Value *VFuncPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
  return CGF.Builder.CreateLoad(VFuncPtr);
}

llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
    CodeGenFunction &CGF, const CXXDestructorDecl *Dtor, CXXDtorType DtorType,
    llvm::Value *This, const CXXMemberCallExpr *CE) {
  assert(CE == nullptr || CE->arg_begin() == CE->arg_end());
  assert(DtorType == Dtor_Deleting || DtorType == Dtor_Complete);

  const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
      Dtor, getFromDtorType(DtorType));
  llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
  llvm::Value *Callee =
      getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
                                CE ? CE->getLocStart() : SourceLocation());

  CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(), This,
                                  /*ImplicitParam=*/nullptr, QualType(), CE);
  return nullptr;
}

void ItaniumCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
  CodeGenVTables &VTables = CGM.getVTables();
  llvm::GlobalVariable *VTT = VTables.GetAddrOfVTT(RD);
  VTables.EmitVTTDefinition(VTT, CGM.getVTableLinkage(RD), RD);
}

static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
                                          llvm::Value *Ptr,
                                          int64_t NonVirtualAdjustment,
                                          int64_t VirtualAdjustment,
                                          bool IsReturnAdjustment) {
  if (!NonVirtualAdjustment && !VirtualAdjustment)
    return Ptr;

  llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
  llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);

  if (NonVirtualAdjustment && !IsReturnAdjustment) {
    // Perform the non-virtual adjustment for a base-to-derived cast.
    V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
  }

  if (VirtualAdjustment) {
    llvm::Type *PtrDiffTy =
        CGF.ConvertType(CGF.getContext().getPointerDiffType());

    // Perform the virtual adjustment.
    llvm::Value *VTablePtrPtr =
        CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());

    llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);

    llvm::Value *OffsetPtr =
        CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);

    OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());

    // Load the adjustment offset from the vtable.
    llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);

    // Adjust our pointer.
    V = CGF.Builder.CreateInBoundsGEP(V, Offset);
  }

  if (NonVirtualAdjustment && IsReturnAdjustment) {
    // Perform the non-virtual adjustment for a derived-to-base cast.
    V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
  }

  // Cast back to the original type.
  return CGF.Builder.CreateBitCast(V, Ptr->getType());
}

llvm::Value *ItaniumCXXABI::performThisAdjustment(CodeGenFunction &CGF,
                                                  llvm::Value *This,
                                                  const ThisAdjustment &TA) {
  return performTypeAdjustment(CGF, This, TA.NonVirtual,
                               TA.Virtual.Itanium.VCallOffsetOffset,
                               /*IsReturnAdjustment=*/false);
}

llvm::Value *
ItaniumCXXABI::performReturnAdjustment(CodeGenFunction &CGF, llvm::Value *Ret,
                                       const ReturnAdjustment &RA) {
  return performTypeAdjustment(CGF, Ret, RA.NonVirtual,
                               RA.Virtual.Itanium.VBaseOffsetOffset,
                               /*IsReturnAdjustment=*/true);
}

void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
                                    RValue RV, QualType ResultType) {
  if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
    return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);

  // Destructor thunks in the ARM ABI have indeterminate results.
  llvm::Type *T =
    cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
  RValue Undef = RValue::get(llvm::UndefValue::get(T));
  return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
}

/************************** Array allocation cookies **************************/

CharUnits ItaniumCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // The array cookie is a size_t; pad that up to the element alignment.
  // The cookie is actually right-justified in that space.
  return std::max(CharUnits::fromQuantity(CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                                  llvm::Value *NewPtr,
                                                  llvm::Value *NumElements,
                                                  const CXXNewExpr *expr,
                                                  QualType ElementType) {
  assert(requiresArrayCookie(expr));

  unsigned AS = NewPtr->getType()->getPointerAddressSpace();

  ASTContext &Ctx = getContext();
  QualType SizeTy = Ctx.getSizeType();
  CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);

  // The size of the cookie.
  CharUnits CookieSize =
    std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
  assert(CookieSize == getArrayCookieSizeImpl(ElementType));

  // Compute an offset to the cookie.
  llvm::Value *CookiePtr = NewPtr;
  CharUnits CookieOffset = CookieSize - SizeSize;
  if (!CookieOffset.isZero())
    CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
                                                CookieOffset.getQuantity());

  // Write the number of elements into the appropriate slot.
  llvm::Type *NumElementsTy = CGF.ConvertType(SizeTy)->getPointerTo(AS);
  llvm::Value *NumElementsPtr =
      CGF.Builder.CreateBitCast(CookiePtr, NumElementsTy);
  llvm::Instruction *SI = CGF.Builder.CreateStore(NumElements, NumElementsPtr);

  if (CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) && AS == 0 &&
      expr->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
    // The store to the CookiePtr does not need to be instrumented.
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(SI);
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, NumElementsTy, false);
    llvm::Constant *F =
        CGM.CreateRuntimeFunction(FTy, "__asan_poison_cxx_array_cookie");
    CGF.Builder.CreateCall(F, NumElementsPtr);
  }

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
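  // (Illustrative values: on a 64-bit target, 'new double[n]' gets an 8-byte
  // cookie holding n immediately before the array data; for an element type
  // with 16-byte alignment the cookie pads to 16 bytes, with n stored
  // right-justified in the last 8.)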
  return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
                                                CookieSize.getQuantity());
}

llvm::Value *ItaniumCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                                llvm::Value *allocPtr,
                                                CharUnits cookieSize) {
  // The element size is right-justified in the cookie.
  llvm::Value *numElementsPtr = allocPtr;
  CharUnits numElementsOffset =
    cookieSize - CharUnits::fromQuantity(CGF.SizeSizeInBytes);
  if (!numElementsOffset.isZero())
    numElementsPtr =
      CGF.Builder.CreateConstInBoundsGEP1_64(numElementsPtr,
                                             numElementsOffset.getQuantity());

  unsigned AS = allocPtr->getType()->getPointerAddressSpace();
  numElementsPtr =
    CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
  if (!CGM.getLangOpts().Sanitize.has(SanitizerKind::Address) || AS != 0)
    return CGF.Builder.CreateLoad(numElementsPtr);
  // In asan mode emit a function call instead of a regular load and let the
  // run-time deal with it: if the shadow is properly poisoned return the
  // cookie, otherwise return 0 to avoid an infinite loop calling DTORs.
  // We can't simply ignore this load using nosanitize metadata because
  // the metadata may be lost.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.SizeTy, CGF.SizeTy->getPointerTo(0), false);
  llvm::Constant *F =
      CGM.CreateRuntimeFunction(FTy, "__asan_load_cxx_array_cookie");
  return CGF.Builder.CreateCall(F, numElementsPtr);
}

CharUnits ARMCXXABI::getArrayCookieSizeImpl(QualType elementType) {
  // ARM says that the cookie is always:
  //   struct array_cookie {
  //     std::size_t element_size; // element_size != 0
  //     std::size_t element_count;
  //   };
  // But the base ABI doesn't give anything an alignment greater than
  // 8, so we can dismiss this as typical ABI-author blindness to
  // actual language complexity and round up to the element alignment.
  return std::max(CharUnits::fromQuantity(2 * CGM.SizeSizeInBytes),
                  CGM.getContext().getTypeAlignInChars(elementType));
}

llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
                                              llvm::Value *newPtr,
                                              llvm::Value *numElements,
                                              const CXXNewExpr *expr,
                                              QualType elementType) {
  assert(requiresArrayCookie(expr));

  // NewPtr is a char*, but we generalize to arbitrary addrspaces.
  unsigned AS = newPtr->getType()->getPointerAddressSpace();

  // The cookie is always at the start of the buffer.
  llvm::Value *cookie = newPtr;

  // The first element is the element size.
  cookie = CGF.Builder.CreateBitCast(cookie, CGF.SizeTy->getPointerTo(AS));
  llvm::Value *elementSize = llvm::ConstantInt::get(CGF.SizeTy,
                 getContext().getTypeSizeInChars(elementType).getQuantity());
  CGF.Builder.CreateStore(elementSize, cookie);

  // The second element is the element count.
  cookie = CGF.Builder.CreateConstInBoundsGEP1_32(CGF.SizeTy, cookie, 1);
  CGF.Builder.CreateStore(numElements, cookie);

  // Finally, compute a pointer to the actual data buffer by skipping
  // over the cookie completely.
  CharUnits cookieSize = ARMCXXABI::getArrayCookieSizeImpl(elementType);
  return CGF.Builder.CreateConstInBoundsGEP1_64(newPtr,
                                                cookieSize.getQuantity());
}

llvm::Value *ARMCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
                                            llvm::Value *allocPtr,
                                            CharUnits cookieSize) {
  // The number of elements is at offset sizeof(size_t) relative to
  // the allocated pointer.
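  // (Illustrative: for the ARM cookie layout above, 'new T[n]' stores
  // sizeof(T) at offset 0 and n at offset sizeof(size_t), so the count is
  // always one size_t past the allocation pointer.)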
  llvm::Value *numElementsPtr
    = CGF.Builder.CreateConstInBoundsGEP1_64(allocPtr, CGF.SizeSizeInBytes);

  unsigned AS = allocPtr->getType()->getPointerAddressSpace();
  numElementsPtr =
    CGF.Builder.CreateBitCast(numElementsPtr, CGF.SizeTy->getPointerTo(AS));
  return CGF.Builder.CreateLoad(numElementsPtr);
}

/*********************** Static local initialization **************************/

static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // int __cxa_guard_acquire(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
                            GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
                                         llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_release(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
                                       llvm::PointerType *GuardPtrTy) {
  // void __cxa_guard_abort(__guard *guard_object);
  llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
                                   llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                                 llvm::Attribute::NoUnwind));
}

namespace {
  struct CallGuardAbort : EHScopeStack::Cleanup {
    llvm::GlobalVariable *Guard;
    CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitNounwindRuntimeCall(getGuardAbortFn(CGF.CGM, Guard->getType()),
                                  Guard);
    }
  };
}

/// The ARM code here follows the Itanium code closely enough that we
/// just special-case it at particular places.
void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
                                    const VarDecl &D,
                                    llvm::GlobalVariable *var,
                                    bool shouldPerformInit) {
  CGBuilderTy &Builder = CGF.Builder;

  // We only need to use thread-safe statics for local non-TLS variables;
  // global initialization is always single-threaded.
  bool threadsafe = getContext().getLangOpts().ThreadsafeStatics &&
                    D.isLocalVarDecl() && !D.getTLSKind();

  // If we have a global variable with internal linkage and thread-safe statics
  // are disabled, we can just let the guard variable be of type i8.
  bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();

  llvm::IntegerType *guardTy;
  if (useInt8GuardVariable) {
    guardTy = CGF.Int8Ty;
  } else {
    // Guard variables are 64 bits in the generic ABI and size width on ARM
    // (i.e. 32-bit on AArch32, 64-bit on AArch64).
    guardTy = (UseARMGuardVarABI ? CGF.SizeTy : CGF.Int64Ty);
  }
  llvm::PointerType *guardPtrTy = guardTy->getPointerTo();

  // Create the guard variable if we don't already have it (as we
  // might if we're double-emitting this function body).
  llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
  if (!guard) {
    // Mangle the name for the guard.
    SmallString<256> guardName;
    {
      llvm::raw_svector_ostream out(guardName);
      getMangleContext().mangleStaticGuardVariable(&D, out);
      out.flush();
    }

    // Create the guard variable with a zero-initializer.
    // Just absorb linkage and visibility from the guarded variable.
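    // (Illustrative: for 'void f() { static S s; }' the guard name mangled
    // above is "_ZGVZ1fvE1s" -- the "_ZGV" prefix plus the mangling of the
    // guarded variable itself.)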
    guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
                                     false, var->getLinkage(),
                                     llvm::ConstantInt::get(guardTy, 0),
                                     guardName.str());
    guard->setVisibility(var->getVisibility());
    // If the variable is thread-local, so is its guard variable.
    guard->setThreadLocalMode(var->getThreadLocalMode());

    // The ABI says: It is suggested that it be emitted in the same COMDAT
    // group as the associated data object
    llvm::Comdat *C = var->getComdat();
    if (!D.isLocalVarDecl() && C) {
      guard->setComdat(C);
      CGF.CurFn->setComdat(C);
    } else if (CGM.supportsCOMDAT() && guard->isWeakForLinker()) {
      guard->setComdat(CGM.getModule().getOrInsertComdat(guard->getName()));
    }

    CGM.setStaticLocalDeclGuardAddress(&D, guard);
  }

  // Test whether the variable has completed initialization.
  //
  // Itanium C++ ABI 3.3.2:
  //   The following is pseudo-code showing how these functions can be used:
  //     if (obj_guard.first_byte == 0) {
  //       if ( __cxa_guard_acquire (&obj_guard) ) {
  //         try {
  //           ... initialize the object ...;
  //         } catch (...) {
  //            __cxa_guard_abort (&obj_guard);
  //            throw;
  //         }
  //         ... queue object destructor with __cxa_atexit() ...;
  //         __cxa_guard_release (&obj_guard);
  //       }
  //     }

  // Load the first byte of the guard variable.
  llvm::LoadInst *LI =
      Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
  LI->setAlignment(1);

  // Itanium ABI:
  //   An implementation supporting thread-safety on multiprocessor
  //   systems must also guarantee that references to the initialized
  //   object do not occur before the load of the initialization flag.
  //
  // In LLVM, we do this by marking the load Acquire.
  if (threadsafe)
    LI->setAtomic(llvm::Acquire);

  // For ARM, we should only check the first bit, rather than the entire byte:
  //
  // ARM C++ ABI 3.2.3.1:
  //   To support the potential use of initialization guard variables
  //   as semaphores that are the target of ARM SWP and LDREX/STREX
  //   synchronizing instructions we define a static initialization
  //   guard variable to be a 4-byte aligned, 4-byte word with the
  //   following inline access protocol.
  //     #define INITIALIZED 1
  //     if ((obj_guard & INITIALIZED) != INITIALIZED) {
  //       if (__cxa_guard_acquire(&obj_guard))
  //         ...
  //     }
  //
  // and similarly for ARM64:
  //
  // ARM64 C++ ABI 3.2.2:
  //   This ABI instead only specifies the value bit 0 of the static guard
  //   variable; all other bits are platform defined. Bit 0 shall be 0 when the
  //   variable is not initialized and 1 when it is.
  llvm::Value *V =
      (UseARMGuardVarABI && !useInt8GuardVariable)
          ? Builder.CreateAnd(LI, llvm::ConstantInt::get(CGM.Int8Ty, 1))
          : LI;
  llvm::Value *isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");

  llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
  llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");

  // Check if the first byte of the guard variable is zero.
  Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);

  CGF.EmitBlock(InitCheckBlock);

  // Variables used when coping with thread-safe statics and exceptions.
  if (threadsafe) {
    // Call __cxa_guard_acquire.
    llvm::Value *V
      = CGF.EmitNounwindRuntimeCall(getGuardAcquireFn(CGM, guardPtrTy), guard);

    llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");

    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
                         InitBlock, EndBlock);

    // Call __cxa_guard_abort along the exceptional edge.
    CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);

    CGF.EmitBlock(InitBlock);
  }

  // Emit the initializer and add a global destructor if appropriate.
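  // (Illustrative flow for the thread-safe path: __cxa_guard_acquire returned
  // nonzero, so we run the initializer next, register the destructor via
  // __cxa_atexit, and __cxa_guard_release publishes the initialized state;
  // the plain store of 1 below is the non-thread-safe fallback.)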
  CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);

  if (threadsafe) {
    // Pop the guard-abort cleanup if we pushed one.
    CGF.PopCleanupBlock();

    // Call __cxa_guard_release. This cannot throw.
    CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
  } else {
    Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
  }

  CGF.EmitBlock(EndBlock);
}

/// Register a global destructor using __cxa_atexit.
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
                                        llvm::Constant *dtor,
                                        llvm::Constant *addr,
                                        bool TLS) {
  const char *Name = "__cxa_atexit";
  if (TLS) {
    const llvm::Triple &T = CGF.getTarget().getTriple();
    Name = T.isMacOSX() ? "_tlv_atexit" : "__cxa_thread_atexit";
  }

  // We're assuming that the destructor function is something we can
  // reasonably call with the default CC. Go ahead and cast it to the
  // right prototype.
  llvm::Type *dtorTy =
    llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();

  // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
  llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(CGF.IntTy, paramTys, false);

  // Fetch the actual function.
  llvm::Constant *atexit = CGF.CGM.CreateRuntimeFunction(atexitTy, Name);
  if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
    fn->setDoesNotThrow();

  // Create a variable that binds the atexit to this shared object.
  llvm::Constant *handle =
    CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(dtor, dtorTy),
    llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
    handle
  };
  CGF.EmitNounwindRuntimeCall(atexit, args);
}

/// Register a global destructor as best as we know how.
void ItaniumCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
                                       const VarDecl &D,
                                       llvm::Constant *dtor,
                                       llvm::Constant *addr) {
  // Use __cxa_atexit if available.
  if (CGM.getCodeGenOpts().CXAAtExit)
    return emitGlobalDtorWithCXAAtExit(CGF, dtor, addr, D.getTLSKind());

  if (D.getTLSKind())
    CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");

  // In Apple kexts, we want to add a global destructor entry.
  // FIXME: shouldn't this be guarded by some variable?
  if (CGM.getLangOpts().AppleKext) {
    // Generate a global destructor entry.
    return CGM.AddCXXDtorEntry(dtor, addr);
  }

  CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}

static bool isThreadWrapperReplaceable(const VarDecl *VD,
                                       CodeGen::CodeGenModule &CGM) {
  assert(!VD->isStaticLocal() && "static local VarDecls don't need wrappers!");
  // OS X prefers to have references to thread local variables to go through
  // the thread wrapper instead of directly referencing the backing variable.
  return VD->getTLSKind() == VarDecl::TLS_Dynamic &&
         CGM.getTarget().getTriple().isMacOSX();
}

/// Get the appropriate linkage for the wrapper function. This is essentially
/// the weak form of the variable's linkage; every translation unit which needs
/// the wrapper emits a copy, and we want the linker to merge them.
static llvm::GlobalValue::LinkageTypes
getThreadLocalWrapperLinkage(const VarDecl *VD, CodeGen::CodeGenModule &CGM) {
  llvm::GlobalValue::LinkageTypes VarLinkage =
      CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false);

  // For internal linkage variables, we don't need an external or weak wrapper.
  if (llvm::GlobalValue::isLocalLinkage(VarLinkage))
    return VarLinkage;

  // If the thread wrapper is replaceable, give it appropriate linkage.
  if (isThreadWrapperReplaceable(VD, CGM)) {
    if (llvm::GlobalVariable::isLinkOnceLinkage(VarLinkage) ||
        llvm::GlobalVariable::isWeakODRLinkage(VarLinkage))
      return llvm::GlobalVariable::WeakAnyLinkage;
    return VarLinkage;
  }
  return llvm::GlobalValue::WeakODRLinkage;
}

llvm::Function *
ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
                                             llvm::Value *Val) {
  // Mangle the name for the thread_local wrapper function.
  SmallString<256> WrapperName;
  {
    llvm::raw_svector_ostream Out(WrapperName);
    getMangleContext().mangleItaniumThreadLocalWrapper(VD, Out);
    Out.flush();
  }

  if (llvm::Value *V = CGM.getModule().getNamedValue(WrapperName))
    return cast<llvm::Function>(V);

  llvm::Type *RetTy = Val->getType();
  if (VD->getType()->isReferenceType())
    RetTy = RetTy->getPointerElementType();

  llvm::FunctionType *FnTy = llvm::FunctionType::get(RetTy, false);
  llvm::Function *Wrapper =
      llvm::Function::Create(FnTy, getThreadLocalWrapperLinkage(VD, CGM),
                             WrapperName.str(), &CGM.getModule());
  // Always resolve references to the wrapper at link time.
  if (!Wrapper->hasLocalLinkage() && !isThreadWrapperReplaceable(VD, CGM))
    Wrapper->setVisibility(llvm::GlobalValue::HiddenVisibility);
  return Wrapper;
}

void ItaniumCXXABI::EmitThreadLocalInitFuncs(
    CodeGenModule &CGM,
    ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
        CXXThreadLocals,
    ArrayRef<llvm::Function *> CXXThreadLocalInits,
    ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) {
  llvm::Function *InitFunc = nullptr;
  if (!CXXThreadLocalInits.empty()) {
    // Generate a guarded initialization function.
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
    InitFunc = CGM.CreateGlobalInitOrDestructFunction(FTy, "__tls_init",
                                                      SourceLocation(),
                                                      /*TLS=*/true);
    llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
        llvm::GlobalVariable::InternalLinkage,
        llvm::ConstantInt::get(CGM.Int8Ty, 0), "__tls_guard");
    Guard->setThreadLocal(true);
    CodeGenFunction(CGM)
        .GenerateCXXGlobalInitFunc(InitFunc, CXXThreadLocalInits, Guard);
  }
  for (unsigned I = 0, N = CXXThreadLocals.size(); I != N; ++I) {
    const VarDecl *VD = CXXThreadLocals[I].first;
    llvm::GlobalVariable *Var = CXXThreadLocals[I].second;

    // Some targets require that all access to thread local variables go
    // through the thread wrapper. This means that we cannot attempt to create
    // a thread wrapper or a thread helper.
    if (isThreadWrapperReplaceable(VD, CGM) && !VD->hasDefinition())
      continue;

    // Mangle the name for the thread_local initialization function.
    SmallString<256> InitFnName;
    {
      llvm::raw_svector_ostream Out(InitFnName);
      getMangleContext().mangleItaniumThreadLocalInit(VD, Out);
      Out.flush();
    }

    // If we have a definition for the variable, emit the initialization
    // function as an alias to the global Init function (if any). Otherwise,
    // produce a declaration of the initialization function.
    llvm::GlobalValue *Init = nullptr;
    bool InitIsInitFunc = false;
    if (VD->hasDefinition()) {
      InitIsInitFunc = true;
      if (InitFunc)
        Init = llvm::GlobalAlias::create(Var->getLinkage(), InitFnName.str(),
                                         InitFunc);
    } else {
      // Emit a weak global function referring to the initialization function.
      // This function will not exist if the TU defining the thread_local
      // variable in question does not need any dynamic initialization for
      // its thread_local variables.
      llvm::FunctionType *FnTy = llvm::FunctionType::get(CGM.VoidTy, false);
      Init = llvm::Function::Create(
          FnTy, llvm::GlobalVariable::ExternalWeakLinkage, InitFnName.str(),
          &CGM.getModule());
    }

    if (Init)
      Init->setVisibility(Var->getVisibility());

    llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Var);
    llvm::LLVMContext &Context = CGM.getModule().getContext();
    llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
    CGBuilderTy Builder(Entry);
    if (InitIsInitFunc) {
      if (Init)
        Builder.CreateCall(Init);
    } else {
      // Don't know whether we have an init function. Call it if it exists.
      llvm::Value *Have = Builder.CreateIsNotNull(Init);
      llvm::BasicBlock *InitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      llvm::BasicBlock *ExitBB = llvm::BasicBlock::Create(Context, "", Wrapper);
      Builder.CreateCondBr(Have, InitBB, ExitBB);

      Builder.SetInsertPoint(InitBB);
      Builder.CreateCall(Init);
      Builder.CreateBr(ExitBB);

      Builder.SetInsertPoint(ExitBB);
    }

    // For a reference, the result of the wrapper function is a pointer to
    // the referenced object.
    llvm::Value *Val = Var;
    if (VD->getType()->isReferenceType()) {
      llvm::LoadInst *LI = Builder.CreateLoad(Val);
      LI->setAlignment(CGM.getContext().getDeclAlign(VD).getQuantity());
      Val = LI;
    }
    if (Val->getType() != Wrapper->getReturnType())
      Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
          Val, Wrapper->getReturnType(), "");
    Builder.CreateRet(Val);
  }
}

LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
                                                   const VarDecl *VD,
                                                   QualType LValType) {
  QualType T = VD->getType();
  llvm::Type *Ty = CGF.getTypes().ConvertTypeForMem(T);
  llvm::Value *Val = CGF.CGM.GetAddrOfGlobalVar(VD, Ty);
  llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);

  Val = CGF.Builder.CreateCall(Wrapper);

  LValue LV;
  if (VD->getType()->isReferenceType())
    LV = CGF.MakeNaturalAlignAddrLValue(Val, LValType);
  else
    LV = CGF.MakeAddrLValue(Val, LValType, CGF.getContext().getDeclAlign(VD));
  // FIXME: need setObjCGCLValueClass?
  return LV;
}

/// Return whether the given global decl needs a VTT parameter, which it does
/// if it's a base constructor or destructor with virtual bases.
bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

  // We don't have any virtual bases, just return early.
  if (!MD->getParent()->getNumVBases())
    return false;

  // Check if we have a base constructor.
  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
    return true;

  // Check if we have a base destructor.
  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
    return true;

  return false;
}

namespace {
class ItaniumRTTIBuilder {
  CodeGenModule &CGM;  // Per-module state.
  llvm::LLVMContext &VMContext;
  const ItaniumCXXABI &CXXABI;  // Per-module state.

  /// Fields - The fields of the RTTI descriptor currently being built.
  SmallVector<llvm::Constant *, 16> Fields;

  /// GetAddrOfTypeName - Returns the mangled type name of the given type.
  llvm::GlobalVariable *
  GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);

  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
  /// descriptor of the given type.
  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);

  /// BuildVTablePointer - Build the vtable pointer for the given type.
  void BuildVTablePointer(const Type *Ty);

  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
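  /// (For example, 'struct B { virtual ~B(); }; struct D : B {};' gives D an
  /// __si_class_type_info whose base-type field points at B's type_info.)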
  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
  /// classes with bases that do not satisfy the abi::__si_class_type_info
  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);

  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
  /// for pointer types.
  void BuildPointerTypeInfo(QualType PointeeTy);

  /// BuildObjCObjectTypeInfo - Build the appropriate kind of
  /// type_info for an object type.
  void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);

  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
  /// struct, used for member pointer types.
  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);

public:
  ItaniumRTTIBuilder(const ItaniumCXXABI &ABI)
      : CGM(ABI.CGM), VMContext(CGM.getModule().getContext()), CXXABI(ABI) {}

  // Pointer type info flags.
  enum {
    /// PTI_Const - Type has const qualifier.
    PTI_Const = 0x1,

    /// PTI_Volatile - Type has volatile qualifier.
    PTI_Volatile = 0x2,

    /// PTI_Restrict - Type has restrict qualifier.
    PTI_Restrict = 0x4,

    /// PTI_Incomplete - Type is incomplete.
    PTI_Incomplete = 0x8,

    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
    /// (in pointer to member).
    PTI_ContainingClassIncomplete = 0x10
  };

  // VMI type info flags.
  enum {
    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
    VMI_NonDiamondRepeat = 0x1,

    /// VMI_DiamondShaped - Class is diamond shaped.
    VMI_DiamondShaped = 0x2
  };

  // Base class type info flags.
  enum {
    /// BCTI_Virtual - Base class is virtual.
    BCTI_Virtual = 0x1,

    /// BCTI_Public - Base class is public.
    BCTI_Public = 0x2
  };

  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
  ///
  /// \param Force - true to force the creation of this RTTI value
  llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
};
}

llvm::GlobalVariable *ItaniumRTTIBuilder::GetAddrOfTypeName(
    QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage) {
  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
  Out.flush();
  StringRef Name = OutName.str();

  // We know that the mangled name of the type starts at index 4 of the
  // mangled name of the typename, so we can just index into it in order to
  // get the mangled name of the type.
  llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
                                                            Name.substr(4));

  llvm::GlobalVariable *GV =
    CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);

  GV->setInitializer(Init);

  return GV;
}

llvm::Constant *
ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
  // Mangle the RTTI name.
  SmallString<256> OutName;
  llvm::raw_svector_ostream Out(OutName);
  CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
  Out.flush();
  StringRef Name = OutName.str();

  // Look for an existing global.
  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);

  if (!GV) {
    // Create a new global variable.
    GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
                                  /*Constant=*/true,
                                  llvm::GlobalValue::ExternalLinkage, nullptr,
                                  Name);
    if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
      if (RD->hasAttr<DLLImportAttr>())
        GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
    }
  }

  return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}

/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
/// info for that type is defined in the standard library.
static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
  // Itanium C++ ABI 2.9.2:
  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
  //   the run-time support library. Specifically, the run-time support
  //   library should contain type_info objects for the types X, X* and
  //   X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
  //   unsigned char, signed char, short, unsigned short, int, unsigned int,
  //   long, unsigned long, long long, unsigned long long, float, double,
  //   long double, char16_t, char32_t, and the IEEE 754r decimal and
  //   half-precision floating point types.
  switch (Ty->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::NullPtr:
    case BuiltinType::Bool:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char_U:
    case BuiltinType::Char_S:
    case BuiltinType::UChar:
    case BuiltinType::SChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::Half:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::Int128:
    case BuiltinType::UInt128:
    case BuiltinType::OCLImage1d:
    case BuiltinType::OCLImage1dArray:
    case BuiltinType::OCLImage1dBuffer:
    case BuiltinType::OCLImage2d:
    case BuiltinType::OCLImage2dArray:
    case BuiltinType::OCLImage3d:
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
      return true;

    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("asking for RTTI for a placeholder type!");

    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      llvm_unreachable("FIXME: Objective-C types are unsupported!");

    case BuiltinType::Min12Int:
    case BuiltinType::Min16Int:
    case BuiltinType::Min16UInt:
    case BuiltinType::LitInt:
    case BuiltinType::Min10Float:
    case BuiltinType::Min16Float:
    case BuiltinType::HalfFloat:
    case BuiltinType::LitFloat:
    case BuiltinType::Int8_4Packed:
    case BuiltinType::UInt8_4Packed:
      llvm_unreachable("FIXME: HLSL types are unsupported!");
      break;
  }

  llvm_unreachable("Invalid BuiltinType Kind!");
}

static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
  QualType PointeeTy = PointerTy->getPointeeType();
  const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
  if (!BuiltinTy)
    return false;

  // Check the qualifiers.
  Qualifiers Quals = PointeeTy.getQualifiers();
  Quals.removeConst();

  if (!Quals.empty())
    return false;

  return TypeInfoIsInStandardLibrary(BuiltinTy);
}

/// IsStandardLibraryRTTIDescriptor - Returns whether the type
/// information for the given type exists in the standard library.
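/// (For example, typeid(int) and typeid(int*) resolve to type_info objects
/// exported by the C++ runtime -- e.g. libc++abi or libsupc++ -- rather than
/// being emitted into this module.)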
static bool IsStandardLibraryRTTIDescriptor(QualType Ty) { // Type info for builtin types is defined in the standard library. if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty)) return TypeInfoIsInStandardLibrary(BuiltinTy); // Type info for some pointer types to builtin types is defined in the // standard library. if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) return TypeInfoIsInStandardLibrary(PointerTy); return false; } /// ShouldUseExternalRTTIDescriptor - Returns whether the type information for /// the given type exists somewhere else, in which case we should not emit the /// type information in this translation unit. Assumes that it is not a /// standard-library type. static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) { ASTContext &Context = CGM.getContext(); // If RTTI is disabled, assume it might be disabled in the // translation unit that defines any potential key function, too. if (!Context.getLangOpts().RTTI) return false; if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl()); if (!RD->hasDefinition()) return false; if (!RD->isDynamicClass()) return false; // FIXME: this may need to be reconsidered if the key function // changes. // N.B. We must always emit the RTTI data ourselves if there exists a key // function. bool IsDLLImport = RD->hasAttr<DLLImportAttr>(); if (CGM.getVTables().isVTableExternal(RD)) return IsDLLImport ? false : true; if (IsDLLImport) return true; } return false; } /// IsIncompleteClassType - Returns whether the given record type is incomplete. static bool IsIncompleteClassType(const RecordType *RecordTy) { return !RecordTy->getDecl()->isCompleteDefinition(); } /// ContainsIncompleteClassType - Returns whether the given type contains an /// incomplete class type. This is true if /// /// * The given type is an incomplete class type. /// * The given type is a pointer type whose pointee type contains an /// incomplete class type. /// * The given type is a member pointer type whose class is an incomplete /// class type. /// * The given type is a member pointer type whose pointee type contains an /// incomplete class type. static bool ContainsIncompleteClassType(QualType Ty) { if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) { if (IsIncompleteClassType(RecordTy)) return true; } if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty)) return ContainsIncompleteClassType(PointerTy->getPointeeType()); if (const MemberPointerType *MemberPointerTy = dyn_cast<MemberPointerType>(Ty)) { // Check if the class type is incomplete. const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass()); if (IsIncompleteClassType(ClassType)) return true; return ContainsIncompleteClassType(MemberPointerTy->getPointeeType()); } return false; } // CanUseSingleInheritance - Return whether the given record decl has a "single, // public, non-virtual base at offset zero (i.e. the derived class is dynamic // iff the base is)", according to the Itanium C++ ABI, 2.9.5p6b. static bool CanUseSingleInheritance(const CXXRecordDecl *RD) { // Check the number of bases. if (RD->getNumBases() != 1) return false; // Get the base. CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(); // Check that the base is not virtual. if (Base->isVirtual()) return false; // Check that the base is public.
if (Base->getAccessSpecifier() != AS_public) return false; // Check that the class is dynamic iff the base is. const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); if (!BaseDecl->isEmpty() && BaseDecl->isDynamicClass() != RD->isDynamicClass()) return false; return true; } void ItaniumRTTIBuilder::BuildVTablePointer(const Type *Ty) { // abi::__class_type_info. static const char * const ClassTypeInfo = "_ZTVN10__cxxabiv117__class_type_infoE"; // abi::__si_class_type_info. static const char * const SIClassTypeInfo = "_ZTVN10__cxxabiv120__si_class_type_infoE"; // abi::__vmi_class_type_info. static const char * const VMIClassTypeInfo = "_ZTVN10__cxxabiv121__vmi_class_type_infoE"; const char *VTableName = nullptr; switch (Ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: #define DEPENDENT_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.def" llvm_unreachable("Non-canonical and dependent types shouldn't get here"); case Type::LValueReference: case Type::RValueReference: llvm_unreachable("References shouldn't get here"); case Type::Auto: llvm_unreachable("Undeduced auto type shouldn't get here"); case Type::Builtin: // GCC treats vector and complex types as fundamental types. case Type::Vector: case Type::ExtVector: case Type::Complex: case Type::Atomic: // FIXME: GCC treats block pointers as fundamental types?! case Type::BlockPointer: // abi::__fundamental_type_info. VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE"; break; case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: // abi::__array_type_info. VTableName = "_ZTVN10__cxxabiv117__array_type_infoE"; break; case Type::FunctionNoProto: case Type::FunctionProto: // abi::__function_type_info. VTableName = "_ZTVN10__cxxabiv120__function_type_infoE"; break; case Type::Enum: // abi::__enum_type_info. VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE"; break; case Type::Record: { const CXXRecordDecl *RD = cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); if (!RD->hasDefinition() || !RD->getNumBases()) { VTableName = ClassTypeInfo; } else if (CanUseSingleInheritance(RD)) { VTableName = SIClassTypeInfo; } else { VTableName = VMIClassTypeInfo; } break; } case Type::ObjCObject: // Ignore protocol qualifiers. Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr(); // Handle id and Class. if (isa<BuiltinType>(Ty)) { VTableName = ClassTypeInfo; break; } assert(isa<ObjCInterfaceType>(Ty)); LLVM_FALLTHROUGH; // HLSL Change case Type::ObjCInterface: if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) { VTableName = SIClassTypeInfo; } else { VTableName = ClassTypeInfo; } break; case Type::ObjCObjectPointer: case Type::Pointer: // abi::__pointer_type_info. VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE"; break; case Type::MemberPointer: // abi::__pointer_to_member_type_info. VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE"; break; } llvm::Constant *VTable = CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy); llvm::Type *PtrDiffTy = CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType()); // The vtable address point is 2. 
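// [Editor's note, not in the original source] Sketch of why the address point is 2: the runtime vtables named above begin with two pointer-sized slots before the first virtual function, so the usable address is &vtable[2]: // vtable[0] = offset-to-top (ptrdiff_t, 0 here) // vtable[1] = pointer to the class's own type_info // vtable[2] = first virtual function <- the address point taken below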
llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2); VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(CGM.Int8PtrTy, VTable, Two); VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy); Fields.push_back(VTable); } /// \brief Return the linkage that the type info and type info name constants /// should have for the given type. static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) { // Itanium C++ ABI 2.9.5p7: // In addition, it and all of the intermediate abi::__pointer_type_info // structs in the chain down to the abi::__class_type_info for the // incomplete class type must be prevented from resolving to the // corresponding type_info structs for the complete class type, possibly // by making them local static objects. Finally, a dummy class RTTI is // generated for the incomplete type that will not resolve to the final // complete class RTTI (because the latter need not exist), possibly by // making it a local static object. if (ContainsIncompleteClassType(Ty)) return llvm::GlobalValue::InternalLinkage; switch (Ty->getLinkage()) { case NoLinkage: case InternalLinkage: case UniqueExternalLinkage: return llvm::GlobalValue::InternalLinkage; case VisibleNoLinkage: case ExternalLinkage: if (!CGM.getLangOpts().RTTI) { // RTTI is not enabled, which means that this type info struct is going // to be used for exception handling. Give it linkonce_odr linkage. return llvm::GlobalValue::LinkOnceODRLinkage; } if (const RecordType *Record = dyn_cast<RecordType>(Ty)) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()); if (RD->hasAttr<WeakAttr>()) return llvm::GlobalValue::WeakODRLinkage; if (RD->isDynamicClass()) { llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD); // MinGW won't export the RTTI information when there is a key function. // Make sure we emit our own copy instead of attempting to dllimport it. if (RD->hasAttr<DLLImportAttr>() && llvm::GlobalValue::isAvailableExternallyLinkage(LT)) LT = llvm::GlobalValue::LinkOnceODRLinkage; return LT; } } return llvm::GlobalValue::LinkOnceODRLinkage; } llvm_unreachable("Invalid linkage!"); } llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) { // We want to operate on the canonical type. Ty = CGM.getContext().getCanonicalType(Ty); // Check if we've already emitted an RTTI descriptor for this type. SmallString<256> OutName; llvm::raw_svector_ostream Out(OutName); CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out); Out.flush(); StringRef Name = OutName.str(); llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name); if (OldGV && !OldGV->isDeclaration()) { assert(!OldGV->hasAvailableExternallyLinkage() && "available_externally typeinfos not yet implemented"); return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy); } // Check if there is already an external RTTI descriptor for this type. bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty); if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty))) return GetAddrOfExternalRTTIDescriptor(Ty); // Emit the standard library with external linkage. llvm::GlobalVariable::LinkageTypes Linkage; if (IsStdLib) Linkage = llvm::GlobalValue::ExternalLinkage; else Linkage = getTypeInfoLinkage(CGM, Ty); // Add the vtable pointer. BuildVTablePointer(cast<Type>(Ty)); // And the name. 
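// [Editor's note, not in the original source] Naming sketch for a hypothetical `struct Foo`: mangleCXXRTTI yields "_ZTI3Foo" (the type_info object being built here) and mangleCXXRTTIName yields "_ZTS3Foo" (the NUL-terminated name string). Both prefixes are four characters long, which is why GetAddrOfTypeName above can use Name.substr(4), i.e. "3Foo", as the string's initializer.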
llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage); llvm::Constant *TypeNameField; // If we're supposed to demote the visibility, be sure to set a flag // to use a string comparison for type_info comparisons. ItaniumCXXABI::RTTIUniquenessKind RTTIUniqueness = CXXABI.classifyRTTIUniqueness(Ty, Linkage); if (RTTIUniqueness != ItaniumCXXABI::RUK_Unique) { // The flag is the sign bit, which on ARM64 is defined to be clear // for global pointers. This is very ARM64-specific. TypeNameField = llvm::ConstantExpr::getPtrToInt(TypeName, CGM.Int64Ty); llvm::Constant *flag = llvm::ConstantInt::get(CGM.Int64Ty, ((uint64_t)1) << 63); TypeNameField = llvm::ConstantExpr::getAdd(TypeNameField, flag); TypeNameField = llvm::ConstantExpr::getIntToPtr(TypeNameField, CGM.Int8PtrTy); } else { TypeNameField = llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy); } Fields.push_back(TypeNameField); switch (Ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class: #define NON_CANONICAL_TYPE(Class, Base) case Type::Class: #define DEPENDENT_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.def" llvm_unreachable("Non-canonical and dependent types shouldn't get here"); // GCC treats vector types as fundamental types. case Type::Builtin: case Type::Vector: case Type::ExtVector: case Type::Complex: case Type::BlockPointer: // Itanium C++ ABI 2.9.5p4: // abi::__fundamental_type_info adds no data members to std::type_info. break; case Type::LValueReference: case Type::RValueReference: llvm_unreachable("References shouldn't get here"); case Type::Auto: llvm_unreachable("Undeduced auto type shouldn't get here"); case Type::ConstantArray: case Type::IncompleteArray: case Type::VariableArray: // Itanium C++ ABI 2.9.5p5: // abi::__array_type_info adds no data members to std::type_info. break; case Type::FunctionNoProto: case Type::FunctionProto: // Itanium C++ ABI 2.9.5p5: // abi::__function_type_info adds no data members to std::type_info. break; case Type::Enum: // Itanium C++ ABI 2.9.5p5: // abi::__enum_type_info adds no data members to std::type_info. break; case Type::Record: { const CXXRecordDecl *RD = cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl()); if (!RD->hasDefinition() || !RD->getNumBases()) { // We don't need to emit any fields. break; } if (CanUseSingleInheritance(RD)) BuildSIClassTypeInfo(RD); else BuildVMIClassTypeInfo(RD); break; } case Type::ObjCObject: case Type::ObjCInterface: BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty)); break; case Type::ObjCObjectPointer: BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType()); break; case Type::Pointer: BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType()); break; case Type::MemberPointer: BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty)); break; case Type::Atomic: // No fields, at least for the moment. break; } llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields); llvm::Module &M = CGM.getModule(); llvm::GlobalVariable *GV = new llvm::GlobalVariable(M, Init->getType(), /*Constant=*/true, Linkage, Init, Name); if (CGM.supportsCOMDAT() && GV->isWeakForLinker()) GV->setComdat(M.getOrInsertComdat(GV->getName())); // If there's already an old global variable, replace it with the new one. 
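// [Editor's note, not in the original source] e.g. an earlier call to GetAddrOfExternalRTTIDescriptor may have created a mere declaration of @_ZTI1D (hypothetical name); the defined global built above takes over its name, and every old use is rewritten through a bitcast before the stale declaration is erased.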
if (OldGV) { GV->takeName(OldGV); llvm::Constant *NewPtr = llvm::ConstantExpr::getBitCast(GV, OldGV->getType()); OldGV->replaceAllUsesWith(NewPtr); OldGV->eraseFromParent(); } // The Itanium ABI specifies that type_info objects must be globally // unique, with one exception: if the type is an incomplete class // type or a (possibly indirect) pointer to one. That exception // affects the general case of comparing type_info objects produced // by the typeid operator, which is why the comparison operators on // std::type_info generally use the type_info name pointers instead // of the object addresses. However, the language's built-in uses // of RTTI generally require class types to be complete, even when // manipulating pointers to those class types. This allows the // implementation of dynamic_cast to rely on address equality tests, // which is much faster. // All of this is to say that it's important that both the type_info // object and the type_info name be uniqued when weakly emitted. // Give the type_info object and name the formal visibility of the // type itself. llvm::GlobalValue::VisibilityTypes llvmVisibility; if (llvm::GlobalValue::isLocalLinkage(Linkage)) // If the linkage is local, only default visibility makes sense. llvmVisibility = llvm::GlobalValue::DefaultVisibility; else if (RTTIUniqueness == ItaniumCXXABI::RUK_NonUniqueHidden) llvmVisibility = llvm::GlobalValue::HiddenVisibility; else llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility()); TypeName->setVisibility(llvmVisibility); GV->setVisibility(llvmVisibility); return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy); } /// ComputeQualifierFlags - Compute the pointer type info flags from the /// given qualifiers. static unsigned ComputeQualifierFlags(Qualifiers Quals) { unsigned Flags = 0; if (Quals.hasConst()) Flags |= ItaniumRTTIBuilder::PTI_Const; if (Quals.hasVolatile()) Flags |= ItaniumRTTIBuilder::PTI_Volatile; if (Quals.hasRestrict()) Flags |= ItaniumRTTIBuilder::PTI_Restrict; return Flags; } /// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info /// for the given Objective-C object type. void ItaniumRTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) { // Drop qualifiers. const Type *T = OT->getBaseType().getTypePtr(); assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T)); // The builtin types are abi::__class_type_infos and don't require // extra fields. if (isa<BuiltinType>(T)) return; ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl(); ObjCInterfaceDecl *Super = Class->getSuperClass(); // Root classes are also __class_type_info. if (!Super) return; QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super); // Everything else is single inheritance. llvm::Constant *BaseTypeInfo = ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(SuperTy); Fields.push_back(BaseTypeInfo); } /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b. void ItaniumRTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) { // Itanium C++ ABI 2.9.5p6b: // It adds to abi::__class_type_info a single member pointing to the // type_info structure for the base type, llvm::Constant *BaseTypeInfo = ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(RD->bases_begin()->getType()); Fields.push_back(BaseTypeInfo); } namespace { /// SeenBases - Contains virtual and non-virtual bases seen when traversing /// a class hierarchy.
struct SeenBases { llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases; llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases; }; } /// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in /// abi::__vmi_class_type_info. /// static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, SeenBases &Bases) { unsigned Flags = 0; const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl()); if (Base->isVirtual()) { // Mark the virtual base as seen. if (!Bases.VirtualBases.insert(BaseDecl).second) { // If this virtual base has been seen before, then the class is diamond // shaped. Flags |= ItaniumRTTIBuilder::VMI_DiamondShaped; } else { if (Bases.NonVirtualBases.count(BaseDecl)) Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; } } else { // Mark the non-virtual base as seen. if (!Bases.NonVirtualBases.insert(BaseDecl).second) { // If this non-virtual base has been seen before, then the class has non- // diamond shaped repeated inheritance. Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; } else { if (Bases.VirtualBases.count(BaseDecl)) Flags |= ItaniumRTTIBuilder::VMI_NonDiamondRepeat; } } // Walk all bases. for (const auto &I : BaseDecl->bases()) Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); return Flags; } static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) { unsigned Flags = 0; SeenBases Bases; // Walk all bases. for (const auto &I : RD->bases()) Flags |= ComputeVMIClassTypeInfoFlags(&I, Bases); return Flags; } /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for /// classes with bases that do not satisfy the abi::__si_class_type_info /// constraints, according to the Itanium C++ ABI, 2.9.5p6c. void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) { llvm::Type *UnsignedIntLTy = CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); // Itanium C++ ABI 2.9.5p6c: // __flags is a word with flags describing details about the class // structure, which may be referenced by using the __flags_masks // enumeration. These flags refer to both direct and indirect bases. unsigned Flags = ComputeVMIClassTypeInfoFlags(RD); Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); // Itanium C++ ABI 2.9.5p6c: // __base_count is a word with the number of direct proper base class // descriptions that follow. Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases())); if (!RD->getNumBases()) return; llvm::Type *LongLTy = CGM.getTypes().ConvertType(CGM.getContext().LongTy); // Now add the base class descriptions. // Itanium C++ ABI 2.9.5p6c: // __base_info[] is an array of base class descriptions -- one for every // direct proper base. Each description is of the type: // // struct abi::__base_class_type_info { // public: // const __class_type_info *__base_type; // long __offset_flags; // // enum __offset_flags_masks { // __virtual_mask = 0x1, // __public_mask = 0x2, // __offset_shift = 8 // }; // }; for (const auto &Base : RD->bases()) { // The __base_type member points to the RTTI for the base type. Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType())); const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl()); int64_t OffsetFlags = 0; // All but the lower 8 bits of __offset_flags are a signed offset. // For a non-virtual base, this is the offset in the object of the base // subobject.
For a virtual base, this is the offset in the virtual table of // the virtual base offset for the virtual base referenced (negative). CharUnits Offset; if (Base.isVirtual()) Offset = CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl); else { const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); Offset = Layout.getBaseClassOffset(BaseDecl); }; OffsetFlags = uint64_t(Offset.getQuantity()) << 8; // The low-order byte of __offset_flags contains flags, as given by the // masks from the enumeration __offset_flags_masks. if (Base.isVirtual()) OffsetFlags |= BCTI_Virtual; if (Base.getAccessSpecifier() == AS_public) OffsetFlags |= BCTI_Public; Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags)); } } /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, /// used for pointer types. void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) { Qualifiers Quals; QualType UnqualifiedPointeeTy = CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals); // Itanium C++ ABI 2.9.5p7: // __flags is a flag word describing the cv-qualification and other // attributes of the type pointed to unsigned Flags = ComputeQualifierFlags(Quals); // Itanium C++ ABI 2.9.5p7: // When the abi::__pbase_type_info is for a direct or indirect pointer to an // incomplete class type, the incomplete target type flag is set. if (ContainsIncompleteClassType(UnqualifiedPointeeTy)) Flags |= PTI_Incomplete; llvm::Type *UnsignedIntLTy = CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); // Itanium C++ ABI 2.9.5p7: // __pointee is a pointer to the std::type_info derivation for the // unqualified type being pointed to. llvm::Constant *PointeeTypeInfo = ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy); Fields.push_back(PointeeTypeInfo); } /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info /// struct, used for member pointer types. void ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) { QualType PointeeTy = Ty->getPointeeType(); Qualifiers Quals; QualType UnqualifiedPointeeTy = CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals); // Itanium C++ ABI 2.9.5p7: // __flags is a flag word describing the cv-qualification and other // attributes of the type pointed to. unsigned Flags = ComputeQualifierFlags(Quals); const RecordType *ClassType = cast<RecordType>(Ty->getClass()); // Itanium C++ ABI 2.9.5p7: // When the abi::__pbase_type_info is for a direct or indirect pointer to an // incomplete class type, the incomplete target type flag is set. if (ContainsIncompleteClassType(UnqualifiedPointeeTy)) Flags |= PTI_Incomplete; if (IsIncompleteClassType(ClassType)) Flags |= PTI_ContainingClassIncomplete; llvm::Type *UnsignedIntLTy = CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy); Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags)); // Itanium C++ ABI 2.9.5p7: // __pointee is a pointer to the std::type_info derivation for the // unqualified type being pointed to. llvm::Constant *PointeeTypeInfo = ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy); Fields.push_back(PointeeTypeInfo); // Itanium C++ ABI 2.9.5p9: // __context is a pointer to an abi::__class_type_info corresponding to the // class type containing the member pointed to // (e.g., the "A" in "int A::*"). 
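// [Editor's note, not in the original source] Putting the member-pointer fields together for the `int A::*` example above, the trailing fields pushed here are roughly { __flags, __pointee = &_ZTIi, __context = &_ZTI1A }, with PTI_ContainingClassIncomplete or'd into __flags when A is only forward-declared at this point.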
Fields.push_back( ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(QualType(ClassType, 0))); } llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) { return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty); } void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type) { QualType PointerType = getContext().getPointerType(Type); QualType PointerTypeConst = getContext().getPointerType(Type.withConst()); ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, true); ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, true); ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true); } void ItaniumCXXABI::EmitFundamentalRTTIDescriptors() { QualType FundamentalTypes[] = { getContext().VoidTy, getContext().NullPtrTy, getContext().BoolTy, getContext().WCharTy, getContext().CharTy, getContext().UnsignedCharTy, getContext().SignedCharTy, getContext().ShortTy, getContext().UnsignedShortTy, getContext().IntTy, getContext().UnsignedIntTy, getContext().LongTy, getContext().UnsignedLongTy, getContext().LongLongTy, getContext().UnsignedLongLongTy, getContext().HalfTy, getContext().FloatTy, getContext().DoubleTy, getContext().LongDoubleTy, getContext().Char16Ty, getContext().Char32Ty, }; for (const QualType &FundamentalType : FundamentalTypes) EmitFundamentalRTTIDescriptor(FundamentalType); } /// What sort of uniqueness rules should we use for the RTTI for the /// given type? ItaniumCXXABI::RTTIUniquenessKind ItaniumCXXABI::classifyRTTIUniqueness( QualType CanTy, llvm::GlobalValue::LinkageTypes Linkage) const { if (shouldRTTIBeUnique()) return RUK_Unique; // It's only necessary for linkonce_odr or weak_odr linkage. if (Linkage != llvm::GlobalValue::LinkOnceODRLinkage && Linkage != llvm::GlobalValue::WeakODRLinkage) return RUK_Unique; // It's only necessary with default visibility. if (CanTy->getVisibility() != DefaultVisibility) return RUK_Unique; // If we're not required to publish this symbol, hide it. if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage) return RUK_NonUniqueHidden; // If we're required to publish this symbol, as we might be under an // explicit instantiation, leave it with default visibility but // enable string-comparisons. assert(Linkage == llvm::GlobalValue::WeakODRLinkage); return RUK_NonUniqueVisible; } // Find out how to codegen the complete destructor and constructor namespace { enum class StructorCodegen { Emit, RAUW, Alias, COMDAT }; } static StructorCodegen getCodegenToUse(CodeGenModule &CGM, const CXXMethodDecl *MD) { if (!CGM.getCodeGenOpts().CXXCtorDtorAliases) return StructorCodegen::Emit; // The complete and base structors are not equivalent if there are any virtual // bases, so emit separate functions. if (MD->getParent()->getNumVBases()) return StructorCodegen::Emit; GlobalDecl AliasDecl; if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) { AliasDecl = GlobalDecl(DD, Dtor_Complete); } else { const auto *CD = cast<CXXConstructorDecl>(MD); AliasDecl = GlobalDecl(CD, Ctor_Complete); } llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); if (llvm::GlobalValue::isDiscardableIfUnused(Linkage)) return StructorCodegen::RAUW; // FIXME: Should we allow available_externally aliases? if (!llvm::GlobalAlias::isValidLinkage(Linkage)) return StructorCodegen::RAUW; if (llvm::GlobalValue::isWeakForLinker(Linkage)) { // Only ELF supports COMDATs with arbitrary names (C5/D5). 
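// [Editor's note, not in the original source] "C5"/"D5" are the Itanium mangling codes for the unified constructor/destructor group: for a hypothetical class X the comdat is keyed on a name like _ZN1XC5Ev, so the complete (_ZN1XC1Ev) and base (_ZN1XC2Ev) variants are kept or discarded by the linker as one unit.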
if (CGM.getTarget().getTriple().isOSBinFormatELF()) return StructorCodegen::COMDAT; return StructorCodegen::Emit; } return StructorCodegen::Alias; } static void emitConstructorDestructorAlias(CodeGenModule &CGM, GlobalDecl AliasDecl, GlobalDecl TargetDecl) { llvm::GlobalValue::LinkageTypes Linkage = CGM.getFunctionLinkage(AliasDecl); StringRef MangledName = CGM.getMangledName(AliasDecl); llvm::GlobalValue *Entry = CGM.GetGlobalValue(MangledName); if (Entry && !Entry->isDeclaration()) return; auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(TargetDecl)); llvm::PointerType *AliasType = Aliasee->getType(); // Create the alias with no name. auto *Alias = llvm::GlobalAlias::create(AliasType, Linkage, "", Aliasee, &CGM.getModule()); // Switch any previous uses to the alias. if (Entry) { assert(Entry->getType() == AliasType && "declaration exists with different type"); Alias->takeName(Entry); Entry->replaceAllUsesWith(Alias); Entry->eraseFromParent(); } else { Alias->setName(MangledName); } // Finally, set up the alias with its proper name and attributes. CGM.setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias); } void ItaniumCXXABI::emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) { auto *CD = dyn_cast<CXXConstructorDecl>(MD); const CXXDestructorDecl *DD = CD ? nullptr : cast<CXXDestructorDecl>(MD); StructorCodegen CGType = getCodegenToUse(CGM, MD); if (Type == StructorType::Complete) { GlobalDecl CompleteDecl; GlobalDecl BaseDecl; if (CD) { CompleteDecl = GlobalDecl(CD, Ctor_Complete); BaseDecl = GlobalDecl(CD, Ctor_Base); } else { CompleteDecl = GlobalDecl(DD, Dtor_Complete); BaseDecl = GlobalDecl(DD, Dtor_Base); } if (CGType == StructorCodegen::Alias || CGType == StructorCodegen::COMDAT) { emitConstructorDestructorAlias(CGM, CompleteDecl, BaseDecl); return; } if (CGType == StructorCodegen::RAUW) { StringRef MangledName = CGM.getMangledName(CompleteDecl); auto *Aliasee = cast<llvm::GlobalValue>(CGM.GetAddrOfGlobal(BaseDecl)); CGM.addReplacement(MangledName, Aliasee); return; } } // The base destructor is equivalent to the base destructor of its // base class if there is exactly one non-virtual base class with a // non-trivial destructor, there are no fields with a non-trivial // destructor, and the body of the destructor is trivial. 
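// [Editor's note, not in the original source] Hypothetical example of the case described above: // struct Base { ~Base(); }; // struct Derived : Base { }; // no other bases or fields with non-trivial dtors // Derived's base-object destructor _ZN7DerivedD2Ev would only call _ZN4BaseD2Ev, so TryEmitBaseDestructorAsAlias can emit it as an alias of Base's base-object destructor instead of a separate function body.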
if (DD && Type == StructorType::Base && CGType != StructorCodegen::COMDAT && !CGM.TryEmitBaseDestructorAsAlias(DD)) return; llvm::Function *Fn = CGM.codegenCXXStructor(MD, Type); if (CGType == StructorCodegen::COMDAT) { SmallString<256> Buffer; llvm::raw_svector_ostream Out(Buffer); if (DD) getMangleContext().mangleCXXDtorComdat(DD, Out); else getMangleContext().mangleCXXCtorComdat(CD, Out); llvm::Comdat *C = CGM.getModule().getOrInsertComdat(Out.str()); Fn->setComdat(C); } else { CGM.maybeSetTrivialComdat(*MD, *Fn); } } static llvm::Constant *getBeginCatchFn(CodeGenModule &CGM) { // void *__cxa_begin_catch(void*); llvm::FunctionType *FTy = llvm::FunctionType::get( CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); return CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch"); } static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) { // void __cxa_end_catch(); llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false); return CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch"); } static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) { // void *__cxa_get_exception_ptr(void*); llvm::FunctionType *FTy = llvm::FunctionType::get( CGM.Int8PtrTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); return CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr"); } namespace { /// A cleanup to call __cxa_end_catch. In many cases, the caught /// exception type lets us state definitively that the thrown exception /// type does not have a destructor. In particular: /// - Catch-alls tell us nothing, so we have to conservatively /// assume that the thrown exception might have a destructor. /// - Catches by reference behave according to their base types. /// - Catches of non-record types will only trigger for exceptions /// of non-record types, which never have destructors. /// - Catches of record types can trigger for arbitrary subclasses /// of the caught type, so we have to assume the actual thrown /// exception type might have a throwing destructor, even if the /// caught type's destructor is trivial or nothrow. struct CallEndCatch : EHScopeStack::Cleanup { CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {} bool MightThrow; void Emit(CodeGenFunction &CGF, Flags flags) override { if (!MightThrow) { CGF.EmitNounwindRuntimeCall(getEndCatchFn(CGF.CGM)); return; } CGF.EmitRuntimeCallOrInvoke(getEndCatchFn(CGF.CGM)); } }; } /// Emits a call to __cxa_begin_catch and enters a cleanup to call /// __cxa_end_catch. /// /// \param EndMightThrow - true if __cxa_end_catch might throw static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, llvm::Value *Exn, bool EndMightThrow) { llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(getBeginCatchFn(CGF.CGM), Exn); CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow); return call; } /// A "special initializer" callback for initializing a catch /// parameter during catch initialization. static void InitCatchParam(CodeGenFunction &CGF, const VarDecl &CatchParam, llvm::Value *ParamAddr, SourceLocation Loc) { // Load the exception from where the landing pad saved it. llvm::Value *Exn = CGF.getExceptionFromSlot(); CanQualType CatchType = CGF.CGM.getContext().getCanonicalType(CatchParam.getType()); llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType); // If we're catching by reference, we can just cast the object // pointer to the appropriate pointer. 
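// [Editor's note, not in the original source] Sketch of the plain by-reference case: for `catch (Widget &w)` (hypothetical type), the branch below emits approximately // %adj = call i8* @__cxa_begin_catch(i8* %exn) // %ref = bitcast i8* %adj to %struct.Widget* // store %struct.Widget* %ref, %struct.Widget** %w.addr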
if (isa<ReferenceType>(CatchType)) { QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType(); bool EndCatchMightThrow = CaughtType->isRecordType(); // __cxa_begin_catch returns the adjusted object pointer. llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow); // We have no way to tell the personality function that we're // catching by reference, so if we're catching a pointer, // __cxa_begin_catch will actually return that pointer by value. if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) { QualType PointeeType = PT->getPointeeType(); // When catching by reference, generally we should just ignore // this by-value pointer and use the exception object instead. if (!PointeeType->isRecordType()) { // Exn points to the struct _Unwind_Exception header, which // we have to skip past in order to reach the exception data. unsigned HeaderSize = CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException(); AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize); // However, if we're catching a pointer-to-record type that won't // work, because the personality function might have adjusted // the pointer. There's actually no way for us to fully satisfy // the language/ABI contract here: we can't use Exn because it // might have the wrong adjustment, but we can't use the by-value // pointer because it's off by a level of abstraction. // // The current solution is to dump the adjusted pointer into an // alloca, which breaks language semantics (because changing the // pointer doesn't change the exception) but at least works. // The better solution would be to filter out non-exact matches // and rethrow them, but this is tricky because the rethrow // really needs to be catchable by other sites at this landing // pad. The best solution is to fix the personality function. } else { // Pull the pointer for the reference type off. llvm::Type *PtrTy = cast<llvm::PointerType>(LLVMCatchTy)->getElementType(); // Create the temporary and write the adjusted pointer into it. llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp"); llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); CGF.Builder.CreateStore(Casted, ExnPtrTmp); // Bind the reference to the temporary. AdjustedExn = ExnPtrTmp; } } llvm::Value *ExnCast = CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref"); CGF.Builder.CreateStore(ExnCast, ParamAddr); return; } // Scalars and complexes. TypeEvaluationKind TEK = CGF.getEvaluationKind(CatchType); if (TEK != TEK_Aggregate) { llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false); // If the catch type is a pointer type, __cxa_begin_catch returns // the pointer by value. if (CatchType->hasPointerRepresentation()) { llvm::Value *CastExn = CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted"); switch (CatchType.getQualifiers().getObjCLifetime()) { case Qualifiers::OCL_Strong: CastExn = CGF.EmitARCRetainNonBlock(CastExn); LLVM_FALLTHROUGH; // HLSL Change case Qualifiers::OCL_None: case Qualifiers::OCL_ExplicitNone: case Qualifiers::OCL_Autoreleasing: CGF.Builder.CreateStore(CastExn, ParamAddr); return; case Qualifiers::OCL_Weak: CGF.EmitARCInitWeak(ParamAddr, CastExn); return; } llvm_unreachable("bad ownership qualifier!"); } // Otherwise, it returns a pointer into the exception object. 
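// [Editor's note, not in the original source] e.g. for `catch (double d)`, __cxa_begin_catch hands back a pointer to the thrown double stored in the exception object; the code below bitcasts it to double*, loads the value, and stores it into the catch parameter's alloca.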
llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy); LValue srcLV = CGF.MakeNaturalAlignAddrLValue(Cast, CatchType); LValue destLV = CGF.MakeAddrLValue(ParamAddr, CatchType, CGF.getContext().getDeclAlign(&CatchParam)); switch (TEK) { case TEK_Complex: CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV, /*init*/ true); return; case TEK_Scalar: { llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc); CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true); return; } case TEK_Aggregate: llvm_unreachable("evaluation kind filtered out!"); } llvm_unreachable("bad evaluation kind"); } assert(isa<RecordType>(CatchType) && "unexpected catch type!"); llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok // Check for a copy expression. If we don't have a copy expression, // that means a trivial copy is okay. const Expr *copyExpr = CatchParam.getInit(); if (!copyExpr) { llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true); llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy); CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType); return; } // We have to call __cxa_get_exception_ptr to get the adjusted // pointer before copying. llvm::CallInst *rawAdjustedExn = CGF.EmitNounwindRuntimeCall(getGetExceptionPtrFn(CGF.CGM), Exn); // Cast that to the appropriate type. llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy); // The copy expression is defined in terms of an OpaqueValueExpr. // Find it and map it to the adjusted expression. CodeGenFunction::OpaqueValueMapping opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr), CGF.MakeAddrLValue(adjustedExn, CatchParam.getType())); // Call the copy ctor in a terminate scope. CGF.EHStack.pushTerminate(); // Perform the copy construction. CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam); CGF.EmitAggExpr(copyExpr, AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(), AggValueSlot::IsNotDestructed, AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased)); // Leave the terminate scope. CGF.EHStack.popTerminate(); // Undo the opaque value mapping. opaque.pop(); // Finally we can call __cxa_begin_catch. CallBeginCatch(CGF, Exn, true); } /// Begins a catch statement by initializing the catch variable and /// calling __cxa_begin_catch. void ItaniumCXXABI::emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) { // We have to be very careful with the ordering of cleanups here: // C++ [except.throw]p4: // The destruction [of the exception temporary] occurs // immediately after the destruction of the object declared in // the exception-declaration in the handler. // // So the precise ordering is: // 1. Construct catch variable. // 2. __cxa_begin_catch // 3. Enter __cxa_end_catch cleanup // 4. Enter dtor cleanup // // We do this by using a slightly abnormal initialization process. 
// Delegation sequence: // - ExitCXXTryStmt opens a RunCleanupsScope // - EmitAutoVarAlloca creates the variable and debug info // - InitCatchParam initializes the variable from the exception // - CallBeginCatch calls __cxa_begin_catch // - CallBeginCatch enters the __cxa_end_catch cleanup // - EmitAutoVarCleanups enters the variable destructor cleanup // - EmitCXXTryStmt emits the code for the catch body // - EmitCXXTryStmt closes the RunCleanupsScope VarDecl *CatchParam = S->getExceptionDecl(); if (!CatchParam) { llvm::Value *Exn = CGF.getExceptionFromSlot(); CallBeginCatch(CGF, Exn, true); return; } // Emit the local. CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam); InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart()); CGF.EmitAutoVarCleanups(var); } #if 0 // HLSL Change Start /// Get or define the following function: /// void @__clang_call_terminate(i8* %exn) nounwind noreturn /// This code is used only in C++. static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) { llvm::FunctionType *fnTy = llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false); llvm::Constant *fnRef = CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate"); llvm::Function *fn = dyn_cast<llvm::Function>(fnRef); if (fn && fn->empty()) { fn->setDoesNotThrow(); fn->setDoesNotReturn(); // What we really want is to massively penalize inlining without // forbidding it completely. The difference between that and // 'noinline' is negligible. fn->addFnAttr(llvm::Attribute::NoInline); // Allow this function to be shared across translation units, but // we don't want it to turn into an exported symbol. fn->setLinkage(llvm::Function::LinkOnceODRLinkage); fn->setVisibility(llvm::Function::HiddenVisibility); if (CGM.supportsCOMDAT()) fn->setComdat(CGM.getModule().getOrInsertComdat(fn->getName())); // Set up the function. llvm::BasicBlock *entry = llvm::BasicBlock::Create(CGM.getLLVMContext(), "", fn); CGBuilderTy builder(entry); // Pull the exception pointer out of the parameter list. llvm::Value *exn = &*fn->arg_begin(); // Call __cxa_begin_catch(exn). llvm::CallInst *catchCall = builder.CreateCall(getBeginCatchFn(CGM), exn); catchCall->setDoesNotThrow(); catchCall->setCallingConv(CGM.getRuntimeCC()); // Call std::terminate(). llvm::CallInst *termCall = builder.CreateCall(CGM.getTerminateFn()); termCall->setDoesNotThrow(); termCall->setDoesNotReturn(); termCall->setCallingConv(CGM.getRuntimeCC()); // std::terminate cannot return. builder.CreateUnreachable(); } return fnRef; } #endif // HLSL Change End llvm::CallInst * ItaniumCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF, llvm::Value *Exn) { #if 0 // HLSL Change - no support for exceptions // In C++, we want to call __cxa_begin_catch() before terminating. if (Exn) { assert(CGF.CGM.getLangOpts().CPlusPlus); return CGF.EmitNounwindRuntimeCall(getClangCallTerminateFn(CGF.CGM), Exn); } return CGF.EmitNounwindRuntimeCall(CGF.CGM.getTerminateFn()); #else llvm_unreachable("HLSL does not support exceptions"); #endif // HLSL Change - no support for exceptions }
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGExprConstant.cpp
//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This contains code to emit Constant Expr nodes as LLVM code. // //===----------------------------------------------------------------------===// #include "CodeGenFunction.h" #include "CGCXXABI.h" #include "CGObjCRuntime.h" #include "CGRecordLayout.h" #include "CodeGenModule.h" #include "clang/AST/APValue.h" #include "clang/AST/ASTContext.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtVisitor.h" #include "clang/Basic/Builtins.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Function.h" #include "llvm/IR/GlobalVariable.h" #include "CGHLSLRuntime.h" // HLSL Change using namespace clang; using namespace CodeGen; //===----------------------------------------------------------------------===// // ConstStructBuilder //===----------------------------------------------------------------------===// namespace { class ConstExprEmitter; class ConstStructBuilder { CodeGenModule &CGM; CodeGenFunction *CGF; bool Packed; CharUnits NextFieldOffsetInChars; CharUnits LLVMStructAlignment; SmallVector<llvm::Constant *, 32> Elements; public: static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CFG, ConstExprEmitter *Emitter, llvm::ConstantStruct *Base, InitListExpr *Updater); static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE); static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, const APValue &Value, QualType ValTy); private: ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF) : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInChars(CharUnits::Zero()), LLVMStructAlignment(CharUnits::One()) { } void AppendField(const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *InitExpr); void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst); void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, llvm::ConstantInt *InitExpr); void AppendPadding(CharUnits PadSize); void AppendTailPadding(CharUnits RecordSize); void ConvertStructToPacked(); bool Build(InitListExpr *ILE); bool Build(ConstExprEmitter *Emitter, llvm::ConstantStruct *Base, InitListExpr *Updater); void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase, const CXXRecordDecl *VTableClass, CharUnits BaseOffset); llvm::Constant *Finalize(QualType Ty); CharUnits getAlignment(const llvm::Constant *C) const { if (Packed) return CharUnits::One(); return CharUnits::fromQuantity( CGM.getDataLayout().getABITypeAlignment(C->getType())); } CharUnits getSizeInChars(const llvm::Constant *C) const { return CharUnits::fromQuantity( CGM.getDataLayout().getTypeAllocSize(C->getType())); } }; void ConstStructBuilder:: AppendField(const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *InitCst) { const ASTContext &Context = CGM.getContext(); CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset); AppendBytes(FieldOffsetInChars, InitCst); } void ConstStructBuilder:: AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) { assert(NextFieldOffsetInChars <= FieldOffsetInChars && "Field offset mismatch!"); CharUnits FieldAlignment = getAlignment(InitCst); // Round up the field offset to the alignment of the field type. 
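// [Editor's note, not in the original source] Worked example of the padding logic below, assuming a hypothetical `struct { char c; alignas(16) int i; } v = {1, 2};`: after the i8 for `c`, NextFieldOffsetInChars is 1, but `i` lives at offset 16; rounding 1 up to the i32 alignment only reaches 4, so 15 bytes of undef padding are appended first, making the element list { i8 1, [15 x i8] undef, i32 2 } before any tail padding.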
CharUnits AlignedNextFieldOffsetInChars = NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment); if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) { // We need to append padding. AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars); assert(NextFieldOffsetInChars == FieldOffsetInChars && "Did not add enough padding!"); AlignedNextFieldOffsetInChars = NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment); } if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) { assert(!Packed && "Alignment is wrong even with a packed struct!"); // Convert the struct to a packed struct. ConvertStructToPacked(); // After we pack the struct, we may need to insert padding. if (NextFieldOffsetInChars < FieldOffsetInChars) { // We need to append padding. AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars); assert(NextFieldOffsetInChars == FieldOffsetInChars && "Did not add enough padding!"); } AlignedNextFieldOffsetInChars = NextFieldOffsetInChars; } // Add the field. Elements.push_back(InitCst); NextFieldOffsetInChars = AlignedNextFieldOffsetInChars + getSizeInChars(InitCst); if (Packed) assert(LLVMStructAlignment == CharUnits::One() && "Packed struct not byte-aligned!"); else LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment); } void ConstStructBuilder::AppendBitField(const FieldDecl *Field, uint64_t FieldOffset, llvm::ConstantInt *CI) { const ASTContext &Context = CGM.getContext(); const uint64_t CharWidth = Context.getCharWidth(); uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars); if (FieldOffset > NextFieldOffsetInBits) { // We need to add padding. CharUnits PadSize = Context.toCharUnitsFromBits( llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits, Context.getTargetInfo().getCharAlign())); AppendPadding(PadSize); } uint64_t FieldSize = Field->getBitWidthValue(Context); llvm::APInt FieldValue = CI->getValue(); // Promote the size of FieldValue if necessary // FIXME: This should never occur, but currently it can because initializer // constants are cast to bool, and because clang is not enforcing bitfield // width limits. if (FieldSize > FieldValue.getBitWidth()) FieldValue = FieldValue.zext(FieldSize); // Truncate the size of FieldValue to the bit field size. if (FieldSize < FieldValue.getBitWidth()) FieldValue = FieldValue.trunc(FieldSize); NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars); if (FieldOffset < NextFieldOffsetInBits) { // Either part of the field or the entire field can go into the previous // byte. assert(!Elements.empty() && "Elements can't be empty!"); unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset; bool FitsCompletelyInPreviousByte = BitsInPreviousByte >= FieldValue.getBitWidth(); llvm::APInt Tmp = FieldValue; if (!FitsCompletelyInPreviousByte) { unsigned NewFieldWidth = FieldSize - BitsInPreviousByte; if (CGM.getDataLayout().isBigEndian()) { Tmp = Tmp.lshr(NewFieldWidth); Tmp = Tmp.trunc(BitsInPreviousByte); // We want the remaining high bits. FieldValue = FieldValue.trunc(NewFieldWidth); } else { Tmp = Tmp.trunc(BitsInPreviousByte); // We want the remaining low bits. FieldValue = FieldValue.lshr(BitsInPreviousByte); FieldValue = FieldValue.trunc(NewFieldWidth); } } Tmp = Tmp.zext(CharWidth); if (CGM.getDataLayout().isBigEndian()) { if (FitsCompletelyInPreviousByte) Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth()); } else { Tmp = Tmp.shl(CharWidth - BitsInPreviousByte); } // 'or' in the bits that go into the previous byte. 
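// [Editor's note, not in the original source] Worked example on a little-endian target: for `struct { unsigned a : 4, b : 4; } v = {1, 2};`, `a` is first appended as the byte 0x01; when `b` arrives, its four bits fit entirely in that byte, so Tmp becomes 0x02 << 4 = 0x20 and is or'd in below, leaving the element i8 0x21.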
llvm::Value *LastElt = Elements.back(); if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt)) Tmp |= Val->getValue(); else { assert(isa<llvm::UndefValue>(LastElt)); // If there is an undef field that we're adding to, it can either be a // scalar undef (in which case we just replace it with our field) or an // array. If it is an array, we have to pull one byte off the // array so that the other undef bytes stay around. if (!isa<llvm::IntegerType>(LastElt->getType())) { // The undef padding will be a multibyte array; create a new smaller // padding and then a hole for our i8 to get plopped into. assert(isa<llvm::ArrayType>(LastElt->getType()) && "Expected array padding of undefs"); llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType()); assert(AT->getElementType()->isIntegerTy(CharWidth) && AT->getNumElements() != 0 && "Expected non-empty array padding of undefs"); // Remove the padding array. NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements()); Elements.pop_back(); // Add the padding back in two chunks. AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1)); AppendPadding(CharUnits::One()); assert(isa<llvm::UndefValue>(Elements.back()) && Elements.back()->getType()->isIntegerTy(CharWidth) && "Padding addition didn't work right"); } } Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp); if (FitsCompletelyInPreviousByte) return; } while (FieldValue.getBitWidth() > CharWidth) { llvm::APInt Tmp; if (CGM.getDataLayout().isBigEndian()) { // We want the high bits. Tmp = FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth); } else { // We want the low bits. Tmp = FieldValue.trunc(CharWidth); FieldValue = FieldValue.lshr(CharWidth); } Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp)); ++NextFieldOffsetInChars; FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth); } assert(FieldValue.getBitWidth() > 0 && "Should have at least one bit left!"); assert(FieldValue.getBitWidth() <= CharWidth && "Should not have more than a byte left!"); if (FieldValue.getBitWidth() < CharWidth) { if (CGM.getDataLayout().isBigEndian()) { unsigned BitWidth = FieldValue.getBitWidth(); FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth); } else FieldValue = FieldValue.zext(CharWidth); } // Append the last element.
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), FieldValue)); ++NextFieldOffsetInChars; } void ConstStructBuilder::AppendPadding(CharUnits PadSize) { if (PadSize.isZero()) return; llvm::Type *Ty = CGM.Int8Ty; if (PadSize > CharUnits::One()) Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity()); llvm::Constant *C = llvm::UndefValue::get(Ty); Elements.push_back(C); assert(getAlignment(C) == CharUnits::One() && "Padding must have 1 byte alignment!"); NextFieldOffsetInChars += getSizeInChars(C); } void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) { assert(NextFieldOffsetInChars <= RecordSize && "Size mismatch!"); AppendPadding(RecordSize - NextFieldOffsetInChars); } void ConstStructBuilder::ConvertStructToPacked() { SmallVector<llvm::Constant *, 16> PackedElements; CharUnits ElementOffsetInChars = CharUnits::Zero(); for (unsigned i = 0, e = Elements.size(); i != e; ++i) { llvm::Constant *C = Elements[i]; CharUnits ElementAlign = CharUnits::fromQuantity( CGM.getDataLayout().getABITypeAlignment(C->getType())); CharUnits AlignedElementOffsetInChars = ElementOffsetInChars.RoundUpToAlignment(ElementAlign); if (AlignedElementOffsetInChars > ElementOffsetInChars) { // We need some padding. CharUnits NumChars = AlignedElementOffsetInChars - ElementOffsetInChars; llvm::Type *Ty = CGM.Int8Ty; if (NumChars > CharUnits::One()) Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity()); llvm::Constant *Padding = llvm::UndefValue::get(Ty); PackedElements.push_back(Padding); ElementOffsetInChars += getSizeInChars(Padding); } PackedElements.push_back(C); ElementOffsetInChars += getSizeInChars(C); } assert(ElementOffsetInChars == NextFieldOffsetInChars && "Packing the struct changed its size!"); Elements.swap(PackedElements); LLVMStructAlignment = CharUnits::One(); Packed = true; } bool ConstStructBuilder::Build(InitListExpr *ILE) { RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl(); const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); unsigned FieldNo = 0; unsigned ElementNo = 0; for (RecordDecl::field_iterator Field = RD->field_begin(), FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) { // If this is a union, skip all the fields that aren't being initialized. if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field) continue; // Don't emit anonymous bitfields, they just affect layout. if (Field->isUnnamedBitfield()) continue; // Get the initializer. A struct can include fields without initializers, // we just use explicit null values for them. llvm::Constant *EltInit; if (ElementNo < ILE->getNumInits()) EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++), Field->getType(), CGF); else EltInit = CGM.EmitNullConstant(Field->getType()); if (!EltInit) return false; if (!Field->isBitField()) { // Handle non-bitfield members. AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit); } else { // Otherwise we have a bitfield. if (auto *CI = dyn_cast<llvm::ConstantInt>(EltInit)) { AppendBitField(*Field, Layout.getFieldOffset(FieldNo), CI); } else { // We are trying to initialize a bitfield with a non-trivial constant, // this must require run-time code. 
return false; } } } return true; } namespace { struct BaseInfo { BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index) : Decl(Decl), Offset(Offset), Index(Index) { } const CXXRecordDecl *Decl; CharUnits Offset; unsigned Index; bool operator<(const BaseInfo &O) const { return Offset < O.Offset; } }; } void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase, const CXXRecordDecl *VTableClass, CharUnits Offset) { const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) { // Add a vtable pointer, if we need one and it hasn't already been added. if (CD->isDynamicClass() && !IsPrimaryBase) { llvm::Constant *VTableAddressPoint = CGM.getCXXABI().getVTableAddressPointForConstExpr( BaseSubobject(CD, Offset), VTableClass); AppendBytes(Offset, VTableAddressPoint); } // Accumulate and sort bases, in order to visit them in address order, which // may not be the same as declaration order. SmallVector<BaseInfo, 8> Bases; Bases.reserve(CD->getNumBases()); unsigned BaseNo = 0; for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(), BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) { assert(!Base->isVirtual() && "should not have virtual bases here"); const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl(); CharUnits BaseOffset = Layout.getBaseClassOffset(BD); Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo)); } std::stable_sort(Bases.begin(), Bases.end()); for (unsigned I = 0, N = Bases.size(); I != N; ++I) { BaseInfo &Base = Bases[I]; bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl; Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase, VTableClass, Offset + Base.Offset); } } unsigned FieldNo = 0; uint64_t OffsetBits = CGM.getContext().toBits(Offset); for (RecordDecl::field_iterator Field = RD->field_begin(), FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) { // If this is a union, skip all the fields that aren't being initialized. if (RD->isUnion() && Val.getUnionField() != *Field) continue; // Don't emit anonymous bitfields, they just affect layout. if (Field->isUnnamedBitfield()) continue; // Emit the value of the initializer. const APValue &FieldValue = RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo); llvm::Constant *EltInit = CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF); assert(EltInit && "EmitConstantValue can't fail"); if (!Field->isBitField()) { // Handle non-bitfield members. AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit); } else { // Otherwise we have a bitfield. AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, cast<llvm::ConstantInt>(EltInit)); } } } llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) { RecordDecl *RD = Ty->getAs<RecordType>()->getDecl(); const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); CharUnits LayoutSizeInChars = Layout.getSize(); if (NextFieldOffsetInChars > LayoutSizeInChars) { // If the struct is bigger than the size of the record type, // we must have a flexible array member at the end. assert(RD->hasFlexibleArrayMember() && "Must have flexible array member if struct is bigger than type!"); // No tail padding is necessary. } else { // Append tail padding if necessary. 
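// [Editor's note, not in the original source] e.g. for a hypothetical `struct alignas(8) S { int i; } v = {1};`, the lone i32 element covers 4 bytes while the record size is 8; rounding 4 up by the current struct alignment (also 4) cannot reach 8, so four bytes of undef tail padding are appended, giving { i32 1, [4 x i8] undef }.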
CharUnits LLVMSizeInChars = NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment); if (LLVMSizeInChars != LayoutSizeInChars) AppendTailPadding(LayoutSizeInChars); LLVMSizeInChars = NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment); // Check if we need to convert the struct to a packed struct. if (NextFieldOffsetInChars <= LayoutSizeInChars && LLVMSizeInChars > LayoutSizeInChars) { assert(!Packed && "Size mismatch!"); ConvertStructToPacked(); assert(NextFieldOffsetInChars <= LayoutSizeInChars && "Converting to packed did not help!"); } LLVMSizeInChars = NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment); assert(LayoutSizeInChars == LLVMSizeInChars && "Tail padding mismatch!"); } // Pick the type to use. If the type is layout identical to the ConvertType // type then use it, otherwise use whatever the builder produced for us. llvm::StructType *STy = llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(), Elements, Packed); llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty); if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) { if (ValSTy->isLayoutIdentical(STy)) STy = ValSTy; } llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements); assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) == getSizeInChars(Result) && "Size mismatch!"); return Result; } llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, ConstExprEmitter *Emitter, llvm::ConstantStruct *Base, InitListExpr *Updater) { ConstStructBuilder Builder(CGM, CGF); if (!Builder.Build(Emitter, Base, Updater)) return nullptr; return Builder.Finalize(Updater->getType()); } llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, InitListExpr *ILE) { ConstStructBuilder Builder(CGM, CGF); if (!Builder.Build(ILE)) return nullptr; return Builder.Finalize(ILE->getType()); } llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF, const APValue &Val, QualType ValTy) { ConstStructBuilder Builder(CGM, CGF); const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl(); const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD); Builder.Build(Val, RD, false, CD, CharUnits::Zero()); return Builder.Finalize(ValTy); } //===----------------------------------------------------------------------===// // ConstExprEmitter //===----------------------------------------------------------------------===// /// This class only needs to handle two cases: /// 1) Literals (this is used by APValue emission to emit literals). /// 2) Arrays, structs and unions (outside C++11 mode, we don't currently /// constant fold these types). 
class ConstExprEmitter : public StmtVisitor<ConstExprEmitter, llvm::Constant*> { CodeGenModule &CGM; CodeGenFunction *CGF; llvm::LLVMContext &VMContext; public: ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf) : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) { } //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// llvm::Constant *VisitStmt(Stmt *S) { return nullptr; } llvm::Constant *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); } llvm::Constant * VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) { return Visit(PE->getReplacement()); } llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) { return Visit(GE->getResultExpr()); } llvm::Constant *VisitChooseExpr(ChooseExpr *CE) { return Visit(CE->getChosenSubExpr()); } llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { return Visit(E->getInitializer()); } // HLSL changes begin static void ExtractConstantValueElems(llvm::Constant *constVec, llvm::SmallVector<llvm::Constant*, 4> &Elems, unsigned vecSize) { if (llvm::ConstantDataVector *CDV = dyn_cast<llvm::ConstantDataVector>(constVec)) { for (unsigned c = 0; c < vecSize; c++) { Elems[c] = CDV->getElementAsConstant(c); } } else if (llvm::ConstantVector *CV = dyn_cast<llvm::ConstantVector>(constVec)) { for (unsigned c = 0; c < vecSize; c++) { Elems[c] = CV->getOperand(c); } } else { llvm::ConstantAggregateZero *CAZ = cast<llvm::ConstantAggregateZero>(constVec); for (unsigned c = 0; c < vecSize; c++) { Elems[c] = CAZ->getElementValue(c); } } } static llvm::Constant* ConvertToMatchDestType (const clang::Type *srcTy, const clang::Type *destTy, llvm::Type *srcLLVMTy, llvm::Type *destLLVMTy, llvm::Constant *C, CodeGenModule &CGM) { assert(srcTy->isFloatingType() || srcTy->isIntegerType()); assert(destTy->isFloatingType() || destTy->isIntegerType()); // Special handling for cast to boolean type if (destLLVMTy->isIntegerTy() && destLLVMTy->getScalarSizeInBits() == 1) { return C->isZeroValue() ? llvm::ConstantInt::get(destLLVMTy, 0) : llvm::ConstantInt::get(destLLVMTy, 1); } llvm::Instruction::CastOps castOp = llvm::Instruction::CastOpsEnd; if (srcLLVMTy->isFloatingPointTy() && destLLVMTy->isFloatingPointTy()) { if (srcLLVMTy->getScalarSizeInBits() > destLLVMTy->getScalarSizeInBits()) { castOp = llvm::Instruction::FPTrunc; } else { castOp = llvm::Instruction::FPExt; } } else if (srcLLVMTy->isFloatingPointTy() && destLLVMTy->isIntegerTy()) { castOp = destTy->isSignedIntegerType() ? llvm::Instruction::FPToSI : llvm::Instruction::FPToUI; } else if (srcLLVMTy->isIntegerTy() && destLLVMTy->isFloatingPointTy()) { castOp = srcTy->isSignedIntegerType() ? llvm::Instruction::SIToFP : llvm::Instruction::UIToFP; } else { // Both src and dest should be of integer type here. assert(srcLLVMTy->isIntegerTy() && destLLVMTy->isIntegerTy()); if (srcLLVMTy->getScalarSizeInBits() > destLLVMTy->getScalarSizeInBits()) { castOp = llvm::Instruction::Trunc; } else { castOp = srcTy->isSignedIntegerType() ? 
llvm::Instruction::SExt : llvm::Instruction::ZExt; } } assert(castOp != llvm::Instruction::CastOpsEnd); return llvm::ConstantExpr::getCast(castOp, C, destLLVMTy); } // HLSL changes end llvm::Constant *VisitCastExpr(CastExpr* E) { Expr *subExpr = E->getSubExpr(); llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF); if (!C) return nullptr; llvm::Type *destType = ConvertType(E->getType()); switch (E->getCastKind()) { case CK_ToUnion: { // GCC cast to union extension assert(E->getType()->isUnionType() && "Destination type is not union type!"); // Build a struct with the union sub-element as the first member, // and padded to the appropriate size SmallVector<llvm::Constant*, 2> Elts; SmallVector<llvm::Type*, 2> Types; Elts.push_back(C); Types.push_back(C->getType()); unsigned CurSize = CGM.getDataLayout().getTypeAllocSize(C->getType()); unsigned TotalSize = CGM.getDataLayout().getTypeAllocSize(destType); assert(CurSize <= TotalSize && "Union size mismatch!"); if (unsigned NumPadBytes = TotalSize - CurSize) { llvm::Type *Ty = CGM.Int8Ty; if (NumPadBytes > 1) Ty = llvm::ArrayType::get(Ty, NumPadBytes); Elts.push_back(llvm::UndefValue::get(Ty)); Types.push_back(Ty); } llvm::StructType* STy = llvm::StructType::get(C->getType()->getContext(), Types, false); return llvm::ConstantStruct::get(STy, Elts); } case CK_AddressSpaceConversion: return llvm::ConstantExpr::getAddrSpaceCast(C, destType); case CK_LValueToRValue: case CK_AtomicToNonAtomic: case CK_NonAtomicToAtomic: case CK_NoOp: case CK_ConstructorConversion: return C; case CK_Dependent: llvm_unreachable("saw dependent cast!"); case CK_BuiltinFnToFnPtr: llvm_unreachable("builtin functions are handled elsewhere"); case CK_ReinterpretMemberPointer: case CK_DerivedToBaseMemberPointer: case CK_BaseToDerivedMemberPointer: return CGM.getCXXABI().EmitMemberPointerConversion(E, C); // These will never be supported. case CK_ObjCObjectLValueCast: case CK_ARCProduceObject: case CK_ARCConsumeObject: case CK_ARCReclaimReturnedObject: case CK_ARCExtendBlockObject: case CK_CopyAndAutoreleaseBlockObject: return nullptr; // These don't need to be handled here because Evaluate knows how to // evaluate them in the cases where they can be folded. case CK_BitCast: case CK_ToVoid: case CK_Dynamic: case CK_LValueBitCast: case CK_NullToMemberPointer: case CK_UserDefinedConversion: case CK_CPointerToObjCPointerCast: case CK_BlockPointerToObjCPointerCast: case CK_AnyPointerToBlockPointerCast: case CK_ArrayToPointerDecay: case CK_FunctionToPointerDecay: case CK_BaseToDerived: case CK_DerivedToBase: case CK_UncheckedDerivedToBase: case CK_MemberPointerToBoolean: case CK_VectorSplat: case CK_FloatingRealToComplex: case CK_FloatingComplexToReal: case CK_FloatingComplexToBoolean: case CK_FloatingComplexCast: case CK_FloatingComplexToIntegralComplex: case CK_IntegralRealToComplex: case CK_IntegralComplexToReal: case CK_IntegralComplexToBoolean: case CK_IntegralComplexCast: case CK_IntegralComplexToFloatingComplex: case CK_PointerToIntegral: case CK_PointerToBoolean: case CK_NullToPointer: case CK_IntegralCast: case CK_IntegralToPointer: case CK_IntegralToBoolean: case CK_IntegralToFloating: case CK_FloatingToIntegral: case CK_FloatingToBoolean: case CK_FloatingCast: case CK_ZeroToOCLEvent: return nullptr; // HLSL Change Begins. 
case CK_HLSLCC_FloatingCast:
case CK_HLSLCC_IntegralCast:
case CK_HLSLCC_IntegralToBoolean:
case CK_HLSLCC_IntegralToFloating:
case CK_HLSLCC_FloatingToIntegral:
case CK_HLSLCC_FloatingToBoolean: {
  bool isMatrixCast = hlsl::IsHLSLMatType(E->getType()) &&
                      hlsl::IsHLSLMatType(E->getSubExpr()->getType());
  if (!isMatrixCast) {
    // Since these cast kinds have already been handled in ExprConstant.cpp,
    // we can reuse the logic there.
    return CGM.EmitConstantExpr(E, E->getType(), CGF);
  } else {
    // For casts involving matrix types, if the subexpression has already
    // been successfully evaluated to a constant, then just cast it to
    // match the destination type.
    llvm::Constant *SubExprResult = C;
    const clang::Type *srcEltType =
        hlsl::GetHLSLMatElementType(E->getSubExpr()->getType()).getCanonicalType().getTypePtr();
    const clang::Type *destEltType =
        hlsl::GetHLSLMatElementType(E->getType()).getCanonicalType().getTypePtr();
    // If the dest type is the same as the src type, then trivially
    // return the result of the subexpression evaluation.
    llvm::Type *srcEltLLVMTy =
        CGM.getTypes().ConvertType(srcEltType->getCanonicalTypeInternal());
    llvm::Type *destEltLLVMTy =
        CGM.getTypes().ConvertType(destEltType->getCanonicalTypeInternal());
    // Use the desugared llvm type for the comparison, as half and float could
    // both mean float type when the -enable-16bit-types flag is not used.
    if (srcEltLLVMTy == destEltLLVMTy) {
      return SubExprResult;
    }
    unsigned destRow, destCol;
    hlsl::GetHLSLMatRowColCount(E->getType(), destRow, destCol);
    unsigned srcRow, srcCol;
    hlsl::GetHLSLMatRowColCount(E->getSubExpr()->getType(), srcRow, srcCol);
    // Src and dest matrices must have the same dimensions.
    assert(destRow == srcRow && destCol == srcCol);
    if (llvm::ConstantStruct *srcVal = dyn_cast<llvm::ConstantStruct>(SubExprResult)) {
      llvm::ConstantArray *srcMat = cast<llvm::ConstantArray>(srcVal->getOperand(0));
      llvm::SmallVector<llvm::Constant*, 4> destRowElts;
      for (unsigned r = 0; r < srcRow; r++) {
        llvm::SmallVector<llvm::Constant*, 4> destColElts(srcCol);
        llvm::Constant *srcColVal = srcMat->getOperand(r);
        ExtractConstantValueElems(srcColVal, destColElts, srcCol);
        for (unsigned i = 0; i < srcCol; i++) {
          destColElts[i] = ConvertToMatchDestType(srcEltType, destEltType, srcEltLLVMTy,
                                                  destEltLLVMTy, destColElts[i], CGM);
        }
        llvm::Constant *destCols = llvm::ConstantVector::get(destColElts);
        destRowElts.emplace_back(destCols);
      }
      llvm::StructType *destValType = cast<llvm::StructType>(destType);
      llvm::Constant *destMat = llvm::ConstantArray::get(
          cast<llvm::ArrayType>(destValType->getElementType(0)), destRowElts);
      llvm::Constant *destVal = llvm::ConstantStruct::get(destValType, destMat);
      return destVal;
    } else if (llvm::ConstantAggregateZero *CAZ =
                   dyn_cast<llvm::ConstantAggregateZero>(SubExprResult)) {
      return llvm::Constant::getNullValue(destType);
    }
  }
  return nullptr;
}
case CK_FlatConversion:
  return nullptr;
case CK_HLSLVectorSplat: {
  unsigned vecSize = hlsl::GetHLSLVecSize(E->getType());
  return llvm::ConstantVector::getSplat(vecSize, C);
}
case CK_HLSLMatrixSplat: {
  llvm::StructType *ST =
      cast<llvm::StructType>(CGM.getTypes().ConvertType(E->getType()));
  unsigned row, col;
  hlsl::GetHLSLMatRowColCount(E->getType(), row, col);
  llvm::Constant *Row = llvm::ConstantVector::getSplat(col, C);
  std::vector<llvm::Constant *> Rows(row, Row);
  llvm::Constant *Mat = llvm::ConstantArray::get(
      cast<llvm::ArrayType>(ST->getElementType(0)), Rows);
  return llvm::ConstantStruct::get(ST, Mat);
}
case CK_HLSLVectorTruncationCast: {
  unsigned vecSize = hlsl::GetHLSLVecSize(E->getType());
  SmallVector<llvm::Constant*, 4> Elts(vecSize);
  ExtractConstantValueElems(C, Elts, vecSize);
  return llvm::ConstantVector::get(Elts);
}
case CK_HLSLVectorToScalarCast: {
  SmallVector<llvm::Constant*, 4> Elts(1);
  ExtractConstantValueElems(C, Elts, 1);
  return Elts[0];
}
case CK_HLSLMatrixToScalarCast: {
  unsigned rowCt, colCt;
  hlsl::GetHLSLMatRowColCount(E->getType(), rowCt, colCt);
  if (llvm::ConstantStruct *CS = dyn_cast<llvm::ConstantStruct>(C)) {
    llvm::ConstantArray *CA = dyn_cast<llvm::ConstantArray>(CS->getOperand(0));
    SmallVector<llvm::Constant*, 4> Elts(colCt);
    ExtractConstantValueElems(CA->getOperand(0), Elts, colCt);
    return Elts[0];
  } else if (llvm::ConstantAggregateZero *CAZ =
                 dyn_cast<llvm::ConstantAggregateZero>(C)) {
    llvm::Constant *destVal = llvm::Constant::getNullValue(destType);
    return destVal;
  }
  llvm_unreachable("Invalid cast target");
  return nullptr;
}
case CK_HLSLMatrixTruncationCast: {
  if (llvm::ConstantStruct *CS = dyn_cast<llvm::ConstantStruct>(C)) {
    unsigned rowCt, colCt;
    hlsl::GetHLSLMatRowColCount(E->getType(), rowCt, colCt);
    llvm::ConstantArray *CA = dyn_cast<llvm::ConstantArray>(CS->getOperand(0));
    SmallVector<llvm::Constant *, 4> Rows(rowCt);
    for (unsigned i = 0; i < rowCt; i++) {
      SmallVector<llvm::Constant*, 4> Elts(colCt);
      ExtractConstantValueElems(CA->getOperand(i), Elts, colCt);
      Rows[i] = llvm::ConstantVector::get(Elts);
    }
    // Create the truncated matrix.
    llvm::StructType *ST =
        cast<llvm::StructType>(CGM.getTypes().ConvertType(E->getType()));
    llvm::Constant *Mat = llvm::ConstantArray::get(
        cast<llvm::ArrayType>(ST->getElementType(0)), Rows);
    return llvm::ConstantStruct::get(ST, Mat);
  } else if (llvm::ConstantAggregateZero *CAZ =
                 dyn_cast<llvm::ConstantAggregateZero>(C)) {
    llvm::Constant *destVal = llvm::Constant::getNullValue(destType);
    return destVal;
  }
  llvm_unreachable("Invalid cast target");
  return nullptr;
}
// HLSL Change Ends.
}
llvm_unreachable("Invalid CastKind");
}

llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
  return Visit(DAE->getExpr());
}

llvm::Constant *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
  // No need for a DefaultInitExprScope: we don't handle 'this' in a
  // constant expression.
  return Visit(DIE->getExpr());
}

llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
  return Visit(E->GetTemporaryExpr());
}

llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
  if (ILE->isStringLiteralInit())
    return Visit(ILE->getInit(0));

  if (CGM.getLangOpts().HLSL)
    return nullptr; // HLSL Change - Not implemented yet.

  llvm::ArrayType *AType = cast<llvm::ArrayType>(ConvertType(ILE->getType()));
  llvm::Type *ElemTy = AType->getElementType();
  unsigned NumInitElements = ILE->getNumInits();
  unsigned NumElements = AType->getNumElements();

  // Initialising an array requires us to automatically
  // initialise any elements that have not been initialised explicitly.
  unsigned NumInitableElts = std::min(NumInitElements, NumElements);

  // Initialize remaining array elements.
  // FIXME: This doesn't handle member pointers correctly!
  llvm::Constant *fillC;
  if (Expr *filler = ILE->getArrayFiller())
    fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
  else
    fillC = llvm::Constant::getNullValue(ElemTy);
  if (!fillC)
    return nullptr;

  // Try to use a ConstantAggregateZero if we can.
  if (fillC->isNullValue() && !NumInitableElts)
    return llvm::ConstantAggregateZero::get(AType);

  // Copy initializer elements.
std::vector<llvm::Constant*> Elts; Elts.reserve(NumInitableElts + NumElements); bool RewriteType = false; for (unsigned i = 0; i < NumInitableElts; ++i) { Expr *Init = ILE->getInit(i); llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF); if (!C) return nullptr; RewriteType |= (C->getType() != ElemTy); Elts.push_back(C); } RewriteType |= (fillC->getType() != ElemTy); Elts.resize(NumElements, fillC); if (RewriteType) { // FIXME: Try to avoid packing the array std::vector<llvm::Type*> Types; Types.reserve(NumInitableElts + NumElements); for (unsigned i = 0, e = Elts.size(); i < e; ++i) Types.push_back(Elts[i]->getType()); llvm::StructType *SType = llvm::StructType::get(AType->getContext(), Types, true); return llvm::ConstantStruct::get(SType, Elts); } return llvm::ConstantArray::get(AType, Elts); } llvm::Constant *EmitRecordInitialization(InitListExpr *ILE) { return ConstStructBuilder::BuildStruct(CGM, CGF, ILE); } llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) { return CGM.EmitNullConstant(E->getType()); } llvm::Constant *VisitInitListExpr(InitListExpr *ILE) { // HLSL Change Begins. if (CGM.getLangOpts().HLSL) return CGM.getHLSLRuntime().EmitHLSLConstInitListExpr(CGM, ILE); // HLSL Change Ends. if (ILE->getType()->isArrayType()) return EmitArrayInitialization(ILE); if (ILE->getType()->isRecordType()) return EmitRecordInitialization(ILE); return nullptr; } llvm::Constant *EmitDesignatedInitUpdater(llvm::Constant *Base, InitListExpr *Updater) { QualType ExprType = Updater->getType(); if (ExprType->isArrayType()) { llvm::ArrayType *AType = cast<llvm::ArrayType>(ConvertType(ExprType)); llvm::Type *ElemType = AType->getElementType(); unsigned NumInitElements = Updater->getNumInits(); unsigned NumElements = AType->getNumElements(); std::vector<llvm::Constant *> Elts; Elts.reserve(NumElements); if (llvm::ConstantDataArray *DataArray = dyn_cast<llvm::ConstantDataArray>(Base)) for (unsigned i = 0; i != NumElements; ++i) Elts.push_back(DataArray->getElementAsConstant(i)); else if (llvm::ConstantArray *Array = dyn_cast<llvm::ConstantArray>(Base)) for (unsigned i = 0; i != NumElements; ++i) Elts.push_back(Array->getOperand(i)); else return nullptr; // FIXME: other array types not implemented llvm::Constant *fillC = nullptr; if (Expr *filler = Updater->getArrayFiller()) if (!isa<NoInitExpr>(filler)) fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF); bool RewriteType = (fillC && fillC->getType() != ElemType); for (unsigned i = 0; i != NumElements; ++i) { Expr *Init = nullptr; if (i < NumInitElements) Init = Updater->getInit(i); if (!Init && fillC) Elts[i] = fillC; else if (!Init || isa<NoInitExpr>(Init)) ; // Do nothing. 
else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init)) Elts[i] = EmitDesignatedInitUpdater(Elts[i], ChildILE); else Elts[i] = CGM.EmitConstantExpr(Init, Init->getType(), CGF); if (!Elts[i]) return nullptr; RewriteType |= (Elts[i]->getType() != ElemType); } if (RewriteType) { std::vector<llvm::Type *> Types; Types.reserve(NumElements); for (unsigned i = 0; i != NumElements; ++i) Types.push_back(Elts[i]->getType()); llvm::StructType *SType = llvm::StructType::get(AType->getContext(), Types, true); return llvm::ConstantStruct::get(SType, Elts); } return llvm::ConstantArray::get(AType, Elts); } if (ExprType->isRecordType()) return ConstStructBuilder::BuildStruct(CGM, CGF, this, dyn_cast<llvm::ConstantStruct>(Base), Updater); return nullptr; } llvm::Constant *VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) { return EmitDesignatedInitUpdater( CGM.EmitConstantExpr(E->getBase(), E->getType(), CGF), E->getUpdater()); } llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) { if (!E->getConstructor()->isTrivial()) return nullptr; QualType Ty = E->getType(); // FIXME: We should not have to call getBaseElementType here. const RecordType *RT = CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>(); const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); // If the class doesn't have a trivial destructor, we can't emit it as a // constant expr. if (!RD->hasTrivialDestructor()) return nullptr; // Only copy and default constructors can be trivial. if (E->getNumArgs()) { assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument"); assert(E->getConstructor()->isCopyOrMoveConstructor() && "trivial ctor has argument but isn't a copy/move ctor"); Expr *Arg = E->getArg(0); assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) && "argument to copy ctor is of wrong type"); return Visit(Arg); } return CGM.EmitNullConstant(Ty); } llvm::Constant *VisitStringLiteral(StringLiteral *E) { return CGM.GetConstantArrayFromStringLiteral(E); } llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) { // This must be an @encode initializing an array in a static initializer. // Don't emit it as the address of the string, emit the string data itself // as an inline array. std::string Str; CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str); QualType T = E->getType(); if (T->getTypeClass() == Type::TypeOfExpr) T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType(); const ConstantArrayType *CAT = cast<ConstantArrayType>(T); // Resize the string to the right size, adding zeros at the end, or // truncating as needed. Str.resize(CAT->getSize().getZExtValue(), '\0'); return llvm::ConstantDataArray::getString(VMContext, Str, false); } llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) { return Visit(E->getSubExpr()); } // Utility methods llvm::Type *ConvertType(QualType T) { return CGM.getTypes().ConvertType(T); } public: llvm::Constant *EmitLValue(APValue::LValueBase LVBase) { if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) { if (Decl->hasAttr<WeakRefAttr>()) return CGM.GetWeakRefReference(Decl); if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl)) return CGM.GetAddrOfFunction(FD); if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) { // We can never refer to a variable with local storage. 
if (!VD->hasLocalStorage()) { if (VD->isFileVarDecl() || VD->hasExternalStorage()) return CGM.GetAddrOfGlobalVar(VD); else if (VD->isLocalVarDecl()) return CGM.getOrCreateStaticVarDecl( *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false)); } } return nullptr; } Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>()); switch (E->getStmtClass()) { default: break; case Expr::CompoundLiteralExprClass: { // Note that due to the nature of compound literals, this is guaranteed // to be the only use of the variable, so we just generate it here. CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E); llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(), CLE->getType(), CGF); // FIXME: "Leaked" on failure. if (C) C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), E->getType().isConstant(CGM.getContext()), llvm::GlobalValue::InternalLinkage, C, ".compoundliteral", nullptr, llvm::GlobalVariable::NotThreadLocal, CGM.getContext().getTargetAddressSpace(E->getType())); return C; } case Expr::StringLiteralClass: return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E)); case Expr::ObjCEncodeExprClass: return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E)); case Expr::ObjCStringLiteralClass: { ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E); llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(SL->getString()); return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType())); } case Expr::PredefinedExprClass: { unsigned Type = cast<PredefinedExpr>(E)->getIdentType(); if (CGF) { LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E)); return cast<llvm::Constant>(Res.getAddress()); } else if (Type == PredefinedExpr::PrettyFunction) { return CGM.GetAddrOfConstantCString("top level", ".tmp"); } return CGM.GetAddrOfConstantCString("", ".tmp"); } case Expr::AddrLabelExprClass: { assert(CGF && "Invalid address of label expression outside function."); llvm::Constant *Ptr = CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel()); return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType())); } case Expr::CallExprClass: { CallExpr* CE = cast<CallExpr>(E); unsigned builtin = CE->getBuiltinCallee(); if (builtin != Builtin::BI__builtin___CFStringMakeConstantString && builtin != Builtin::BI__builtin___NSStringMakeConstantString) break; const Expr *Arg = CE->getArg(0)->IgnoreParenCasts(); const StringLiteral *Literal = cast<StringLiteral>(Arg); if (builtin == Builtin::BI__builtin___NSStringMakeConstantString) { return CGM.getObjCRuntime().GenerateConstantString(Literal); } // FIXME: need to deal with UCN conversion issues. 
return CGM.GetAddrOfConstantCFString(Literal); } case Expr::BlockExprClass: { std::string FunctionName; if (CGF) FunctionName = CGF->CurFn->getName(); else FunctionName = "global"; return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str()); } case Expr::CXXTypeidExprClass: { CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E); QualType T; if (Typeid->isTypeOperand()) T = Typeid->getTypeOperand(CGM.getContext()); else T = Typeid->getExprOperand()->getType(); return CGM.GetAddrOfRTTIDescriptor(T); } case Expr::CXXUuidofExprClass: { return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E)); } case Expr::MaterializeTemporaryExprClass: { MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E); assert(MTE->getStorageDuration() == SD_Static); SmallVector<const Expr *, 2> CommaLHSs; SmallVector<SubobjectAdjustment, 2> Adjustments; const Expr *Inner = MTE->GetTemporaryExpr() ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments); return CGM.GetAddrOfGlobalTemporary(MTE, Inner); } } return nullptr; } }; } // end anonymous namespace. bool ConstStructBuilder::Build(ConstExprEmitter *Emitter, llvm::ConstantStruct *Base, InitListExpr *Updater) { assert(Base && "base expression should not be empty"); QualType ExprType = Updater->getType(); RecordDecl *RD = ExprType->getAs<RecordType>()->getDecl(); const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD); const llvm::StructLayout *BaseLayout = CGM.getDataLayout().getStructLayout( Base->getType()); unsigned FieldNo = -1; unsigned ElementNo = 0; for (FieldDecl *Field : RD->fields()) { ++FieldNo; if (RD->isUnion() && Updater->getInitializedFieldInUnion() != Field) continue; // Skip anonymous bitfields. if (Field->isUnnamedBitfield()) continue; llvm::Constant *EltInit = Base->getOperand(ElementNo); // Bail out if the type of the ConstantStruct does not have the same layout // as the type of the InitListExpr. if (CGM.getTypes().ConvertType(Field->getType()) != EltInit->getType() || Layout.getFieldOffset(ElementNo) != BaseLayout->getElementOffsetInBits(ElementNo)) return false; // Get the initializer. If we encounter an empty field or a NoInitExpr, // we use values from the base expression. Expr *Init = nullptr; if (ElementNo < Updater->getNumInits()) Init = Updater->getInit(ElementNo); if (!Init || isa<NoInitExpr>(Init)) ; // Do nothing. else if (InitListExpr *ChildILE = dyn_cast<InitListExpr>(Init)) EltInit = Emitter->EmitDesignatedInitUpdater(EltInit, ChildILE); else EltInit = CGM.EmitConstantExpr(Init, Field->getType(), CGF); ++ElementNo; if (!EltInit) return false; if (!Field->isBitField()) AppendField(Field, Layout.getFieldOffset(FieldNo), EltInit); else if (llvm::ConstantInt *CI = dyn_cast<llvm::ConstantInt>(EltInit)) AppendBitField(Field, Layout.getFieldOffset(FieldNo), CI); else // Initializing a bitfield with a non-trivial constant? return false; } return true; } llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D, CodeGenFunction *CGF) { // Make a quick check if variable can be default NULL initialized // and avoid going through rest of code which may do, for c++11, // initialization of memory to all NULLs. 
if (!D.hasLocalStorage()) { QualType Ty = D.getType(); if (Ty->isArrayType()) Ty = Context.getBaseElementType(Ty); if (Ty->isRecordType()) if (const CXXConstructExpr *E = dyn_cast_or_null<CXXConstructExpr>(D.getInit())) { const CXXConstructorDecl *CD = E->getConstructor(); if (CD->isTrivial() && CD->isDefaultConstructor()) return EmitNullConstant(D.getType()); } } // HLSL Change Begin - External variable is in cbuffer, cannot use as immediate. if (D.hasExternalFormalLinkage() && !isa<EnumConstantDecl>(&D)) return nullptr; // HLSL Change End. if (const APValue *Value = D.evaluateValue()) return EmitConstantValueForMemory(*Value, D.getType(), CGF); // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a // reference is a constant expression, and the reference binds to a temporary, // then constant initialization is performed. ConstExprEmitter will // incorrectly emit a prvalue constant in this case, and the calling code // interprets that as the (pointer) value of the reference, rather than the // desired value of the referee. if (D.getType()->isReferenceType()) return nullptr; const Expr *E = D.getInit(); assert(E && "No initializer to emit"); llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E)); if (C && C->getType()->getScalarType()->isIntegerTy(1)) { // HLSL Change llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); C = llvm::ConstantExpr::getZExt(C, BoolTy); } return C; } llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, QualType DestType, CodeGenFunction *CGF) { Expr::EvalResult Result; bool Success = false; if (DestType->isReferenceType()) Success = E->EvaluateAsLValue(Result, Context); else Success = E->EvaluateAsRValue(Result, Context); llvm::Constant *C = nullptr; if (Success && !Result.HasSideEffects) C = EmitConstantValue(Result.Val, DestType, CGF); else C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E)); if (C && C->getType()->getScalarType()->isIntegerTy(1)) { // HLSL Change llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType()); C = llvm::ConstantExpr::getZExt(C, BoolTy); } return C; } llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value, QualType DestType, CodeGenFunction *CGF) { // For an _Atomic-qualified constant, we may need to add tail padding. if (auto *AT = DestType->getAs<AtomicType>()) { QualType InnerType = AT->getValueType(); auto *Inner = EmitConstantValue(Value, InnerType, CGF); uint64_t InnerSize = Context.getTypeSize(InnerType); uint64_t OuterSize = Context.getTypeSize(DestType); if (InnerSize == OuterSize) return Inner; assert(InnerSize < OuterSize && "emitted over-large constant for atomic"); llvm::Constant *Elts[] = { Inner, llvm::ConstantAggregateZero::get( llvm::ArrayType::get(Int8Ty, (OuterSize - InnerSize) / 8)) }; return llvm::ConstantStruct::getAnon(Elts); } switch (Value.getKind()) { case APValue::Uninitialized: llvm_unreachable("Constant expressions should be initialized."); case APValue::LValue: { llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType); llvm::Constant *Offset = llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity()); llvm::Constant *C; if (APValue::LValueBase LVBase = Value.getLValueBase()) { // An array can be represented as an lvalue referring to the base. 
if (isa<llvm::ArrayType>(DestTy)) { assert(Offset->isNullValue() && "offset on array initializer"); return ConstExprEmitter(*this, CGF).Visit( const_cast<Expr*>(LVBase.get<const Expr*>())); } C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase); // Apply offset if necessary. if (!Offset->isNullValue()) { unsigned AS = C->getType()->getPointerAddressSpace(); llvm::Type *CharPtrTy = Int8Ty->getPointerTo(AS); llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, CharPtrTy); Casted = llvm::ConstantExpr::getGetElementPtr(Int8Ty, Casted, Offset); C = llvm::ConstantExpr::getPointerCast(Casted, C->getType()); } // Convert to the appropriate type; this could be an lvalue for // an integer. if (isa<llvm::PointerType>(DestTy)) return llvm::ConstantExpr::getPointerCast(C, DestTy); return llvm::ConstantExpr::getPtrToInt(C, DestTy); } else { C = Offset; // Convert to the appropriate type; this could be an lvalue for // an integer. if (isa<llvm::PointerType>(DestTy)) return llvm::ConstantExpr::getIntToPtr(C, DestTy); // If the types don't match this should only be a truncate. if (C->getType() != DestTy) return llvm::ConstantExpr::getTrunc(C, DestTy); return C; } } case APValue::Int: return llvm::ConstantInt::get(VMContext, Value.getInt()); case APValue::ComplexInt: { llvm::Constant *Complex[2]; Complex[0] = llvm::ConstantInt::get(VMContext, Value.getComplexIntReal()); Complex[1] = llvm::ConstantInt::get(VMContext, Value.getComplexIntImag()); // FIXME: the target may want to specify that this is packed. llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(), Complex[1]->getType(), nullptr); return llvm::ConstantStruct::get(STy, Complex); } case APValue::Float: { const llvm::APFloat &Init = Value.getFloat(); // OACR error 6287 #pragma prefast(disable: __WARNING_REDUNDANTTEST, "language options are constants, by design") if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf && !Context.getLangOpts().NativeHalfType && !Context.getLangOpts().HalfArgsAndReturns) return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt()); else return llvm::ConstantFP::get(VMContext, Init); } case APValue::ComplexFloat: { llvm::Constant *Complex[2]; Complex[0] = llvm::ConstantFP::get(VMContext, Value.getComplexFloatReal()); Complex[1] = llvm::ConstantFP::get(VMContext, Value.getComplexFloatImag()); // FIXME: the target may want to specify that this is packed. 
llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(), Complex[1]->getType(), nullptr); return llvm::ConstantStruct::get(STy, Complex); } case APValue::Vector: { SmallVector<llvm::Constant *, 4> Inits; unsigned NumElts = Value.getVectorLength(); for (unsigned i = 0; i != NumElts; ++i) { const APValue &Elt = Value.getVectorElt(i); if (Elt.isInt()) Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt())); else Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat())); } return llvm::ConstantVector::get(Inits); } case APValue::AddrLabelDiff: { const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS(); const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS(); llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF); llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF); // Compute difference llvm::Type *ResultType = getTypes().ConvertType(DestType); LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy); RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy); llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS); // LLVM is a bit sensitive about the exact format of the // address-of-label difference; make sure to truncate after // the subtraction. return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType); } case APValue::Struct: case APValue::Union: return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType); case APValue::Array: { const ArrayType *CAT = Context.getAsArrayType(DestType); unsigned NumElements = Value.getArraySize(); unsigned NumInitElts = Value.getArrayInitializedElts(); // Emit array filler, if there is one. llvm::Constant *Filler = nullptr; if (Value.hasArrayFiller()) Filler = EmitConstantValueForMemory(Value.getArrayFiller(), CAT->getElementType(), CGF); // Emit initializer elements. llvm::Type *CommonElementType = getTypes().ConvertType(CAT->getElementType()); // Try to use a ConstantAggregateZero if we can. 
if (Filler && Filler->isNullValue() && !NumInitElts) { llvm::ArrayType *AType = llvm::ArrayType::get(CommonElementType, NumElements); return llvm::ConstantAggregateZero::get(AType); } std::vector<llvm::Constant*> Elts; Elts.reserve(NumElements); for (unsigned I = 0; I < NumElements; ++I) { llvm::Constant *C = Filler; if (I < NumInitElts) C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I), CAT->getElementType(), CGF); else assert(Filler && "Missing filler for implicit elements of initializer"); if (I == 0) CommonElementType = C->getType(); else if (C->getType() != CommonElementType) CommonElementType = nullptr; Elts.push_back(C); } if (!CommonElementType) { // FIXME: Try to avoid packing the array std::vector<llvm::Type*> Types; Types.reserve(NumElements); for (unsigned i = 0, e = Elts.size(); i < e; ++i) Types.push_back(Elts[i]->getType()); llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true); return llvm::ConstantStruct::get(SType, Elts); } llvm::ArrayType *AType = llvm::ArrayType::get(CommonElementType, NumElements); return llvm::ConstantArray::get(AType, Elts); } case APValue::MemberPointer: return getCXXABI().EmitMemberPointer(Value, DestType); } llvm_unreachable("Unknown APValue kind"); } llvm::Constant * CodeGenModule::EmitConstantValueForMemory(const APValue &Value, QualType DestType, CodeGenFunction *CGF) { llvm::Constant *C = EmitConstantValue(Value, DestType, CGF); if (C->getType()->getScalarType()->isIntegerTy(1)) { // HLSL Change llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType); C = llvm::ConstantExpr::getZExt(C, BoolTy); } return C; } llvm::Constant * CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) { assert(E->isFileScope() && "not a file-scope compound literal expr"); return ConstExprEmitter(*this, nullptr).EmitLValue(E); } llvm::Constant * CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) { // Member pointer constants always have a very particular form. const MemberPointerType *type = cast<MemberPointerType>(uo->getType()); const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl(); // A member function pointer. if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl)) return getCXXABI().EmitMemberFunctionPointer(method); // Otherwise, a member data pointer. uint64_t fieldOffset = getContext().getFieldOffset(decl); CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset); return getCXXABI().EmitMemberDataPointer(type, chars); } static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM, llvm::Type *baseType, const CXXRecordDecl *base); static llvm::Constant *EmitNullConstant(CodeGenModule &CGM, const CXXRecordDecl *record, bool asCompleteObject) { const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record); llvm::StructType *structure = (asCompleteObject ? layout.getLLVMType() : layout.getBaseSubobjectLLVMType()); unsigned numElements = structure->getNumElements(); std::vector<llvm::Constant *> elements(numElements); // Fill in all the bases. for (const auto &I : record->bases()) { if (I.isVirtual()) { // Ignore virtual bases; if we're laying out for a complete // object, we'll lay these out later. continue; } const CXXRecordDecl *base = cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); // Ignore empty bases. 
if (base->isEmpty()) continue; unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base); llvm::Type *baseType = structure->getElementType(fieldIndex); elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base); } // Fill in all the fields. for (const auto *Field : record->fields()) { // Fill in non-bitfields. (Bitfields always use a zero pattern, which we // will fill in later.) if (!Field->isBitField()) { unsigned fieldIndex = layout.getLLVMFieldNo(Field); elements[fieldIndex] = CGM.EmitNullConstant(Field->getType()); } // For unions, stop after the first named field. if (record->isUnion()) { if (Field->getIdentifier()) break; if (const auto *FieldRD = dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl())) if (FieldRD->findFirstNamedDataMember()) break; } } // Fill in the virtual bases, if we're working with the complete object. if (asCompleteObject) { for (const auto &I : record->vbases()) { const CXXRecordDecl *base = cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl()); // Ignore empty bases. if (base->isEmpty()) continue; unsigned fieldIndex = layout.getVirtualBaseIndex(base); // We might have already laid this field out. if (elements[fieldIndex]) continue; llvm::Type *baseType = structure->getElementType(fieldIndex); elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base); } } // Now go through all other fields and zero them out. for (unsigned i = 0; i != numElements; ++i) { if (!elements[i]) elements[i] = llvm::Constant::getNullValue(structure->getElementType(i)); } return llvm::ConstantStruct::get(structure, elements); } /// Emit the null constant for a base subobject. static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM, llvm::Type *baseType, const CXXRecordDecl *base) { const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base); // Just zero out bases that don't have any pointer to data members. if (baseLayout.isZeroInitializableAsBase()) return llvm::Constant::getNullValue(baseType); // Otherwise, we can just use its null constant. return EmitNullConstant(CGM, base, /*asCompleteObject=*/false); } llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) { if (getTypes().isZeroInitializable(T)) return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T)); if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) { llvm::ArrayType *ATy = cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T)); QualType ElementTy = CAT->getElementType(); llvm::Constant *Element = EmitNullConstant(ElementTy); unsigned NumElements = CAT->getSize().getZExtValue(); SmallVector<llvm::Constant *, 8> Array(NumElements, Element); return llvm::ConstantArray::get(ATy, Array); } if (const RecordType *RT = T->getAs<RecordType>()) { const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl()); return ::EmitNullConstant(*this, RD, /*complete object*/ true); } assert(T->isMemberDataPointerType() && "Should only see pointers to data members here!"); return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>()); } llvm::Constant * CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) { return ::EmitNullConstant(*this, Record, false); }
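// --- Editor's illustrative sketch (not part of the original file) ---
// EmitNullConstant above cannot simply return zeroinitializer for every
// record: a pointer to a data member is "null" with the bit pattern -1 under
// the Itanium C++ ABI, which is why getTypes().isZeroInitializable(T) is
// consulted first and non-zero-initializable fields are built out explicitly.
// A minimal standalone demonstration; the struct S below is hypothetical,
// chosen only to exercise the member-pointer case:
#include <cstdio>
#include <cstring>

struct S {
  int S::*Member; // a null pointer-to-data-member is encoded as -1, not 0
};

int main() {
  S Val{nullptr};
  long long Bits = 0;
  std::memcpy(&Bits, &Val.Member, sizeof Val.Member);
  // On LP64 Itanium-ABI targets this prints -1. A zero bit pattern would be
  // a valid pointer to the member at offset 0, not a null member pointer,
  // hence the field-by-field construction in ::EmitNullConstant above.
  std::printf("null member pointer bits: %lld\n", Bits);
  return 0;
}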
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGBlocks.h
//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This is the internal state used for llvm translation for block literals. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H #define LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H #include "CGBuilder.h" #include "CGCall.h" #include "CGValue.h" #include "CodeGenFunction.h" #include "CodeGenTypes.h" #include "clang/AST/CharUnits.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/Type.h" #include "clang/Basic/TargetInfo.h" #include "llvm/IR/Module.h" namespace llvm { class Module; class Constant; class Function; class GlobalValue; class DataLayout; class FunctionType; class PointerType; class Value; class LLVMContext; } namespace clang { namespace CodeGen { class CodeGenModule; class CGBlockInfo; // Flags stored in __block variables. enum BlockByrefFlags { BLOCK_BYREF_HAS_COPY_DISPOSE = (1 << 25), // compiler BLOCK_BYREF_LAYOUT_MASK = (0xF << 28), // compiler BLOCK_BYREF_LAYOUT_EXTENDED = (1 << 28), BLOCK_BYREF_LAYOUT_NON_OBJECT = (2 << 28), BLOCK_BYREF_LAYOUT_STRONG = (3 << 28), BLOCK_BYREF_LAYOUT_WEAK = (4 << 28), BLOCK_BYREF_LAYOUT_UNRETAINED = (5 << 28) }; enum BlockLiteralFlags { BLOCK_HAS_COPY_DISPOSE = (1 << 25), BLOCK_HAS_CXX_OBJ = (1 << 26), BLOCK_IS_GLOBAL = (1 << 28), BLOCK_USE_STRET = (1 << 29), BLOCK_HAS_SIGNATURE = (1 << 30), BLOCK_HAS_EXTENDED_LAYOUT = (1 << 31) }; class BlockFlags { uint32_t flags; public: BlockFlags(uint32_t flags) : flags(flags) {} BlockFlags() : flags(0) {} BlockFlags(BlockLiteralFlags flag) : flags(flag) {} BlockFlags(BlockByrefFlags flag) : flags(flag) {} uint32_t getBitMask() const { return flags; } bool empty() const { return flags == 0; } friend BlockFlags operator|(BlockFlags l, BlockFlags r) { return BlockFlags(l.flags | r.flags); } friend BlockFlags &operator|=(BlockFlags &l, BlockFlags r) { l.flags |= r.flags; return l; } friend bool operator&(BlockFlags l, BlockFlags r) { return (l.flags & r.flags); } bool operator==(BlockFlags r) { return (flags == r.flags); } }; inline BlockFlags operator|(BlockLiteralFlags l, BlockLiteralFlags r) { return BlockFlags(l) | BlockFlags(r); } enum BlockFieldFlag_t { BLOCK_FIELD_IS_OBJECT = 0x03, /* id, NSObject, __attribute__((NSObject)), block, ... */ BLOCK_FIELD_IS_BLOCK = 0x07, /* a block variable */ BLOCK_FIELD_IS_BYREF = 0x08, /* the on stack structure holding the __block variable */ BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy helpers */ BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose support routines */ BLOCK_BYREF_CURRENT_MAX = 256 }; class BlockFieldFlags { uint32_t flags; BlockFieldFlags(uint32_t flags) : flags(flags) {} public: BlockFieldFlags() : flags(0) {} BlockFieldFlags(BlockFieldFlag_t flag) : flags(flag) {} uint32_t getBitMask() const { return flags; } bool empty() const { return flags == 0; } /// Answers whether the flags indicate that this field is an object /// or block pointer that requires _Block_object_assign/dispose. 
bool isSpecialPointer() const { return flags & BLOCK_FIELD_IS_OBJECT; }

  friend BlockFieldFlags operator|(BlockFieldFlags l, BlockFieldFlags r) {
    return BlockFieldFlags(l.flags | r.flags);
  }
  friend BlockFieldFlags &operator|=(BlockFieldFlags &l, BlockFieldFlags r) {
    l.flags |= r.flags;
    return l;
  }
  friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
    return (l.flags & r.flags);
  }
};

inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
  return BlockFieldFlags(l) | BlockFieldFlags(r);
}

/// CGBlockInfo - Information to generate a block literal.
class CGBlockInfo {
public:
  /// Name - The name of the block, kind of.
  StringRef Name;

  /// The field index of 'this' within the block, if there is one.
  unsigned CXXThisIndex;

  class Capture {
    uintptr_t Data;
    EHScopeStack::stable_iterator Cleanup;

  public:
    bool isIndex() const { return (Data & 1) != 0; }
    bool isConstant() const { return !isIndex(); }
    unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
    llvm::Value *getConstant() const {
      assert(isConstant());
      return reinterpret_cast<llvm::Value*>(Data);
    }
    EHScopeStack::stable_iterator getCleanup() const {
      assert(isIndex());
      return Cleanup;
    }
    void setCleanup(EHScopeStack::stable_iterator cleanup) {
      assert(isIndex());
      Cleanup = cleanup;
    }

    static Capture makeIndex(unsigned index) {
      Capture v;
      v.Data = (index << 1) | 1;
      return v;
    }

    static Capture makeConstant(llvm::Value *value) {
      Capture v;
      v.Data = reinterpret_cast<uintptr_t>(value);
      return v;
    }
  };

  /// CanBeGlobal - True if the block can be global, i.e. it has
  /// no non-constant captures.
  bool CanBeGlobal : 1;

  /// True if the block needs a custom copy or dispose function.
  bool NeedsCopyDispose : 1;

  /// HasCXXObject - True if the block's custom copy/dispose functions
  /// need to be run even in GC mode.
  bool HasCXXObject : 1;

  /// UsesStret : True if the block uses an stret return. Mutable
  /// because it gets set later in the block-creation process.
  mutable bool UsesStret : 1;

  /// HasCapturedVariableLayout : True if the block has captured variables
  /// and their layout meta-data has been generated.
  bool HasCapturedVariableLayout : 1;

  /// The mapping of allocated indexes within the block.
  llvm::DenseMap<const VarDecl*, Capture> Captures;

  llvm::AllocaInst *Address;
  llvm::StructType *StructureType;
  const BlockDecl *Block;
  const BlockExpr *BlockExpression;
  CharUnits BlockSize;
  CharUnits BlockAlign;

  // Offset of the gap caused by the block header having a smaller
  // alignment than the alignment of the block descriptor. This
  // is the gap offset before the first captured field.
  CharUnits BlockHeaderForcedGapOffset;
  // Gap size caused by aligning the first field after the block header.
  // This could be zero if no forced alignment is required.
  CharUnits BlockHeaderForcedGapSize;

  /// An instruction which dominates the full-expression that the
  /// block is inside.
  llvm::Instruction *DominatingIP;

  /// The next block in the block-info chain. Invalid if this block
  /// info is not part of the CGF's block-info chain, which is true
  /// if it corresponds to a global block or a block whose expression
  /// has been encountered.
CGBlockInfo *NextBlockInfo; const Capture &getCapture(const VarDecl *var) const { return const_cast<CGBlockInfo*>(this)->getCapture(var); } Capture &getCapture(const VarDecl *var) { llvm::DenseMap<const VarDecl*, Capture>::iterator it = Captures.find(var); assert(it != Captures.end() && "no entry for variable!"); return it->second; } const BlockDecl *getBlockDecl() const { return Block; } const BlockExpr *getBlockExpr() const { assert(BlockExpression); assert(BlockExpression->getBlockDecl() == Block); return BlockExpression; } CGBlockInfo(const BlockDecl *blockDecl, StringRef Name); }; } // end namespace CodeGen } // end namespace clang #endif
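// --- Editor's illustrative sketch (not part of the original header) ---
// CGBlockInfo::Capture above packs two alternatives into a single uintptr_t:
// a capture index (the value shifted left one bit, low bit set) or a pointer
// to a constant llvm::Value (raw pointer bits, low bit clear -- valid because
// the pointee is at least 2-byte aligned). A minimal standalone version of
// that tagging scheme, with void* standing in for llvm::Value*:
#include <cassert>
#include <cstdint>

class TaggedCapture {
  uintptr_t Data;

public:
  static TaggedCapture makeIndex(unsigned Index) {
    TaggedCapture V;
    V.Data = (uintptr_t(Index) << 1) | 1; // low bit 1 => index
    return V;
  }
  static TaggedCapture makeConstant(void *Value) {
    TaggedCapture V;
    V.Data = reinterpret_cast<uintptr_t>(Value); // low bit 0 => pointer
    return V;
  }
  bool isIndex() const { return (Data & 1) != 0; }
  unsigned getIndex() const {
    assert(isIndex());
    return unsigned(Data >> 1);
  }
  void *getConstant() const {
    assert(!isIndex());
    return reinterpret_cast<void *>(Data);
  }
};

int main() {
  int Dummy = 0;
  assert(TaggedCapture::makeIndex(7).getIndex() == 7);
  assert(TaggedCapture::makeConstant(&Dummy).getConstant() == &Dummy);
  return 0;
}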
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGObjCRuntime.h
//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides an abstract class for Objective-C code generation. Concrete
// subclasses of this implement code generation for specific Objective-C
// runtime libraries.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H

#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/IdentifierTable.h" // Selector

namespace llvm {
  class Constant;
  class Function;
  class Module;
  class StructLayout;
  class StructType;
  class Type;
  class Value;
}

namespace clang {
namespace CodeGen {
  class CodeGenFunction;
}

  class FieldDecl;
  class ObjCAtTryStmt;
  class ObjCAtThrowStmt;
  class ObjCAtSynchronizedStmt;
  class ObjCContainerDecl;
  class ObjCCategoryImplDecl;
  class ObjCImplementationDecl;
  class ObjCInterfaceDecl;
  class ObjCMessageExpr;
  class ObjCMethodDecl;
  class ObjCProtocolDecl;
  class Selector;
  class ObjCIvarDecl;
  class ObjCStringLiteral;
  class BlockDeclRefExpr;

namespace CodeGen {
  class CodeGenModule;
  class CGBlockInfo;

// FIXME: Several methods should be pure virtual but aren't, to avoid
// breaking partially-implemented subclasses.

/// Implements runtime-specific code generation functions.
class CGObjCRuntime {
protected:
  CodeGen::CodeGenModule &CGM;
  CGObjCRuntime(CodeGen::CodeGenModule &CGM) : CGM(CGM) {}

  // Utility functions for unified ivar access. These need to
  // eventually be folded into other places (the structure layout
  // code).

  /// Compute an offset to the given ivar, suitable for passing to
  /// EmitValueForIvarAtOffset. Note that the correct handling of
  /// bit-fields is carefully coordinated by these two, use caution!
  ///
  /// The latter overload is suitable for computing the offset of a
  /// synthesized ivar.
  uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                 const ObjCInterfaceDecl *OID,
                                 const ObjCIvarDecl *Ivar);
  uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                 const ObjCImplementationDecl *OID,
                                 const ObjCIvarDecl *Ivar);

  LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                  const ObjCInterfaceDecl *OID,
                                  llvm::Value *BaseValue,
                                  const ObjCIvarDecl *Ivar,
                                  unsigned CVRQualifiers,
                                  llvm::Value *Offset);

  /// Emits a try / catch statement. This function is intended to be called by
  /// subclasses, and provides a generic mechanism for generating these, which
  /// should be usable by all runtimes. The caller must provide the functions
  /// to call when entering and exiting a \@catch() block, and the function
  /// used to rethrow exceptions. If the begin and end catch functions are
  /// NULL, then the function assumes that the EH personality function provides
  /// the thrown object directly.
  void EmitTryCatchStmt(CodeGenFunction &CGF,
                        const ObjCAtTryStmt &S,
                        llvm::Constant *beginCatchFn,
                        llvm::Constant *endCatchFn,
                        llvm::Constant *exceptionRethrowFn);

  /// Emits an \@synchronize() statement, using the \p syncEnterFn and
  /// \p syncExitFn arguments as the functions called to lock and unlock
  /// the object. This function can be called by subclasses that use
  /// zero-cost exception handling.
void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
                              const ObjCAtSynchronizedStmt &S,
                              llvm::Function *syncEnterFn,
                              llvm::Function *syncExitFn);

public:
  virtual ~CGObjCRuntime();

  /// Generate the function required to register all Objective-C components in
  /// this compilation unit with the runtime library.
  virtual llvm::Function *ModuleInitFunction() = 0;

  /// Get a selector for the specified name and type values. The
  /// return value should have the LLVM type for pointer-to
  /// ASTContext::getObjCSelType().
  virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
                                   Selector Sel, bool lval=false) = 0;

  /// Get a typed selector.
  virtual llvm::Value *GetSelector(CodeGenFunction &CGF,
                                   const ObjCMethodDecl *Method) = 0;

  /// Get the type constant to catch for the given ObjC pointer type.
  /// This is used externally to implement catching ObjC types in C++.
  /// Runtimes which don't support this should add the appropriate
  /// error to Sema.
  virtual llvm::Constant *GetEHType(QualType T) = 0;

  /// Generate a constant string object.
  virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;

  /// Generate a category. A category contains a list of methods (and
  /// accompanying metadata) and a list of protocols.
  virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;

  /// Generate a class structure for this class.
  virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;

  /// Register a class alias.
  virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) = 0;

  /// Generate an Objective-C message send operation.
  ///
  /// \param Method - The method being called; this may be null if synthesizing
  /// a property setter or getter.
  virtual CodeGen::RValue
  GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
                      ReturnValueSlot ReturnSlot,
                      QualType ResultType,
                      Selector Sel,
                      llvm::Value *Receiver,
                      const CallArgList &CallArgs,
                      const ObjCInterfaceDecl *Class = nullptr,
                      const ObjCMethodDecl *Method = nullptr) = 0;

  /// Generate an Objective-C message send operation to the super
  /// class initiated in a method for Class and with the given Self
  /// object.
  ///
  /// \param Method - The method being called; this may be null if synthesizing
  /// a property setter or getter.
  virtual CodeGen::RValue
  GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                           ReturnValueSlot ReturnSlot,
                           QualType ResultType,
                           Selector Sel,
                           const ObjCInterfaceDecl *Class,
                           bool isCategoryImpl,
                           llvm::Value *Self,
                           bool IsClassMessage,
                           const CallArgList &CallArgs,
                           const ObjCMethodDecl *Method = nullptr) = 0;

  /// Emit the code to return the named protocol as an object, as in a
  /// \@protocol expression.
  virtual llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
                                           const ObjCProtocolDecl *OPD) = 0;

  /// Generate the named protocol. Protocols contain method metadata but no
  /// implementations.
  virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;

  /// Generate a function preamble for a method with the specified
  /// types.

  // FIXME: Currently this just generates the Function definition, but really
  // this should also be generating the loads of the parameters, as the runtime
  // should have full control over how parameters are passed.
  virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
                                         const ObjCContainerDecl *CD) = 0;

  /// Return the runtime function for getting properties.
  virtual llvm::Constant *GetPropertyGetFunction() = 0;

  /// Return the runtime function for setting properties.
  virtual llvm::Constant *GetPropertySetFunction() = 0;

  /// Return the runtime function for optimized setting properties.
virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic, bool copy) = 0; // API for atomic copying of qualified aggregates in getter. virtual llvm::Constant *GetGetStructFunction() = 0; // API for atomic copying of qualified aggregates in setter. virtual llvm::Constant *GetSetStructFunction() = 0; /// API for atomic copying of qualified aggregates with non-trivial copy /// assignment (c++) in setter. virtual llvm::Constant *GetCppAtomicObjectSetFunction() = 0; /// API for atomic copying of qualified aggregates with non-trivial copy /// assignment (c++) in getter. virtual llvm::Constant *GetCppAtomicObjectGetFunction() = 0; /// GetClass - Return a reference to the class for the given /// interface decl. virtual llvm::Value *GetClass(CodeGenFunction &CGF, const ObjCInterfaceDecl *OID) = 0; virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) { llvm_unreachable("autoreleasepool unsupported in this ABI"); } /// EnumerationMutationFunction - Return the function that's called by the /// compiler when a mutation is detected during foreach iteration. virtual llvm::Constant *EnumerationMutationFunction() = 0; virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtSynchronizedStmt &S) = 0; virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtTryStmt &S) = 0; virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF, const ObjCAtThrowStmt &S, bool ClearInsertionPoint=true) = 0; virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF, llvm::Value *AddrWeakObj) = 0; virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest) = 0; virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest, bool threadlocal=false) = 0; virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest, llvm::Value *ivarOffset) = 0; virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF, llvm::Value *src, llvm::Value *dest) = 0; virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) = 0; virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) = 0; virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, llvm::Value *Size) = 0; virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM, const CodeGen::CGBlockInfo &blockInfo) = 0; virtual llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM, const CodeGen::CGBlockInfo &blockInfo) = 0; virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM, QualType T) = 0; virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name, bool Weak = false) = 0; struct MessageSendInfo { const CGFunctionInfo &CallInfo; llvm::PointerType *MessengerType; MessageSendInfo(const CGFunctionInfo &callInfo, llvm::PointerType *messengerType) : CallInfo(callInfo), MessengerType(messengerType) {} }; MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method, QualType resultType, CallArgList &callArgs); // FIXME: This probably shouldn't be here, but the code to compute // it is here. unsigned ComputeBitfieldBitOffset(CodeGen::CodeGenModule &CGM, const ObjCInterfaceDecl *ID, const ObjCIvarDecl *Ivar); }; /// Creates an instance of an Objective-C runtime class. 
// TODO: This should include some way of selecting which runtime to target. CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM); CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM); } } #endif
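// Illustrative sketch (not part of this header): the TODO above concerns how a
// CGObjCRuntime implementation gets chosen. In clang's CodeGen the dispatch is
// driven by LangOptions::ObjCRuntime; a minimal version of that selection,
// assuming a CodeGenModule `CGM` with the usual accessors, looks roughly like:
//
//   CGObjCRuntime *createObjCRuntime(CodeGenModule &CGM) {
//     switch (CGM.getLangOpts().ObjCRuntime.getKind()) {
//     case ObjCRuntime::GNUstep:       // GNU-family runtimes map to the
//     case ObjCRuntime::GCC:           // implementation in CGObjCGNU.cpp.
//     case ObjCRuntime::ObjFW:
//       return CreateGNUObjCRuntime(CGM);
//     case ObjCRuntime::FragileMacOSX: // Apple runtimes map to CGObjCMac.cpp.
//     case ObjCRuntime::MacOSX:
//     case ObjCRuntime::iOS:
//       return CreateMacObjCRuntime(CGM);
//     }
//     llvm_unreachable("bad ObjC runtime kind");
//   }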
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGCUDANV.cpp
//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This provides a class for CUDA code generation targeting the NVIDIA CUDA // runtime library. // //===----------------------------------------------------------------------===// #include "CGCUDARuntime.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "clang/AST/Decl.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" using namespace clang; using namespace CodeGen; namespace { class CGNVCUDARuntime : public CGCUDARuntime { private: llvm::Type *IntTy, *SizeTy, *VoidTy; llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy; /// Convenience reference to LLVM Context llvm::LLVMContext &Context; /// Convenience reference to the current module llvm::Module &TheModule; /// Keeps track of kernel launch stubs emitted in this module llvm::SmallVector<llvm::Function *, 16> EmittedKernels; /// Keeps track of variables containing handles of GPU binaries. Populated by /// ModuleCtorFunction() and used to create corresponding cleanup calls in /// ModuleDtorFunction() llvm::SmallVector<llvm::GlobalVariable *, 16> GpuBinaryHandles; llvm::Constant *getSetupArgumentFn() const; llvm::Constant *getLaunchFn() const; /// Creates a function to register all kernel stubs generated in this module. llvm::Function *makeRegisterKernelsFn(); /// Helper function that generates a constant string and returns a pointer to /// the start of the string. The result of this function can be used anywhere /// where the C code specifies const char*. 
llvm::Constant *makeConstantString(const std::string &Str, const std::string &Name = "", unsigned Alignment = 0) { llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0), llvm::ConstantInt::get(SizeTy, 0)}; auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str()); return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), ConstStr, Zeros); } void emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args); public: CGNVCUDARuntime(CodeGenModule &CGM); void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override; /// Creates module constructor function llvm::Function *makeModuleCtorFunction() override; /// Creates module destructor function llvm::Function *makeModuleDtorFunction() override; }; } CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM) : CGCUDARuntime(CGM), Context(CGM.getLLVMContext()), TheModule(CGM.getModule()) { CodeGen::CodeGenTypes &Types = CGM.getTypes(); ASTContext &Ctx = CGM.getContext(); IntTy = Types.ConvertType(Ctx.IntTy); SizeTy = Types.ConvertType(Ctx.getSizeType()); VoidTy = llvm::Type::getVoidTy(Context); CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy)); VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy)); VoidPtrPtrTy = VoidPtrTy->getPointerTo(); } llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const { // cudaError_t cudaSetupArgument(void *, size_t, size_t) std::vector<llvm::Type*> Params; Params.push_back(VoidPtrTy); Params.push_back(SizeTy); Params.push_back(SizeTy); return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy, Params, false), "cudaSetupArgument"); } llvm::Constant *CGNVCUDARuntime::getLaunchFn() const { // cudaError_t cudaLaunch(char *) return CGM.CreateRuntimeFunction( llvm::FunctionType::get(IntTy, CharPtrTy, false), "cudaLaunch"); } void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) { EmittedKernels.push_back(CGF.CurFn); emitDeviceStubBody(CGF, Args); } void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args) { // Build the argument value list and the argument stack struct type. 
SmallVector<llvm::Value *, 16> ArgValues; std::vector<llvm::Type *> ArgTypes; for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end(); I != E; ++I) { llvm::Value *V = CGF.GetAddrOfLocalVar(*I); ArgValues.push_back(V); assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType"); ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType()); } llvm::StructType *ArgStackTy = llvm::StructType::get(Context, ArgTypes); llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end"); // Emit the calls to cudaSetupArgument llvm::Constant *cudaSetupArgFn = getSetupArgumentFn(); for (unsigned I = 0, E = Args.size(); I != E; ++I) { llvm::Value *Args[3]; llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next"); Args[0] = CGF.Builder.CreatePointerCast(ArgValues[I], VoidPtrTy); Args[1] = CGF.Builder.CreateIntCast( llvm::ConstantExpr::getSizeOf(ArgTypes[I]), SizeTy, false); Args[2] = CGF.Builder.CreateIntCast( llvm::ConstantExpr::getOffsetOf(ArgStackTy, I), SizeTy, false); llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args); llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0); llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero); CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock); CGF.EmitBlock(NextBlock); } // Emit the call to cudaLaunch llvm::Constant *cudaLaunchFn = getLaunchFn(); llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy); CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg); CGF.EmitBranch(EndBlock); CGF.EmitBlock(EndBlock); } /// Creates internal function to register all kernel stubs generated in this /// module with the CUDA runtime. /// \code /// void __cuda_register_kernels(void** GpuBinaryHandle) { /// __cudaRegisterFunction(GpuBinaryHandle,Kernel0,...); /// ... /// __cudaRegisterFunction(GpuBinaryHandle,KernelM,...); /// } /// \endcode llvm::Function *CGNVCUDARuntime::makeRegisterKernelsFn() { llvm::Function *RegisterKernelsFunc = llvm::Function::Create( llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false), llvm::GlobalValue::InternalLinkage, "__cuda_register_kernels", &TheModule); llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create(Context, "entry", RegisterKernelsFunc); CGBuilderTy Builder(Context); Builder.SetInsertPoint(EntryBB); // void __cudaRegisterFunction(void **, const char *, char *, const char *, // int, uint3*, uint3*, dim3*, dim3*, int*) std::vector<llvm::Type *> RegisterFuncParams = { VoidPtrPtrTy, CharPtrTy, CharPtrTy, CharPtrTy, IntTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, VoidPtrTy, IntTy->getPointerTo()}; llvm::Constant *RegisterFunc = CGM.CreateRuntimeFunction( llvm::FunctionType::get(IntTy, RegisterFuncParams, false), "__cudaRegisterFunction"); // Extract GpuBinaryHandle passed as the first argument passed to // __cuda_register_kernels() and generate __cudaRegisterFunction() call for // each emitted kernel. 
llvm::Argument &GpuBinaryHandlePtr = *RegisterKernelsFunc->arg_begin(); for (llvm::Function *Kernel : EmittedKernels) { llvm::Constant *KernelName = makeConstantString(Kernel->getName()); llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy); llvm::Value *args[] = { &GpuBinaryHandlePtr, Builder.CreateBitCast(Kernel, VoidPtrTy), KernelName, KernelName, llvm::ConstantInt::get(IntTy, -1), NullPtr, NullPtr, NullPtr, NullPtr, llvm::ConstantPointerNull::get(IntTy->getPointerTo())}; Builder.CreateCall(RegisterFunc, args); } Builder.CreateRetVoid(); return RegisterKernelsFunc; } /// Creates a global constructor function for the module: /// \code /// void __cuda_module_ctor(void*) { /// Handle0 = __cudaRegisterFatBinary(GpuBinaryBlob0); /// __cuda_register_kernels(Handle0); /// ... /// HandleN = __cudaRegisterFatBinary(GpuBinaryBlobN); /// __cuda_register_kernels(HandleN); /// } /// \endcode llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() { // void __cuda_register_kernels(void* handle); llvm::Function *RegisterKernelsFunc = makeRegisterKernelsFn(); // void ** __cudaRegisterFatBinary(void *); llvm::Constant *RegisterFatbinFunc = CGM.CreateRuntimeFunction( llvm::FunctionType::get(VoidPtrPtrTy, VoidPtrTy, false), "__cudaRegisterFatBinary"); // struct { int magic, int version, void * gpu_binary, void * dont_care }; llvm::StructType *FatbinWrapperTy = llvm::StructType::get(IntTy, IntTy, VoidPtrTy, VoidPtrTy, nullptr); llvm::Function *ModuleCtorFunc = llvm::Function::Create( llvm::FunctionType::get(VoidTy, VoidPtrTy, false), llvm::GlobalValue::InternalLinkage, "__cuda_module_ctor", &TheModule); llvm::BasicBlock *CtorEntryBB = llvm::BasicBlock::Create(Context, "entry", ModuleCtorFunc); CGBuilderTy CtorBuilder(Context); CtorBuilder.SetInsertPoint(CtorEntryBB); // For each GPU binary, register it with the CUDA runtime and store returned // handle in a global variable and save the handle in GpuBinaryHandles vector // to be cleaned up in destructor on exit. Then associate all known kernels // with the GPU binary handle so CUDA runtime can figure out what to call on // the GPU side. for (const std::string &GpuBinaryFileName : CGM.getCodeGenOpts().CudaGpuBinaryFileNames) { llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> GpuBinaryOrErr = llvm::MemoryBuffer::getFileOrSTDIN(GpuBinaryFileName); if (std::error_code EC = GpuBinaryOrErr.getError()) { CGM.getDiags().Report(diag::err_cannot_open_file) << GpuBinaryFileName << EC.message(); continue; } // Create initialized wrapper structure that points to the loaded GPU binary llvm::Constant *Values[] = { llvm::ConstantInt::get(IntTy, 0x466243b1), // Fatbin wrapper magic. llvm::ConstantInt::get(IntTy, 1), // Fatbin version. makeConstantString(GpuBinaryOrErr.get()->getBuffer(), "", 16), // Data. llvm::ConstantPointerNull::get(VoidPtrTy)}; // Unused in fatbin v1. 
llvm::GlobalVariable *FatbinWrapper = new llvm::GlobalVariable( TheModule, FatbinWrapperTy, true, llvm::GlobalValue::InternalLinkage, llvm::ConstantStruct::get(FatbinWrapperTy, Values), "__cuda_fatbin_wrapper"); // GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper); llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall( RegisterFatbinFunc, CtorBuilder.CreateBitCast(FatbinWrapper, VoidPtrTy)); llvm::GlobalVariable *GpuBinaryHandle = new llvm::GlobalVariable( TheModule, VoidPtrPtrTy, false, llvm::GlobalValue::InternalLinkage, llvm::ConstantPointerNull::get(VoidPtrPtrTy), "__cuda_gpubin_handle"); CtorBuilder.CreateStore(RegisterFatbinCall, GpuBinaryHandle, false); // Call __cuda_register_kernels(GpuBinaryHandle); CtorBuilder.CreateCall(RegisterKernelsFunc, RegisterFatbinCall); // Save GpuBinaryHandle so we can unregister it in destructor. GpuBinaryHandles.push_back(GpuBinaryHandle); } CtorBuilder.CreateRetVoid(); return ModuleCtorFunc; } /// Creates a global destructor function that unregisters all GPU code blobs /// registered by constructor. /// \code /// void __cuda_module_dtor(void*) { /// __cudaUnregisterFatBinary(Handle0); /// ... /// __cudaUnregisterFatBinary(HandleN); /// } /// \endcode llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() { // void __cudaUnregisterFatBinary(void ** handle); llvm::Constant *UnregisterFatbinFunc = CGM.CreateRuntimeFunction( llvm::FunctionType::get(VoidTy, VoidPtrPtrTy, false), "__cudaUnregisterFatBinary"); llvm::Function *ModuleDtorFunc = llvm::Function::Create( llvm::FunctionType::get(VoidTy, VoidPtrTy, false), llvm::GlobalValue::InternalLinkage, "__cuda_module_dtor", &TheModule); llvm::BasicBlock *DtorEntryBB = llvm::BasicBlock::Create(Context, "entry", ModuleDtorFunc); CGBuilderTy DtorBuilder(Context); DtorBuilder.SetInsertPoint(DtorEntryBB); for (llvm::GlobalVariable *GpuBinaryHandle : GpuBinaryHandles) { DtorBuilder.CreateCall(UnregisterFatbinFunc, DtorBuilder.CreateLoad(GpuBinaryHandle, false)); } DtorBuilder.CreateRetVoid(); return ModuleDtorFunc; } CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) { return new CGNVCUDARuntime(CGM); }
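// Illustrative sketch (not part of this file): for a kernel declared as
//   __global__ void axpy(float a, float *x);
// the host-side stub that emitDeviceStubBody produces behaves like the
// following hand-written code (the Args struct name is invented here; the real
// output is IR, with one setup.next block per argument):
//
//   void axpy(float a, float *x) {          // host stub with the kernel's name
//     struct Args { float a; float *x; };   // corresponds to ArgStackTy
//     if (cudaSetupArgument(&a, sizeof(a), offsetof(Args, a)) != 0)
//       return;                             // branch to setup.end on failure
//     if (cudaSetupArgument(&x, sizeof(x), offsetof(Args, x)) != 0)
//       return;
//     cudaLaunch((char *)&axpy);            // key is the stub's own address,
//   }                                       // registered by the module ctor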
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/SanitizerMetadata.cpp
//===--- SanitizerMetadata.cpp - Blacklist for sanitizers -----------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Class which emits metadata consumed by sanitizer instrumentation passes. // //===----------------------------------------------------------------------===// #include "SanitizerMetadata.h" #include "CodeGenModule.h" #include "clang/AST/Type.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/Constants.h" // // /////////////////////////////////////////////////////////////////////////////// using namespace clang; using namespace CodeGen; SanitizerMetadata::SanitizerMetadata(CodeGenModule &CGM) : CGM(CGM) {} void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc, StringRef Name, QualType Ty, bool IsDynInit, bool IsBlacklisted) { if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress)) return; IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init"); IsBlacklisted |= CGM.isInSanitizerBlacklist(GV, Loc, Ty); llvm::Metadata *LocDescr = nullptr; llvm::Metadata *GlobalName = nullptr; llvm::LLVMContext &VMContext = CGM.getLLVMContext(); if (!IsBlacklisted) { // Don't generate source location and global name if it is blacklisted - // it won't be instrumented anyway. LocDescr = getLocationMetadata(Loc); if (!Name.empty()) GlobalName = llvm::MDString::get(VMContext, Name); } llvm::Metadata *GlobalMetadata[] = { llvm::ConstantAsMetadata::get(GV), LocDescr, GlobalName, llvm::ConstantAsMetadata::get( llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit)), llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( llvm::Type::getInt1Ty(VMContext), IsBlacklisted))}; llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata); llvm::NamedMDNode *AsanGlobals = CGM.getModule().getOrInsertNamedMetadata("llvm.asan.globals"); AsanGlobals->addOperand(ThisGlobal); } void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV, const VarDecl &D, bool IsDynInit) { if (!CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress)) return; std::string QualName; llvm::raw_string_ostream OS(QualName); D.printQualifiedName(OS); reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit); } void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) { // For now, just make sure the global is not modified by the ASan // instrumentation. 
if (CGM.getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress)) reportGlobalToASan(GV, SourceLocation(), "", QualType(), false, true); } void SanitizerMetadata::disableSanitizerForInstruction(llvm::Instruction *I) { I->setMetadata(CGM.getModule().getMDKindID("nosanitize"), llvm::MDNode::get(CGM.getLLVMContext(), None)); } llvm::MDNode *SanitizerMetadata::getLocationMetadata(SourceLocation Loc) { PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc); if (!PLoc.isValid()) return nullptr; llvm::LLVMContext &VMContext = CGM.getLLVMContext(); llvm::Metadata *LocMetadata[] = { llvm::MDString::get(VMContext, PLoc.getFilename()), llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( llvm::Type::getInt32Ty(VMContext), PLoc.getLine())), llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( llvm::Type::getInt32Ty(VMContext), PLoc.getColumn())), }; return llvm::MDNode::get(VMContext, LocMetadata); }
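// Illustrative sketch (not part of this file): the metadata built by
// reportGlobalToASan above surfaces in the module as a named MD node that the
// ASan instrumentation pass reads. For a global such as
//   int data[10];   // defined at t.cpp:1:5
// the emitted IR looks roughly like:
//
//   !llvm.asan.globals = !{!0}
//   !0 = !{[10 x i32]* @data, !1, !"data", i1 false, i1 false}
//   !1 = !{!"t.cpp", i32 1, i32 5}
//
// where the operands match the GlobalMetadata array: the global itself, the
// source location from getLocationMetadata, the printed name, IsDynInit, and
// IsBlacklisted.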
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGLoopInfo.cpp
//===---- CGLoopInfo.cpp - LLVM CodeGen for loop metadata -*- C++ -*-------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "CGLoopInfo.h" #include "clang/AST/Attr.h" #include "clang/Sema/LoopHint.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constants.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Metadata.h" using namespace clang::CodeGen; using namespace llvm; static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) { if (!Attrs.IsParallel && Attrs.VectorizerWidth == 0 && Attrs.VectorizerUnroll == 0 && Attrs.HlslUnrollPolicy == LoopAttributes::HlslAllowUnroll && // HLSL Change Attrs.HlslUnrollCount == 0 && // HLSL Change Attrs.VectorizerEnable == LoopAttributes::VecUnspecified) return nullptr; SmallVector<Metadata *, 4> Args; // Reserve operand 0 for loop id self reference. auto TempNode = MDNode::getTemporary(Ctx, None); Args.push_back(TempNode.get()); // Setting vectorizer.width if (Attrs.VectorizerWidth > 0) { Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.width"), ConstantAsMetadata::get(ConstantInt::get( Type::getInt32Ty(Ctx), Attrs.VectorizerWidth))}; Args.push_back(MDNode::get(Ctx, Vals)); } // Setting vectorizer.unroll if (Attrs.VectorizerUnroll > 0) { Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.interleave.count"), ConstantAsMetadata::get(ConstantInt::get( Type::getInt32Ty(Ctx), Attrs.VectorizerUnroll))}; Args.push_back(MDNode::get(Ctx, Vals)); } // Setting vectorizer.enable if (Attrs.VectorizerEnable != LoopAttributes::VecUnspecified) { Metadata *Vals[] = { MDString::get(Ctx, "llvm.loop.vectorize.enable"), ConstantAsMetadata::get(ConstantInt::get( Type::getInt1Ty(Ctx), (Attrs.VectorizerEnable == LoopAttributes::VecEnable)))}; Args.push_back(MDNode::get(Ctx, Vals)); } // HLSL Change Begins. if (Attrs.HlslUnrollPolicy == LoopAttributes::HlslDisableUnroll) { // Disable unroll. SmallVector<Metadata *, 1> DisableOperands; DisableOperands.push_back(MDString::get(Ctx, "llvm.loop.unroll.disable")); MDNode *DisableNode = MDNode::get(Ctx, DisableOperands); Args.push_back(DisableNode); } else if (Attrs.HlslUnrollPolicy == LoopAttributes::HlslForceUnroll) { if (Attrs.HlslUnrollCount == 0) { // Full unroll. SmallVector<Metadata *, 1> FullOperands; FullOperands.push_back(MDString::get(Ctx, "llvm.loop.unroll.full")); MDNode *FullNode = MDNode::get(Ctx, FullOperands); Args.push_back(FullNode); } else { Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.unroll.count"), ConstantAsMetadata::get(ConstantInt::get( Type::getInt32Ty(Ctx), Attrs.HlslUnrollCount))}; Args.push_back(MDNode::get(Ctx, Vals)); } } // HLSL Change Ends. // Set the first operand to itself. 
MDNode *LoopID = MDNode::get(Ctx, Args); LoopID->replaceOperandWith(0, LoopID); return LoopID; } LoopAttributes::LoopAttributes(bool IsParallel) : IsParallel(IsParallel), VectorizerEnable(LoopAttributes::VecUnspecified), VectorizerWidth(0), VectorizerUnroll(0), HlslUnrollPolicy(LoopAttributes::HlslAllowUnroll), HlslUnrollCount(0) {} // HLSL Change void LoopAttributes::clear() { IsParallel = false; VectorizerWidth = 0; VectorizerUnroll = 0; VectorizerEnable = LoopAttributes::VecUnspecified; HlslUnrollPolicy = LoopAttributes::HlslAllowUnroll; // HLSL Change HlslUnrollCount = 0; // HLSL Change } LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs) : LoopID(nullptr), Header(Header), Attrs(Attrs) { LoopID = createMetadata(Header->getContext(), Attrs); } void LoopInfoStack::push(BasicBlock *Header, ArrayRef<const clang::Attr *> Attrs) { for (const auto *Attr : Attrs) { const LoopHintAttr *LH = dyn_cast<LoopHintAttr>(Attr); // HLSL Change Begins if (dyn_cast<HLSLLoopAttr>(Attr)) { setHlslLoop(); } else if (const HLSLUnrollAttr *UnrollAttr = dyn_cast<HLSLUnrollAttr>(Attr)) { unsigned count = UnrollAttr->getCount(); setHlslUnroll(count); } // HLSL Change Ends // Skip non loop hint attributes if (!LH) continue; LoopHintAttr::OptionType Option = LH->getOption(); LoopHintAttr::LoopHintState State = LH->getState(); switch (Option) { case LoopHintAttr::Vectorize: case LoopHintAttr::Interleave: if (State == LoopHintAttr::AssumeSafety) { // Apply "llvm.mem.parallel_loop_access" metadata to load/stores. setParallel(true); } break; case LoopHintAttr::VectorizeWidth: case LoopHintAttr::InterleaveCount: case LoopHintAttr::Unroll: case LoopHintAttr::UnrollCount: // Nothing to do here for these loop hints. break; } } Active.push_back(LoopInfo(Header, StagedAttrs)); // Clear the attributes so nested loops do not inherit them. StagedAttrs.clear(); } void LoopInfoStack::pop() { assert(!Active.empty() && "No active loops to pop"); Active.pop_back(); } void LoopInfoStack::InsertHelper(Instruction *I) const { if (!hasInfo()) return; const LoopInfo &L = getInfo(); if (!L.getLoopID()) return; if (TerminatorInst *TI = dyn_cast<TerminatorInst>(I)) { for (unsigned i = 0, ie = TI->getNumSuccessors(); i < ie; ++i) if (TI->getSuccessor(i) == L.getHeader()) { TI->setMetadata("llvm.loop", L.getLoopID()); break; } return; } if (L.getAttributes().IsParallel && I->mayReadOrWriteMemory()) I->setMetadata("llvm.mem.parallel_loop_access", L.getLoopID()); }
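// Illustrative sketch (not part of this file): for HLSL source such as
//   [unroll(4)] for (int i = 0; i < n; ++i) { ... }
// push() records HlslForceUnroll with HlslUnrollCount == 4, and createMetadata
// attaches a self-referential loop ID to the back-edge branch, roughly:
//
//   br i1 %cond, label %for.body, label %for.end, !llvm.loop !0
//   !0 = distinct !{!0, !1}
//   !1 = !{!"llvm.loop.unroll.count", i32 4}
//
// A bare [unroll] yields llvm.loop.unroll.full instead, and [loop] yields
// llvm.loop.unroll.disable, matching the three HlslUnrollPolicy branches above.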
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
//===--- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen --------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This is the code that manages TBAA information and defines the TBAA policy // for the optimizer to use. Relevant standards text includes: // // C99 6.5p7 // C++ [basic.lval] (p10 in n3126, p15 in some earlier versions) // //===----------------------------------------------------------------------===// #include "CodeGenTBAA.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Attr.h" #include "clang/AST/Mangle.h" #include "clang/AST/RecordLayout.h" #include "clang/Frontend/CodeGenOptions.h" #include "llvm/ADT/SmallSet.h" #include "llvm/IR/Constants.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Type.h" using namespace clang; using namespace CodeGen; CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext, const CodeGenOptions &CGO, const LangOptions &Features, MangleContext &MContext) : Context(Ctx), CodeGenOpts(CGO), Features(Features), MContext(MContext), MDHelper(VMContext), Root(nullptr), Char(nullptr) { } CodeGenTBAA::~CodeGenTBAA() { } llvm::MDNode *CodeGenTBAA::getRoot() { // Define the root of the tree. This identifies the tree, so that // if our LLVM IR is linked with LLVM IR from a different front-end // (or a different version of this front-end), their TBAA trees will // remain distinct, and the optimizer will treat them conservatively. if (!Root) Root = MDHelper.createTBAARoot("Simple C/C++ TBAA"); return Root; } // For both scalar TBAA and struct-path aware TBAA, the scalar type has the // same format: name, parent node, and offset. llvm::MDNode *CodeGenTBAA::createTBAAScalarType(StringRef Name, llvm::MDNode *Parent) { return MDHelper.createTBAAScalarTypeNode(Name, Parent); } llvm::MDNode *CodeGenTBAA::getChar() { // Define the root of the tree for user-accessible memory. C and C++ // give special powers to char and certain similar types. However, // these special powers only cover user-accessible memory, and don't // include things like vtables. if (!Char) Char = createTBAAScalarType("omnipotent char", getRoot()); return Char; } static bool TypeHasMayAlias(QualType QTy) { // Tagged types have declarations, and therefore may have attributes. if (const TagType *TTy = dyn_cast<TagType>(QTy)) return TTy->getDecl()->hasAttr<MayAliasAttr>(); // Typedef types have declarations, and therefore may have attributes. if (const TypedefType *TTy = dyn_cast<TypedefType>(QTy)) { if (TTy->getDecl()->hasAttr<MayAliasAttr>()) return true; // Also, their underlying types may have relevant attributes. return TypeHasMayAlias(TTy->desugar()); } return false; } llvm::MDNode * CodeGenTBAA::getTBAAInfo(QualType QTy) { // At -O0 or relaxed aliasing, TBAA is not emitted for regular types. if (CodeGenOpts.OptimizationLevel == 0 || CodeGenOpts.RelaxedAliasing) return nullptr; // If the type has the may_alias attribute (even on a typedef), it is // effectively in the general char alias class. if (TypeHasMayAlias(QTy)) return getChar(); const Type *Ty = Context.getCanonicalType(QTy).getTypePtr(); if (llvm::MDNode *N = MetadataCache[Ty]) return N; // Handle builtin types. if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) { switch (BTy->getKind()) { // Character types are special and can alias anything.
// In C++, this technically only includes "char" and "unsigned char", // and not "signed char". In C, it includes all three. For now, // the risk of exploiting this detail in C++ seems likely to outweigh // the benefit. case BuiltinType::Char_U: case BuiltinType::Char_S: case BuiltinType::UChar: case BuiltinType::SChar: return getChar(); // Unsigned types can alias their corresponding signed types. case BuiltinType::UShort: return getTBAAInfo(Context.ShortTy); case BuiltinType::UInt: return getTBAAInfo(Context.IntTy); case BuiltinType::ULong: return getTBAAInfo(Context.LongTy); case BuiltinType::ULongLong: return getTBAAInfo(Context.LongLongTy); case BuiltinType::UInt128: return getTBAAInfo(Context.Int128Ty); // Treat all other builtin types as distinct types. This includes // treating wchar_t, char16_t, and char32_t as distinct from their // "underlying types". default: return MetadataCache[Ty] = createTBAAScalarType(BTy->getName(Features), getChar()); } } // Handle pointers. // TODO: Implement C++'s type "similarity" and consider dis-"similar" // pointers distinct. if (Ty->isPointerType()) return MetadataCache[Ty] = createTBAAScalarType("any pointer", getChar()); // Enum types are distinct types. In C++ they have "underlying types", // however they aren't related for TBAA. if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) { // In C++ mode, types have linkage, so we can rely on the ODR and // on their mangled names, if they're external. // TODO: Is there a way to get a program-wide unique name for a // decl with local linkage or no linkage? if (!Features.CPlusPlus || !ETy->getDecl()->isExternallyVisible()) return MetadataCache[Ty] = getChar(); SmallString<256> OutName; llvm::raw_svector_ostream Out(OutName); MContext.mangleTypeName(QualType(ETy, 0), Out); Out.flush(); return MetadataCache[Ty] = createTBAAScalarType(OutName, getChar()); } // For now, handle any other kind of type conservatively. return MetadataCache[Ty] = getChar(); } llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() { return createTBAAScalarType("vtable pointer", getRoot()); } bool CodeGenTBAA::CollectFields(uint64_t BaseOffset, QualType QTy, SmallVectorImpl<llvm::MDBuilder::TBAAStructField> & Fields, bool MayAlias) { /* Things not handled yet include: C++ base classes, bitfields, */ if (const RecordType *TTy = QTy->getAs<RecordType>()) { const RecordDecl *RD = TTy->getDecl()->getDefinition(); if (RD->hasFlexibleArrayMember()) return false; // TODO: Handle C++ base classes. if (const CXXRecordDecl *Decl = dyn_cast<CXXRecordDecl>(RD)) if (Decl->bases_begin() != Decl->bases_end()) return false; const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); unsigned idx = 0; for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); i != e; ++i, ++idx) { uint64_t Offset = BaseOffset + Layout.getFieldOffset(idx) / Context.getCharWidth(); QualType FieldQTy = i->getType(); if (!CollectFields(Offset, FieldQTy, Fields, MayAlias || TypeHasMayAlias(FieldQTy))) return false; } return true; } /* Otherwise, treat whatever it is as a field. */ uint64_t Offset = BaseOffset; uint64_t Size = Context.getTypeSizeInChars(QTy).getQuantity(); llvm::MDNode *TBAAInfo = MayAlias ? 
getChar() : getTBAAInfo(QTy); llvm::MDNode *TBAATag = getTBAAScalarTagInfo(TBAAInfo); Fields.push_back(llvm::MDBuilder::TBAAStructField(Offset, Size, TBAATag)); return true; } llvm::MDNode * CodeGenTBAA::getTBAAStructInfo(QualType QTy) { const Type *Ty = Context.getCanonicalType(QTy).getTypePtr(); if (llvm::MDNode *N = StructMetadataCache[Ty]) return N; SmallVector<llvm::MDBuilder::TBAAStructField, 4> Fields; if (CollectFields(0, QTy, Fields, TypeHasMayAlias(QTy))) return MDHelper.createTBAAStructNode(Fields); // For now, handle any other kind of type conservatively. return StructMetadataCache[Ty] = nullptr; } /// Check if the given type can be handled by path-aware TBAA. static bool isTBAAPathStruct(QualType QTy) { if (const RecordType *TTy = QTy->getAs<RecordType>()) { const RecordDecl *RD = TTy->getDecl()->getDefinition(); if (RD->hasFlexibleArrayMember()) return false; // RD can be struct, union, class, interface or enum. // For now, we only handle struct and class. if (RD->isStruct() || RD->isClass()) return true; } return false; } llvm::MDNode * CodeGenTBAA::getTBAAStructTypeInfo(QualType QTy) { const Type *Ty = Context.getCanonicalType(QTy).getTypePtr(); assert(isTBAAPathStruct(QTy)); if (llvm::MDNode *N = StructTypeMetadataCache[Ty]) return N; if (const RecordType *TTy = QTy->getAs<RecordType>()) { const RecordDecl *RD = TTy->getDecl()->getDefinition(); const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD); SmallVector <std::pair<llvm::MDNode*, uint64_t>, 4> Fields; unsigned idx = 0; for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end(); i != e; ++i, ++idx) { QualType FieldQTy = i->getType(); llvm::MDNode *FieldNode; if (isTBAAPathStruct(FieldQTy)) FieldNode = getTBAAStructTypeInfo(FieldQTy); else FieldNode = getTBAAInfo(FieldQTy); if (!FieldNode) return StructTypeMetadataCache[Ty] = nullptr; Fields.push_back(std::make_pair( FieldNode, Layout.getFieldOffset(idx) / Context.getCharWidth())); } SmallString<256> OutName; if (Features.CPlusPlus) { // Don't use the mangler for C code. llvm::raw_svector_ostream Out(OutName); MContext.mangleTypeName(QualType(Ty, 0), Out); Out.flush(); } else { OutName = RD->getName(); } // Create the struct type node with a vector of pairs (offset, type). return StructTypeMetadataCache[Ty] = MDHelper.createTBAAStructTypeNode(OutName, Fields); } return StructMetadataCache[Ty] = nullptr; } /// Return a TBAA tag node for both scalar TBAA and struct-path aware TBAA. llvm::MDNode * CodeGenTBAA::getTBAAStructTagInfo(QualType BaseQTy, llvm::MDNode *AccessNode, uint64_t Offset) { if (!AccessNode) return nullptr; if (!CodeGenOpts.StructPathTBAA) return getTBAAScalarTagInfo(AccessNode); const Type *BTy = Context.getCanonicalType(BaseQTy).getTypePtr(); TBAAPathTag PathTag = TBAAPathTag(BTy, AccessNode, Offset); if (llvm::MDNode *N = StructTagMetadataCache[PathTag]) return N; llvm::MDNode *BNode = nullptr; if (isTBAAPathStruct(BaseQTy)) BNode = getTBAAStructTypeInfo(BaseQTy); if (!BNode) return StructTagMetadataCache[PathTag] = MDHelper.createTBAAStructTagNode(AccessNode, AccessNode, 0); return StructTagMetadataCache[PathTag] = MDHelper.createTBAAStructTagNode(BNode, AccessNode, Offset); } llvm::MDNode * CodeGenTBAA::getTBAAScalarTagInfo(llvm::MDNode *AccessNode) { if (!AccessNode) return nullptr; if (llvm::MDNode *N = ScalarTagMetadataCache[AccessNode]) return N; return ScalarTagMetadataCache[AccessNode] = MDHelper.createTBAAStructTagNode(AccessNode, AccessNode, 0); }
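// Illustrative sketch (not part of this file): for C++ such as
//   struct S { int i; float f; };
//   void set(S *s) { s->f = 1.0f; }
// compiled with optimization and struct-path TBAA enabled, the helpers above
// cooperate to tag the store; the node shapes are roughly:
//
//   store float 1.000000e+00, float* %f, !tbaa !5
//   !0 = !{!"Simple C/C++ TBAA"}              ; getRoot()
//   !1 = !{!"omnipotent char", !0}            ; getChar()
//   !2 = !{!"int", !1, i64 0}                 ; scalar nodes from getTBAAInfo
//   !3 = !{!"float", !1, i64 0}
//   !4 = !{!"_ZTS1S", !2, i64 0, !3, i64 4}   ; struct node: mangled name plus
//                                             ; (field type, offset) pairs
//   !5 = !{!4, !3, i64 4}                     ; tag: base type, access type, offset
//
// Exact operand layouts are defined by llvm::MDBuilder; this only shows which
// node each helper contributes.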
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/EHScopeStack.h
//===-- EHScopeStack.h - Stack for cleanup IR generation --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // These classes should be the minimum interface required for other parts of // CodeGen to emit cleanups. The implementation is in CGCleanup.cpp and other // implementation details that are not widely needed are in CGCleanup.h. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H #define LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H #include "clang/Basic/LLVM.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Value.h" namespace clang { namespace CodeGen { class CodeGenFunction; /// A branch fixup. These are required when emitting a goto to a /// label which hasn't been emitted yet. The goto is optimistically /// emitted as a branch to the basic block for the label, and (if it /// occurs in a scope with non-trivial cleanups) a fixup is added to /// the innermost cleanup. When a (normal) cleanup is popped, any /// unresolved fixups in that scope are threaded through the cleanup. struct BranchFixup { /// The block containing the terminator which needs to be modified /// into a switch if this fixup is resolved into the current scope. /// If null, LatestBranch points directly to the destination. llvm::BasicBlock *OptimisticBranchBlock; /// The ultimate destination of the branch. /// /// This can be set to null to indicate that this fixup was /// successfully resolved. llvm::BasicBlock *Destination; /// The destination index value. unsigned DestinationIndex; /// The initial branch of the fixup. llvm::BranchInst *InitialBranch; }; template <class T> struct InvariantValue { typedef T type; typedef T saved_type; static bool needsSaving(type value) { return false; } static saved_type save(CodeGenFunction &CGF, type value) { return value; } static type restore(CodeGenFunction &CGF, saved_type value) { return value; } }; /// A metaprogramming class for ensuring that a value will dominate an /// arbitrary position in a function. template <class T> struct DominatingValue : InvariantValue<T> {}; template <class T, bool mightBeInstruction = std::is_base_of<llvm::Value, T>::value && !std::is_base_of<llvm::Constant, T>::value && !std::is_base_of<llvm::BasicBlock, T>::value> struct DominatingPointer; template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {}; // template <class T> struct DominatingPointer<T,true> at end of file template <class T> struct DominatingValue<T*> : DominatingPointer<T> {}; enum CleanupKind : unsigned { /// Denotes a cleanup that should run when a scope is exited using exceptional /// control flow (a throw statement leading to stack unwinding). EHCleanup = 0x1, /// Denotes a cleanup that should run when a scope is exited using normal /// control flow (falling off the end of the scope, return, goto, ...). NormalCleanup = 0x2, NormalAndEHCleanup = EHCleanup | NormalCleanup, InactiveCleanup = 0x4, InactiveEHCleanup = EHCleanup | InactiveCleanup, InactiveNormalCleanup = NormalCleanup | InactiveCleanup, InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup }; /// A stack of scopes which respond to exceptions, including cleanups /// and catch blocks.
class EHScopeStack { public: /// A saved depth on the scope stack. This is necessary because /// pushing scopes onto the stack invalidates iterators. class stable_iterator { friend class EHScopeStack; /// Offset from StartOfData to EndOfBuffer. ptrdiff_t Size; stable_iterator(ptrdiff_t Size) : Size(Size) {} public: static stable_iterator invalid() { return stable_iterator(-1); } stable_iterator() : Size(-1) {} bool isValid() const { return Size >= 0; } /// Returns true if this scope encloses I. /// Returns false if I is invalid. /// This scope must be valid. bool encloses(stable_iterator I) const { return Size <= I.Size; } /// Returns true if this scope strictly encloses I: that is, /// if it encloses I and is not I. /// Returns false if I is invalid. /// This scope must be valid. bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; } friend bool operator==(stable_iterator A, stable_iterator B) { return A.Size == B.Size; } friend bool operator!=(stable_iterator A, stable_iterator B) { return A.Size != B.Size; } }; /// Information for lazily generating a cleanup. Subclasses must be /// POD-like: cleanups will not be destructed, and they will be /// allocated on the cleanup stack and freely copied and moved /// around. /// /// Cleanup implementations should generally be declared in an /// anonymous namespace. class Cleanup { // Anchor the construction vtable. virtual void anchor(); public: /// Generation flags. class Flags { enum { F_IsForEH = 0x1, F_IsNormalCleanupKind = 0x2, F_IsEHCleanupKind = 0x4 }; unsigned flags; public: Flags() : flags(0) {} /// isForEH - true if the current emission is for an EH cleanup. bool isForEHCleanup() const { return flags & F_IsForEH; } bool isForNormalCleanup() const { return !isForEHCleanup(); } void setIsForEHCleanup() { flags |= F_IsForEH; } bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; } void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; } /// isEHCleanupKind - true if the cleanup was pushed as an EH /// cleanup. bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; } void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; } }; // Provide a virtual destructor to suppress a very common warning // that unfortunately cannot be suppressed without this. Cleanups // should not rely on this destructor ever being called. virtual ~Cleanup() {} /// Emit the cleanup. For normal cleanups, this is run in the /// same EH context as when the cleanup was pushed, i.e. the /// immediately-enclosing context of the cleanup scope. For /// EH cleanups, this is run in a terminate context. /// // \param flags cleanup kind. virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0; }; /// ConditionalCleanup stores the saved form of its parameters, /// then restores them and performs the cleanup. template <class T, class... As> class ConditionalCleanup : public Cleanup { typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple; SavedTuple Saved; template <std::size_t... Is> T restore(CodeGenFunction &CGF, llvm::index_sequence<Is...>) { // It's important that the restores are emitted in order. The braced init // list guarantees that. return T{DominatingValue<As>::restore(CGF, std::get<Is>(Saved))...}; } void Emit(CodeGenFunction &CGF, Flags flags) override { restore(CGF, llvm::index_sequence_for<As...>()).Emit(CGF, flags); } public: ConditionalCleanup(typename DominatingValue<As>::saved_type... A) : Saved(A...)
{} ConditionalCleanup(SavedTuple Tuple) : Saved(std::move(Tuple)) {} }; private: // The implementation for this class is in CGException.h and // CGException.cpp; the definition is here because it's used as a // member of CodeGenFunction. /// The start of the scope-stack buffer, i.e. the allocated pointer /// for the buffer. All of these pointers are either simultaneously /// null or simultaneously valid. char *StartOfBuffer; /// The end of the buffer. char *EndOfBuffer; /// The first valid entry in the buffer. char *StartOfData; /// The innermost normal cleanup on the stack. stable_iterator InnermostNormalCleanup; /// The innermost EH scope on the stack. stable_iterator InnermostEHScope; /// The current set of branch fixups. A branch fixup is a jump to /// an as-yet unemitted label, i.e. a label for which we don't yet /// know the EH stack depth. Whenever we pop a cleanup, we have /// to thread all the current branch fixups through it. /// /// Fixups are recorded as the Use of the respective branch or /// switch statement. The use points to the final destination. /// When popping out of a cleanup, these uses are threaded through /// the cleanup and adjusted to point to the new cleanup. /// /// Note that branches are allowed to jump into protected scopes /// in certain situations; e.g. the following code is legal: /// struct A { ~A(); }; // trivial ctor, non-trivial dtor /// goto foo; /// A a; /// foo: /// bar(); SmallVector<BranchFixup, 8> BranchFixups; char *allocate(size_t Size); void *pushCleanup(CleanupKind K, size_t DataSize); public: EHScopeStack() : StartOfBuffer(nullptr), EndOfBuffer(nullptr), StartOfData(nullptr), InnermostNormalCleanup(stable_end()), InnermostEHScope(stable_end()) {} ~EHScopeStack() { delete[] StartOfBuffer; } /// Push a lazily-created cleanup on the stack. template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) { void *Buffer = pushCleanup(Kind, sizeof(T)); Cleanup *Obj = new (Buffer) T(A...); (void) Obj; } /// Push a lazily-created cleanup on the stack. Tuple version. template <class T, class... As> void pushCleanupTuple(CleanupKind Kind, std::tuple<As...> A) { void *Buffer = pushCleanup(Kind, sizeof(T)); Cleanup *Obj = new (Buffer) T(std::move(A)); (void) Obj; } // Feel free to add more variants of the following: /// Push a cleanup with non-constant storage requirements on the /// stack. The cleanup type must provide an additional static method: /// static size_t getExtraSize(size_t); /// The argument to this method will be the value N, which will also /// be passed as the first argument to the constructor. /// /// The data stored in the extra storage must obey the same /// restrictions as normal cleanup member data. /// /// The pointer returned from this method is valid until the cleanup /// stack is modified. template <class T, class... As> T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) { void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N)); return new (Buffer) T(N, A...); } void pushCopyOfCleanup(CleanupKind Kind, const void *Cleanup, size_t Size) { void *Buffer = pushCleanup(Kind, Size); std::memcpy(Buffer, Cleanup, Size); } /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp. void popCleanup(); /// Push a set of catch handlers on the stack. The catch is /// uninitialized and will need to have the given number of handlers /// set on it. class EHCatchScope *pushCatch(unsigned NumHandlers); /// Pops a catch scope off the stack. This is private to CGException.cpp. 
void popCatch(); /// Push an exceptions filter on the stack. class EHFilterScope *pushFilter(unsigned NumFilters); /// Pops an exceptions filter off the stack. void popFilter(); /// Push a terminate handler on the stack. void pushTerminate(); /// Pops a terminate handler off the stack. void popTerminate(); // Returns true iff the current scope is either empty or contains only // lifetime markers, i.e. no real cleanup code. bool containsOnlyLifetimeMarkers(stable_iterator Old) const; /// Determines whether the exception-scopes stack is empty. bool empty() const { return StartOfData == EndOfBuffer; } bool requiresLandingPad() const { return InnermostEHScope != stable_end(); } /// Determines whether there are any normal cleanups on the stack. bool hasNormalCleanups() const { return InnermostNormalCleanup != stable_end(); } /// Returns the innermost normal cleanup on the stack, or /// stable_end() if there are no normal cleanups. stable_iterator getInnermostNormalCleanup() const { return InnermostNormalCleanup; } stable_iterator getInnermostActiveNormalCleanup() const; stable_iterator getInnermostEHScope() const { return InnermostEHScope; } stable_iterator getInnermostActiveEHScope() const; /// An unstable reference to a scope-stack depth. Invalidated by /// pushes but not pops. class iterator; /// Returns an iterator pointing to the innermost EH scope. iterator begin() const; /// Returns an iterator pointing to the outermost EH scope. iterator end() const; /// Create a stable reference to the top of the EH stack. The /// returned reference is valid until that scope is popped off the /// stack. stable_iterator stable_begin() const { return stable_iterator(EndOfBuffer - StartOfData); } /// Create a stable reference to the bottom of the EH stack. static stable_iterator stable_end() { return stable_iterator(0); } /// Translates an iterator into a stable_iterator. stable_iterator stabilize(iterator it) const; /// Turn a stable reference to a scope depth into an unstable pointer /// to the EH stack. iterator find(stable_iterator save) const; /// Removes the cleanup pointed to by the given stable_iterator. void removeCleanup(stable_iterator save); /// Add a branch fixup to the current cleanup scope. BranchFixup &addBranchFixup() { assert(hasNormalCleanups() && "adding fixup in scope without cleanups"); BranchFixups.push_back(BranchFixup()); return BranchFixups.back(); } unsigned getNumBranchFixups() const { return BranchFixups.size(); } BranchFixup &getBranchFixup(unsigned I) { assert(I < getNumBranchFixups()); return BranchFixups[I]; } /// Pops lazily-removed fixups from the end of the list. This /// should only be called by procedures which have just popped a /// cleanup or resolved one or more fixups. void popNullFixups(); /// Clears the branch-fixups list. This should only be called by /// ResolveAllBranchFixups. void clearFixups() { BranchFixups.clear(); } }; } // namespace CodeGen } // namespace clang #endif
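// Illustrative sketch (not part of this header): a typical lazily-created
// cleanup as used throughout CodeGen. The names here (CallDtor, EmitDtorCall)
// are invented for the example; only the Cleanup interface and pushCleanup are
// from this header. Subclasses must stay POD-like because they are memcpy'd
// around on the cleanup stack:
//
//   namespace {
//   struct CallDtor final : EHScopeStack::Cleanup {
//     llvm::Value *Addr;                        // object to destroy
//     CallDtor(llvm::Value *Addr) : Addr(Addr) {}
//     void Emit(CodeGenFunction &CGF, Flags flags) override {
//       // Runs on normal exits and, when pushed with an EH kind, on
//       // exceptional unwinding as well.
//       EmitDtorCall(CGF, Addr);
//     }
//   };
//   }
//
//   // At the point the object becomes live:
//   //   CGF.EHStack.pushCleanup<CallDtor>(NormalAndEHCleanup, Addr);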
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGObjCGNU.cpp
//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This provides Objective-C code generation targeting the GNU runtime. The // class in this file generates structures used by the GNU Objective-C runtime // library. These structures are defined in objc/objc.h and objc/objc-api.h in // the GNU runtime distribution. // //===----------------------------------------------------------------------===// #include "CGObjCRuntime.h" #include "CGCleanup.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "clang/AST/ASTContext.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/RecordLayout.h" #include "clang/AST/StmtObjC.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringMap.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Module.h" #include "llvm/Support/Compiler.h" #include <cstdarg> using namespace clang; using namespace CodeGen; // HLSL Change Starts // No ObjC codegen support, so simply skip all of this compilation. // Here are enough stubs to link the current targets. #if 0 // HLSL Change Ends namespace { /// Class that lazily initialises the runtime function. Avoids inserting the /// types and the function declaration into a module if they're not used, and /// avoids constructing the type more than once if it's used more than once. class LazyRuntimeFunction { CodeGenModule *CGM; llvm::FunctionType *FTy; const char *FunctionName; llvm::Constant *Function; public: /// Constructor leaves this class uninitialized, because it is intended to /// be used as a field in another class and not all of the types that are /// used as arguments will necessarily be available at construction time. LazyRuntimeFunction() : CGM(nullptr), FunctionName(nullptr), Function(nullptr) {} /// Initialises the lazy function with the name, return type, and the types /// of the arguments. LLVM_END_WITH_NULL void init(CodeGenModule *Mod, const char *name, llvm::Type *RetTy, ...) { CGM = Mod; FunctionName = name; Function = nullptr; std::vector<llvm::Type *> ArgTys; va_list Args; va_start(Args, RetTy); while (llvm::Type *ArgTy = va_arg(Args, llvm::Type *)) ArgTys.push_back(ArgTy); va_end(Args); FTy = llvm::FunctionType::get(RetTy, ArgTys, false); } llvm::FunctionType *getType() { return FTy; } /// Overloaded cast operator, allows the class to be implicitly cast to an /// LLVM constant. operator llvm::Constant *() { if (!Function) { if (!FunctionName) return nullptr; Function = cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName)); } return Function; } operator llvm::Function *() { return cast<llvm::Function>((llvm::Constant *)*this); } }; /// GNU Objective-C runtime code generation. This class implements the parts of /// Objective-C support that are specific to the GNU family of runtimes (GCC, /// GNUstep and ObjFW). class CGObjCGNU : public CGObjCRuntime { protected: /// The LLVM module into which output is inserted llvm::Module &TheModule; /// struct objc_super. Used for sending messages to super. This structure /// contains the receiver (object) and the expected class.
llvm::StructType *ObjCSuperTy; /// struct objc_super*. The type of the argument to the superclass message /// lookup functions. llvm::PointerType *PtrToObjCSuperTy; /// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring /// SEL is included in a header somewhere, in which case it will be whatever /// type is declared in that header, most likely {i8*, i8*}. llvm::PointerType *SelectorTy; /// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the /// places where it's used llvm::IntegerType *Int8Ty; /// Pointer to i8 - LLVM type of char*, for all of the places where the /// runtime needs to deal with C strings. llvm::PointerType *PtrToInt8Ty; /// Instance Method Pointer type. This is a pointer to a function that takes, /// at a minimum, an object and a selector, and is the generic type for /// Objective-C methods. Due to differences between variadic / non-variadic /// calling conventions, it must always be cast to the correct type before /// actually being used. llvm::PointerType *IMPTy; /// Type of an untyped Objective-C object. Clang treats id as a built-in type /// when compiling Objective-C code, so this may be an opaque pointer (i8*), /// but if the runtime header declaring it is included then it may be a /// pointer to a structure. llvm::PointerType *IdTy; /// Pointer to a pointer to an Objective-C object. Used in the new ABI /// message lookup function and some GC-related functions. llvm::PointerType *PtrToIdTy; /// The clang type of id. Used when using the clang CGCall infrastructure to /// call Objective-C methods. CanQualType ASTIdTy; /// LLVM type for C int type. llvm::IntegerType *IntTy; /// LLVM type for an opaque pointer. This is identical to PtrToInt8Ty, but is /// used in the code to document the difference between i8* meaning a pointer /// to a C string and i8* meaning a pointer to some opaque type. llvm::PointerType *PtrTy; /// LLVM type for C long type. The runtime uses this in a lot of places where /// it should be using intptr_t, but we can't fix this without breaking /// compatibility with GCC... llvm::IntegerType *LongTy; /// LLVM type for C size_t. Used in various runtime data structures. llvm::IntegerType *SizeTy; /// LLVM type for C intptr_t. llvm::IntegerType *IntPtrTy; /// LLVM type for C ptrdiff_t. Mainly used in property accessor functions. llvm::IntegerType *PtrDiffTy; /// LLVM type for C int*. Used for GCC-ABI-compatible non-fragile instance /// variables. llvm::PointerType *PtrToIntTy; /// LLVM type for Objective-C BOOL type. llvm::Type *BoolTy; /// 32-bit integer type, to save us needing to look it up every time it's used. llvm::IntegerType *Int32Ty; /// 64-bit integer type, to save us needing to look it up every time it's used. llvm::IntegerType *Int64Ty; /// Metadata kind used to tie method lookups to message sends. The GNUstep /// runtime provides some LLVM passes that can use this to do things like /// automatic IMP caching and speculative inlining. unsigned msgSendMDKind; /// Helper function that generates a constant string and returns a pointer to /// the start of the string. The result of this function can be used anywhere /// where the C code specifies const char*. llvm::Constant *MakeConstantString(const std::string &Str, const std::string &Name="") { auto *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str()); return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), ConstStr, Zeros); } /// Emits a linkonce_odr string, whose name is the prefix followed by the /// string value. 
This allows the linker to combine the strings between /// different modules. Used for EH typeinfo names, selector strings, and a /// few other things. llvm::Constant *ExportUniqueString(const std::string &Str, const std::string prefix) { std::string name = prefix + Str; auto *ConstStr = TheModule.getGlobalVariable(name); if (!ConstStr) { llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str); ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true, llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str); } return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), ConstStr, Zeros); } /// Generates a global structure, initialized by the elements in the vector. /// The element types must match the types of the structure elements in the /// first argument. llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty, ArrayRef<llvm::Constant *> V, StringRef Name="", llvm::GlobalValue::LinkageTypes linkage =llvm::GlobalValue::InternalLinkage) { llvm::Constant *C = llvm::ConstantStruct::get(Ty, V); return new llvm::GlobalVariable(TheModule, Ty, false, linkage, C, Name); } /// Generates a global array. The vector must contain the same number of /// elements that the array type declares, of the type specified as the array /// element type. llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty, ArrayRef<llvm::Constant *> V, StringRef Name="", llvm::GlobalValue::LinkageTypes linkage =llvm::GlobalValue::InternalLinkage) { llvm::Constant *C = llvm::ConstantArray::get(Ty, V); return new llvm::GlobalVariable(TheModule, Ty, false, linkage, C, Name); } /// Generates a global array, inferring the array type from the specified /// element type and the size of the initialiser. llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty, ArrayRef<llvm::Constant *> V, StringRef Name="", llvm::GlobalValue::LinkageTypes linkage =llvm::GlobalValue::InternalLinkage) { llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size()); return MakeGlobal(ArrayTy, V, Name, linkage); } /// Returns a property name and encoding string. llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD, const Decl *Container) { const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime; if ((R.getKind() == ObjCRuntime::GNUstep) && (R.getVersion() >= VersionTuple(1, 6))) { std::string NameAndAttributes; std::string TypeStr; CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr); NameAndAttributes += '\0'; NameAndAttributes += TypeStr.length() + 3; NameAndAttributes += TypeStr; NameAndAttributes += '\0'; NameAndAttributes += PD->getNameAsString(); auto *ConstStr = CGM.GetAddrOfConstantCString(NameAndAttributes); return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(), ConstStr, Zeros); } return MakeConstantString(PD->getNameAsString()); } /// Push the property attributes into two structure fields. 
void PushPropertyAttributes(std::vector<llvm::Constant*> &Fields, ObjCPropertyDecl *property, bool isSynthesized=true, bool isDynamic=true) { int attrs = property->getPropertyAttributes(); // For read-only properties, clear the copy and retain flags if (attrs & ObjCPropertyDecl::OBJC_PR_readonly) { attrs &= ~ObjCPropertyDecl::OBJC_PR_copy; attrs &= ~ObjCPropertyDecl::OBJC_PR_retain; attrs &= ~ObjCPropertyDecl::OBJC_PR_weak; attrs &= ~ObjCPropertyDecl::OBJC_PR_strong; } // The first flags field has the same attribute values as clang uses internally Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff)); attrs >>= 8; attrs <<= 2; // For protocol properties, synthesized and dynamic have no meaning, so we // reuse these flags to indicate that this is a protocol property (both set // has no meaning, as a property can't be both synthesized and dynamic) attrs |= isSynthesized ? (1<<0) : 0; attrs |= isDynamic ? (1<<1) : 0; // The second field is the next four fields left shifted by two, with the // low bit set to indicate whether the field is synthesized or dynamic. Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff)); // Two padding fields Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0)); Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0)); } /// Ensures that the value has the required type, by inserting a bitcast if /// required. This function lets us avoid inserting bitcasts that are /// redundant. llvm::Value* EnforceType(CGBuilderTy &B, llvm::Value *V, llvm::Type *Ty) { if (V->getType() == Ty) return V; return B.CreateBitCast(V, Ty); } // Some zeros used for GEPs in lots of places. llvm::Constant *Zeros[2]; /// Null pointer value. Mainly used as a terminator in various arrays. llvm::Constant *NULLPtr; /// LLVM context. llvm::LLVMContext &VMContext; private: /// Placeholder for the class. Lots of things refer to the class before we've /// actually emitted it. We use this alias as a placeholder, and then replace /// it with a pointer to the class structure before finally emitting the /// module. llvm::GlobalAlias *ClassPtrAlias; /// Placeholder for the metaclass. Lots of things refer to the class before /// we've actually emitted it. We use this alias as a placeholder, and then /// replace it with a pointer to the metaclass structure before finally /// emitting the module. llvm::GlobalAlias *MetaClassPtrAlias; /// All of the classes that have been generated for this compilation unit. std::vector<llvm::Constant*> Classes; /// All of the categories that have been generated for this compilation unit. std::vector<llvm::Constant*> Categories; /// All of the Objective-C constant strings that have been generated for this /// compilation unit. std::vector<llvm::Constant*> ConstantStrings; /// Map from string values to Objective-C constant strings in the output. /// Used to prevent emitting Objective-C strings more than once. This should /// not be required at all - CodeGenModule should manage this list. llvm::StringMap<llvm::Constant*> ObjCStrings; /// All of the protocols that have been declared. llvm::StringMap<llvm::Constant*> ExistingProtocols; /// For each variant of a selector, we store the type encoding and a /// placeholder value. For an untyped selector, the type will be the empty /// string. Selector references are all done via the module's selector table, /// so we create an alias as a placeholder and then replace it with the real /// value later. typedef std::pair<std::string, llvm::GlobalAlias*> TypedSelector; /// Type of the selector map.
  /// This is roughly equivalent to the structure used in the GNUstep runtime,
  /// which maintains a list of all of the valid types for a selector in a
  /// table.
  typedef llvm::DenseMap<Selector, SmallVector<TypedSelector, 2> >
    SelectorMap;
  /// A map from selectors to selector types. This allows us to emit all
  /// selectors of the same name and type together.
  SelectorMap SelectorTable;

  /// Selectors related to memory management. When compiling in GC mode, we
  /// omit these.
  Selector RetainSel, ReleaseSel, AutoreleaseSel;
  /// Runtime functions used for memory management in GC mode. Note that clang
  /// supports code generation for calling these functions, but neither GNU
  /// runtime actually supports this API properly yet.
  LazyRuntimeFunction IvarAssignFn, StrongCastAssignFn, MemMoveFn, WeakReadFn,
    WeakAssignFn, GlobalAssignFn;

  typedef std::pair<std::string, std::string> ClassAliasPair;
  /// All classes that have aliases set for them.
  std::vector<ClassAliasPair> ClassAliases;

protected:
  /// Function used for throwing Objective-C exceptions.
  LazyRuntimeFunction ExceptionThrowFn;
  /// Function used for rethrowing exceptions, used at the end of \@finally or
  /// \@synchronize blocks.
  LazyRuntimeFunction ExceptionReThrowFn;
  /// Function called when entering a catch function. This is required for
  /// differentiating Objective-C exceptions and foreign exceptions.
  LazyRuntimeFunction EnterCatchFn;
  /// Function called when exiting from a catch block. Used to do exception
  /// cleanup.
  LazyRuntimeFunction ExitCatchFn;
  /// Function called when entering an \@synchronize block. Acquires the lock.
  LazyRuntimeFunction SyncEnterFn;
  /// Function called when exiting an \@synchronize block. Releases the lock.
  LazyRuntimeFunction SyncExitFn;

private:
  /// Function called if fast enumeration detects that the collection is
  /// modified during the update.
  LazyRuntimeFunction EnumerationMutationFn;
  /// Function for implementing synthesized property getters that return an
  /// object.
  LazyRuntimeFunction GetPropertyFn;
  /// Function for implementing synthesized property setters that return an
  /// object.
  LazyRuntimeFunction SetPropertyFn;
  /// Function used for non-object declared property getters.
  LazyRuntimeFunction GetStructPropertyFn;
  /// Function used for non-object declared property setters.
  LazyRuntimeFunction SetStructPropertyFn;
  /// The version of the runtime that this class targets. Must match the
  /// version in the runtime.
  int RuntimeVersion;
  /// The version of the protocol class. Used to differentiate between ObjC1
  /// and ObjC2 protocols. Objective-C 1 protocols cannot contain optional
  /// components and cannot contain declared properties. We always emit
  /// Objective-C 2 property structures, but we have to pretend that they're
  /// Objective-C 1 property structures when targeting the GCC runtime or it
  /// will abort.
  const int ProtocolVersion;

private:
  /// Generates an instance variable list structure. This is a structure
  /// containing a size and an array of structures containing instance
  /// variable metadata. This is used purely for introspection in the fragile
  /// ABI. In the non-fragile ABI, it's used for instance variable fixup.
  llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
                                   ArrayRef<llvm::Constant *> IvarTypes,
                                   ArrayRef<llvm::Constant *> IvarOffsets);
  /// Generates a method list structure. This is a structure containing a size
  /// and an array of structures containing method metadata.
/// /// This structure is used by both classes and categories, and contains a next /// pointer allowing them to be chained together in a linked list. llvm::Constant *GenerateMethodList(StringRef ClassName, StringRef CategoryName, ArrayRef<Selector> MethodSels, ArrayRef<llvm::Constant *> MethodTypes, bool isClassMethodList); /// Emits an empty protocol. This is used for \@protocol() where no protocol /// is found. The runtime will (hopefully) fix up the pointer to refer to the /// real protocol. llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName); /// Generates a list of property metadata structures. This follows the same /// pattern as method and instance variable metadata lists. llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID, SmallVectorImpl<Selector> &InstanceMethodSels, SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes); /// Generates a list of referenced protocols. Classes, categories, and /// protocols all use this structure. llvm::Constant *GenerateProtocolList(ArrayRef<std::string> Protocols); /// To ensure that all protocols are seen by the runtime, we add a category on /// a class defined in the runtime, declaring no methods, but adopting the /// protocols. This is a horribly ugly hack, but it allows us to collect all /// of the protocols without changing the ABI. void GenerateProtocolHolderCategory(); /// Generates a class structure. llvm::Constant *GenerateClassStructure( llvm::Constant *MetaClass, llvm::Constant *SuperClass, unsigned info, const char *Name, llvm::Constant *Version, llvm::Constant *InstanceSize, llvm::Constant *IVars, llvm::Constant *Methods, llvm::Constant *Protocols, llvm::Constant *IvarOffsets, llvm::Constant *Properties, llvm::Constant *StrongIvarBitmap, llvm::Constant *WeakIvarBitmap, bool isMeta=false); /// Generates a method list. This is used by protocols to define the required /// and optional methods. llvm::Constant *GenerateProtocolMethodList( ArrayRef<llvm::Constant *> MethodNames, ArrayRef<llvm::Constant *> MethodTypes); /// Returns a selector with the specified type encoding. An empty string is /// used to return an untyped selector (with the types field set to NULL). llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel, const std::string &TypeEncoding, bool lval); /// Returns the variable used to store the offset of an instance variable. llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID, const ObjCIvarDecl *Ivar); /// Emits a reference to a class. This allows the linker to object if there /// is no class of the matching name. protected: void EmitClassRef(const std::string &className); /// Emits a pointer to the named class virtual llvm::Value *GetClassNamed(CodeGenFunction &CGF, const std::string &Name, bool isWeak); /// Looks up the method for sending a message to the specified object. This /// mechanism differs between the GCC and GNU runtimes, so this method must be /// overridden in subclasses. virtual llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver, llvm::Value *cmd, llvm::MDNode *node, MessageSendInfo &MSI) = 0; /// Looks up the method for sending a message to a superclass. This /// mechanism differs between the GCC and GNU runtimes, so this method must /// be overridden in subclasses. 
  virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
                                      llvm::Value *ObjCSuper,
                                      llvm::Value *cmd,
                                      MessageSendInfo &MSI) = 0;
  /// Libobjc2 uses a bitfield representation where small(ish) bitfields are
  /// stored in a 64-bit value with the low bit set to 1 and the remaining 63
  /// bits set to their values, LSB first, while larger ones are stored in a
  /// structure of this form:
  ///
  /// struct { int32_t length; int32_t values[length]; };
  ///
  /// The values in the array are stored in host-endian format, with the least
  /// significant bit being assumed to come first in the bitfield. Therefore,
  /// a bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] },
  /// while a bitfield with the 63rd bit set will be (1<<63) | 1.
  llvm::Constant *MakeBitField(ArrayRef<bool> bits);
public:
  CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
            unsigned protocolClassVersion);

  llvm::Constant *GenerateConstantString(const StringLiteral *) override;

  RValue GenerateMessageSend(CodeGenFunction &CGF, ReturnValueSlot Return,
                             QualType ResultType, Selector Sel,
                             llvm::Value *Receiver,
                             const CallArgList &CallArgs,
                             const ObjCInterfaceDecl *Class,
                             const ObjCMethodDecl *Method) override;
  RValue GenerateMessageSendSuper(CodeGenFunction &CGF,
                                  ReturnValueSlot Return, QualType ResultType,
                                  Selector Sel,
                                  const ObjCInterfaceDecl *Class,
                                  bool isCategoryImpl, llvm::Value *Receiver,
                                  bool IsClassMessage,
                                  const CallArgList &CallArgs,
                                  const ObjCMethodDecl *Method) override;
  llvm::Value *GetClass(CodeGenFunction &CGF,
                        const ObjCInterfaceDecl *OID) override;
  llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
                           bool lval = false) override;
  llvm::Value *GetSelector(CodeGenFunction &CGF,
                           const ObjCMethodDecl *Method) override;
  llvm::Constant *GetEHType(QualType T) override;

  llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
                                 const ObjCContainerDecl *CD) override;
  void GenerateCategory(const ObjCCategoryImplDecl *CMD) override;
  void GenerateClass(const ObjCImplementationDecl *ClassDecl) override;
  void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) override;
  llvm::Value *GenerateProtocolRef(CodeGenFunction &CGF,
                                   const ObjCProtocolDecl *PD) override;
  void GenerateProtocol(const ObjCProtocolDecl *PD) override;
  llvm::Function *ModuleInitFunction() override;
  llvm::Constant *GetPropertyGetFunction() override;
  llvm::Constant *GetPropertySetFunction() override;
  llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
                                                  bool copy) override;
  llvm::Constant *GetSetStructFunction() override;
  llvm::Constant *GetGetStructFunction() override;
  llvm::Constant *GetCppAtomicObjectGetFunction() override;
  llvm::Constant *GetCppAtomicObjectSetFunction() override;
  llvm::Constant *EnumerationMutationFunction() override;

  void EmitTryStmt(CodeGenFunction &CGF,
                   const ObjCAtTryStmt &S) override;
  void EmitSynchronizedStmt(CodeGenFunction &CGF,
                            const ObjCAtSynchronizedStmt &S) override;
  void EmitThrowStmt(CodeGenFunction &CGF,
                     const ObjCAtThrowStmt &S,
                     bool ClearInsertionPoint=true) override;
  llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
                                 llvm::Value *AddrWeakObj) override;
  void EmitObjCWeakAssign(CodeGenFunction &CGF,
                          llvm::Value *src, llvm::Value *dst) override;
  void EmitObjCGlobalAssign(CodeGenFunction &CGF,
                            llvm::Value *src, llvm::Value *dest,
                            bool threadlocal=false) override;
  void EmitObjCIvarAssign(CodeGenFunction &CGF, llvm::Value *src,
                          llvm::Value *dest, llvm::Value *ivarOffset) override;
  void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
                                llvm::Value *src, llvm::Value *dest) override;
  void
EmitGCMemmoveCollectable(CodeGenFunction &CGF, llvm::Value *DestPtr, llvm::Value *SrcPtr, llvm::Value *Size) override; LValue EmitObjCValueForIvar(CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) override; llvm::Value *EmitIvarOffset(CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) override; llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override; llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM, const CGBlockInfo &blockInfo) override { return NULLPtr; } llvm::Constant *BuildRCBlockLayout(CodeGenModule &CGM, const CGBlockInfo &blockInfo) override { return NULLPtr; } llvm::Constant *BuildByrefLayout(CodeGenModule &CGM, QualType T) override { return NULLPtr; } llvm::GlobalVariable *GetClassGlobal(const std::string &Name, bool Weak = false) override { return nullptr; } }; /// Class representing the legacy GCC Objective-C ABI. This is the default when /// -fobjc-nonfragile-abi is not specified. /// /// The GCC ABI target actually generates code that is approximately compatible /// with the new GNUstep runtime ABI, but refrains from using any features that /// would not work with the GCC runtime. For example, clang always generates /// the extended form of the class structure, and the extra fields are simply /// ignored by GCC libobjc. class CGObjCGCC : public CGObjCGNU { /// The GCC ABI message lookup function. Returns an IMP pointing to the /// method implementation for this message. LazyRuntimeFunction MsgLookupFn; /// The GCC ABI superclass message lookup function. Takes a pointer to a /// structure describing the receiver and the class, and a selector as /// arguments. Returns the IMP for the corresponding method. LazyRuntimeFunction MsgLookupSuperFn; protected: llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver, llvm::Value *cmd, llvm::MDNode *node, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; llvm::Value *args[] = { EnforceType(Builder, Receiver, IdTy), EnforceType(Builder, cmd, SelectorTy) }; llvm::CallSite imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args); imp->setMetadata(msgSendMDKind, node); return imp.getInstruction(); } llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper, llvm::Value *cmd, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy), cmd}; return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs); } public: CGObjCGCC(CodeGenModule &Mod) : CGObjCGNU(Mod, 8, 2) { // IMP objc_msg_lookup(id, SEL); MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, nullptr); // IMP objc_msg_lookup_super(struct objc_super*, SEL); MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy, PtrToObjCSuperTy, SelectorTy, nullptr); } }; /// Class used when targeting the new GNUstep runtime ABI. class CGObjCGNUstep : public CGObjCGNU { /// The slot lookup function. Returns a pointer to a cacheable structure /// that contains (among other things) the IMP. LazyRuntimeFunction SlotLookupFn; /// The GNUstep ABI superclass message lookup function. Takes a pointer to /// a structure describing the receiver and the class, and a selector as /// arguments. Returns the slot for the corresponding method. Superclass /// message lookup rarely changes, so this is a good caching opportunity. 
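  /// As a rough sketch (the authoritative definition lives in the runtime's
  /// headers, not here), the returned slot corresponds to libobjc2's:
  ///   struct objc_slot { Class owner; Class cachedFor; const char *types;
  ///                      int version; IMP method; };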
LazyRuntimeFunction SlotLookupSuperFn; /// Specialised function for setting atomic retain properties LazyRuntimeFunction SetPropertyAtomic; /// Specialised function for setting atomic copy properties LazyRuntimeFunction SetPropertyAtomicCopy; /// Specialised function for setting nonatomic retain properties LazyRuntimeFunction SetPropertyNonAtomic; /// Specialised function for setting nonatomic copy properties LazyRuntimeFunction SetPropertyNonAtomicCopy; /// Function to perform atomic copies of C++ objects with nontrivial copy /// constructors from Objective-C ivars. LazyRuntimeFunction CxxAtomicObjectGetFn; /// Function to perform atomic copies of C++ objects with nontrivial copy /// constructors to Objective-C ivars. LazyRuntimeFunction CxxAtomicObjectSetFn; /// Type of an slot structure pointer. This is returned by the various /// lookup functions. llvm::Type *SlotTy; public: llvm::Constant *GetEHType(QualType T) override; protected: llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver, llvm::Value *cmd, llvm::MDNode *node, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; llvm::Function *LookupFn = SlotLookupFn; // Store the receiver on the stack so that we can reload it later llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType()); Builder.CreateStore(Receiver, ReceiverPtr); llvm::Value *self; if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) { self = CGF.LoadObjCSelf(); } else { self = llvm::ConstantPointerNull::get(IdTy); } // The lookup function is guaranteed not to capture the receiver pointer. LookupFn->setDoesNotCapture(1); llvm::Value *args[] = { EnforceType(Builder, ReceiverPtr, PtrToIdTy), EnforceType(Builder, cmd, SelectorTy), EnforceType(Builder, self, IdTy) }; llvm::CallSite slot = CGF.EmitRuntimeCallOrInvoke(LookupFn, args); slot.setOnlyReadsMemory(); slot->setMetadata(msgSendMDKind, node); // Load the imp from the slot llvm::Value *imp = Builder.CreateLoad( Builder.CreateStructGEP(nullptr, slot.getInstruction(), 4)); // The lookup function may have changed the receiver, so make sure we use // the new one. 
    Receiver = Builder.CreateLoad(ReceiverPtr, true);
    return imp;
  }

  llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper,
                              llvm::Value *cmd,
                              MessageSendInfo &MSI) override {
    CGBuilderTy &Builder = CGF.Builder;
    llvm::Value *lookupArgs[] = {ObjCSuper, cmd};

    llvm::CallInst *slot =
      CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs);
    slot->setOnlyReadsMemory();

    return Builder.CreateLoad(Builder.CreateStructGEP(nullptr, slot, 4));
  }

public:
  CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
    const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime;

    llvm::StructType *SlotStructTy = llvm::StructType::get(PtrTy, PtrTy,
        PtrTy, IntTy, IMPTy, nullptr);
    SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
    // Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
    SlotLookupFn.init(&CGM, "objc_msg_lookup_sender", SlotTy, PtrToIdTy,
        SelectorTy, IdTy, nullptr);
    // Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
    SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
        PtrToObjCSuperTy, SelectorTy, nullptr);
    // If we're in ObjC++ mode, then we want to make use of the C++ exception
    // entry points (__cxa_begin_catch / __cxa_end_catch) so that Objective-C
    // and C++ exception handling can interoperate.
    if (CGM.getLangOpts().CPlusPlus) {
      llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
      // void *__cxa_begin_catch(void *e)
      EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, nullptr);
      // void __cxa_end_catch(void)
      ExitCatchFn.init(&CGM, "__cxa_end_catch", VoidTy, nullptr);
      // void _Unwind_Resume_or_Rethrow(void*)
      ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy,
          PtrTy, nullptr);
    } else if (R.getVersion() >= VersionTuple(1, 7)) {
      llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
      // id objc_begin_catch(void *e)
      EnterCatchFn.init(&CGM, "objc_begin_catch", IdTy, PtrTy, nullptr);
      // void objc_end_catch(void)
      ExitCatchFn.init(&CGM, "objc_end_catch", VoidTy, nullptr);
      // void _Unwind_Resume_or_Rethrow(void*)
      ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy, PtrTy,
          nullptr);
    }
    llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
    SetPropertyAtomic.init(&CGM, "objc_setProperty_atomic", VoidTy, IdTy,
        SelectorTy, IdTy, PtrDiffTy, nullptr);
    SetPropertyAtomicCopy.init(&CGM, "objc_setProperty_atomic_copy", VoidTy,
        IdTy, SelectorTy, IdTy, PtrDiffTy, nullptr);
    SetPropertyNonAtomic.init(&CGM, "objc_setProperty_nonatomic", VoidTy,
        IdTy, SelectorTy, IdTy, PtrDiffTy, nullptr);
    SetPropertyNonAtomicCopy.init(&CGM, "objc_setProperty_nonatomic_copy",
        VoidTy, IdTy, SelectorTy, IdTy, PtrDiffTy, nullptr);
    // void objc_setCppObjectAtomic(void *dest, const void *src, void
    // *helper);
    CxxAtomicObjectSetFn.init(&CGM, "objc_setCppObjectAtomic", VoidTy, PtrTy,
        PtrTy, PtrTy, nullptr);
    // void objc_getCppObjectAtomic(void *dest, const void *src, void
    // *helper);
    CxxAtomicObjectGetFn.init(&CGM, "objc_getCppObjectAtomic", VoidTy, PtrTy,
        PtrTy, PtrTy, nullptr);
  }

  llvm::Constant *GetCppAtomicObjectGetFunction() override {
    // The optimised functions were added in version 1.7 of the GNUstep
    // runtime.
    assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
        VersionTuple(1, 7));
    return CxxAtomicObjectGetFn;
  }

  llvm::Constant *GetCppAtomicObjectSetFunction() override {
    // The optimised functions were added in version 1.7 of the GNUstep
    // runtime.
    assert (CGM.getLangOpts().ObjCRuntime.getVersion() >=
        VersionTuple(1, 7));
    return CxxAtomicObjectSetFn;
  }

  llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
                                                  bool copy) override {
    // The optimised property functions omit the GC check, and so are not
    // safe to use in GC mode.
The standard functions are fast in GC mode, // so there is less advantage in using them. assert ((CGM.getLangOpts().getGC() == LangOptions::NonGC)); // The optimised functions were added in version 1.7 of the GNUstep // runtime. assert (CGM.getLangOpts().ObjCRuntime.getVersion() >= VersionTuple(1, 7)); if (atomic) { if (copy) return SetPropertyAtomicCopy; return SetPropertyAtomic; } return copy ? SetPropertyNonAtomicCopy : SetPropertyNonAtomic; } }; /// Support for the ObjFW runtime. class CGObjCObjFW: public CGObjCGNU { protected: /// The GCC ABI message lookup function. Returns an IMP pointing to the /// method implementation for this message. LazyRuntimeFunction MsgLookupFn; /// stret lookup function. While this does not seem to make sense at the /// first look, this is required to call the correct forwarding function. LazyRuntimeFunction MsgLookupFnSRet; /// The GCC ABI superclass message lookup function. Takes a pointer to a /// structure describing the receiver and the class, and a selector as /// arguments. Returns the IMP for the corresponding method. LazyRuntimeFunction MsgLookupSuperFn, MsgLookupSuperFnSRet; llvm::Value *LookupIMP(CodeGenFunction &CGF, llvm::Value *&Receiver, llvm::Value *cmd, llvm::MDNode *node, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; llvm::Value *args[] = { EnforceType(Builder, Receiver, IdTy), EnforceType(Builder, cmd, SelectorTy) }; llvm::CallSite imp; if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFnSRet, args); else imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args); imp->setMetadata(msgSendMDKind, node); return imp.getInstruction(); } llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, llvm::Value *ObjCSuper, llvm::Value *cmd, MessageSendInfo &MSI) override { CGBuilderTy &Builder = CGF.Builder; llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy), cmd}; if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFnSRet, lookupArgs); else return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs); } llvm::Value *GetClassNamed(CodeGenFunction &CGF, const std::string &Name, bool isWeak) override { if (isWeak) return CGObjCGNU::GetClassNamed(CGF, Name, isWeak); EmitClassRef(Name); std::string SymbolName = "_OBJC_CLASS_" + Name; llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(SymbolName); if (!ClassSymbol) ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false, llvm::GlobalValue::ExternalLinkage, nullptr, SymbolName); return ClassSymbol; } public: CGObjCObjFW(CodeGenModule &Mod): CGObjCGNU(Mod, 9, 3) { // IMP objc_msg_lookup(id, SEL); MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, nullptr); MsgLookupFnSRet.init(&CGM, "objc_msg_lookup_stret", IMPTy, IdTy, SelectorTy, nullptr); // IMP objc_msg_lookup_super(struct objc_super*, SEL); MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy, PtrToObjCSuperTy, SelectorTy, nullptr); MsgLookupSuperFnSRet.init(&CGM, "objc_msg_lookup_super_stret", IMPTy, PtrToObjCSuperTy, SelectorTy, nullptr); } }; } // end anonymous namespace /// Emits a reference to a dummy variable which is emitted with each class. /// This ensures that a linker error will be generated when trying to link /// together modules where a referenced class is not defined. 
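/// For example, a reference to class Foo emits a weak global
/// __objc_class_ref_Foo whose initialiser is the external symbol
/// __objc_class_name_Foo, which only the module that defines Foo provides.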
void CGObjCGNU::EmitClassRef(const std::string &className) { std::string symbolRef = "__objc_class_ref_" + className; // Don't emit two copies of the same symbol if (TheModule.getGlobalVariable(symbolRef)) return; std::string symbolName = "__objc_class_name_" + className; llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName); if (!ClassSymbol) { ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false, llvm::GlobalValue::ExternalLinkage, nullptr, symbolName); } new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true, llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef); } static std::string SymbolNameForMethod( StringRef ClassName, StringRef CategoryName, const Selector MethodName, bool isClassMethod) { std::string MethodNameColonStripped = MethodName.getAsString(); std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(), ':', '_'); return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" + CategoryName + "_" + MethodNameColonStripped).str(); } CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion, unsigned protocolClassVersion) : CGObjCRuntime(cgm), TheModule(CGM.getModule()), VMContext(cgm.getLLVMContext()), ClassPtrAlias(nullptr), MetaClassPtrAlias(nullptr), RuntimeVersion(runtimeABIVersion), ProtocolVersion(protocolClassVersion) { msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend"); CodeGenTypes &Types = CGM.getTypes(); IntTy = cast<llvm::IntegerType>( Types.ConvertType(CGM.getContext().IntTy)); LongTy = cast<llvm::IntegerType>( Types.ConvertType(CGM.getContext().LongTy)); SizeTy = cast<llvm::IntegerType>( Types.ConvertType(CGM.getContext().getSizeType())); PtrDiffTy = cast<llvm::IntegerType>( Types.ConvertType(CGM.getContext().getPointerDiffType())); BoolTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy); Int8Ty = llvm::Type::getInt8Ty(VMContext); // C string type. Used in lots of places. PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty); Zeros[0] = llvm::ConstantInt::get(LongTy, 0); Zeros[1] = Zeros[0]; NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty); // Get the selector Type. QualType selTy = CGM.getContext().getObjCSelType(); if (QualType() == selTy) { SelectorTy = PtrToInt8Ty; } else { SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy)); } PtrToIntTy = llvm::PointerType::getUnqual(IntTy); PtrTy = PtrToInt8Ty; Int32Ty = llvm::Type::getInt32Ty(VMContext); Int64Ty = llvm::Type::getInt64Ty(VMContext); IntPtrTy = CGM.getDataLayout().getPointerSizeInBits() == 32 ? 
Int32Ty : Int64Ty; // Object type QualType UnqualIdTy = CGM.getContext().getObjCIdType(); ASTIdTy = CanQualType(); if (UnqualIdTy != QualType()) { ASTIdTy = CGM.getContext().getCanonicalType(UnqualIdTy); IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy)); } else { IdTy = PtrToInt8Ty; } PtrToIdTy = llvm::PointerType::getUnqual(IdTy); ObjCSuperTy = llvm::StructType::get(IdTy, IdTy, nullptr); PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy); llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext); // void objc_exception_throw(id); ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, nullptr); ExceptionReThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, nullptr); // int objc_sync_enter(id); SyncEnterFn.init(&CGM, "objc_sync_enter", IntTy, IdTy, nullptr); // int objc_sync_exit(id); SyncExitFn.init(&CGM, "objc_sync_exit", IntTy, IdTy, nullptr); // void objc_enumerationMutation (id) EnumerationMutationFn.init(&CGM, "objc_enumerationMutation", VoidTy, IdTy, nullptr); // id objc_getProperty(id, SEL, ptrdiff_t, BOOL) GetPropertyFn.init(&CGM, "objc_getProperty", IdTy, IdTy, SelectorTy, PtrDiffTy, BoolTy, nullptr); // void objc_setProperty(id, SEL, ptrdiff_t, id, BOOL, BOOL) SetPropertyFn.init(&CGM, "objc_setProperty", VoidTy, IdTy, SelectorTy, PtrDiffTy, IdTy, BoolTy, BoolTy, nullptr); // void objc_setPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL) GetStructPropertyFn.init(&CGM, "objc_getPropertyStruct", VoidTy, PtrTy, PtrTy, PtrDiffTy, BoolTy, BoolTy, nullptr); // void objc_setPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL) SetStructPropertyFn.init(&CGM, "objc_setPropertyStruct", VoidTy, PtrTy, PtrTy, PtrDiffTy, BoolTy, BoolTy, nullptr); // IMP type llvm::Type *IMPArgs[] = { IdTy, SelectorTy }; IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs, true)); const LangOptions &Opts = CGM.getLangOpts(); if ((Opts.getGC() != LangOptions::NonGC) || Opts.ObjCAutoRefCount) RuntimeVersion = 10; // Don't bother initialising the GC stuff unless we're compiling in GC mode if (Opts.getGC() != LangOptions::NonGC) { // This is a bit of an hack. 
We should sort this out by having a proper // CGObjCGNUstep subclass for GC, but we may want to really support the old // ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now // Get selectors needed in GC mode RetainSel = GetNullarySelector("retain", CGM.getContext()); ReleaseSel = GetNullarySelector("release", CGM.getContext()); AutoreleaseSel = GetNullarySelector("autorelease", CGM.getContext()); // Get functions needed in GC mode // id objc_assign_ivar(id, id, ptrdiff_t); IvarAssignFn.init(&CGM, "objc_assign_ivar", IdTy, IdTy, IdTy, PtrDiffTy, nullptr); // id objc_assign_strongCast (id, id*) StrongCastAssignFn.init(&CGM, "objc_assign_strongCast", IdTy, IdTy, PtrToIdTy, nullptr); // id objc_assign_global(id, id*); GlobalAssignFn.init(&CGM, "objc_assign_global", IdTy, IdTy, PtrToIdTy, nullptr); // id objc_assign_weak(id, id*); WeakAssignFn.init(&CGM, "objc_assign_weak", IdTy, IdTy, PtrToIdTy, nullptr); // id objc_read_weak(id*); WeakReadFn.init(&CGM, "objc_read_weak", IdTy, PtrToIdTy, nullptr); // void *objc_memmove_collectable(void*, void *, size_t); MemMoveFn.init(&CGM, "objc_memmove_collectable", PtrTy, PtrTy, PtrTy, SizeTy, nullptr); } } llvm::Value *CGObjCGNU::GetClassNamed(CodeGenFunction &CGF, const std::string &Name, bool isWeak) { llvm::GlobalVariable *ClassNameGV = CGM.GetAddrOfConstantCString(Name); // With the incompatible ABI, this will need to be replaced with a direct // reference to the class symbol. For the compatible nonfragile ABI we are // still performing this lookup at run time but emitting the symbol for the // class externally so that we can make the switch later. // // Libobjc2 contains an LLVM pass that replaces calls to objc_lookup_class // with memoized versions or with static references if it's safe to do so. if (!isWeak) EmitClassRef(Name); llvm::Value *ClassName = CGF.Builder.CreateStructGEP(ClassNameGV->getValueType(), ClassNameGV, 0); llvm::Constant *ClassLookupFn = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true), "objc_lookup_class"); return CGF.EmitNounwindRuntimeCall(ClassLookupFn, ClassName); } // This has to perform the lookup every time, since posing and related // techniques can modify the name -> class mapping. 
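// (For instance, the GNU runtime historically offered class_pose_as(), which
// substitutes one class for another at run time; with such APIs available,
// the result of objc_lookup_class() cannot safely be cached.)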
llvm::Value *CGObjCGNU::GetClass(CodeGenFunction &CGF, const ObjCInterfaceDecl *OID) { return GetClassNamed(CGF, OID->getNameAsString(), OID->isWeakImported()); } llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) { return GetClassNamed(CGF, "NSAutoreleasePool", false); } llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel, const std::string &TypeEncoding, bool lval) { SmallVectorImpl<TypedSelector> &Types = SelectorTable[Sel]; llvm::GlobalAlias *SelValue = nullptr; for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(), e = Types.end() ; i!=e ; i++) { if (i->first == TypeEncoding) { SelValue = i->second; break; } } if (!SelValue) { SelValue = llvm::GlobalAlias::create( SelectorTy, llvm::GlobalValue::PrivateLinkage, ".objc_selector_" + Sel.getAsString(), &TheModule); Types.emplace_back(TypeEncoding, SelValue); } if (lval) { llvm::Value *tmp = CGF.CreateTempAlloca(SelValue->getType()); CGF.Builder.CreateStore(SelValue, tmp); return tmp; } return SelValue; } llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel, bool lval) { return GetSelector(CGF, Sel, std::string(), lval); } llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, const ObjCMethodDecl *Method) { std::string SelTypes; CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes); return GetSelector(CGF, Method->getSelector(), SelTypes, false); } llvm::Constant *CGObjCGNU::GetEHType(QualType T) { if (T->isObjCIdType() || T->isObjCQualifiedIdType()) { // With the old ABI, there was only one kind of catchall, which broke // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as // a pointer indicating object catchalls, and NULL to indicate real // catchalls if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) { return MakeConstantString("@id"); } else { return nullptr; } } // All other types should be Objective-C interface pointer types. const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>(); assert(OPT && "Invalid @catch type."); const ObjCInterfaceDecl *IDecl = OPT->getObjectType()->getInterface(); assert(IDecl && "Invalid @catch type."); return MakeConstantString(IDecl->getIdentifier()->getName()); } llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) { if (!CGM.getLangOpts().CPlusPlus) return CGObjCGNU::GetEHType(T); // For Objective-C++, we want to provide the ability to catch both C++ and // Objective-C objects in the same function. // There's a particular fixed type info for 'id'. if (T->isObjCIdType() || T->isObjCQualifiedIdType()) { llvm::Constant *IDEHType = CGM.getModule().getGlobalVariable("__objc_id_type_info"); if (!IDEHType) IDEHType = new llvm::GlobalVariable(CGM.getModule(), PtrToInt8Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr, "__objc_id_type_info"); return llvm::ConstantExpr::getBitCast(IDEHType, PtrToInt8Ty); } const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>(); assert(PT && "Invalid @catch type."); const ObjCInterfaceType *IT = PT->getInterfaceType(); assert(IT && "Invalid @catch type."); std::string className = IT->getDecl()->getIdentifier()->getName(); std::string typeinfoName = "__objc_eh_typeinfo_" + className; // Return the existing typeinfo if it exists llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName); if (typeinfo) return llvm::ConstantExpr::getBitCast(typeinfo, PtrToInt8Ty); // Otherwise create it. // vtable for gnustep::libobjc::__objc_class_type_info // It's quite ugly hard-coding this. Ideally we'd generate it using the host // platform's name mangling. 
const char *vtableName = "_ZTVN7gnustep7libobjc22__objc_class_type_infoE"; auto *Vtable = TheModule.getGlobalVariable(vtableName); if (!Vtable) { Vtable = new llvm::GlobalVariable(TheModule, PtrToInt8Ty, true, llvm::GlobalValue::ExternalLinkage, nullptr, vtableName); } llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2); auto *BVtable = llvm::ConstantExpr::getBitCast( llvm::ConstantExpr::getGetElementPtr(Vtable->getValueType(), Vtable, Two), PtrToInt8Ty); llvm::Constant *typeName = ExportUniqueString(className, "__objc_eh_typename_"); std::vector<llvm::Constant*> fields; fields.push_back(BVtable); fields.push_back(typeName); llvm::Constant *TI = MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr), fields, "__objc_eh_typeinfo_" + className, llvm::GlobalValue::LinkOnceODRLinkage); return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty); } /// Generate an NSConstantString object. llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) { std::string Str = SL->getString().str(); // Look for an existing one llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str); if (old != ObjCStrings.end()) return old->getValue(); StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass; if (StringClass.empty()) StringClass = "NXConstantString"; std::string Sym = "_OBJC_CLASS_"; Sym += StringClass; llvm::Constant *isa = TheModule.getNamedGlobal(Sym); if (!isa) isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false, llvm::GlobalValue::ExternalWeakLinkage, nullptr, Sym); else if (isa->getType() != PtrToIdTy) isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy); std::vector<llvm::Constant*> Ivars; Ivars.push_back(isa); Ivars.push_back(MakeConstantString(Str)); Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size())); llvm::Constant *ObjCStr = MakeGlobal( llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, nullptr), Ivars, ".objc_str"); ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty); ObjCStrings[Str] = ObjCStr; ConstantStrings.push_back(ObjCStr); return ObjCStr; } ///Generates a message send where the super is the receiver. This is a message ///send to self with special delivery semantics indicating which class's method ///should be called. 
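///For example, [super init] in a method of class Foo sends -init to self but
///starts the method search at Foo's superclass rather than at the receiver's
///dynamic class.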
RValue
CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
                                    ReturnValueSlot Return,
                                    QualType ResultType,
                                    Selector Sel,
                                    const ObjCInterfaceDecl *Class,
                                    bool isCategoryImpl,
                                    llvm::Value *Receiver,
                                    bool IsClassMessage,
                                    const CallArgList &CallArgs,
                                    const ObjCMethodDecl *Method) {
  CGBuilderTy &Builder = CGF.Builder;
  if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
    if (Sel == RetainSel || Sel == AutoreleaseSel) {
      return RValue::get(EnforceType(Builder, Receiver,
                  CGM.getTypes().ConvertType(ResultType)));
    }
    if (Sel == ReleaseSel) {
      return RValue::get(nullptr);
    }
  }

  llvm::Value *cmd = GetSelector(CGF, Sel);

  CallArgList ActualArgs;

  ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
  ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
  ActualArgs.addFrom(CallArgs);

  MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);

  llvm::Value *ReceiverClass = nullptr;
  if (isCategoryImpl) {
    llvm::Constant *classLookupFunction = nullptr;
    if (IsClassMessage) {
      classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
            IdTy, PtrTy, true), "objc_get_meta_class");
    } else {
      classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
            IdTy, PtrTy, true), "objc_get_class");
    }
    ReceiverClass = Builder.CreateCall(classLookupFunction,
        MakeConstantString(Class->getNameAsString()));
  } else {
    // Set up global aliases for the metaclass or class pointer if they do not
    // already exist. These are forward references which will be set to
    // pointers to the class and metaclass structures created for the runtime
    // load function. To send a message to super, we look up the value of the
    // super_class pointer from either the class or metaclass structure.
    if (IsClassMessage) {
      if (!MetaClassPtrAlias) {
        MetaClassPtrAlias = llvm::GlobalAlias::create(
            IdTy, llvm::GlobalValue::InternalLinkage,
            ".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
      }
      ReceiverClass = MetaClassPtrAlias;
    } else {
      if (!ClassPtrAlias) {
        ClassPtrAlias = llvm::GlobalAlias::create(
            IdTy, llvm::GlobalValue::InternalLinkage,
            ".objc_class_ref" + Class->getNameAsString(), &TheModule);
      }
      ReceiverClass = ClassPtrAlias;
    }
  }
  // Cast the pointer to a simplified version of the class structure
  llvm::Type *CastTy = llvm::StructType::get(IdTy, IdTy, nullptr);
  ReceiverClass = Builder.CreateBitCast(ReceiverClass,
                                        llvm::PointerType::getUnqual(CastTy));
  // Get the superclass pointer
  ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1);
  // Load the superclass pointer
  ReceiverClass = Builder.CreateLoad(ReceiverClass);
  // Construct the structure used to look up the IMP
  llvm::StructType *ObjCSuperTy = llvm::StructType::get(
      Receiver->getType(), IdTy, nullptr);
  llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);

  Builder.CreateStore(Receiver,
      Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 0));
  Builder.CreateStore(ReceiverClass,
      Builder.CreateStructGEP(ObjCSuperTy, ObjCSuper, 1));

  ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);

  // Get the IMP
  llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd, MSI);
  imp = EnforceType(Builder, imp, MSI.MessengerType);

  llvm::Metadata *impMD[] = {
      llvm::MDString::get(VMContext, Sel.getAsString()),
      llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          llvm::Type::getInt1Ty(VMContext), IsClassMessage))};
  llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);

  llvm::Instruction *call;
  RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return,
ActualArgs, nullptr, &call); call->setMetadata(msgSendMDKind, node); return msgRet; } /// Generate code for a message send expression. RValue CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType, Selector Sel, llvm::Value *Receiver, const CallArgList &CallArgs, const ObjCInterfaceDecl *Class, const ObjCMethodDecl *Method) { CGBuilderTy &Builder = CGF.Builder; // Strip out message sends to retain / release in GC mode if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) { if (Sel == RetainSel || Sel == AutoreleaseSel) { return RValue::get(EnforceType(Builder, Receiver, CGM.getTypes().ConvertType(ResultType))); } if (Sel == ReleaseSel) { return RValue::get(nullptr); } } // If the return type is something that goes in an integer register, the // runtime will handle 0 returns. For other cases, we fill in the 0 value // ourselves. // // The language spec says the result of this kind of message send is // undefined, but lots of people seem to have forgotten to read that // paragraph and insist on sending messages to nil that have structure // returns. With GCC, this generates a random return value (whatever happens // to be on the stack / in those registers at the time) on most platforms, // and generates an illegal instruction trap on SPARC. With LLVM it corrupts // the stack. bool isPointerSizedReturn = (ResultType->isAnyPointerType() || ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType()); llvm::BasicBlock *startBB = nullptr; llvm::BasicBlock *messageBB = nullptr; llvm::BasicBlock *continueBB = nullptr; if (!isPointerSizedReturn) { startBB = Builder.GetInsertBlock(); messageBB = CGF.createBasicBlock("msgSend"); continueBB = CGF.createBasicBlock("continue"); llvm::Value *isNil = Builder.CreateICmpEQ(Receiver, llvm::Constant::getNullValue(Receiver->getType())); Builder.CreateCondBr(isNil, continueBB, messageBB); CGF.EmitBlock(messageBB); } IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy)); llvm::Value *cmd; if (Method) cmd = GetSelector(CGF, Method); else cmd = GetSelector(CGF, Sel); cmd = EnforceType(Builder, cmd, SelectorTy); Receiver = EnforceType(Builder, Receiver, IdTy); llvm::Metadata *impMD[] = { llvm::MDString::get(VMContext, Sel.getAsString()), llvm::MDString::get(VMContext, Class ? Class->getNameAsString() : ""), llvm::ConstantAsMetadata::get(llvm::ConstantInt::get( llvm::Type::getInt1Ty(VMContext), Class != nullptr))}; llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD); CallArgList ActualArgs; ActualArgs.add(RValue::get(Receiver), ASTIdTy); ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType()); ActualArgs.addFrom(CallArgs); MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs); // Get the IMP to call llvm::Value *imp; // If we have non-legacy dispatch specified, we try using the objc_msgSend() // functions. 
These are not supported on all platforms (or all runtimes on a // given platform), so we switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) { case CodeGenOptions::Legacy: imp = LookupIMP(CGF, Receiver, cmd, node, MSI); break; case CodeGenOptions::Mixed: case CodeGenOptions::NonLegacy: if (CGM.ReturnTypeUsesFPRet(ResultType)) { imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true), "objc_msgSend_fpret"); } else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) { // The actual types here don't matter - we're going to bitcast the // function anyway imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true), "objc_msgSend_stret"); } else { imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true), "objc_msgSend"); } } // Reset the receiver in case the lookup modified it ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false); imp = EnforceType(Builder, imp, MSI.MessengerType); llvm::Instruction *call; RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, nullptr, &call); call->setMetadata(msgSendMDKind, node); if (!isPointerSizedReturn) { messageBB = CGF.Builder.GetInsertBlock(); CGF.Builder.CreateBr(continueBB); CGF.EmitBlock(continueBB); if (msgRet.isScalar()) { llvm::Value *v = msgRet.getScalarVal(); llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2); phi->addIncoming(v, messageBB); phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB); msgRet = RValue::get(phi); } else if (msgRet.isAggregate()) { llvm::Value *v = msgRet.getAggregateAddr(); llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2); llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType()); llvm::AllocaInst *NullVal = CGF.CreateTempAlloca(RetTy->getElementType(), "null"); CGF.InitTempAlloca(NullVal, llvm::Constant::getNullValue(RetTy->getElementType())); phi->addIncoming(v, messageBB); phi->addIncoming(NullVal, startBB); msgRet = RValue::getAggregate(phi); } else /* isComplex() */ { std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal(); llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2); phi->addIncoming(v.first, messageBB); phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()), startBB); llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType(), 2); phi2->addIncoming(v.second, messageBB); phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()), startBB); msgRet = RValue::getComplex(phi, phi2); } } return msgRet; } /// Generates a MethodList. Used in construction of a objc_class and /// objc_category structures. llvm::Constant *CGObjCGNU:: GenerateMethodList(StringRef ClassName, StringRef CategoryName, ArrayRef<Selector> MethodSels, ArrayRef<llvm::Constant *> MethodTypes, bool isClassMethodList) { if (MethodSels.empty()) return NULLPtr; // Get the method structure type. llvm::StructType *ObjCMethodTy = llvm::StructType::get( PtrToInt8Ty, // Really a selector, but the runtime creates it us. 
PtrToInt8Ty, // Method types IMPTy, //Method pointer nullptr); std::vector<llvm::Constant*> Methods; std::vector<llvm::Constant*> Elements; for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) { Elements.clear(); llvm::Constant *Method = TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName, MethodSels[i], isClassMethodList)); assert(Method && "Can't generate metadata for method that doesn't exist"); llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString()); Elements.push_back(C); Elements.push_back(MethodTypes[i]); Method = llvm::ConstantExpr::getBitCast(Method, IMPTy); Elements.push_back(Method); Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements)); } // Array of method structures llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy, Methods.size()); llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy, Methods); // Structure containing list pointer, array and array count llvm::StructType *ObjCMethodListTy = llvm::StructType::create(VMContext); llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy); ObjCMethodListTy->setBody( NextPtrTy, IntTy, ObjCMethodArrayTy, nullptr); Methods.clear(); Methods.push_back(llvm::ConstantPointerNull::get( llvm::PointerType::getUnqual(ObjCMethodListTy))); Methods.push_back(llvm::ConstantInt::get(Int32Ty, MethodTypes.size())); Methods.push_back(MethodArray); // Create an instance of the structure return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list"); } /// Generates an IvarList. Used in construction of a objc_class. llvm::Constant *CGObjCGNU:: GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames, ArrayRef<llvm::Constant *> IvarTypes, ArrayRef<llvm::Constant *> IvarOffsets) { if (IvarNames.size() == 0) return NULLPtr; // Get the method structure type. llvm::StructType *ObjCIvarTy = llvm::StructType::get( PtrToInt8Ty, PtrToInt8Ty, IntTy, nullptr); std::vector<llvm::Constant*> Ivars; std::vector<llvm::Constant*> Elements; for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) { Elements.clear(); Elements.push_back(IvarNames[i]); Elements.push_back(IvarTypes[i]); Elements.push_back(IvarOffsets[i]); Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements)); } // Array of method structures llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy, IvarNames.size()); Elements.clear(); Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size())); Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars)); // Structure containing array and array count llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy, ObjCIvarArrayTy, nullptr); // Create an instance of the structure return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list"); } /// Generate a class structure llvm::Constant *CGObjCGNU::GenerateClassStructure( llvm::Constant *MetaClass, llvm::Constant *SuperClass, unsigned info, const char *Name, llvm::Constant *Version, llvm::Constant *InstanceSize, llvm::Constant *IVars, llvm::Constant *Methods, llvm::Constant *Protocols, llvm::Constant *IvarOffsets, llvm::Constant *Properties, llvm::Constant *StrongIvarBitmap, llvm::Constant *WeakIvarBitmap, bool isMeta) { // Set up the class structure // Note: Several of these are char*s when they should be ids. This is // because the runtime performs this translation on load. // // Fields marked New ABI are part of the GNUstep runtime. We emit them // anyway; the classes will still work with the GNU runtime, they will just // be ignored. 
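  // As a reading aid: the layout below follows the GNU runtime's
  // struct objc_class, with the GNUstep additions (abi_version onwards)
  // appended after the fields that the runtime fills in itself.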
llvm::StructType *ClassTy = llvm::StructType::get( PtrToInt8Ty, // isa PtrToInt8Ty, // super_class PtrToInt8Ty, // name LongTy, // version LongTy, // info LongTy, // instance_size IVars->getType(), // ivars Methods->getType(), // methods // These are all filled in by the runtime, so we pretend PtrTy, // dtable PtrTy, // subclass_list PtrTy, // sibling_class PtrTy, // protocols PtrTy, // gc_object_type // New ABI: LongTy, // abi_version IvarOffsets->getType(), // ivar_offsets Properties->getType(), // properties IntPtrTy, // strong_pointers IntPtrTy, // weak_pointers nullptr); llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0); // Fill in the structure std::vector<llvm::Constant*> Elements; Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty)); Elements.push_back(SuperClass); Elements.push_back(MakeConstantString(Name, ".class_name")); Elements.push_back(Zero); Elements.push_back(llvm::ConstantInt::get(LongTy, info)); if (isMeta) { llvm::DataLayout td(&TheModule); Elements.push_back( llvm::ConstantInt::get(LongTy, td.getTypeSizeInBits(ClassTy) / CGM.getContext().getCharWidth())); } else Elements.push_back(InstanceSize); Elements.push_back(IVars); Elements.push_back(Methods); Elements.push_back(NULLPtr); Elements.push_back(NULLPtr); Elements.push_back(NULLPtr); Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy)); Elements.push_back(NULLPtr); Elements.push_back(llvm::ConstantInt::get(LongTy, 1)); Elements.push_back(IvarOffsets); Elements.push_back(Properties); Elements.push_back(StrongIvarBitmap); Elements.push_back(WeakIvarBitmap); // Create an instance of the structure // This is now an externally visible symbol, so that we can speed up class // messages in the next ABI. We may already have some weak references to // this, so check and fix them properly. std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") + std::string(Name)); llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym); llvm::Constant *Class = MakeGlobal(ClassTy, Elements, ClassSym, llvm::GlobalValue::ExternalLinkage); if (ClassRef) { ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class, ClassRef->getType())); ClassRef->removeFromParent(); Class->setName(ClassSym); } return Class; } llvm::Constant *CGObjCGNU:: GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames, ArrayRef<llvm::Constant *> MethodTypes) { // Get the method structure type. llvm::StructType *ObjCMethodDescTy = llvm::StructType::get( PtrToInt8Ty, // Really a selector, but the runtime does the casting for us. 
      PtrToInt8Ty,
      nullptr);
  std::vector<llvm::Constant*> Methods;
  std::vector<llvm::Constant*> Elements;
  for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
    Elements.clear();
    Elements.push_back(MethodNames[i]);
    Elements.push_back(MethodTypes[i]);
    Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
  }
  llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
      MethodNames.size());
  llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
      Methods);
  llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
      IntTy, ObjCMethodArrayTy, nullptr);
  Methods.clear();
  Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
  Methods.push_back(Array);
  return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
}

// Create the protocol list structure used in classes, categories and so on
llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
  llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
      Protocols.size());
  llvm::StructType *ProtocolListTy = llvm::StructType::get(
      PtrTy, //Should be a recursive pointer, but it's always NULL here.
      SizeTy,
      ProtocolArrayTy,
      nullptr);
  std::vector<llvm::Constant*> Elements;
  for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
      iter != endIter ; iter++) {
    llvm::Constant *protocol = nullptr;
    llvm::StringMap<llvm::Constant*>::iterator value =
      ExistingProtocols.find(*iter);
    if (value == ExistingProtocols.end()) {
      protocol = GenerateEmptyProtocol(*iter);
    } else {
      protocol = value->getValue();
    }
    llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
        PtrToInt8Ty);
    Elements.push_back(Ptr);
  }
  llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
      Elements);
  Elements.clear();
  Elements.push_back(NULLPtr);
  Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
  Elements.push_back(ProtocolArray);
  return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
}

llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
                                            const ObjCProtocolDecl *PD) {
  llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
  llvm::Type *T =
    CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
  return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}

llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
  const std::string &ProtocolName) {
  SmallVector<std::string, 0> EmptyStringVector;
  SmallVector<llvm::Constant*, 0> EmptyConstantVector;

  llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
  llvm::Constant *MethodList =
    GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
  // Protocols are objects containing lists of the methods implemented and
  // protocols adopted.
  llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
      PtrToInt8Ty,
      ProtocolList->getType(),
      MethodList->getType(),
      MethodList->getType(),
      MethodList->getType(),
      MethodList->getType(),
      nullptr);
  std::vector<llvm::Constant*> Elements;
  // The isa pointer must be set to a magic number so the runtime knows it's
  // the correct layout.
Elements.push_back(llvm::ConstantExpr::getIntToPtr( llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy)); Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name")); Elements.push_back(ProtocolList); Elements.push_back(MethodList); Elements.push_back(MethodList); Elements.push_back(MethodList); Elements.push_back(MethodList); return MakeGlobal(ProtocolTy, Elements, ".objc_protocol"); } void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) { ASTContext &Context = CGM.getContext(); std::string ProtocolName = PD->getNameAsString(); // Use the protocol definition, if there is one. if (const ObjCProtocolDecl *Def = PD->getDefinition()) PD = Def; SmallVector<std::string, 16> Protocols; for (const auto *PI : PD->protocols()) Protocols.push_back(PI->getNameAsString()); SmallVector<llvm::Constant*, 16> InstanceMethodNames; SmallVector<llvm::Constant*, 16> InstanceMethodTypes; SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames; SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes; for (const auto *I : PD->instance_methods()) { std::string TypeStr; Context.getObjCEncodingForMethodDecl(I, TypeStr); if (I->getImplementationControl() == ObjCMethodDecl::Optional) { OptionalInstanceMethodNames.push_back( MakeConstantString(I->getSelector().getAsString())); OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr)); } else { InstanceMethodNames.push_back( MakeConstantString(I->getSelector().getAsString())); InstanceMethodTypes.push_back(MakeConstantString(TypeStr)); } } // Collect information about class methods: SmallVector<llvm::Constant*, 16> ClassMethodNames; SmallVector<llvm::Constant*, 16> ClassMethodTypes; SmallVector<llvm::Constant*, 16> OptionalClassMethodNames; SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes; for (const auto *I : PD->class_methods()) { std::string TypeStr; Context.getObjCEncodingForMethodDecl(I,TypeStr); if (I->getImplementationControl() == ObjCMethodDecl::Optional) { OptionalClassMethodNames.push_back( MakeConstantString(I->getSelector().getAsString())); OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr)); } else { ClassMethodNames.push_back( MakeConstantString(I->getSelector().getAsString())); ClassMethodTypes.push_back(MakeConstantString(TypeStr)); } } llvm::Constant *ProtocolList = GenerateProtocolList(Protocols); llvm::Constant *InstanceMethodList = GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes); llvm::Constant *ClassMethodList = GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes); llvm::Constant *OptionalInstanceMethodList = GenerateProtocolMethodList(OptionalInstanceMethodNames, OptionalInstanceMethodTypes); llvm::Constant *OptionalClassMethodList = GenerateProtocolMethodList(OptionalClassMethodNames, OptionalClassMethodTypes); // Property metadata: name, attributes, isSynthesized, setter name, setter // types, getter name, getter types. // The isSynthesized value is always set to 0 in a protocol. It exists to // simplify the runtime library by allowing it to use the same data // structures for protocol metadata everywhere. llvm::StructType *PropertyMetadataTy = llvm::StructType::get( PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr); std::vector<llvm::Constant*> Properties; std::vector<llvm::Constant*> OptionalProperties; // Add all of the property methods need adding to the method list and to the // property metadata list. 
for (auto *property : PD->properties()) { std::vector<llvm::Constant*> Fields; Fields.push_back(MakePropertyEncodingString(property, nullptr)); PushPropertyAttributes(Fields, property); if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) { std::string TypeStr; Context.getObjCEncodingForMethodDecl(getter,TypeStr); llvm::Constant *TypeEncoding = MakeConstantString(TypeStr); InstanceMethodTypes.push_back(TypeEncoding); Fields.push_back(MakeConstantString(getter->getSelector().getAsString())); Fields.push_back(TypeEncoding); } else { Fields.push_back(NULLPtr); Fields.push_back(NULLPtr); } if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) { std::string TypeStr; Context.getObjCEncodingForMethodDecl(setter,TypeStr); llvm::Constant *TypeEncoding = MakeConstantString(TypeStr); InstanceMethodTypes.push_back(TypeEncoding); Fields.push_back(MakeConstantString(setter->getSelector().getAsString())); Fields.push_back(TypeEncoding); } else { Fields.push_back(NULLPtr); Fields.push_back(NULLPtr); } if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) { OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields)); } else { Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields)); } } llvm::Constant *PropertyArray = llvm::ConstantArray::get( llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties); llvm::Constant* PropertyListInitFields[] = {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray}; llvm::Constant *PropertyListInit = llvm::ConstantStruct::getAnon(PropertyListInitFields); llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage, PropertyListInit, ".objc_property_list"); llvm::Constant *OptionalPropertyArray = llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy, OptionalProperties.size()) , OptionalProperties); llvm::Constant* OptionalPropertyListInitFields[] = { llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr, OptionalPropertyArray }; llvm::Constant *OptionalPropertyListInit = llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields); llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule, OptionalPropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit, ".objc_property_list"); // Protocols are objects containing lists of the methods implemented and // protocols adopted. llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy, PtrToInt8Ty, ProtocolList->getType(), InstanceMethodList->getType(), ClassMethodList->getType(), OptionalInstanceMethodList->getType(), OptionalClassMethodList->getType(), PropertyList->getType(), OptionalPropertyList->getType(), nullptr); std::vector<llvm::Constant*> Elements; // The isa pointer must be set to a magic number so the runtime knows it's // the correct layout. 
  Elements.push_back(llvm::ConstantExpr::getIntToPtr(
      llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
  Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
  Elements.push_back(ProtocolList);
  Elements.push_back(InstanceMethodList);
  Elements.push_back(ClassMethodList);
  Elements.push_back(OptionalInstanceMethodList);
  Elements.push_back(OptionalClassMethodList);
  Elements.push_back(PropertyList);
  Elements.push_back(OptionalPropertyList);
  ExistingProtocols[ProtocolName] =
      llvm::ConstantExpr::getBitCast(
          MakeGlobal(ProtocolTy, Elements, ".objc_protocol"), IdTy);
}

void CGObjCGNU::GenerateProtocolHolderCategory() {
  // Collect information about instance methods
  SmallVector<Selector, 1> MethodSels;
  SmallVector<llvm::Constant*, 1> MethodTypes;

  std::vector<llvm::Constant*> Elements;
  const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
  const std::string CategoryName = "AnotherHack";
  Elements.push_back(MakeConstantString(CategoryName));
  Elements.push_back(MakeConstantString(ClassName));
  // Instance method list
  Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
      ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
  // Class method list
  Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
      ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
  // Protocol list
  llvm::ArrayType *ProtocolArrayTy =
      llvm::ArrayType::get(PtrTy, ExistingProtocols.size());
  llvm::StructType *ProtocolListTy = llvm::StructType::get(
      PtrTy, // Should be a recursive pointer, but it's always NULL here.
      SizeTy,
      ProtocolArrayTy,
      nullptr);
  std::vector<llvm::Constant*> ProtocolElements;
  for (llvm::StringMapIterator<llvm::Constant*> iter =
           ExistingProtocols.begin(), endIter = ExistingProtocols.end();
       iter != endIter ; iter++) {
    llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
                                                         PtrTy);
    ProtocolElements.push_back(Ptr);
  }
  llvm::Constant *ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
                                                           ProtocolElements);
  ProtocolElements.clear();
  ProtocolElements.push_back(NULLPtr);
  ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
              ExistingProtocols.size()));
  ProtocolElements.push_back(ProtocolArray);
  Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
                  ProtocolElements, ".objc_protocol_list"), PtrTy));
  Categories.push_back(llvm::ConstantExpr::getBitCast(
        MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
            PtrTy, PtrTy, PtrTy, nullptr), Elements), PtrTy));
}

/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
/// stored in a 64-bit value with the low bit set to 1 and the remaining 63
/// bits set to their values, LSB first, while larger ones are stored in a
/// structure of this form:
///
/// struct { int32_t length; int32_t values[length]; };
///
/// The values in the array are stored in host-endian format, with the least
/// significant bit being assumed to come first in the bitfield. Therefore, a
/// bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] }, while a
/// bitfield with the 63rd bit set will be (1ULL<<63) | 1.
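// Worked example (added for illustration; values follow the scheme described
// above and the code below): for bits = {true, false, true} on a 64-bit
// target, bitCount (3) < ptrBits (64), so the inline form is used:
//
//   val = 1                  // low "inline" marker bit
//       | 1ULL << (0 + 1)    // bits[0] -> bit 1
//       | 1ULL << (2 + 1)    // bits[2] -> bit 3
//       = 0b1011 = 11
//
// A 64-entry bitfield no longer fits beside the marker bit, so it is emitted
// as the { length, values[] } structure instead, as noted above.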
llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) { int bitCount = bits.size(); int ptrBits = CGM.getDataLayout().getPointerSizeInBits(); if (bitCount < ptrBits) { uint64_t val = 1; for (int i=0 ; i<bitCount ; ++i) { if (bits[i]) val |= 1ULL<<(i+1); } return llvm::ConstantInt::get(IntPtrTy, val); } SmallVector<llvm::Constant *, 8> values; int v=0; while (v < bitCount) { int32_t word = 0; for (int i=0 ; (i<32) && (v<bitCount) ; ++i) { if (bits[v]) word |= 1<<i; v++; } values.push_back(llvm::ConstantInt::get(Int32Ty, word)); } llvm::ArrayType *arrayTy = llvm::ArrayType::get(Int32Ty, values.size()); llvm::Constant *array = llvm::ConstantArray::get(arrayTy, values); llvm::Constant *fields[2] = { llvm::ConstantInt::get(Int32Ty, values.size()), array }; llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy, nullptr), fields); llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy); return ptr; } void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) { std::string ClassName = OCD->getClassInterface()->getNameAsString(); std::string CategoryName = OCD->getNameAsString(); // Collect information about instance methods SmallVector<Selector, 16> InstanceMethodSels; SmallVector<llvm::Constant*, 16> InstanceMethodTypes; for (const auto *I : OCD->instance_methods()) { InstanceMethodSels.push_back(I->getSelector()); std::string TypeStr; CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr); InstanceMethodTypes.push_back(MakeConstantString(TypeStr)); } // Collect information about class methods SmallVector<Selector, 16> ClassMethodSels; SmallVector<llvm::Constant*, 16> ClassMethodTypes; for (const auto *I : OCD->class_methods()) { ClassMethodSels.push_back(I->getSelector()); std::string TypeStr; CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr); ClassMethodTypes.push_back(MakeConstantString(TypeStr)); } // Collect the names of referenced protocols SmallVector<std::string, 16> Protocols; const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl(); const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols(); for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(), E = Protos.end(); I != E; ++I) Protocols.push_back((*I)->getNameAsString()); std::vector<llvm::Constant*> Elements; Elements.push_back(MakeConstantString(CategoryName)); Elements.push_back(MakeConstantString(ClassName)); // Instance method list Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList( ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes, false), PtrTy)); // Class method list Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList( ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true), PtrTy)); // Protocol list Elements.push_back(llvm::ConstantExpr::getBitCast( GenerateProtocolList(Protocols), PtrTy)); Categories.push_back(llvm::ConstantExpr::getBitCast( MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, PtrTy, PtrTy, PtrTy, nullptr), Elements), PtrTy)); } llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID, SmallVectorImpl<Selector> &InstanceMethodSels, SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) { ASTContext &Context = CGM.getContext(); // Property metadata: name, attributes, attributes2, padding1, padding2, // setter name, setter types, getter name, getter types. 
llvm::StructType *PropertyMetadataTy = llvm::StructType::get( PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr); std::vector<llvm::Constant*> Properties; // Add all of the property methods need adding to the method list and to the // property metadata list. for (auto *propertyImpl : OID->property_impls()) { std::vector<llvm::Constant*> Fields; ObjCPropertyDecl *property = propertyImpl->getPropertyDecl(); bool isSynthesized = (propertyImpl->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize); bool isDynamic = (propertyImpl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic); Fields.push_back(MakePropertyEncodingString(property, OID)); PushPropertyAttributes(Fields, property, isSynthesized, isDynamic); if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) { std::string TypeStr; Context.getObjCEncodingForMethodDecl(getter,TypeStr); llvm::Constant *TypeEncoding = MakeConstantString(TypeStr); if (isSynthesized) { InstanceMethodTypes.push_back(TypeEncoding); InstanceMethodSels.push_back(getter->getSelector()); } Fields.push_back(MakeConstantString(getter->getSelector().getAsString())); Fields.push_back(TypeEncoding); } else { Fields.push_back(NULLPtr); Fields.push_back(NULLPtr); } if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) { std::string TypeStr; Context.getObjCEncodingForMethodDecl(setter,TypeStr); llvm::Constant *TypeEncoding = MakeConstantString(TypeStr); if (isSynthesized) { InstanceMethodTypes.push_back(TypeEncoding); InstanceMethodSels.push_back(setter->getSelector()); } Fields.push_back(MakeConstantString(setter->getSelector().getAsString())); Fields.push_back(TypeEncoding); } else { Fields.push_back(NULLPtr); Fields.push_back(NULLPtr); } Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields)); } llvm::ArrayType *PropertyArrayTy = llvm::ArrayType::get(PropertyMetadataTy, Properties.size()); llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy, Properties); llvm::Constant* PropertyListInitFields[] = {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray}; llvm::Constant *PropertyListInit = llvm::ConstantStruct::getAnon(PropertyListInitFields); return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage, PropertyListInit, ".objc_property_list"); } void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) { // Get the class declaration for which the alias is specified. ObjCInterfaceDecl *ClassDecl = const_cast<ObjCInterfaceDecl *>(OAD->getClassInterface()); ClassAliases.emplace_back(ClassDecl->getNameAsString(), OAD->getNameAsString()); } void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) { ASTContext &Context = CGM.getContext(); // Get the superclass name. const ObjCInterfaceDecl * SuperClassDecl = OID->getClassInterface()->getSuperClass(); std::string SuperClassName; if (SuperClassDecl) { SuperClassName = SuperClassDecl->getNameAsString(); EmitClassRef(SuperClassName); } // Get the class name ObjCInterfaceDecl *ClassDecl = const_cast<ObjCInterfaceDecl *>(OID->getClassInterface()); std::string ClassName = ClassDecl->getNameAsString(); // Emit the symbol that is used to generate linker errors if this class is // referenced in other modules but not declared. 
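  // Illustrative sketch (added commentary, not part of the original file):
  // the symbol created below is the usual GNU-runtime trick for link-time
  // class checking. Roughly, and with an assumed class name for illustration:
  //
  //   long __objc_class_name_MyClass = 0;     // defining module
  //   extern long __objc_class_name_MyClass;  // referencing modules
  //
  // A module that references a class it never links against then fails with
  // an undefined reference to __objc_class_name_MyClass instead of crashing
  // at run time.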
std::string classSymbolName = "__objc_class_name_" + ClassName; if (llvm::GlobalVariable *symbol = TheModule.getGlobalVariable(classSymbolName)) { symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0)); } else { new llvm::GlobalVariable(TheModule, LongTy, false, llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0), classSymbolName); } // Get the size of instances. int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize().getQuantity(); // Collect information about instance variables. SmallVector<llvm::Constant*, 16> IvarNames; SmallVector<llvm::Constant*, 16> IvarTypes; SmallVector<llvm::Constant*, 16> IvarOffsets; std::vector<llvm::Constant*> IvarOffsetValues; SmallVector<bool, 16> WeakIvars; SmallVector<bool, 16> StrongIvars; int superInstanceSize = !SuperClassDecl ? 0 : Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity(); // For non-fragile ivars, set the instance size to 0 - {the size of just this // class}. The runtime will then set this to the correct value on load. if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) { instanceSize = 0 - (instanceSize - superInstanceSize); } for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD; IVD = IVD->getNextIvar()) { // Store the name IvarNames.push_back(MakeConstantString(IVD->getNameAsString())); // Get the type encoding for this ivar std::string TypeStr; Context.getObjCEncodingForType(IVD->getType(), TypeStr); IvarTypes.push_back(MakeConstantString(TypeStr)); // Get the offset uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD); uint64_t Offset = BaseOffset; if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) { Offset = BaseOffset - superInstanceSize; } llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset); // Create the direct offset value std::string OffsetName = "__objc_ivar_offset_value_" + ClassName +"." + IVD->getNameAsString(); llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName); if (OffsetVar) { OffsetVar->setInitializer(OffsetValue); // If this is the real definition, change its linkage type so that // different modules will use this one, rather than their private // copy. OffsetVar->setLinkage(llvm::GlobalValue::ExternalLinkage); } else OffsetVar = new llvm::GlobalVariable(TheModule, IntTy, false, llvm::GlobalValue::ExternalLinkage, OffsetValue, "__objc_ivar_offset_value_" + ClassName +"." 
+ IVD->getNameAsString()); IvarOffsets.push_back(OffsetValue); IvarOffsetValues.push_back(OffsetVar); Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime(); switch (lt) { case Qualifiers::OCL_Strong: StrongIvars.push_back(true); WeakIvars.push_back(false); break; case Qualifiers::OCL_Weak: StrongIvars.push_back(false); WeakIvars.push_back(true); break; default: StrongIvars.push_back(false); WeakIvars.push_back(false); } } llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars); llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars); llvm::GlobalVariable *IvarOffsetArray = MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets"); // Collect information about instance methods SmallVector<Selector, 16> InstanceMethodSels; SmallVector<llvm::Constant*, 16> InstanceMethodTypes; for (const auto *I : OID->instance_methods()) { InstanceMethodSels.push_back(I->getSelector()); std::string TypeStr; Context.getObjCEncodingForMethodDecl(I,TypeStr); InstanceMethodTypes.push_back(MakeConstantString(TypeStr)); } llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels, InstanceMethodTypes); // Collect information about class methods SmallVector<Selector, 16> ClassMethodSels; SmallVector<llvm::Constant*, 16> ClassMethodTypes; for (const auto *I : OID->class_methods()) { ClassMethodSels.push_back(I->getSelector()); std::string TypeStr; Context.getObjCEncodingForMethodDecl(I,TypeStr); ClassMethodTypes.push_back(MakeConstantString(TypeStr)); } // Collect the names of referenced protocols SmallVector<std::string, 16> Protocols; for (const auto *I : ClassDecl->protocols()) Protocols.push_back(I->getNameAsString()); // Get the superclass pointer. llvm::Constant *SuperClass; if (!SuperClassName.empty()) { SuperClass = MakeConstantString(SuperClassName, ".super_class_name"); } else { SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty); } // Empty vector used to construct empty method lists SmallVector<llvm::Constant*, 1> empty; // Generate the method and instance variable lists llvm::Constant *MethodList = GenerateMethodList(ClassName, "", InstanceMethodSels, InstanceMethodTypes, false); llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "", ClassMethodSels, ClassMethodTypes, true); llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes, IvarOffsets); // Irrespective of whether we are compiling for a fragile or non-fragile ABI, // we emit a symbol containing the offset for each ivar in the class. This // allows code compiled for the non-Fragile ABI to inherit from code compiled // for the legacy ABI, without causing problems. The converse is also // possible, but causes all ivar accesses to be fragile. // Offset pointer for getting at the correct field in the ivar list when // setting up the alias. These are: The base address for the global, the // ivar array (second field), the ivar in this list (set for each ivar), and // the offset (third field in ivar structure) llvm::Type *IndexTy = Int32Ty; llvm::Constant *offsetPointerIndexes[] = {Zeros[0], llvm::ConstantInt::get(IndexTy, 1), nullptr, llvm::ConstantInt::get(IndexTy, 2) }; unsigned ivarIndex = 0; for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD; IVD = IVD->getNextIvar()) { const std::string Name = "__objc_ivar_offset_" + ClassName + '.' 
+ IVD->getNameAsString(); offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, ivarIndex); // Get the correct ivar field llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr( cast<llvm::GlobalVariable>(IvarList)->getValueType(), IvarList, offsetPointerIndexes); // Get the existing variable, if one exists. llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name); if (offset) { offset->setInitializer(offsetValue); // If this is the real definition, change its linkage type so that // different modules will use this one, rather than their private // copy. offset->setLinkage(llvm::GlobalValue::ExternalLinkage); } else { // Add a new alias if there isn't one already. offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(), false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name); (void) offset; // Silence dead store warning. } ++ivarIndex; } llvm::Constant *ZeroPtr = llvm::ConstantInt::get(IntPtrTy, 0); //Generate metaclass for class methods llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr, NULLPtr, 0x12L, ClassName.c_str(), nullptr, Zeros[0], GenerateIvarList( empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr, ZeroPtr, ZeroPtr, true); // Generate the class structure llvm::Constant *ClassStruct = GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L, ClassName.c_str(), nullptr, llvm::ConstantInt::get(LongTy, instanceSize), IvarList, MethodList, GenerateProtocolList(Protocols), IvarOffsetArray, Properties, StrongIvarBitmap, WeakIvarBitmap); // Resolve the class aliases, if they exist. if (ClassPtrAlias) { ClassPtrAlias->replaceAllUsesWith( llvm::ConstantExpr::getBitCast(ClassStruct, IdTy)); ClassPtrAlias->eraseFromParent(); ClassPtrAlias = nullptr; } if (MetaClassPtrAlias) { MetaClassPtrAlias->replaceAllUsesWith( llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy)); MetaClassPtrAlias->eraseFromParent(); MetaClassPtrAlias = nullptr; } // Add class structure to list to be added to the symtab later ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty); Classes.push_back(ClassStruct); } llvm::Function *CGObjCGNU::ModuleInitFunction() { // Only emit an ObjC load function if no Objective-C stuff has been called if (Classes.empty() && Categories.empty() && ConstantStrings.empty() && ExistingProtocols.empty() && SelectorTable.empty()) return nullptr; // Add all referenced protocols to a category. 
GenerateProtocolHolderCategory(); llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>( SelectorTy->getElementType()); llvm::Type *SelStructPtrTy = SelectorTy; if (!SelStructTy) { SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr); SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy); } std::vector<llvm::Constant*> Elements; llvm::Constant *Statics = NULLPtr; // Generate statics list: if (!ConstantStrings.empty()) { llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty, ConstantStrings.size() + 1); ConstantStrings.push_back(NULLPtr); StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass; if (StringClass.empty()) StringClass = "NXConstantString"; Elements.push_back(MakeConstantString(StringClass, ".objc_static_class_name")); Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy, ConstantStrings)); llvm::StructType *StaticsListTy = llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, nullptr); llvm::Type *StaticsListPtrTy = llvm::PointerType::getUnqual(StaticsListTy); Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics"); llvm::ArrayType *StaticsListArrayTy = llvm::ArrayType::get(StaticsListPtrTy, 2); Elements.clear(); Elements.push_back(Statics); Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy)); Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr"); Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy); } // Array of classes, categories, and constant objects llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty, Classes.size() + Categories.size() + 2); llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy, llvm::Type::getInt16Ty(VMContext), llvm::Type::getInt16Ty(VMContext), ClassListTy, nullptr); Elements.clear(); // Pointer to an array of selectors used in this module. std::vector<llvm::Constant*> Selectors; std::vector<llvm::GlobalAlias*> SelectorAliases; for (SelectorMap::iterator iter = SelectorTable.begin(), iterEnd = SelectorTable.end(); iter != iterEnd ; ++iter) { std::string SelNameStr = iter->first.getAsString(); llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name"); SmallVectorImpl<TypedSelector> &Types = iter->second; for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(), e = Types.end() ; i!=e ; i++) { llvm::Constant *SelectorTypeEncoding = NULLPtr; if (!i->first.empty()) SelectorTypeEncoding = MakeConstantString(i->first, ".objc_sel_types"); Elements.push_back(SelName); Elements.push_back(SelectorTypeEncoding); Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements)); Elements.clear(); // Store the selector alias for later replacement SelectorAliases.push_back(i->second); } } unsigned SelectorCount = Selectors.size(); // NULL-terminate the selector list. This should not actually be required, // because the selector list has a length field. Unfortunately, the GCC // runtime decides to ignore the length field and expects a NULL terminator, // and GCC cooperates with this by always setting the length to 0. Elements.push_back(NULLPtr); Elements.push_back(NULLPtr); Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements)); Elements.clear(); // Number of static selectors Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount)); llvm::GlobalVariable *SelectorList = MakeGlobalArray(SelStructTy, Selectors, ".objc_selector_list"); Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList, SelStructPtrTy)); // Now that all of the static selectors exist, create pointers to them. 
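  // Illustrative sketch (added commentary, not part of the original file):
  // each entry written into the ".objc_selector_list" array above is a
  // { i8*, i8* } pair, roughly the following C struct (field names assumed
  // for illustration); the trailing { NULL, NULL } entry is the terminator
  // the GCC runtime expects, as explained above:
  //
  //   struct objc_selector_entry {
  //     const char *name;   // ".objc_sel_name" string
  //     const char *types;  // ".objc_sel_types" encoding, may be NULL
  //   };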
for (unsigned int i=0 ; i<SelectorCount ; i++) { llvm::Constant *Idxs[] = {Zeros[0], llvm::ConstantInt::get(Int32Ty, i), Zeros[0]}; // FIXME: We're generating redundant loads and stores here! llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr( SelectorList->getValueType(), SelectorList, makeArrayRef(Idxs, 2)); // If selectors are defined as an opaque type, cast the pointer to this // type. SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy); SelectorAliases[i]->replaceAllUsesWith(SelPtr); SelectorAliases[i]->eraseFromParent(); } // Number of classes defined. Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext), Classes.size())); // Number of categories defined Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext), Categories.size())); // Create an array of classes, then categories, then static object instances Classes.insert(Classes.end(), Categories.begin(), Categories.end()); // NULL-terminated list of static object instances (mainly constant strings) Classes.push_back(Statics); Classes.push_back(NULLPtr); llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes); Elements.push_back(ClassList); // Construct the symbol table llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements); // The symbol table is contained in a module which has some version-checking // constants llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy, PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), (RuntimeVersion >= 10) ? IntTy : nullptr, nullptr); Elements.clear(); // Runtime version, used for ABI compatibility checking. Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion)); // sizeof(ModuleTy) llvm::DataLayout td(&TheModule); Elements.push_back( llvm::ConstantInt::get(LongTy, td.getTypeSizeInBits(ModuleTy) / CGM.getContext().getCharWidth())); // The path to the source file where this module was declared SourceManager &SM = CGM.getContext().getSourceManager(); const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID()); std::string path = std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName(); Elements.push_back(MakeConstantString(path, ".objc_source_file_name")); Elements.push_back(SymTab); if (RuntimeVersion >= 10) switch (CGM.getLangOpts().getGC()) { case LangOptions::GCOnly: Elements.push_back(llvm::ConstantInt::get(IntTy, 2)); break; case LangOptions::NonGC: if (CGM.getLangOpts().ObjCAutoRefCount) Elements.push_back(llvm::ConstantInt::get(IntTy, 1)); else Elements.push_back(llvm::ConstantInt::get(IntTy, 0)); break; case LangOptions::HybridGC: Elements.push_back(llvm::ConstantInt::get(IntTy, 1)); break; } llvm::Value *Module = MakeGlobal(ModuleTy, Elements); // Create the load function calling the runtime entry point with the module // structure llvm::Function * LoadFunction = llvm::Function::Create( llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false), llvm::GlobalValue::InternalLinkage, ".objc_load_function", &TheModule); llvm::BasicBlock *EntryBB = llvm::BasicBlock::Create(VMContext, "entry", LoadFunction); CGBuilderTy Builder(VMContext); Builder.SetInsertPoint(EntryBB); llvm::FunctionType *FT = llvm::FunctionType::get(Builder.getVoidTy(), llvm::PointerType::getUnqual(ModuleTy), true); llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class"); Builder.CreateCall(Register, Module); if (!ClassAliases.empty()) { llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty}; llvm::FunctionType *RegisterAliasTy = llvm::FunctionType::get(Builder.getVoidTy(), 
ArgTypes, false); llvm::Function *RegisterAlias = llvm::Function::Create( RegisterAliasTy, llvm::GlobalValue::ExternalWeakLinkage, "class_registerAlias_np", &TheModule); llvm::BasicBlock *AliasBB = llvm::BasicBlock::Create(VMContext, "alias", LoadFunction); llvm::BasicBlock *NoAliasBB = llvm::BasicBlock::Create(VMContext, "no_alias", LoadFunction); // Branch based on whether the runtime provided class_registerAlias_np() llvm::Value *HasRegisterAlias = Builder.CreateICmpNE(RegisterAlias, llvm::Constant::getNullValue(RegisterAlias->getType())); Builder.CreateCondBr(HasRegisterAlias, AliasBB, NoAliasBB); // The true branch (has alias registration function): Builder.SetInsertPoint(AliasBB); // Emit alias registration calls: for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin(); iter != ClassAliases.end(); ++iter) { llvm::Constant *TheClass = TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(), true); if (TheClass) { TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy); Builder.CreateCall(RegisterAlias, {TheClass, MakeConstantString(iter->second)}); } } // Jump to end: Builder.CreateBr(NoAliasBB); // Missing alias registration function, just return from the function: Builder.SetInsertPoint(NoAliasBB); } Builder.CreateRetVoid(); return LoadFunction; } llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD, const ObjCContainerDecl *CD) { const ObjCCategoryImplDecl *OCD = dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext()); StringRef CategoryName = OCD ? OCD->getName() : ""; StringRef ClassName = CD->getName(); Selector MethodName = OMD->getSelector(); bool isClassMethod = !OMD->isInstanceMethod(); CodeGenTypes &Types = CGM.getTypes(); llvm::FunctionType *MethodTy = Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD)); std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName, MethodName, isClassMethod); llvm::Function *Method = llvm::Function::Create(MethodTy, llvm::GlobalValue::InternalLinkage, FunctionName, &TheModule); return Method; } llvm::Constant *CGObjCGNU::GetPropertyGetFunction() { return GetPropertyFn; } llvm::Constant *CGObjCGNU::GetPropertySetFunction() { return SetPropertyFn; } llvm::Constant *CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic, bool copy) { return nullptr; } llvm::Constant *CGObjCGNU::GetGetStructFunction() { return GetStructPropertyFn; } llvm::Constant *CGObjCGNU::GetSetStructFunction() { return SetStructPropertyFn; } llvm::Constant *CGObjCGNU::GetCppAtomicObjectGetFunction() { return nullptr; } llvm::Constant *CGObjCGNU::GetCppAtomicObjectSetFunction() { return nullptr; } llvm::Constant *CGObjCGNU::EnumerationMutationFunction() { return EnumerationMutationFn; } void CGObjCGNU::EmitSynchronizedStmt(CodeGenFunction &CGF, const ObjCAtSynchronizedStmt &S) { EmitAtSynchronizedStmt(CGF, S, SyncEnterFn, SyncExitFn); } void CGObjCGNU::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) { // Unlike the Apple non-fragile runtimes, which also uses // unwind-based zero cost exceptions, the GNU Objective C runtime's // EH support isn't a veneer over C++ EH. Instead, exception // objects are created by objc_exception_throw and destroyed by // the personality function; this avoids the need for bracketing // catch handlers with calls to __blah_begin_catch/__blah_end_catch // (or even _Unwind_DeleteException), but probably doesn't // interoperate very well with foreign exceptions. // // In Objective-C++ mode, we actually emit something equivalent to the C++ // exception handler. 
  EmitTryCatchStmt(CGF, S, EnterCatchFn, ExitCatchFn, ExceptionReThrowFn);
}

void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
                              const ObjCAtThrowStmt &S,
                              bool ClearInsertionPoint) {
  llvm::Value *ExceptionAsObject;

  if (const Expr *ThrowExpr = S.getThrowExpr()) {
    llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
    ExceptionAsObject = Exception;
  } else {
    assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
           "Unexpected rethrow outside @catch block.");
    ExceptionAsObject = CGF.ObjCEHValueStack.back();
  }
  ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
  llvm::CallSite Throw =
      CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
  Throw.setDoesNotReturn();
  CGF.Builder.CreateUnreachable();
  if (ClearInsertionPoint)
    CGF.Builder.ClearInsertionPoint();
}

llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
                                          llvm::Value *AddrWeakObj) {
  CGBuilderTy &B = CGF.Builder;
  AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
  return B.CreateCall(WeakReadFn.getType(), WeakReadFn, AddrWeakObj);
}

void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
                                   llvm::Value *src, llvm::Value *dst) {
  CGBuilderTy &B = CGF.Builder;
  src = EnforceType(B, src, IdTy);
  dst = EnforceType(B, dst, PtrToIdTy);
  B.CreateCall(WeakAssignFn.getType(), WeakAssignFn, {src, dst});
}

void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
                                     llvm::Value *src, llvm::Value *dst,
                                     bool threadlocal) {
  CGBuilderTy &B = CGF.Builder;
  src = EnforceType(B, src, IdTy);
  dst = EnforceType(B, dst, PtrToIdTy);
  // FIXME. Add threadlocal assign API
  assert(!threadlocal && "EmitObjCGlobalAssign - Thread Local API NYI");
  B.CreateCall(GlobalAssignFn.getType(), GlobalAssignFn, {src, dst});
}

void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
                                   llvm::Value *src, llvm::Value *dst,
                                   llvm::Value *ivarOffset) {
  CGBuilderTy &B = CGF.Builder;
  src = EnforceType(B, src, IdTy);
  dst = EnforceType(B, dst, IdTy);
  B.CreateCall(IvarAssignFn.getType(), IvarAssignFn, {src, dst, ivarOffset});
}

void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
                                         llvm::Value *src, llvm::Value *dst) {
  CGBuilderTy &B = CGF.Builder;
  src = EnforceType(B, src, IdTy);
  dst = EnforceType(B, dst, PtrToIdTy);
  B.CreateCall(StrongCastAssignFn.getType(), StrongCastAssignFn, {src, dst});
}

void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
                                         llvm::Value *DestPtr,
                                         llvm::Value *SrcPtr,
                                         llvm::Value *Size) {
  CGBuilderTy &B = CGF.Builder;
  DestPtr = EnforceType(B, DestPtr, PtrTy);
  SrcPtr = EnforceType(B, SrcPtr, PtrTy);
  B.CreateCall(MemMoveFn.getType(), MemMoveFn, {DestPtr, SrcPtr, Size});
}

llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
                              const ObjCInterfaceDecl *ID,
                              const ObjCIvarDecl *Ivar) {
  const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
    + '.' + Ivar->getNameAsString();
  // Emit the variable and initialize it with what we think the correct value
  // is. This allows code compiled with non-fragile ivars to work correctly
  // when linked against code which isn't (most of the time).
  llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
  if (!IvarOffsetPointer) {
    // This will cause a run-time crash if we accidentally use it. A value of
    // 0 would seem more sensible, but will silently overwrite the isa pointer
    // causing a great deal of confusion.
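    // Illustrative sketch (added commentary, not part of the original file):
    // the non-fragile scheme emits two symbols per ivar, roughly equivalent
    // to the following (shown as pseudo-C for illustration only; the '.' in
    // the names means these exist only at the symbol level):
    //
    //   int __objc_ivar_offset_value_Class.ivar = /*guess*/;  // patched at
    //                                                         // load time
    //   int *__objc_ivar_offset_Class.ivar =
    //       &__objc_ivar_offset_value_Class.ivar;
    //
    // Ivar accesses load through the pointer, so the runtime can slide
    // offsets without recompiling clients.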
uint64_t Offset = -1; // We can't call ComputeIvarBaseOffset() here if we have the // implementation, because it will create an invalid ASTRecordLayout object // that we are then stuck with forever, so we only initialize the ivar // offset variable with a guess if we only have the interface. The // initializer will be reset later anyway, when we are generating the class // description. if (!CGM.getContext().getObjCImplementation( const_cast<ObjCInterfaceDecl *>(ID))) Offset = ComputeIvarBaseOffset(CGM, ID, Ivar); llvm::ConstantInt *OffsetGuess = llvm::ConstantInt::get(Int32Ty, Offset, /*isSigned*/true); // Don't emit the guess in non-PIC code because the linker will not be able // to replace it with the real version for a library. In non-PIC code you // must compile with the fragile ABI if you want to use ivars from a // GCC-compiled class. if (CGM.getLangOpts().PICLevel || CGM.getLangOpts().PIELevel) { llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule, Int32Ty, false, llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess"); IvarOffsetPointer = new llvm::GlobalVariable(TheModule, IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage, IvarOffsetGV, Name); } else { IvarOffsetPointer = new llvm::GlobalVariable(TheModule, llvm::Type::getInt32PtrTy(VMContext), false, llvm::GlobalValue::ExternalLinkage, nullptr, Name); } } return IvarOffsetPointer; } LValue CGObjCGNU::EmitObjCValueForIvar(CodeGenFunction &CGF, QualType ObjectTy, llvm::Value *BaseValue, const ObjCIvarDecl *Ivar, unsigned CVRQualifiers) { const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface(); return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers, EmitIvarOffset(CGF, ID, Ivar)); } static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context, const ObjCInterfaceDecl *OID, const ObjCIvarDecl *OIVD) { for (const ObjCIvarDecl *next = OID->all_declared_ivar_begin(); next; next = next->getNextIvar()) { if (OIVD == next) return OID; } // Otherwise check in the super class. if (const ObjCInterfaceDecl *Super = OID->getSuperClass()) return FindIvarInterface(Context, Super, OIVD); return nullptr; } llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF, const ObjCInterfaceDecl *Interface, const ObjCIvarDecl *Ivar) { if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) { Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar); if (RuntimeVersion < 10) return CGF.Builder.CreateZExtOrBitCast( CGF.Builder.CreateLoad(CGF.Builder.CreateLoad( ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")), PtrDiffTy); std::string name = "__objc_ivar_offset_value_" + Interface->getNameAsString() +"." 
+ Ivar->getNameAsString(); llvm::Value *Offset = TheModule.getGlobalVariable(name); if (!Offset) Offset = new llvm::GlobalVariable(TheModule, IntTy, false, llvm::GlobalValue::LinkOnceAnyLinkage, llvm::Constant::getNullValue(IntTy), name); Offset = CGF.Builder.CreateLoad(Offset); if (Offset->getType() != PtrDiffTy) Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy); return Offset; } uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar); return llvm::ConstantInt::get(PtrDiffTy, Offset, /*isSigned*/true); } CGObjCRuntime * clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) { switch (CGM.getLangOpts().ObjCRuntime.getKind()) { case ObjCRuntime::GNUstep: return new CGObjCGNUstep(CGM); case ObjCRuntime::GCC: return new CGObjCGCC(CGM); case ObjCRuntime::ObjFW: return new CGObjCObjFW(CGM); case ObjCRuntime::FragileMacOSX: case ObjCRuntime::MacOSX: case ObjCRuntime::iOS: llvm_unreachable("these runtimes are not GNU runtimes"); } llvm_unreachable("bad runtime"); } #endif // HLSL Change
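// Illustrative sketch (added commentary, not part of the original file): at
// the C level, the ".objc_load_function" emitted by ModuleInitFunction above
// behaves roughly like the snippet below. __objc_exec_class,
// class_registerAlias_np, and the "_OBJC_CLASS_" prefix are the runtime names
// referenced in the code above; the struct layout and remaining identifiers
// are assumptions for illustration only.
//
//   extern "C" void __objc_exec_class(void *module);
//   extern "C" void class_registerAlias_np(void *cls, const char *alias)
//       __attribute__((weak));
//
//   static void objc_load_function(void) {
//     __objc_exec_class(&module_metadata);  // register classes, categories,
//                                           // selectors, statics
//     if (class_registerAlias_np) {         // weak symbol: may be absent
//       // one call per @compatibility_alias pair recorded in ClassAliases
//       class_registerAlias_np(&_OBJC_CLASS_Original, "Alias");
//     }
//   }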
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CoverageMappingGen.cpp
//===--- CoverageMappingGen.cpp - Coverage mapping generation ---*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Instrumentation-based code coverage mapping generator // //===----------------------------------------------------------------------===// #include "CoverageMappingGen.h" #include "CodeGenFunction.h" #include "clang/AST/StmtVisitor.h" #include "clang/Lex/Lexer.h" #include "llvm/ADT/Optional.h" #include "llvm/ProfileData/CoverageMapping.h" #include "llvm/ProfileData/CoverageMappingReader.h" #include "llvm/ProfileData/CoverageMappingWriter.h" #include "llvm/ProfileData/InstrProfReader.h" #include "llvm/Support/FileSystem.h" using namespace clang; using namespace CodeGen; using namespace llvm::coverage; void CoverageSourceInfo::SourceRangeSkipped(SourceRange Range) { SkippedRanges.push_back(Range); } namespace { /// \brief A region of source code that can be mapped to a counter. class SourceMappingRegion { Counter Count; /// \brief The region's starting location. Optional<SourceLocation> LocStart; /// \brief The region's ending location. Optional<SourceLocation> LocEnd; public: SourceMappingRegion(Counter Count, Optional<SourceLocation> LocStart, Optional<SourceLocation> LocEnd) : Count(Count), LocStart(LocStart), LocEnd(LocEnd) {} SourceMappingRegion(SourceMappingRegion &&Region) : Count(std::move(Region.Count)), LocStart(std::move(Region.LocStart)), LocEnd(std::move(Region.LocEnd)) {} SourceMappingRegion &operator=(SourceMappingRegion &&RHS) { Count = std::move(RHS.Count); LocStart = std::move(RHS.LocStart); LocEnd = std::move(RHS.LocEnd); return *this; } const Counter &getCounter() const { return Count; } void setCounter(Counter C) { Count = C; } bool hasStartLoc() const { return LocStart.hasValue(); } void setStartLoc(SourceLocation Loc) { LocStart = Loc; } const SourceLocation &getStartLoc() const { assert(LocStart && "Region has no start location"); return *LocStart; } bool hasEndLoc() const { return LocEnd.hasValue(); } void setEndLoc(SourceLocation Loc) { LocEnd = Loc; } const SourceLocation &getEndLoc() const { assert(LocEnd && "Region has no end location"); return *LocEnd; } }; /// \brief Provides the common functionality for the different /// coverage mapping region builders. class CoverageMappingBuilder { public: CoverageMappingModuleGen &CVM; SourceManager &SM; const LangOptions &LangOpts; private: /// \brief Map of clang's FileIDs to IDs used for coverage mapping. llvm::SmallDenseMap<FileID, std::pair<unsigned, SourceLocation>, 8> FileIDMapping; public: /// \brief The coverage mapping regions for this function llvm::SmallVector<CounterMappingRegion, 32> MappingRegions; /// \brief The source mapping regions for this function. std::vector<SourceMappingRegion> SourceRegions; CoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM, const LangOptions &LangOpts) : CVM(CVM), SM(SM), LangOpts(LangOpts) {} /// \brief Return the precise end location for the given token. SourceLocation getPreciseTokenLocEnd(SourceLocation Loc) { // We avoid getLocForEndOfToken here, because it doesn't do what we want for // macro locations, which we just treat as expanded files. 
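    // Worked example (added for illustration): if Loc points at the start of
    // the token `foo` at file offset 10, MeasureTokenLength below returns 3
    // and the returned location is offset 13, i.e. just past the token.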
unsigned TokLen = Lexer::MeasureTokenLength(SM.getSpellingLoc(Loc), SM, LangOpts); return Loc.getLocWithOffset(TokLen); } /// \brief Return the start location of an included file or expanded macro. SourceLocation getStartOfFileOrMacro(SourceLocation Loc) { if (Loc.isMacroID()) return Loc.getLocWithOffset(-SM.getFileOffset(Loc)); return SM.getLocForStartOfFile(SM.getFileID(Loc)); } /// \brief Return the end location of an included file or expanded macro. SourceLocation getEndOfFileOrMacro(SourceLocation Loc) { if (Loc.isMacroID()) return Loc.getLocWithOffset(SM.getFileIDSize(SM.getFileID(Loc)) - SM.getFileOffset(Loc)); return SM.getLocForEndOfFile(SM.getFileID(Loc)); } /// \brief Find out where the current file is included or macro is expanded. SourceLocation getIncludeOrExpansionLoc(SourceLocation Loc) { return Loc.isMacroID() ? SM.getImmediateExpansionRange(Loc).first : SM.getIncludeLoc(SM.getFileID(Loc)); } /// \brief Return true if \c Loc is a location in a built-in macro. bool isInBuiltin(SourceLocation Loc) { return strcmp(SM.getBufferName(SM.getSpellingLoc(Loc)), "<built-in>") == 0; } /// \brief Get the start of \c S ignoring macro arguments and builtin macros. SourceLocation getStart(const Stmt *S) { SourceLocation Loc = S->getLocStart(); while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc)) Loc = SM.getImmediateExpansionRange(Loc).first; return Loc; } /// \brief Get the end of \c S ignoring macro arguments and builtin macros. SourceLocation getEnd(const Stmt *S) { SourceLocation Loc = S->getLocEnd(); while (SM.isMacroArgExpansion(Loc) || isInBuiltin(Loc)) Loc = SM.getImmediateExpansionRange(Loc).first; return getPreciseTokenLocEnd(Loc); } /// \brief Find the set of files we have regions for and assign IDs /// /// Fills \c Mapping with the virtual file mapping needed to write out /// coverage and collects the necessary file information to emit source and /// expansion regions. void gatherFileIDs(SmallVectorImpl<unsigned> &Mapping) { FileIDMapping.clear(); SmallVector<FileID, 8> Visited; SmallVector<std::pair<SourceLocation, unsigned>, 8> FileLocs; for (const auto &Region : SourceRegions) { SourceLocation Loc = Region.getStartLoc(); FileID File = SM.getFileID(Loc); if (std::find(Visited.begin(), Visited.end(), File) != Visited.end()) continue; Visited.push_back(File); unsigned Depth = 0; for (SourceLocation Parent = getIncludeOrExpansionLoc(Loc); !Parent.isInvalid(); Parent = getIncludeOrExpansionLoc(Parent)) ++Depth; FileLocs.push_back(std::make_pair(Loc, Depth)); } std::stable_sort(FileLocs.begin(), FileLocs.end(), llvm::less_second()); for (const auto &FL : FileLocs) { SourceLocation Loc = FL.first; FileID SpellingFile = SM.getDecomposedSpellingLoc(Loc).first; auto Entry = SM.getFileEntryForID(SpellingFile); if (!Entry) continue; FileIDMapping[SM.getFileID(Loc)] = std::make_pair(Mapping.size(), Loc); Mapping.push_back(CVM.getFileID(Entry)); } } /// \brief Get the coverage mapping file ID for \c Loc. /// /// If such file id doesn't exist, return None. Optional<unsigned> getCoverageFileID(SourceLocation Loc) { auto Mapping = FileIDMapping.find(SM.getFileID(Loc)); if (Mapping != FileIDMapping.end()) return Mapping->second.first; return None; } /// \brief Return true if the given clang's file id has a corresponding /// coverage file id. bool hasExistingCoverageFileID(FileID File) const { return FileIDMapping.count(File); } /// \brief Gather all the regions that were skipped by the preprocessor /// using the constructs like #if. 
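  /// Example (added for illustration): in
  ///
  ///   #if 0
  ///     dead();
  ///   #endif
  ///
  /// the preprocessor reports the range covered by the #if 0 block as
  /// skipped; the loop below turns it into a "skipped" mapping region,
  /// provided it falls inside the line range this function's regions cover.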
  void gatherSkippedRegions() {
    /// An array of the minimum lineStarts and the maximum lineEnds
    /// for mapping regions from the appropriate source files.
    llvm::SmallVector<std::pair<unsigned, unsigned>, 8> FileLineRanges;
    FileLineRanges.resize(
        FileIDMapping.size(),
        std::make_pair(std::numeric_limits<unsigned>::max(), 0));
    for (const auto &R : MappingRegions) {
      FileLineRanges[R.FileID].first =
          std::min(FileLineRanges[R.FileID].first, R.LineStart);
      FileLineRanges[R.FileID].second =
          std::max(FileLineRanges[R.FileID].second, R.LineEnd);
    }

    auto SkippedRanges = CVM.getSourceInfo().getSkippedRanges();
    for (const auto &I : SkippedRanges) {
      auto LocStart = I.getBegin();
      auto LocEnd = I.getEnd();
      assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
             "region spans multiple files");

      auto CovFileID = getCoverageFileID(LocStart);
      if (!CovFileID)
        continue;
      unsigned LineStart = SM.getSpellingLineNumber(LocStart);
      unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
      unsigned LineEnd = SM.getSpellingLineNumber(LocEnd);
      unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd);
      auto Region = CounterMappingRegion::makeSkipped(
          *CovFileID, LineStart, ColumnStart, LineEnd, ColumnEnd);
      // Make sure that we only collect the regions that are inside
      // the source code of this function.
      if (Region.LineStart >= FileLineRanges[*CovFileID].first &&
          Region.LineEnd <= FileLineRanges[*CovFileID].second)
        MappingRegions.push_back(Region);
    }
  }

  /// \brief Generate the coverage counter mapping regions from collected
  /// source regions.
  void emitSourceRegions() {
    for (const auto &Region : SourceRegions) {
      assert(Region.hasEndLoc() && "incomplete region");

      SourceLocation LocStart = Region.getStartLoc();
      assert(!SM.getFileID(LocStart).isInvalid() && "region in invalid file");

      auto CovFileID = getCoverageFileID(LocStart);
      // Ignore regions that don't have a file, such as builtin macros.
      if (!CovFileID)
        continue;

      SourceLocation LocEnd = Region.getEndLoc();
      assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
             "region spans multiple files");

      // Find the spelling locations for the mapping region.
      unsigned LineStart = SM.getSpellingLineNumber(LocStart);
      unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
      unsigned LineEnd = SM.getSpellingLineNumber(LocEnd);
      unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd);

      assert(LineStart <= LineEnd && "region start and end out of order");
      MappingRegions.push_back(CounterMappingRegion::makeRegion(
          Region.getCounter(), *CovFileID, LineStart, ColumnStart, LineEnd,
          ColumnEnd));
    }
  }

  /// \brief Generate expansion regions for each virtual file we've seen.
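  /// Example (added for illustration): for
  ///
  ///   #define BAR() bar()
  ///   void f() { BAR(); }
  ///
  /// the expansion of BAR is tracked as its own virtual file, and the loop
  /// below emits an expansion region at the spelling location of `BAR()` in
  /// f's file, pointing at the coverage file ID of the expanded text.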
void emitExpansionRegions() { for (const auto &FM : FileIDMapping) { SourceLocation ExpandedLoc = FM.second.second; SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc); if (ParentLoc.isInvalid()) continue; auto ParentFileID = getCoverageFileID(ParentLoc); if (!ParentFileID) continue; auto ExpandedFileID = getCoverageFileID(ExpandedLoc); assert(ExpandedFileID && "expansion in uncovered file"); SourceLocation LocEnd = getPreciseTokenLocEnd(ParentLoc); assert(SM.isWrittenInSameFile(ParentLoc, LocEnd) && "region spans multiple files"); unsigned LineStart = SM.getSpellingLineNumber(ParentLoc); unsigned ColumnStart = SM.getSpellingColumnNumber(ParentLoc); unsigned LineEnd = SM.getSpellingLineNumber(LocEnd); unsigned ColumnEnd = SM.getSpellingColumnNumber(LocEnd); MappingRegions.push_back(CounterMappingRegion::makeExpansion( *ParentFileID, *ExpandedFileID, LineStart, ColumnStart, LineEnd, ColumnEnd)); } } }; /// \brief Creates unreachable coverage regions for the functions that /// are not emitted. struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder { EmptyCoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM, const LangOptions &LangOpts) : CoverageMappingBuilder(CVM, SM, LangOpts) {} void VisitDecl(const Decl *D) { if (!D->hasBody()) return; auto Body = D->getBody(); SourceRegions.emplace_back(Counter(), getStart(Body), getEnd(Body)); } /// \brief Write the mapping data to the output stream void write(llvm::raw_ostream &OS) { SmallVector<unsigned, 16> FileIDMapping; gatherFileIDs(FileIDMapping); emitSourceRegions(); CoverageMappingWriter Writer(FileIDMapping, None, MappingRegions); Writer.write(OS); } }; /// \brief A StmtVisitor that creates coverage mapping regions which map /// from the source code locations to the PGO counters. struct CounterCoverageMappingBuilder : public CoverageMappingBuilder, public ConstStmtVisitor<CounterCoverageMappingBuilder> { /// \brief The map of statements to count values. llvm::DenseMap<const Stmt *, unsigned> &CounterMap; /// \brief A stack of currently live regions. std::vector<SourceMappingRegion> RegionStack; CounterExpressionBuilder Builder; /// \brief A location in the most recently visited file or macro. /// /// This is used to adjust the active source regions appropriately when /// expressions cross file or macro boundaries. SourceLocation MostRecentLocation; /// \brief Return a counter for the subtraction of \c RHS from \c LHS Counter subtractCounters(Counter LHS, Counter RHS) { return Builder.subtract(LHS, RHS); } /// \brief Return a counter for the sum of \c LHS and \c RHS. Counter addCounters(Counter LHS, Counter RHS) { return Builder.add(LHS, RHS); } Counter addCounters(Counter C1, Counter C2, Counter C3) { return addCounters(addCounters(C1, C2), C3); } Counter addCounters(Counter C1, Counter C2, Counter C3, Counter C4) { return addCounters(addCounters(C1, C2, C3), C4); } /// \brief Return the region counter for the given statement. /// /// This should only be called on statements that have a dedicated counter. Counter getRegionCounter(const Stmt *S) { return Counter::getCounter(CounterMap[S]); } /// \brief Push a region onto the stack. /// /// Returns the index on the stack where the region was pushed. This can be /// used with popRegions to exit a "scope", ending the region that was pushed. 
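  /// Usage sketch (added for illustration; this is the idiom propagateCounts
  /// below follows):
  ///
  ///   size_t Index = pushRegion(Count, getStart(S), getEnd(S));
  ///   Visit(S);           // may push and pop nested regions
  ///   popRegions(Index);  // flush everything back to SourceRegions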
size_t pushRegion(Counter Count, Optional<SourceLocation> StartLoc = None, Optional<SourceLocation> EndLoc = None) { if (StartLoc) MostRecentLocation = *StartLoc; RegionStack.emplace_back(Count, StartLoc, EndLoc); return RegionStack.size() - 1; } /// \brief Pop regions from the stack into the function's list of regions. /// /// Adds all regions from \c ParentIndex to the top of the stack to the /// function's \c SourceRegions. void popRegions(size_t ParentIndex) { assert(RegionStack.size() >= ParentIndex && "parent not in stack"); while (RegionStack.size() > ParentIndex) { SourceMappingRegion &Region = RegionStack.back(); if (Region.hasStartLoc()) { SourceLocation StartLoc = Region.getStartLoc(); SourceLocation EndLoc = Region.hasEndLoc() ? Region.getEndLoc() : RegionStack[ParentIndex].getEndLoc(); while (!SM.isWrittenInSameFile(StartLoc, EndLoc)) { // The region ends in a nested file or macro expansion. Create a // separate region for each expansion. SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc); assert(SM.isWrittenInSameFile(NestedLoc, EndLoc)); SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc); EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc)); assert(!EndLoc.isInvalid() && "File exit was not handled before popRegions"); } Region.setEndLoc(EndLoc); MostRecentLocation = EndLoc; // If this region happens to span an entire expansion, we need to make // sure we don't overlap the parent region with it. if (StartLoc == getStartOfFileOrMacro(StartLoc) && EndLoc == getEndOfFileOrMacro(EndLoc)) MostRecentLocation = getIncludeOrExpansionLoc(EndLoc); assert(SM.isWrittenInSameFile(Region.getStartLoc(), EndLoc)); SourceRegions.push_back(std::move(Region)); } RegionStack.pop_back(); } } /// \brief Return the currently active region. SourceMappingRegion &getRegion() { assert(!RegionStack.empty() && "statement has no region"); return RegionStack.back(); } /// \brief Propagate counts through the children of \c S. Counter propagateCounts(Counter TopCount, const Stmt *S) { size_t Index = pushRegion(TopCount, getStart(S), getEnd(S)); Visit(S); Counter ExitCount = getRegion().getCounter(); popRegions(Index); return ExitCount; } /// \brief Adjust the most recently visited location to \c EndLoc. /// /// This should be used after visiting any statements in non-source order. void adjustForOutOfOrderTraversal(SourceLocation EndLoc) { MostRecentLocation = EndLoc; // Avoid adding duplicate regions if we have a completed region on the top // of the stack and are adjusting to the end of a virtual file. if (getRegion().hasEndLoc() && MostRecentLocation == getEndOfFileOrMacro(MostRecentLocation)) MostRecentLocation = getIncludeOrExpansionLoc(MostRecentLocation); } /// \brief Check whether \c Loc is included or expanded from \c Parent. bool isNestedIn(SourceLocation Loc, FileID Parent) { do { Loc = getIncludeOrExpansionLoc(Loc); if (Loc.isInvalid()) return false; } while (!SM.isInFileID(Loc, Parent)); return true; } /// \brief Adjust regions and state when \c NewLoc exits a file. /// /// If moving from our most recently tracked location to \c NewLoc exits any /// files, this adjusts our current region stack and creates the file regions /// for the exited file. void handleFileExit(SourceLocation NewLoc) { if (NewLoc.isInvalid() || SM.isWrittenInSameFile(MostRecentLocation, NewLoc)) return; // If NewLoc is not in a file that contains MostRecentLocation, walk up to // find the common ancestor. 
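    // Example (added for illustration): if MostRecentLocation is inside a
    // macro expanded in header.h, which was included from main.c, and NewLoc
    // is back in main.c, the walk below climbs NewLoc's include/expansion
    // chain until it reaches a file (main.c) that contains
    // MostRecentLocation; the exited macro and header then get their own
    // file regions.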
SourceLocation LCA = NewLoc; FileID ParentFile = SM.getFileID(LCA); while (!isNestedIn(MostRecentLocation, ParentFile)) { LCA = getIncludeOrExpansionLoc(LCA); if (LCA.isInvalid() || SM.isWrittenInSameFile(LCA, MostRecentLocation)) { // Since there isn't a common ancestor, no file was exited. We just need // to adjust our location to the new file. MostRecentLocation = NewLoc; return; } ParentFile = SM.getFileID(LCA); } llvm::SmallSet<SourceLocation, 8> StartLocs; Optional<Counter> ParentCounter; for (auto I = RegionStack.rbegin(), E = RegionStack.rend(); I != E; ++I) { if (!I->hasStartLoc()) continue; SourceLocation Loc = I->getStartLoc(); if (!isNestedIn(Loc, ParentFile)) { ParentCounter = I->getCounter(); break; } while (!SM.isInFileID(Loc, ParentFile)) { // The most nested region for each start location is the one with the // correct count. We avoid creating redundant regions by stopping once // we've seen this region. if (StartLocs.insert(Loc).second) SourceRegions.emplace_back(I->getCounter(), Loc, getEndOfFileOrMacro(Loc)); Loc = getIncludeOrExpansionLoc(Loc); } I->setStartLoc(getPreciseTokenLocEnd(Loc)); } if (ParentCounter) { // If the file is contained completely by another region and doesn't // immediately start its own region, the whole file gets a region // corresponding to the parent. SourceLocation Loc = MostRecentLocation; while (isNestedIn(Loc, ParentFile)) { SourceLocation FileStart = getStartOfFileOrMacro(Loc); if (StartLocs.insert(FileStart).second) SourceRegions.emplace_back(*ParentCounter, FileStart, getEndOfFileOrMacro(Loc)); Loc = getIncludeOrExpansionLoc(Loc); } } MostRecentLocation = NewLoc; } /// \brief Ensure that \c S is included in the current region. void extendRegion(const Stmt *S) { SourceMappingRegion &Region = getRegion(); SourceLocation StartLoc = getStart(S); handleFileExit(StartLoc); if (!Region.hasStartLoc()) Region.setStartLoc(StartLoc); } /// \brief Mark \c S as a terminator, starting a zero region. void terminateRegion(const Stmt *S) { extendRegion(S); SourceMappingRegion &Region = getRegion(); if (!Region.hasEndLoc()) Region.setEndLoc(getEnd(S)); pushRegion(Counter::getZero()); } /// \brief Keep counts of breaks and continues inside loops. 
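  /// Sketch (added for illustration): while visiting
  ///
  ///   while (a)           // pushes BreakContinue entry #1
  ///     while (b) {       // pushes BreakContinue entry #2
  ///       if (c) break;   // adds the current count to #2's BreakCount
  ///     }
  ///
  /// each loop pops its own entry and folds the break/continue counts into
  /// its exit count, so a break only affects the innermost enclosing loop.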
struct BreakContinue { Counter BreakCount; Counter ContinueCount; }; SmallVector<BreakContinue, 8> BreakContinueStack; CounterCoverageMappingBuilder( CoverageMappingModuleGen &CVM, llvm::DenseMap<const Stmt *, unsigned> &CounterMap, SourceManager &SM, const LangOptions &LangOpts) : CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap) {} /// \brief Write the mapping data to the output stream void write(llvm::raw_ostream &OS) { llvm::SmallVector<unsigned, 8> VirtualFileMapping; gatherFileIDs(VirtualFileMapping); emitSourceRegions(); emitExpansionRegions(); gatherSkippedRegions(); CoverageMappingWriter Writer(VirtualFileMapping, Builder.getExpressions(), MappingRegions); Writer.write(OS); } void VisitStmt(const Stmt *S) { if (!S->getLocStart().isInvalid()) extendRegion(S); for (const Stmt *Child : S->children()) if (Child) this->Visit(Child); handleFileExit(getEnd(S)); } void VisitDecl(const Decl *D) { Stmt *Body = D->getBody(); propagateCounts(getRegionCounter(Body), Body); } void VisitReturnStmt(const ReturnStmt *S) { extendRegion(S); if (S->getRetValue()) Visit(S->getRetValue()); terminateRegion(S); } void VisitCXXThrowExpr(const CXXThrowExpr *E) { extendRegion(E); if (E->getSubExpr()) Visit(E->getSubExpr()); terminateRegion(E); } void VisitGotoStmt(const GotoStmt *S) { terminateRegion(S); } void VisitLabelStmt(const LabelStmt *S) { SourceLocation Start = getStart(S); // We can't extendRegion here or we risk overlapping with our new region. handleFileExit(Start); pushRegion(getRegionCounter(S), Start); Visit(S->getSubStmt()); } void VisitBreakStmt(const BreakStmt *S) { assert(!BreakContinueStack.empty() && "break not in a loop or switch!"); BreakContinueStack.back().BreakCount = addCounters( BreakContinueStack.back().BreakCount, getRegion().getCounter()); terminateRegion(S); } void VisitContinueStmt(const ContinueStmt *S) { assert(!BreakContinueStack.empty() && "continue stmt not in a loop!"); BreakContinueStack.back().ContinueCount = addCounters( BreakContinueStack.back().ContinueCount, getRegion().getCounter()); terminateRegion(S); } void VisitWhileStmt(const WhileStmt *S) { extendRegion(S); Counter ParentCount = getRegion().getCounter(); Counter BodyCount = getRegionCounter(S); // Handle the body first so that we can get the backedge count. BreakContinueStack.push_back(BreakContinue()); extendRegion(S->getBody()); Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); // Go back to handle the condition. 
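// Concrete illustration of the counter algebra below (not in the original):
// if the loop is entered ParentCount = p times, the body falls off its end
// BackedgeCount = e times, and `continue` runs c times, then the condition
// is evaluated p + e + c times (CondCount). The condition is true BodyCount
// times, so the loop is left normally CondCount - BodyCount times, plus
// BC.BreakCount times via `break` -- exactly the OutCount expression below.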
Counter CondCount = addCounters(ParentCount, BackedgeCount, BC.ContinueCount); propagateCounts(CondCount, S->getCond()); adjustForOutOfOrderTraversal(getEnd(S)); Counter OutCount = addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount)); if (OutCount != ParentCount) pushRegion(OutCount); } void VisitDoStmt(const DoStmt *S) { extendRegion(S); Counter ParentCount = getRegion().getCounter(); Counter BodyCount = getRegionCounter(S); BreakContinueStack.push_back(BreakContinue()); extendRegion(S->getBody()); Counter BackedgeCount = propagateCounts(addCounters(ParentCount, BodyCount), S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); Counter CondCount = addCounters(BackedgeCount, BC.ContinueCount); propagateCounts(CondCount, S->getCond()); Counter OutCount = addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount)); if (OutCount != ParentCount) pushRegion(OutCount); } void VisitForStmt(const ForStmt *S) { extendRegion(S); if (S->getInit()) Visit(S->getInit()); Counter ParentCount = getRegion().getCounter(); Counter BodyCount = getRegionCounter(S); // Handle the body first so that we can get the backedge count. BreakContinueStack.push_back(BreakContinue()); extendRegion(S->getBody()); Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); // The increment is essentially part of the body but it needs to include // the count for all the continue statements. if (const Stmt *Inc = S->getInc()) propagateCounts(addCounters(BackedgeCount, BC.ContinueCount), Inc); // Go back to handle the condition. Counter CondCount = addCounters(ParentCount, BackedgeCount, BC.ContinueCount); if (const Expr *Cond = S->getCond()) { propagateCounts(CondCount, Cond); adjustForOutOfOrderTraversal(getEnd(S)); } Counter OutCount = addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount)); if (OutCount != ParentCount) pushRegion(OutCount); } void VisitCXXForRangeStmt(const CXXForRangeStmt *S) { extendRegion(S); Visit(S->getLoopVarStmt()); Visit(S->getRangeStmt()); Counter ParentCount = getRegion().getCounter(); Counter BodyCount = getRegionCounter(S); BreakContinueStack.push_back(BreakContinue()); extendRegion(S->getBody()); Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); Counter LoopCount = addCounters(ParentCount, BackedgeCount, BC.ContinueCount); Counter OutCount = addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount)); if (OutCount != ParentCount) pushRegion(OutCount); } void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) { extendRegion(S); Visit(S->getElement()); Counter ParentCount = getRegion().getCounter(); Counter BodyCount = getRegionCounter(S); BreakContinueStack.push_back(BreakContinue()); extendRegion(S->getBody()); Counter BackedgeCount = propagateCounts(BodyCount, S->getBody()); BreakContinue BC = BreakContinueStack.pop_back_val(); Counter LoopCount = addCounters(ParentCount, BackedgeCount, BC.ContinueCount); Counter OutCount = addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount)); if (OutCount != ParentCount) pushRegion(OutCount); } void VisitSwitchStmt(const SwitchStmt *S) { extendRegion(S); Visit(S->getCond()); BreakContinueStack.push_back(BreakContinue()); const Stmt *Body = S->getBody(); extendRegion(Body); if (const auto *CS = dyn_cast<CompoundStmt>(Body)) { if (!CS->body_empty()) { // The body of the switch needs a zero region so that fallthrough counts // behave correctly, but it would 
be misleading to include the braces of // the compound statement in the zeroed area, so we need to handle this // specially. size_t Index = pushRegion(Counter::getZero(), getStart(CS->body_front()), getEnd(CS->body_back())); for (const auto *Child : CS->children()) Visit(Child); popRegions(Index); } } else propagateCounts(Counter::getZero(), Body); BreakContinue BC = BreakContinueStack.pop_back_val(); if (!BreakContinueStack.empty()) BreakContinueStack.back().ContinueCount = addCounters( BreakContinueStack.back().ContinueCount, BC.ContinueCount); Counter ExitCount = getRegionCounter(S); pushRegion(ExitCount); } void VisitSwitchCase(const SwitchCase *S) { extendRegion(S); SourceMappingRegion &Parent = getRegion(); Counter Count = addCounters(Parent.getCounter(), getRegionCounter(S)); // Reuse the existing region if it starts at our label. This is typical of // the first case in a switch. if (Parent.hasStartLoc() && Parent.getStartLoc() == getStart(S)) Parent.setCounter(Count); else pushRegion(Count, getStart(S)); if (const CaseStmt *CS = dyn_cast<CaseStmt>(S)) { Visit(CS->getLHS()); if (const Expr *RHS = CS->getRHS()) Visit(RHS); } Visit(S->getSubStmt()); } void VisitIfStmt(const IfStmt *S) { extendRegion(S); // Extend into the condition before we propagate through it below - this is // needed to handle macros that generate the "if" but not the condition. extendRegion(S->getCond()); Counter ParentCount = getRegion().getCounter(); Counter ThenCount = getRegionCounter(S); // Emitting a counter for the condition makes it easier to interpret the // counter for the body when looking at the coverage. propagateCounts(ParentCount, S->getCond()); extendRegion(S->getThen()); Counter OutCount = propagateCounts(ThenCount, S->getThen()); Counter ElseCount = subtractCounters(ParentCount, ThenCount); if (const Stmt *Else = S->getElse()) { extendRegion(S->getElse()); OutCount = addCounters(OutCount, propagateCounts(ElseCount, Else)); } else OutCount = addCounters(OutCount, ElseCount); if (OutCount != ParentCount) pushRegion(OutCount); } void VisitCXXTryStmt(const CXXTryStmt *S) { extendRegion(S); Visit(S->getTryBlock()); for (unsigned I = 0, E = S->getNumHandlers(); I < E; ++I) Visit(S->getHandler(I)); Counter ExitCount = getRegionCounter(S); pushRegion(ExitCount); } void VisitCXXCatchStmt(const CXXCatchStmt *S) { extendRegion(S); propagateCounts(getRegionCounter(S), S->getHandlerBlock()); } void VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) { extendRegion(E); Counter ParentCount = getRegion().getCounter(); Counter TrueCount = getRegionCounter(E); Visit(E->getCond()); if (!isa<BinaryConditionalOperator>(E)) { extendRegion(E->getTrueExpr()); propagateCounts(TrueCount, E->getTrueExpr()); } extendRegion(E->getFalseExpr()); propagateCounts(subtractCounters(ParentCount, TrueCount), E->getFalseExpr()); } void VisitBinLAnd(const BinaryOperator *E) { extendRegion(E); Visit(E->getLHS()); extendRegion(E->getRHS()); propagateCounts(getRegionCounter(E), E->getRHS()); } void VisitBinLOr(const BinaryOperator *E) { extendRegion(E); Visit(E->getLHS()); extendRegion(E->getRHS()); propagateCounts(getRegionCounter(E), E->getRHS()); } void VisitLambdaExpr(const LambdaExpr *LE) { // Lambdas are treated as their own functions for now, so we shouldn't // propagate counts into them. } }; } static bool isMachO(const CodeGenModule &CGM) { return CGM.getTarget().getTriple().isOSBinFormatMachO(); } static StringRef getCoverageSection(const CodeGenModule &CGM) { return isMachO(CGM) ? 
"__DATA,__llvm_covmap" : "__llvm_covmap"; } static void dump(llvm::raw_ostream &OS, StringRef FunctionName, ArrayRef<CounterExpression> Expressions, ArrayRef<CounterMappingRegion> Regions) { OS << FunctionName << ":\n"; CounterMappingContext Ctx(Expressions); for (const auto &R : Regions) { OS.indent(2); switch (R.Kind) { case CounterMappingRegion::CodeRegion: break; case CounterMappingRegion::ExpansionRegion: OS << "Expansion,"; break; case CounterMappingRegion::SkippedRegion: OS << "Skipped,"; break; } OS << "File " << R.FileID << ", " << R.LineStart << ":" << R.ColumnStart << " -> " << R.LineEnd << ":" << R.ColumnEnd << " = "; Ctx.dump(R.Count, OS); if (R.Kind == CounterMappingRegion::ExpansionRegion) OS << " (Expanded file = " << R.ExpandedFileID << ")"; OS << "\n"; } } void CoverageMappingModuleGen::addFunctionMappingRecord( llvm::GlobalVariable *FunctionName, StringRef FunctionNameValue, uint64_t FunctionHash, const std::string &CoverageMapping) { llvm::LLVMContext &Ctx = CGM.getLLVMContext(); auto *Int32Ty = llvm::Type::getInt32Ty(Ctx); auto *Int64Ty = llvm::Type::getInt64Ty(Ctx); auto *Int8PtrTy = llvm::Type::getInt8PtrTy(Ctx); if (!FunctionRecordTy) { llvm::Type *FunctionRecordTypes[] = {Int8PtrTy, Int32Ty, Int32Ty, Int64Ty}; FunctionRecordTy = llvm::StructType::get(Ctx, makeArrayRef(FunctionRecordTypes), /*isPacked=*/true); } llvm::Constant *FunctionRecordVals[] = { llvm::ConstantExpr::getBitCast(FunctionName, Int8PtrTy), llvm::ConstantInt::get(Int32Ty, FunctionNameValue.size()), llvm::ConstantInt::get(Int32Ty, CoverageMapping.size()), llvm::ConstantInt::get(Int64Ty, FunctionHash)}; FunctionRecords.push_back(llvm::ConstantStruct::get( FunctionRecordTy, makeArrayRef(FunctionRecordVals))); CoverageMappings += CoverageMapping; if (CGM.getCodeGenOpts().DumpCoverageMapping) { // Dump the coverage mapping data for this function by decoding the // encoded data. This allows us to dump the mapping regions which were // also processed by the CoverageMappingWriter which performs // additional minimization operations such as reducing the number of // expressions. 
std::vector<StringRef> Filenames; std::vector<CounterExpression> Expressions; std::vector<CounterMappingRegion> Regions; llvm::SmallVector<StringRef, 16> FilenameRefs; FilenameRefs.resize(FileEntries.size()); for (const auto &Entry : FileEntries) FilenameRefs[Entry.second] = Entry.first->getName(); RawCoverageMappingReader Reader(CoverageMapping, FilenameRefs, Filenames, Expressions, Regions); if (Reader.read()) return; dump(llvm::outs(), FunctionNameValue, Expressions, Regions); } } void CoverageMappingModuleGen::emit() { if (FunctionRecords.empty()) return; llvm::LLVMContext &Ctx = CGM.getLLVMContext(); auto *Int32Ty = llvm::Type::getInt32Ty(Ctx); // Create the filenames and merge them with coverage mappings llvm::SmallVector<std::string, 16> FilenameStrs; llvm::SmallVector<StringRef, 16> FilenameRefs; FilenameStrs.resize(FileEntries.size()); FilenameRefs.resize(FileEntries.size()); for (const auto &Entry : FileEntries) { llvm::SmallString<256> Path(Entry.first->getName()); llvm::sys::fs::make_absolute(Path); auto I = Entry.second; FilenameStrs[I] = std::string(Path.begin(), Path.end()); FilenameRefs[I] = FilenameStrs[I]; } std::string FilenamesAndCoverageMappings; llvm::raw_string_ostream OS(FilenamesAndCoverageMappings); CoverageFilenamesSectionWriter(FilenameRefs).write(OS); OS << CoverageMappings; size_t CoverageMappingSize = CoverageMappings.size(); size_t FilenamesSize = OS.str().size() - CoverageMappingSize; // Append extra zeroes if necessary to ensure that the size of the filenames // and coverage mappings is a multiple of 8. if (size_t Rem = OS.str().size() % 8) { CoverageMappingSize += 8 - Rem; for (size_t I = 0, S = 8 - Rem; I < S; ++I) OS << '\0'; } auto *FilenamesAndMappingsVal = llvm::ConstantDataArray::getString(Ctx, OS.str(), false); // Create the deferred function records array auto RecordsTy = llvm::ArrayType::get(FunctionRecordTy, FunctionRecords.size()); auto RecordsVal = llvm::ConstantArray::get(RecordsTy, FunctionRecords); // Create the coverage data record llvm::Type *CovDataTypes[] = {Int32Ty, Int32Ty, Int32Ty, Int32Ty, RecordsTy, FilenamesAndMappingsVal->getType()}; auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes)); llvm::Constant *TUDataVals[] = { llvm::ConstantInt::get(Int32Ty, FunctionRecords.size()), llvm::ConstantInt::get(Int32Ty, FilenamesSize), llvm::ConstantInt::get(Int32Ty, CoverageMappingSize), llvm::ConstantInt::get(Int32Ty, /*Version=*/CoverageMappingVersion1), RecordsVal, FilenamesAndMappingsVal}; auto CovDataVal = llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals)); auto CovData = new llvm::GlobalVariable(CGM.getModule(), CovDataTy, true, llvm::GlobalValue::InternalLinkage, CovDataVal, "__llvm_coverage_mapping"); CovData->setSection(getCoverageSection(CGM)); CovData->setAlignment(8); // Make sure the data doesn't get deleted. CGM.addUsedGlobal(CovData); } unsigned CoverageMappingModuleGen::getFileID(const FileEntry *File) { auto It = FileEntries.find(File); if (It != FileEntries.end()) return It->second; unsigned FileID = FileEntries.size(); FileEntries.insert(std::make_pair(File, FileID)); return FileID; } void CoverageMappingGen::emitCounterMapping(const Decl *D, llvm::raw_ostream &OS) { assert(CounterMap); CounterCoverageMappingBuilder Walker(CVM, *CounterMap, SM, LangOpts); Walker.VisitDecl(D); Walker.write(OS); } void CoverageMappingGen::emitEmptyMapping(const Decl *D, llvm::raw_ostream &OS) { EmptyCoverageMappingBuilder Walker(CVM, SM, LangOpts); Walker.VisitDecl(D); Walker.write(OS); }
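// A minimal sketch of how these pieces are typically driven (hypothetical
// glue code, not part of this file): one CoverageMappingGen encodes a single
// function's regions into a string, which is then registered with the
// module-level CoverageMappingModuleGen; CoverageMappingModuleGen::emit()
// finally lays everything out in the coverage mapping section. The inputs
// (NameGV, MangledName, FuncHash, CounterMap) are assumed to come from the
// surrounding CodeGen machinery.
static void emitAndRegisterCoverage(CoverageMappingModuleGen &CVM,
                                    SourceManager &SM, const LangOptions &LO,
                                    llvm::DenseMap<const Stmt *, unsigned> &CounterMap,
                                    const Decl *D, llvm::GlobalVariable *NameGV,
                                    StringRef MangledName, uint64_t FuncHash) {
  std::string Mapping;
  llvm::raw_string_ostream OS(Mapping);
  // Encode this function's source regions and counter expressions.
  CoverageMappingGen(CVM, SM, LO, &CounterMap).emitCounterMapping(D, OS);
  OS.flush();
  // Queue the encoded mapping; CVM.emit() later writes the
  // __llvm_coverage_mapping global for the whole module.
  CVM.addFunctionMappingRecord(NameGV, MangledName, FuncHash, Mapping);
}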
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CoverageMappingGen.h
//===---- CoverageMappingGen.h - Coverage mapping generation ----*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // Instrumentation-based code coverage mapping generator // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_COVERAGEMAPPINGGEN_H #define LLVM_CLANG_LIB_CODEGEN_COVERAGEMAPPINGGEN_H #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "clang/Frontend/CodeGenOptions.h" #include "clang/Lex/PPCallbacks.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/StringMap.h" #include "llvm/IR/GlobalValue.h" #include "llvm/Support/raw_ostream.h" namespace clang { class LangOptions; class SourceManager; class FileEntry; class Preprocessor; class Decl; class Stmt; /// \brief Stores additional source code information like skipped ranges which /// is required by the coverage mapping generator and is obtained from /// the preprocessor. class CoverageSourceInfo : public PPCallbacks { std::vector<SourceRange> SkippedRanges; public: ArrayRef<SourceRange> getSkippedRanges() const { return SkippedRanges; } void SourceRangeSkipped(SourceRange Range) override; }; namespace CodeGen { class CodeGenModule; /// \brief Organizes the cross-function state that is used while generating /// code coverage mapping data. class CoverageMappingModuleGen { CodeGenModule &CGM; CoverageSourceInfo &SourceInfo; llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries; std::vector<llvm::Constant *> FunctionRecords; llvm::StructType *FunctionRecordTy; std::string CoverageMappings; public: CoverageMappingModuleGen(CodeGenModule &CGM, CoverageSourceInfo &SourceInfo) : CGM(CGM), SourceInfo(SourceInfo), FunctionRecordTy(nullptr) {} CoverageSourceInfo &getSourceInfo() const { return SourceInfo; } /// \brief Add a function's coverage mapping record to the collection of the /// function mapping records. void addFunctionMappingRecord(llvm::GlobalVariable *FunctionName, StringRef FunctionNameValue, uint64_t FunctionHash, const std::string &CoverageMapping); /// \brief Emit the coverage mapping data for a translation unit. void emit(); /// \brief Return the coverage mapping translation unit file id /// for the given file. unsigned getFileID(const FileEntry *File); }; /// \brief Organizes the per-function state that is used while generating /// code coverage mapping data. class CoverageMappingGen { CoverageMappingModuleGen &CVM; SourceManager &SM; const LangOptions &LangOpts; llvm::DenseMap<const Stmt *, unsigned> *CounterMap; public: CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM, const LangOptions &LangOpts) : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(nullptr) {} CoverageMappingGen(CoverageMappingModuleGen &CVM, SourceManager &SM, const LangOptions &LangOpts, llvm::DenseMap<const Stmt *, unsigned> *CounterMap) : CVM(CVM), SM(SM), LangOpts(LangOpts), CounterMap(CounterMap) {} /// \brief Emit the coverage mapping data which maps the regions of /// code to counters that will be used to find the execution /// counts for those regions. void emitCounterMapping(const Decl *D, llvm::raw_ostream &OS); /// \brief Emit the coverage mapping data for an unused function. /// It creates mapping regions with the counter of zero. 
void emitEmptyMapping(const Decl *D, llvm::raw_ostream &OS); }; } // end namespace CodeGen } // end namespace clang #endif
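// A minimal usage sketch (assumes a frontend-owned Preprocessor `PP`; this is
// illustrative glue, not part of the header): CoverageSourceInfo is a
// PPCallbacks subclass, so it is registered before parsing and queried
// afterwards for the ranges skipped by conditional compilation:
//
//   auto SkipInfo = llvm::make_unique<CoverageSourceInfo>();
//   CoverageSourceInfo &SourceInfo = *SkipInfo;
//   PP.addPPCallbacks(std::move(SkipInfo));
//   // ... parse the translation unit ...
//   for (SourceRange R : SourceInfo.getSkippedRanges())
//     ; // each R becomes a "skipped" mapping region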
0
repos/DirectXShaderCompiler/tools/clang/lib
repos/DirectXShaderCompiler/tools/clang/lib/CodeGen/CGDebugInfo.cpp
//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This coordinates the debug information generation while generating code. // //===----------------------------------------------------------------------===// #include "CGDebugInfo.h" #include "CGBlocks.h" #include "CGCXXABI.h" #include "CGObjCRuntime.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "dxc/Support/Path.h" // HLSL Change #include "clang/AST/ASTContext.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/RecordLayout.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Version.h" #include "clang/Frontend/CodeGenOptions.h" #include "clang/Lex/HeaderSearchOptions.h" #include "clang/Lex/PreprocessorOptions.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringExtras.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/Module.h" #include "llvm/Support/Dwarf.h" #include "llvm/Support/FileSystem.h" #include "llvm/Support/Path.h" using namespace clang; using namespace clang::CodeGen; CGDebugInfo::CGDebugInfo(CodeGenModule &CGM) : CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()), DBuilder(CGM.getModule()) { CreateCompileUnit(); } CGDebugInfo::~CGDebugInfo() { assert(LexicalBlockStack.empty() && "Region stack mismatch, stack not empty!"); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, SourceLocation TemporaryLocation) : CGF(CGF) { init(TemporaryLocation); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, bool DefaultToEmpty, SourceLocation TemporaryLocation) : CGF(CGF) { init(TemporaryLocation, DefaultToEmpty); } void ApplyDebugLocation::init(SourceLocation TemporaryLocation, bool DefaultToEmpty) { if (auto *DI = CGF.getDebugInfo()) { OriginalLocation = CGF.Builder.getCurrentDebugLocation(); if (TemporaryLocation.isInvalid()) { if (DefaultToEmpty) CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc()); else { // Construct a location that has a valid scope, but no line info. assert(!DI->LexicalBlockStack.empty()); CGF.Builder.SetCurrentDebugLocation( llvm::DebugLoc::get(0, 0, DI->LexicalBlockStack.back())); } } else DI->EmitLocation(CGF.Builder, TemporaryLocation); } } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { init(E->getExprLoc()); } ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc) : CGF(CGF) { if (CGF.getDebugInfo()) { OriginalLocation = CGF.Builder.getCurrentDebugLocation(); if (Loc) CGF.Builder.SetCurrentDebugLocation(std::move(Loc)); } } ApplyDebugLocation::~ApplyDebugLocation() { // Query CGF so the location isn't overwritten when location updates are // temporarily disabled (for C++ default function arguments) if (CGF.getDebugInfo()) CGF.Builder.SetCurrentDebugLocation(std::move(OriginalLocation)); } void CGDebugInfo::setLocation(SourceLocation Loc) { // If the new location isn't valid return. 
if (Loc.isInvalid()) return; CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc); // If we've changed files in the middle of a lexical scope go ahead // and create a new lexical scope with file node if it's different // from the one in the scope. if (LexicalBlockStack.empty()) return; SourceManager &SM = CGM.getContext().getSourceManager(); auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back()); PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc, /*UseLineDirectives*/ false); // HLSL Change if (PCLoc.isInvalid() || Scope->getFilename() == PCLoc.getFilename()) return; if (auto *LBF = dyn_cast<llvm::DILexicalBlockFile>(Scope)) { LexicalBlockStack.pop_back(); LexicalBlockStack.emplace_back(DBuilder.createLexicalBlockFile( LBF->getScope(), getOrCreateFile(CurLoc))); } else if (isa<llvm::DILexicalBlock>(Scope) || isa<llvm::DISubprogram>(Scope)) { LexicalBlockStack.pop_back(); LexicalBlockStack.emplace_back( DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc))); } } llvm::DIScope *CGDebugInfo::getContextDescriptor(const Decl *Context) { if (!Context) return TheCU; auto I = RegionMap.find(Context); if (I != RegionMap.end()) { llvm::Metadata *V = I->second; return dyn_cast_or_null<llvm::DIScope>(V); } // Check namespace. if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context)) return getOrCreateNameSpace(NSDecl); if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) if (!RDecl->isDependentType()) return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl), getOrCreateMainFile()); return TheCU; } StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) { assert(FD && "Invalid FunctionDecl!"); IdentifierInfo *FII = FD->getIdentifier(); FunctionTemplateSpecializationInfo *Info = FD->getTemplateSpecializationInfo(); if (!Info && FII) return FII->getName(); // Otherwise construct human readable name for debug info. SmallString<128> NS; llvm::raw_svector_ostream OS(NS); FD->printName(OS); // Add any template specialization args. if (Info) { const TemplateArgumentList *TArgs = Info->TemplateArguments; const TemplateArgument *Args = TArgs->data(); unsigned NumArgs = TArgs->size(); PrintingPolicy Policy(CGM.getLangOpts()); TemplateSpecializationType::PrintTemplateArgumentList(OS, Args, NumArgs, Policy); } // Copy this name on the side and use its reference. return internString(OS.str()); } #if 0 // HLSL Change - no ObjC support StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) { SmallString<256> MethodName; llvm::raw_svector_ostream OS(MethodName); OS << (OMD->isInstanceMethod() ? '-' : '+') << '['; const DeclContext *DC = OMD->getDeclContext(); if (const ObjCImplementationDecl *OID = dyn_cast<const ObjCImplementationDecl>(DC)) { OS << OID->getName(); } else if (const ObjCInterfaceDecl *OID = dyn_cast<const ObjCInterfaceDecl>(DC)) { OS << OID->getName(); } else if (const ObjCCategoryImplDecl *OCD = dyn_cast<const ObjCCategoryImplDecl>(DC)) { OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' << OCD->getIdentifier()->getNameStart() << ')'; } else if (isa<ObjCProtocolDecl>(DC)) { // We can extract the type of the class from the self pointer. 
if (ImplicitParamDecl *SelfDecl = OMD->getSelfDecl()) { QualType ClassTy = cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType(); ClassTy.print(OS, PrintingPolicy(LangOptions())); } } OS << ' ' << OMD->getSelector().getAsString() << ']'; return internString(OS.str()); } #endif // HLSL Change - no ObjC support StringRef CGDebugInfo::getSelectorName(Selector S) { return internString(S.getAsString()); } StringRef CGDebugInfo::getClassName(const RecordDecl *RD) { // quick optimization to avoid having to intern strings that are already // stored reliably elsewhere if (!isa<ClassTemplateSpecializationDecl>(RD)) return RD->getName(); SmallString<128> Name; { llvm::raw_svector_ostream OS(Name); RD->getNameForDiagnostic(OS, CGM.getContext().getPrintingPolicy(), /*Qualified*/ false); } // Copy this name on the side and use its reference. return internString(Name); } llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) { if (!Loc.isValid()) // If Location is not valid then use main input file. return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory()); SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives*/ false); // HLSL Change if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty()) // If the location is not valid then use main input file. return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory()); // Cache the results. const char *fname = PLoc.getFilename(); auto it = DIFileCache.find(fname); if (it != DIFileCache.end()) { // Verify that the information still exists. if (llvm::Metadata *V = it->second) return cast<llvm::DIFile>(V); } llvm::DIFile *F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname()); DIFileCache[fname].reset(F); return F; } llvm::DIFile *CGDebugInfo::getOrCreateMainFile() { return DBuilder.createFile(TheCU->getFilename(), TheCU->getDirectory()); } unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) { if (Loc.isInvalid() && CurLoc.isInvalid()) return 0; SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc, /*UseLineDirectives*/ false); // HLSL Change return PLoc.isValid() ? PLoc.getLine() : 0; } unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) { // We may not want column information at all. if (!Force && !CGM.getCodeGenOpts().DebugColumnInfo) return 0; // If the location is invalid then use the current column. if (Loc.isInvalid() && CurLoc.isInvalid()) return 0; SourceManager &SM = CGM.getContext().getSourceManager(); PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc, /*UseLineDirectives*/ false); // HLSL Change return PLoc.isValid() ? PLoc.getColumn() : 0; } StringRef CGDebugInfo::getCurrentDirname() { if (!CGM.getCodeGenOpts().DebugCompilationDir.empty() || CGM.getLangOpts().HLSL) // HLSL Change return CGM.getCodeGenOpts().DebugCompilationDir; if (!CWDName.empty()) return CWDName; SmallString<256> CWD; llvm::sys::fs::current_path(CWD); return CWDName = internString(CWD); } void CGDebugInfo::CreateCompileUnit() { // Should we be asking the SourceManager for the main file name, instead of // accepting it as an argument? 
This just causes the main file name to // mismatch with source locations and create extra lexical scopes or // mismatched debug info (a CU with a DW_AT_file of "-", because that's what // the driver passed, but functions/other things have DW_AT_file of "<stdin>" // because that's what the SourceManager says) // Get absolute path name. // SourceManager &SM = CGM.getContext().getSourceManager(); // HLSL Change - unused std::string MainFileName = CGM.getCodeGenOpts().MainFileName; if (MainFileName.empty()) MainFileName = "<stdin>"; // The main file name provided via the "-main-file-name" option contains just // the file name itself with no path information. This file name may have had // a relative path, so we look into the actual file entry for the main // file to determine the real absolute path for the file. #if 0 // HLSL change. The directory is already part of the name, this would duplicate it std::string MainFileDir; if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) { MainFileDir = MainFile->getDir()->getName(); if (MainFileDir != ".") { llvm::SmallString<1024> MainFileDirSS(MainFileDir); llvm::sys::path::append(MainFileDirSS, MainFileName); MainFileName = MainFileDirSS.str(); } } #endif // HLSL change. The full path is already here // Save filename string. StringRef Filename = internString(MainFileName); // Save split dwarf file string. std::string SplitDwarfFile = CGM.getCodeGenOpts().SplitDwarfFile; StringRef SplitDwarfFilename = internString(SplitDwarfFile); llvm::dwarf::SourceLanguage LangTag; const LangOptions &LO = CGM.getLangOpts(); if (LO.CPlusPlus) { if (LO.ObjC1) LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus; else LangTag = llvm::dwarf::DW_LANG_C_plus_plus; } else if (LO.ObjC1) { LangTag = llvm::dwarf::DW_LANG_ObjC; } else if (LO.C99) { LangTag = llvm::dwarf::DW_LANG_C99; } else { LangTag = llvm::dwarf::DW_LANG_C89; } std::string Producer = getClangFullVersion(); // Figure out which version of the ObjC runtime we have. unsigned RuntimeVers = 0; if (LO.ObjC1) RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1; // Create new compile unit. // FIXME - Eliminate TheCU. TheCU = DBuilder.createCompileUnit( LangTag, Filename, getCurrentDirname(), Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers, SplitDwarfFilename, DebugKind <= CodeGenOptions::DebugLineTablesOnly ? 
llvm::DIBuilder::LineTablesOnly : llvm::DIBuilder::FullDebug, 0 /* DWOid */, DebugKind != CodeGenOptions::LocTrackingOnly); } llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) { llvm::dwarf::TypeKind Encoding; StringRef BTName; switch (BT->getKind()) { #define BUILTIN_TYPE(Id, SingletonId) #define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id: #include "clang/AST/BuiltinTypes.def" case BuiltinType::Dependent: llvm_unreachable("Unexpected builtin type"); case BuiltinType::NullPtr: return DBuilder.createNullPtrType(); case BuiltinType::Void: return nullptr; #if 0 // HLSL Change - no ObjC or OpenCL support case BuiltinType::ObjCClass: if (!ClassTy) ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, "objc_class", TheCU, getOrCreateMainFile(), 0); return ClassTy; case BuiltinType::ObjCId: { // typedef struct objc_class *Class; // typedef struct objc_object { // Class isa; // } *id; if (ObjTy) return ObjTy; if (!ClassTy) ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, "objc_class", TheCU, getOrCreateMainFile(), 0); unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); auto *ISATy = DBuilder.createPointerType(ClassTy, Size); ObjTy = DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(), 0, 0, 0, 0, nullptr, llvm::DINodeArray()); DBuilder.replaceArrays( ObjTy, DBuilder.getOrCreateArray(&*DBuilder.createMemberType( ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy))); return ObjTy; } case BuiltinType::ObjCSel: { if (!SelTy) SelTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, "objc_selector", TheCU, getOrCreateMainFile(), 0); return SelTy; } case BuiltinType::OCLImage1d: return getOrCreateStructPtrType("opencl_image1d_t", OCLImage1dDITy); case BuiltinType::OCLImage1dArray: return getOrCreateStructPtrType("opencl_image1d_array_t", OCLImage1dArrayDITy); case BuiltinType::OCLImage1dBuffer: return getOrCreateStructPtrType("opencl_image1d_buffer_t", OCLImage1dBufferDITy); case BuiltinType::OCLImage2d: return getOrCreateStructPtrType("opencl_image2d_t", OCLImage2dDITy); case BuiltinType::OCLImage2dArray: return getOrCreateStructPtrType("opencl_image2d_array_t", OCLImage2dArrayDITy); case BuiltinType::OCLImage3d: return getOrCreateStructPtrType("opencl_image3d_t", OCLImage3dDITy); case BuiltinType::OCLSampler: return DBuilder.createBasicType( "opencl_sampler_t", CGM.getContext().getTypeSize(BT), CGM.getContext().getTypeAlign(BT), llvm::dwarf::DW_ATE_unsigned); case BuiltinType::OCLEvent: return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy); #else case BuiltinType::ObjCClass: case BuiltinType::ObjCId: case BuiltinType::ObjCSel: case BuiltinType::OCLImage1d: case BuiltinType::OCLImage1dArray: case BuiltinType::OCLImage1dBuffer: case BuiltinType::OCLImage2d: case BuiltinType::OCLImage2dArray: case BuiltinType::OCLImage3d: case BuiltinType::OCLSampler: case BuiltinType::OCLEvent: llvm_unreachable("No ObjC or OpenCL support"); #endif // HLSL Change - no ObjC or OpenCL support case BuiltinType::UChar: case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break; case BuiltinType::Char_S: case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break; case BuiltinType::Char16: case BuiltinType::Char32: Encoding = llvm::dwarf::DW_ATE_UTF; break; case BuiltinType::UShort: case BuiltinType::Min16UInt: // HLSL Change case BuiltinType::Int8_4Packed: // HLSL Change case BuiltinType::UInt8_4Packed: // HLSL Change case BuiltinType::UInt: case 
BuiltinType::UInt128: case BuiltinType::ULong: case BuiltinType::WChar_U: case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break; // HLSL Changes begin case BuiltinType::Min12Int: case BuiltinType::Min16Int: // HLSL Changes end case BuiltinType::Short: case BuiltinType::Int: case BuiltinType::Int128: case BuiltinType::Long: case BuiltinType::WChar_S: case BuiltinType::LongLong: case BuiltinType::LitInt: Encoding = llvm::dwarf::DW_ATE_signed; break; case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break; // HLSL Changes begin case BuiltinType::Min10Float: case BuiltinType::Min16Float: case BuiltinType::HalfFloat: // HLSL Changes end case BuiltinType::Half: case BuiltinType::Float: case BuiltinType::LongDouble: case BuiltinType::Double: case BuiltinType::LitFloat: Encoding = llvm::dwarf::DW_ATE_float; break; } switch (BT->getKind()) { case BuiltinType::Long: BTName = "long int"; break; case BuiltinType::LongLong: // BTName = "long long int"; // HLSL Change BTName = "int64_t"; // HLSL Change break; case BuiltinType::ULong: BTName = "long unsigned int"; break; case BuiltinType::ULongLong: // BTName = "long long unsigned int"; // HLSL Change BTName = "uint64_t"; // HLSL Change break; default: BTName = BT->getName(CGM.getLangOpts()); break; } // Bit size, align and offset of the type. uint64_t Size = CGM.getContext().getTypeSize(BT); uint64_t Align = CGM.getContext().getTypeAlign(BT); return DBuilder.createBasicType(BTName, Size, Align, Encoding); } llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) { // Bit size, align and offset of the type. llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float; if (Ty->isComplexIntegerType()) Encoding = llvm::dwarf::DW_ATE_lo_user; uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); return DBuilder.createBasicType("complex", Size, Align, Encoding); } llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile *Unit) { QualifierCollector Qc; const Type *T = Qc.strip(Ty); // Ignore these qualifiers for now. Qc.removeObjCGCAttr(); Qc.removeAddressSpace(); Qc.removeObjCLifetime(); // We will create one Derived type for one qualifier and recurse to handle any // additional ones. llvm::dwarf::Tag Tag; if (Qc.hasConst()) { Tag = llvm::dwarf::DW_TAG_const_type; Qc.removeConst(); } else if (Qc.hasVolatile()) { Tag = llvm::dwarf::DW_TAG_volatile_type; Qc.removeVolatile(); } else if (Qc.hasRestrict()) { Tag = llvm::dwarf::DW_TAG_restrict_type; Qc.removeRestrict(); } else { assert(Qc.empty() && "Unknown type qualifier for debug info"); return getOrCreateType(QualType(T, 0), Unit); } auto *FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit); // No need to fill in the Name, Line, Size, Alignment, Offset in case of // CVR derived types. return DBuilder.createQualifiedType(Tag, FromTy); } #if 0 // HLSL Change - no ObjC support llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty, llvm::DIFile *Unit) { // The frontend treats 'id' as a typedef to an ObjCObjectType, // whereas 'id<protocol>' is treated as an ObjCPointerType. For the // debug info, we want to emit 'id' in both cases. 
if (Ty->isObjCQualifiedIdType()) return getOrCreateType(CGM.getContext().getObjCIdType(), Unit); return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, Ty->getPointeeType(), Unit); } #endif // HLSL Change - no ObjC support llvm::DIType *CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty, Ty->getPointeeType(), Unit); } /// \return whether a C++ mangling exists for the type defined by TD. static bool hasCXXMangling(const TagDecl *TD, llvm::DICompileUnit *TheCU) { switch (TheCU->getSourceLanguage()) { case llvm::dwarf::DW_LANG_C_plus_plus: return true; case llvm::dwarf::DW_LANG_ObjC_plus_plus: return isa<CXXRecordDecl>(TD) || isa<EnumDecl>(TD); default: return false; } } /// In C++ mode, types have linkage, so we can rely on the ODR and /// on their mangled names, if they're external. static SmallString<256> getUniqueTagTypeName(const TagType *Ty, CodeGenModule &CGM, llvm::DICompileUnit *TheCU) { SmallString<256> FullName; const TagDecl *TD = Ty->getDecl(); if (!hasCXXMangling(TD, TheCU) || !TD->isExternallyVisible()) return FullName; // Microsoft Mangler does not have support for mangleCXXRTTIName yet. if (CGM.getTarget().getCXXABI().isMicrosoft()) return FullName; // TODO: This is using the RTTI name. Is there a better way to get // a unique string for a type? llvm::raw_svector_ostream Out(FullName); CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(QualType(Ty, 0), Out); Out.flush(); return FullName; } /// \return the appropriate DWARF tag for a composite type. static llvm::dwarf::Tag getTagForRecord(const RecordDecl *RD) { llvm::dwarf::Tag Tag; if (RD->isStruct() || RD->isInterface()) Tag = llvm::dwarf::DW_TAG_structure_type; else if (RD->isUnion()) Tag = llvm::dwarf::DW_TAG_union_type; else { // FIXME: This could be a struct type giving a default visibility different // than C++ class type, but needs llvm metadata changes first. assert(RD->isClass()); Tag = llvm::dwarf::DW_TAG_class_type; } return Tag; } llvm::DICompositeType * CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty, llvm::DIScope *Ctx) { const RecordDecl *RD = Ty->getDecl(); if (llvm::DIType *T = getTypeOrNull(CGM.getContext().getRecordType(RD))) return cast<llvm::DICompositeType>(T); llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); unsigned Line = getLineNumber(RD->getLocation()); StringRef RDName = getClassName(RD); uint64_t Size = 0; uint64_t Align = 0; const RecordDecl *D = RD->getDefinition(); if (D && D->isCompleteDefinition()) { Size = CGM.getContext().getTypeSize(Ty); Align = CGM.getContext().getTypeAlign(Ty); } // Create the type. SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU); llvm::DICompositeType *RetTy = DBuilder.createReplaceableCompositeType( getTagForRecord(RD), RDName, Ctx, DefUnit, Line, 0, Size, Align, llvm::DINode::FlagFwdDecl, FullName); ReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(Ty), std::make_tuple(static_cast<llvm::Metadata *>(RetTy))); return RetTy; } llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag, const Type *Ty, QualType PointeeTy, llvm::DIFile *Unit) { if (Tag == llvm::dwarf::DW_TAG_reference_type || Tag == llvm::dwarf::DW_TAG_rvalue_reference_type) return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit)); // Bit size, align and offset of the type. // Size is always the size of a pointer. We can't use getTypeSize here // because that does not return the correct value for references. 
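// For instance (illustrative): 'int &' takes the early return above and is
// emitted as a DW_TAG_reference_type with no size, whereas 'int *' falls
// through and is emitted below as a DW_TAG_pointer_type whose size is the
// target's pointer width for the pointee's address space.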
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy); uint64_t Size = CGM.getTarget().getPointerWidth(AS); uint64_t Align = CGM.getContext().getTypeAlign(Ty); return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size, Align); } llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name, llvm::DIType *&Cache) { if (Cache) return Cache; Cache = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, Name, TheCU, getOrCreateMainFile(), 0); unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); Cache = DBuilder.createPointerType(Cache, Size); return Cache; } #if 0 // HLSL Change - no block support llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty, llvm::DIFile *Unit) { SmallVector<llvm::Metadata *, 8> EltTys; QualType FType; uint64_t FieldSize, FieldOffset; unsigned FieldAlign; llvm::DINodeArray Elements; FieldOffset = 0; FType = CGM.getContext().UnsignedLongTy; EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset)); Elements = DBuilder.getOrCreateArray(EltTys); EltTys.clear(); unsigned Flags = llvm::DINode::FlagAppleBlock; unsigned LineNo = 0; auto *EltTy = DBuilder.createStructType(Unit, "__block_descriptor", nullptr, LineNo, FieldOffset, 0, Flags, nullptr, Elements); // Bit size, align and offset of the type. uint64_t Size = CGM.getContext().getTypeSize(Ty); auto *DescTy = DBuilder.createPointerType(EltTy, Size); FieldOffset = 0; FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset)); FType = CGM.getContext().IntTy; EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset)); FType = CGM.getContext().getPointerType(Ty->getPointeeType()); EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset)); FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); FieldSize = CGM.getContext().getTypeSize(Ty); FieldAlign = CGM.getContext().getTypeAlign(Ty); EltTys.push_back(DBuilder.createMemberType(Unit, "__descriptor", nullptr, LineNo, FieldSize, FieldAlign, FieldOffset, 0, DescTy)); FieldOffset += FieldSize; Elements = DBuilder.getOrCreateArray(EltTys); // The __block_literal_generic structs are marked with a special // DW_AT_APPLE_BLOCK attribute and are an implementation detail only // the debugger needs to know about. To allow type uniquing, emit // them without a name or a location. 
EltTy = DBuilder.createStructType(Unit, "", nullptr, LineNo, FieldOffset, 0, Flags, nullptr, Elements); return DBuilder.createPointerType(EltTy, Size); } #endif // HLSL Change - no block support llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty, llvm::DIFile *Unit) { assert(Ty->isTypeAlias()); llvm::DIType *Src = getOrCreateType(Ty->getAliasedType(), Unit); SmallString<128> NS; llvm::raw_svector_ostream OS(NS); Ty->getTemplateName().print(OS, CGM.getContext().getPrintingPolicy(), /*qualified*/ false); TemplateSpecializationType::PrintTemplateArgumentList( OS, Ty->getArgs(), Ty->getNumArgs(), CGM.getContext().getPrintingPolicy()); TypeAliasDecl *AliasDecl = cast<TypeAliasTemplateDecl>( Ty->getTemplateName().getAsTemplateDecl())->getTemplatedDecl(); SourceLocation Loc = AliasDecl->getLocation(); return DBuilder.createTypedef( Src, internString(OS.str()), getOrCreateFile(Loc), getLineNumber(Loc), getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext()))); } llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile *Unit) { // We don't set size information, but do specify where the typedef was // declared. SourceLocation Loc = Ty->getDecl()->getLocation(); // Typedefs are derived from some other type. return DBuilder.createTypedef( getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit), Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc), getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()))); } llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty, llvm::DIFile *Unit) { SmallVector<llvm::Metadata *, 16> EltTys; // Add the result type at least. EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit)); // Set up remainder of arguments if there is a prototype. // otherwise emit it as a variadic function. if (isa<FunctionNoProtoType>(Ty)) EltTys.push_back(DBuilder.createUnspecifiedParameter()); else if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(Ty)) { for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i) EltTys.push_back(getOrCreateType(FPT->getParamType(i), Unit)); if (FPT->isVariadic()) EltTys.push_back(DBuilder.createUnspecifiedParameter()); } llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); return DBuilder.createSubroutineType(Unit, EltTypeArray); } /// Convert an AccessSpecifier into the corresponding DINode flag. /// As an optimization, return 0 if the access specifier equals the /// default for the containing type. static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) { AccessSpecifier Default = clang::AS_none; if (RD && RD->isClass()) Default = clang::AS_private; else if (RD && (RD->isStruct() || RD->isUnion())) Default = clang::AS_public; if (Access == Default) return 0; switch (Access) { case clang::AS_private: return llvm::DINode::FlagPrivate; case clang::AS_protected: return llvm::DINode::FlagProtected; case clang::AS_public: return llvm::DINode::FlagPublic; case clang::AS_none: return 0; } llvm_unreachable("unexpected access enumerator"); } llvm::DIType *CGDebugInfo::createFieldType( StringRef name, QualType type, uint64_t sizeInBitsOverride, SourceLocation loc, AccessSpecifier AS, uint64_t offsetInBits, llvm::DIFile *tunit, llvm::DIScope *scope, const RecordDecl *RD) { llvm::DIType *debugType = getOrCreateType(type, tunit); // Get the location for the field. 
llvm::DIFile *file = getOrCreateFile(loc); unsigned line = getLineNumber(loc); uint64_t SizeInBits = 0; unsigned AlignInBits = 0; if (!type->isIncompleteArrayType()) { TypeInfo TI = CGM.getContext().getTypeInfo(type); SizeInBits = TI.Width; AlignInBits = TI.Align; if (sizeInBitsOverride) SizeInBits = sizeInBitsOverride; } unsigned flags = getAccessFlag(AS, RD); return DBuilder.createMemberType(scope, name, file, line, SizeInBits, AlignInBits, offsetInBits, flags, debugType); } void CGDebugInfo::CollectRecordLambdaFields( const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType *RecordTy) { // For C++11 Lambdas a Field will be the same as a Capture, but the Capture // has the name and the location of the variable so we should iterate over // both concurrently. const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(CXXDecl); RecordDecl::field_iterator Field = CXXDecl->field_begin(); unsigned fieldno = 0; for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(), E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) { const LambdaCapture &C = *I; if (C.capturesVariable()) { VarDecl *V = C.getCapturedVar(); llvm::DIFile *VUnit = getOrCreateFile(C.getLocation()); StringRef VName = V->getName(); uint64_t SizeInBitsOverride = 0; if (Field->isBitField()) { SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext()); assert(SizeInBitsOverride && "found named 0-width bitfield"); } llvm::DIType *fieldType = createFieldType( VName, Field->getType(), SizeInBitsOverride, C.getLocation(), Field->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl); elements.push_back(fieldType); } else if (C.capturesThis()) { // TODO: Need to handle 'this' in some way by probably renaming the // this of the lambda class and having a field member of 'this' or // by using AT_object_pointer for the function and having that be // used as 'this' for semantic references. FieldDecl *f = *Field; llvm::DIFile *VUnit = getOrCreateFile(f->getLocation()); QualType type = f->getType(); llvm::DIType *fieldType = createFieldType( "this", type, 0, f->getLocation(), f->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl); elements.push_back(fieldType); } } } llvm::DIDerivedType * CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy, const RecordDecl *RD) { // Create the descriptor for the static variable, with or without // constant initializers. 
Var = Var->getCanonicalDecl(); llvm::DIFile *VUnit = getOrCreateFile(Var->getLocation()); llvm::DIType *VTy = getOrCreateType(Var->getType(), VUnit); unsigned LineNumber = getLineNumber(Var->getLocation()); StringRef VName = Var->getName(); llvm::Constant *C = nullptr; if (Var->getInit()) { const APValue *Value = Var->evaluateValue(); if (Value) { if (Value->isInt()) C = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt()); if (Value->isFloat()) C = llvm::ConstantFP::get(CGM.getLLVMContext(), Value->getFloat()); } } unsigned Flags = getAccessFlag(Var->getAccess(), RD); llvm::DIDerivedType *GV = DBuilder.createStaticMemberType( RecordTy, VName, VUnit, LineNumber, VTy, Flags, C); StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV); return GV; } void CGDebugInfo::CollectRecordNormalField( const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile *tunit, SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType *RecordTy, const RecordDecl *RD) { StringRef name = field->getName(); QualType type = field->getType(); // Ignore unnamed fields unless they're anonymous structs/unions. if (name.empty() && !type->isRecordType()) return; uint64_t SizeInBitsOverride = 0; if (field->isBitField()) { SizeInBitsOverride = field->getBitWidthValue(CGM.getContext()); assert(SizeInBitsOverride && "found named 0-width bitfield"); } llvm::DIType *fieldType = createFieldType(name, type, SizeInBitsOverride, field->getLocation(), field->getAccess(), OffsetInBits, tunit, RecordTy, RD); elements.push_back(fieldType); } void CGDebugInfo::CollectRecordFields( const RecordDecl *record, llvm::DIFile *tunit, SmallVectorImpl<llvm::Metadata *> &elements, llvm::DICompositeType *RecordTy) { const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record); if (CXXDecl && CXXDecl->isLambda()) CollectRecordLambdaFields(CXXDecl, elements, RecordTy); else { const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record); // Field number for non-static fields. unsigned fieldNo = 0; // Static and non-static members should appear in the same order as // the corresponding declarations in the source program. for (const auto *I : record->decls()) if (const auto *V = dyn_cast<VarDecl>(I)) { // Reuse the existing static member declaration if one exists auto MI = StaticDataMemberCache.find(V->getCanonicalDecl()); if (MI != StaticDataMemberCache.end()) { assert(MI->second && "Static data member declaration should still exist"); elements.push_back(cast<llvm::DIDerivedTypeBase>(MI->second)); } else { auto Field = CreateRecordStaticField(V, RecordTy, record); elements.push_back(Field); } } else if (const auto *field = dyn_cast<FieldDecl>(I)) { CollectRecordNormalField(field, layout.getFieldOffset(fieldNo), tunit, elements, RecordTy, record); // Bump field number for next field. ++fieldNo; } } } // HLSL Change Begins // Hook to allow us to lie about the contents of some HLSL types in the debug info, // by exposing clean members rather than our implementation detail internals. // Note that the debug size of types is not based on the fields reported here, // but rather on ASTContext::getTypeSize, so they should be consistent. bool CGDebugInfo::TryCollectHLSLRecordElements(const RecordType *Ty, llvm::DICompositeType *DITy, SmallVectorImpl<llvm::Metadata *> &Elements) { QualType QualTy(Ty, 0); if (hlsl::IsHLSLVecType(QualTy)) { // The HLSL vector type is defined as containing a field 'h' of // extended vector type, which is represented as an array in DWARF. // However, we logically represent it as one field per component. 
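// Worked example (illustrative, not in the original): for a float3, the
// loop below emits three 32-bit members named "x", "y" and "z" at bit
// offsets 0, 32 and 64, in place of the single array-typed field 'h' that
// the implementation type actually contains.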
QualType ElemQualTy = hlsl::GetHLSLVecElementType(QualTy); unsigned AlignBits = CGM.getContext().getTypeAlign(ElemQualTy); unsigned VecSize = hlsl::GetHLSLVecSize(QualTy); unsigned ElemSizeInBits = CGM.getContext().getTypeSize(ElemQualTy); unsigned CurrentAlignedOffset = 0; for (unsigned ElemIdx = 0; ElemIdx < VecSize; ++ElemIdx) { StringRef FieldName = StringRef(&"xyzw"[ElemIdx], 1); CurrentAlignedOffset = llvm::RoundUpToAlignment(CurrentAlignedOffset, AlignBits); llvm::DIType *FieldType = createFieldType(FieldName, ElemQualTy, 0, SourceLocation(), AccessSpecifier::AS_public, CurrentAlignedOffset, /* tunit */ nullptr, DITy, Ty->getDecl()); CurrentAlignedOffset += ElemSizeInBits; Elements.emplace_back(FieldType); } return true; } else if (hlsl::IsHLSLMatType(QualTy)) { // The HLSL matrix type is defined as containing a field 'h' of // array of extended vector type, but logically we want to represent // it as per-element fields. QualType ElemQualTy = hlsl::GetHLSLMatElementType(QualTy); uint32_t NumRows, NumCols; hlsl::GetHLSLMatRowColCount(QualTy, NumRows, NumCols); unsigned ElemSizeInBits = CGM.getContext().getTypeSize(ElemQualTy); for (unsigned RowIdx = 0; RowIdx < NumRows; ++RowIdx) { for (unsigned ColIdx = 0; ColIdx < NumCols; ++ColIdx) { char FieldName[] = "_11"; FieldName[1] += RowIdx; FieldName[2] += ColIdx; unsigned RowMajorIdx = RowIdx * NumCols + ColIdx; unsigned OffsetInBits = ElemSizeInBits * RowMajorIdx; llvm::DIType *FieldType = createFieldType(FieldName, ElemQualTy, 0, SourceLocation(), AccessSpecifier::AS_public, OffsetInBits, /* tunit */ nullptr, DITy, Ty->getDecl()); Elements.emplace_back(FieldType); } } return true; } else if (hlsl::IsHLSLResourceType(QualTy) || hlsl::IsHLSLNodeType(QualTy) || hlsl::IsHLSLStreamOutputType(QualTy)) { // Should appear as having no members rather than exposing our internal handles. return true; } return false; } // HLSL Change Ends llvm::DISubroutineType * CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method, llvm::DIFile *Unit) { const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>(); if (Method->isStatic()) return cast_or_null<llvm::DISubroutineType>( getOrCreateType(QualType(Func, 0), Unit)); return getOrCreateInstanceMethodType(Method->getThisType(CGM.getContext()), Func, Unit); } llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType( QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile *Unit) { // Add "this" pointer. llvm::DITypeRefArray Args( cast<llvm::DISubroutineType>(getOrCreateType(QualType(Func, 0), Unit)) ->getTypeArray()); assert(Args.size() && "Invalid number of arguments!"); SmallVector<llvm::Metadata *, 16> Elts; // First element is always return type. For 'void' functions it is NULL. Elts.push_back(Args[0]); // "this" pointer is always first argument. const CXXRecordDecl *RD = ThisPtr->getPointeeCXXRecordDecl(); if (isa<ClassTemplateSpecializationDecl>(RD)) { // HLSL Change Begin - This is a reference. llvm::DIType *ThisPtrType = getOrCreateType(ThisPtr, Unit); // HLSL Change End - This is a reference. TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType); // TODO: This and the artificial type below are misleading, the // types aren't artificial, the argument is, but the current // metadata doesn't represent that. 
ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType); Elts.push_back(ThisPtrType); } else { llvm::DIType *ThisPtrType = getOrCreateType(ThisPtr, Unit); TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType); ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType); Elts.push_back(ThisPtrType); } // Copy rest of the arguments. for (unsigned i = 1, e = Args.size(); i != e; ++i) Elts.push_back(Args[i]); llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); unsigned Flags = 0; if (Func->getExtProtoInfo().RefQualifier == RQ_LValue) Flags |= llvm::DINode::FlagLValueReference; if (Func->getExtProtoInfo().RefQualifier == RQ_RValue) Flags |= llvm::DINode::FlagRValueReference; return DBuilder.createSubroutineType(Unit, EltTypeArray, Flags); } /// isFunctionLocalClass - Return true if CXXRecordDecl is defined /// inside a function. static bool isFunctionLocalClass(const CXXRecordDecl *RD) { if (const CXXRecordDecl *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext())) return isFunctionLocalClass(NRD); if (isa<FunctionDecl>(RD->getDeclContext())) return true; return false; } llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction( const CXXMethodDecl *Method, llvm::DIFile *Unit, llvm::DIType *RecordTy) { bool IsCtorOrDtor = isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method); StringRef MethodName = getFunctionName(Method); llvm::DISubroutineType *MethodTy = getOrCreateMethodType(Method, Unit); // Since a single ctor/dtor corresponds to multiple functions, it doesn't // make sense to give a single ctor/dtor a linkage name. StringRef MethodLinkageName; if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent())) MethodLinkageName = CGM.getMangledName(Method); // Get the location for the method. llvm::DIFile *MethodDefUnit = nullptr; unsigned MethodLine = 0; if (!Method->isImplicit()) { MethodDefUnit = getOrCreateFile(Method->getLocation()); MethodLine = getLineNumber(Method->getLocation()); } // Collect virtual method info. llvm::DIType *ContainingType = nullptr; unsigned Virtuality = 0; unsigned VIndex = 0; if (Method->isVirtual()) { if (Method->isPure()) Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual; else Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual; // It doesn't make sense to give a virtual destructor a vtable index, // since a single destructor has two entries in the vtable. // FIXME: Add proper support for debug info for virtual calls in // the Microsoft ABI, where we may use multiple vptrs to make a vftable // lookup if we have multiple or virtual inheritance. 
if (!isa<CXXDestructorDecl>(Method) && !CGM.getTarget().getCXXABI().isMicrosoft()) VIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(Method); ContainingType = RecordTy; } unsigned Flags = 0; if (Method->isImplicit()) Flags |= llvm::DINode::FlagArtificial; Flags |= getAccessFlag(Method->getAccess(), Method->getParent()); if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) { if (CXXC->isExplicit()) Flags |= llvm::DINode::FlagExplicit; } else if (const CXXConversionDecl *CXXC = dyn_cast<CXXConversionDecl>(Method)) { if (CXXC->isExplicit()) Flags |= llvm::DINode::FlagExplicit; } if (Method->hasPrototype()) Flags |= llvm::DINode::FlagPrototyped; if (Method->getRefQualifier() == RQ_LValue) Flags |= llvm::DINode::FlagLValueReference; if (Method->getRefQualifier() == RQ_RValue) Flags |= llvm::DINode::FlagRValueReference; llvm::DINodeArray TParamsArray = CollectFunctionTemplateParams(Method, Unit); llvm::DISubprogram *SP = DBuilder.createMethod( RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine, MethodTy, /*isLocalToUnit=*/false, /* isDefinition=*/false, Virtuality, VIndex, ContainingType, Flags, CGM.getLangOpts().Optimize, nullptr, TParamsArray.get()); SPCache[Method->getCanonicalDecl()].reset(SP); return SP; } void CGDebugInfo::CollectCXXMemberFunctions( const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy) { // Since we want more than just the individual member decls if we // have templated functions iterate over every declaration to gather // the functions. for (const auto *I : RD->decls()) { const auto *Method = dyn_cast<CXXMethodDecl>(I); // If the member is implicit, don't add it to the member list. This avoids // the member being added to type units by LLVM, while still allowing it // to be emitted into the type declaration/reference inside the compile // unit. // Ditto 'nodebug' methods, for consistency with CodeGenFunction.cpp. // FIXME: Handle Using(Shadow?)Decls here to create // DW_TAG_imported_declarations inside the class for base decls brought into // derived classes. GDB doesn't seem to notice/leverage these when I tried // it, so I'm not rushing to fix this. (GCC seems to produce them, if // referenced) if (!Method || Method->isImplicit() || Method->hasAttr<NoDebugAttr>()) continue; if (Method->getType()->getAs<FunctionProtoType>()->getContainedAutoType()) continue; // Reuse the existing member function declaration if it exists. // It may be associated with the declaration of the type & should be // reused as we're building the definition. // // This situation can arise in the vtable-based debug info reduction where // implicit members are emitted in a non-vtable TU. auto MI = SPCache.find(Method->getCanonicalDecl()); EltTys.push_back(MI == SPCache.end() ? CreateCXXMemberFunction(Method, Unit, RecordTy) : static_cast<llvm::Metadata *>(MI->second)); } } void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy) { const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); for (const auto &BI : RD->bases()) { unsigned BFlags = 0; uint64_t BaseOffset; const CXXRecordDecl *Base = cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl()); if (BI.isVirtual()) { if (CGM.getTarget().getCXXABI().isItaniumFamily()) { // virtual base offset offset is -ve. The code generator emits dwarf // expression where it expects +ve number. 
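// Example: a virtual-base-offset-offset of -24 in the vtable is recorded
// here as +24, matching the positive operand the DWARF expression
// emitter expects (see the comment above).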
BaseOffset = 0 - CGM.getItaniumVTableContext() .getVirtualBaseOffsetOffset(RD, Base) .getQuantity(); } else { // In the MS ABI, store the vbtable offset, which is analogous to the // vbase offset offset in Itanium. BaseOffset = 4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base); } BFlags = llvm::DINode::FlagVirtual; } else BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base)); // FIXME: Inconsistent units for BaseOffset. It is in bytes when // BI->isVirtual() and bits when not. BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD); llvm::DIType *DTy = DBuilder.createInheritance( RecordTy, getOrCreateType(BI.getType(), Unit), BaseOffset, BFlags); EltTys.push_back(DTy); } } llvm::DINodeArray CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList, ArrayRef<TemplateArgument> TAList, llvm::DIFile *Unit) { SmallVector<llvm::Metadata *, 16> TemplateParams; for (unsigned i = 0, e = TAList.size(); i != e; ++i) { const TemplateArgument &TA = TAList[i]; StringRef Name; if (TPList) Name = TPList->getParam(i)->getName(); switch (TA.getKind()) { case TemplateArgument::Type: { llvm::DIType *TTy = getOrCreateType(TA.getAsType(), Unit); TemplateParams.push_back( DBuilder.createTemplateTypeParameter(TheCU, Name, TTy)); } break; case TemplateArgument::Integral: { llvm::DIType *TTy = getOrCreateType(TA.getIntegralType(), Unit); TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral()))); } break; case TemplateArgument::Declaration: { const ValueDecl *D = TA.getAsDecl(); QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext()); llvm::DIType *TTy = getOrCreateType(T, Unit); llvm::Constant *V = nullptr; const CXXMethodDecl *MD; // Variable pointer template parameters have a value that is the address // of the variable. if (const auto *VD = dyn_cast<VarDecl>(D)) V = CGM.GetAddrOfGlobalVar(VD); // Member function pointers have special support for building them, though // this is currently unsupported in LLVM CodeGen. else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance()) V = CGM.getCXXABI().EmitMemberFunctionPointer(MD); else if (const auto *FD = dyn_cast<FunctionDecl>(D)) V = CGM.GetAddrOfFunction(FD); // Member data pointers have special handling too to compute the fixed // offset within the object. else if (const auto *MPT = dyn_cast<MemberPointerType>(T.getTypePtr())) { // These five lines (& possibly the above member function pointer // handling) might be able to be refactored to use similar code in // CodeGenModule::getMemberPointerConstant uint64_t fieldOffset = CGM.getContext().getFieldOffset(D); CharUnits chars = CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset); V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars); } TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, cast_or_null<llvm::Constant>(V->stripPointerCasts()))); } break; case TemplateArgument::NullPtr: { QualType T = TA.getNullPtrType(); llvm::DIType *TTy = getOrCreateType(T, Unit); llvm::Constant *V = nullptr; // Special case member data pointer null values since they're actually -1 // instead of zero. if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(T.getTypePtr())) // But treat member function pointers as simple zero integers because // it's easier than having a special case in LLVM's CodeGen. 
If LLVM // CodeGen grows handling for values of non-null member function // pointers then perhaps we could remove this special case and rely on // EmitNullMemberPointer for member function pointers. if (MPT->isMemberDataPointer()) V = CGM.getCXXABI().EmitNullMemberPointer(MPT); if (!V) V = llvm::ConstantInt::get(CGM.Int8Ty, 0); TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, cast<llvm::Constant>(V))); } break; case TemplateArgument::Template: TemplateParams.push_back(DBuilder.createTemplateTemplateParameter( TheCU, Name, nullptr, TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString())); break; case TemplateArgument::Pack: TemplateParams.push_back(DBuilder.createTemplateParameterPack( TheCU, Name, nullptr, CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit))); break; case TemplateArgument::Expression: { const Expr *E = TA.getAsExpr(); QualType T = E->getType(); if (E->isGLValue()) T = CGM.getContext().getLValueReferenceType(T); llvm::Constant *V = CGM.EmitConstantExpr(E, T); assert(V && "Expression in template argument isn't constant"); llvm::DIType *TTy = getOrCreateType(T, Unit); TemplateParams.push_back(DBuilder.createTemplateValueParameter( TheCU, Name, TTy, cast<llvm::Constant>(V->stripPointerCasts()))); } break; // And the following should never occur: case TemplateArgument::TemplateExpansion: case TemplateArgument::Null: llvm_unreachable( "These argument types shouldn't exist in concrete types"); } } return DBuilder.getOrCreateArray(TemplateParams); } llvm::DINodeArray CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile *Unit) { if (FD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplateSpecialization) { const TemplateParameterList *TList = FD->getTemplateSpecializationInfo() ->getTemplate() ->getTemplateParameters(); return CollectTemplateParams( TList, FD->getTemplateSpecializationArgs()->asArray(), Unit); } return llvm::DINodeArray(); } llvm::DINodeArray CGDebugInfo::CollectCXXTemplateParams( const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile *Unit) { // Always get the full list of parameters, not just the ones from // the specialization. TemplateParameterList *TPList = TSpecial->getSpecializedTemplate()->getTemplateParameters(); const TemplateArgumentList &TAList = TSpecial->getTemplateArgs(); return CollectTemplateParams(TPList, TAList.asArray(), Unit); } llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) { if (VTablePtrType) return VTablePtrType; ASTContext &Context = CGM.getContext(); /* Function type */ llvm::Metadata *STy = getOrCreateType(Context.IntTy, Unit); llvm::DITypeRefArray SElements = DBuilder.getOrCreateTypeArray(STy); llvm::DIType *SubTy = DBuilder.createSubroutineType(Unit, SElements); unsigned Size = Context.getTypeSize(Context.VoidPtrTy); llvm::DIType *vtbl_ptr_type = DBuilder.createPointerType(SubTy, Size, 0, "__vtbl_ptr_type"); VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size); return VTablePtrType; } StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) { // Copy the gdb compatible name on the side and use its reference. return internString("_vptr$", RD->getNameAsString()); } void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit, SmallVectorImpl<llvm::Metadata *> &EltTys) { const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); // If there is a primary base then it will hold vtable info. 
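// E.g. for "struct B { virtual void f(); }; struct D : B {};", only B
// receives the synthetic "_vptr$B" member built below; D shares the
// vtable pointer through its primary base, so the early return skips it.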
if (RL.getPrimaryBase()) return; // If this class is not dynamic then there is not any vtable info to collect. if (!RD->isDynamicClass()) return; unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy); llvm::DIType *VPTR = DBuilder.createMemberType( Unit, getVTableName(RD), Unit, 0, Size, 0, 0, llvm::DINode::FlagArtificial, getOrCreateVTablePtrType(Unit)); EltTys.push_back(VPTR); } llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy, SourceLocation Loc) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); llvm::DIType *T = getOrCreateType(RTy, getOrCreateFile(Loc)); return T; } llvm::DIType *CGDebugInfo::getOrCreateInterfaceType(QualType D, SourceLocation Loc) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); llvm::DIType *T = getOrCreateType(D, getOrCreateFile(Loc)); RetainedTypes.push_back(D.getAsOpaquePtr()); return T; } void CGDebugInfo::completeType(const EnumDecl *ED) { if (DebugKind <= CodeGenOptions::DebugLineTablesOnly) return; QualType Ty = CGM.getContext().getEnumType(ED); void *TyPtr = Ty.getAsOpaquePtr(); auto I = TypeCache.find(TyPtr); if (I == TypeCache.end() || !cast<llvm::DIType>(I->second)->isForwardDecl()) return; llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<EnumType>()); assert(!Res->isForwardDecl()); TypeCache[TyPtr].reset(Res); } void CGDebugInfo::completeType(const RecordDecl *RD) { if (DebugKind > CodeGenOptions::LimitedDebugInfo || !CGM.getLangOpts().CPlusPlus) completeRequiredType(RD); } void CGDebugInfo::completeRequiredType(const RecordDecl *RD) { if (DebugKind <= CodeGenOptions::DebugLineTablesOnly) return; if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) if (CXXDecl->isDynamicClass()) return; QualType Ty = CGM.getContext().getRecordType(RD); llvm::DIType *T = getTypeOrNull(Ty); if (T && T->isForwardDecl()) completeClassData(RD); } void CGDebugInfo::completeClassData(const RecordDecl *RD) { if (DebugKind <= CodeGenOptions::DebugLineTablesOnly) return; QualType Ty = CGM.getContext().getRecordType(RD); void *TyPtr = Ty.getAsOpaquePtr(); auto I = TypeCache.find(TyPtr); if (I != TypeCache.end() && !cast<llvm::DIType>(I->second)->isForwardDecl()) return; llvm::DIType *Res = CreateTypeDefinition(Ty->castAs<RecordType>()); assert(!Res->isForwardDecl()); TypeCache[TyPtr].reset(Res); } static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I, CXXRecordDecl::method_iterator End) { for (; I != End; ++I) if (FunctionDecl *Tmpl = I->getInstantiatedFromMemberFunction()) if (!Tmpl->isImplicit() && Tmpl->isThisDeclarationADefinition() && !I->getMemberSpecializationInfo()->isExplicitSpecialization()) return true; return false; } static bool shouldOmitDefinition(CodeGenOptions::DebugInfoKind DebugKind, const RecordDecl *RD, const LangOptions &LangOpts) { if (DebugKind > CodeGenOptions::LimitedDebugInfo) return false; if (!LangOpts.CPlusPlus) return false; if (!RD->isCompleteDefinitionRequired()) return true; const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD); if (!CXXDecl) return false; if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass()) return true; TemplateSpecializationKind Spec = TSK_Undeclared; if (const ClassTemplateSpecializationDecl *SD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) Spec = SD->getSpecializationKind(); if (Spec == TSK_ExplicitInstantiationDeclaration && hasExplicitMemberDefinition(CXXDecl->method_begin(), CXXDecl->method_end())) return true; return false; } llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); llvm::DIType 
*T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0))); if (T || shouldOmitDefinition(DebugKind, RD, CGM.getLangOpts())) { if (!T) T = getOrCreateRecordFwdDecl( Ty, getContextDescriptor(cast<Decl>(RD->getDeclContext()))); return T; } return CreateTypeDefinition(Ty); } llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); // Get overall information about the record type for the debug info. llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); // Records and classes and unions can all be recursive. To handle them, we // first generate a debug descriptor for the struct as a forward declaration. // Then (if it is a definition) we go through and get debug info for all of // its members. Finally, we create a descriptor for the complete type (which // may refer to the forward decl if the struct is recursive) and replace all // uses of the forward declaration with the final definition. auto *FwdDecl = cast<llvm::DICompositeType>(getOrCreateLimitedType(Ty, DefUnit)); const RecordDecl *D = RD->getDefinition(); if (!D || !D->isCompleteDefinition()) return FwdDecl; if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) CollectContainingType(CXXDecl, FwdDecl); // Push the struct on region stack. LexicalBlockStack.emplace_back(&*FwdDecl); RegionMap[Ty->getDecl()].reset(FwdDecl); // Convert all the elements. SmallVector<llvm::Metadata *, 16> EltTys; // what about nested types? if (!TryCollectHLSLRecordElements(Ty, FwdDecl, EltTys)) { // HLSL Change // Note: The split of CXXDecl information here is intentional, the // gdb tests will depend on a certain ordering at printout. The debug // information offsets are still correct if we merge them all together // though. const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD); if (CXXDecl) { CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl); CollectVTableInfo(CXXDecl, DefUnit, EltTys); } // Collect data fields (including static variables and any initializers). CollectRecordFields(RD, DefUnit, EltTys, FwdDecl); if (CXXDecl) CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl); } // HLSL Change LexicalBlockStack.pop_back(); RegionMap.erase(Ty->getDecl()); llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); DBuilder.replaceArrays(FwdDecl, Elements); if (FwdDecl->isTemporary()) FwdDecl = llvm::MDNode::replaceWithPermanent(llvm::TempDICompositeType(FwdDecl)); RegionMap[Ty->getDecl()].reset(FwdDecl); return FwdDecl; } #if 0 // HLSL Change - no ObjC support llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty, llvm::DIFile *Unit) { // Ignore protocols. return getOrCreateType(Ty->getBaseType(), Unit); } /// \return true if Getter has the default name for the property PD. static bool hasDefaultGetterName(const ObjCPropertyDecl *PD, const ObjCMethodDecl *Getter) { assert(PD); if (!Getter) return true; assert(Getter->getDeclName().isObjCZeroArgSelector()); return PD->getName() == Getter->getDeclName().getObjCSelector().getNameForSlot(0); } /// \return true if Setter has the default name for the property PD. 
static bool hasDefaultSetterName(const ObjCPropertyDecl *PD, const ObjCMethodDecl *Setter) { assert(PD); if (!Setter) return true; assert(Setter->getDeclName().isObjCOneArgSelector()); return SelectorTable::constructSetterName(PD->getName()) == Setter->getDeclName().getObjCSelector().getNameForSlot(0); } llvm::DIType *CGDebugInfo::CreateType(const ObjCInterfaceType *Ty, llvm::DIFile *Unit) { ObjCInterfaceDecl *ID = Ty->getDecl(); if (!ID) return nullptr; // Get overall information about the record type for the debug info. llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation()); unsigned Line = getLineNumber(ID->getLocation()); auto RuntimeLang = static_cast<llvm::dwarf::SourceLanguage>(TheCU->getSourceLanguage()); // If this is just a forward declaration return a special forward-declaration // debug type since we won't be able to lay out the entire type. ObjCInterfaceDecl *Def = ID->getDefinition(); if (!Def || !Def->getImplementation()) { llvm::DIType *FwdDecl = DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_structure_type, ID->getName(), TheCU, DefUnit, Line, RuntimeLang); ObjCInterfaceCache.push_back(ObjCInterfaceCacheEntry(Ty, FwdDecl, Unit)); return FwdDecl; } return CreateTypeDefinition(Ty, Unit); } #endif // HLSL Change - no ObjC support llvm::DIModule * CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod) { auto it = ModuleRefCache.find(Mod.Signature); if (it != ModuleRefCache.end()) return it->second; // Macro definitions that were defined with "-D" on the command line. SmallString<128> ConfigMacros; { llvm::raw_svector_ostream OS(ConfigMacros); const auto &PPOpts = CGM.getPreprocessorOpts(); unsigned I = 0; // Translate the macro definitions back into a command line. for (auto &M : PPOpts.Macros) { if (++I > 1) OS << " "; const std::string &Macro = M.first; bool Undef = M.second; OS << "\"-" << (Undef ? 'U' : 'D'); for (char c : Macro) switch (c) { case '\\' : OS << "\\\\"; break; case '"' : OS << "\\\""; break; default: OS << c; } OS << '\"'; } } llvm::DIBuilder DIB(CGM.getModule()); auto *CU = DIB.createCompileUnit( TheCU->getSourceLanguage(), internString(Mod.ModuleName), internString(Mod.Path), TheCU->getProducer(), true, StringRef(), 0, internString(Mod.ASTFile), llvm::DIBuilder::FullDebug, Mod.Signature); llvm::DIModule *ModuleRef = DIB.createModule(CU, Mod.ModuleName, ConfigMacros, internString(Mod.Path), internString(CGM.getHeaderSearchOpts().Sysroot)); DIB.finalize(); ModuleRefCache.insert(std::make_pair(Mod.Signature, ModuleRef)); return ModuleRef; } #if 0 // HLSL Change - no ObjC support llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm::DIFile *Unit) { ObjCInterfaceDecl *ID = Ty->getDecl(); llvm::DIFile *DefUnit = getOrCreateFile(ID->getLocation()); unsigned Line = getLineNumber(ID->getLocation()); unsigned RuntimeLang = TheCU->getSourceLanguage(); // Bit size, align and offset of the type. uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); unsigned Flags = 0; if (ID->getImplementation()) Flags |= llvm::DINode::FlagObjcClassComplete; llvm::DICompositeType *RealDecl = DBuilder.createStructType( Unit, ID->getName(), DefUnit, Line, Size, Align, Flags, nullptr, llvm::DINodeArray(), RuntimeLang); QualType QTy(Ty, 0); TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl); // Push the struct on region stack. LexicalBlockStack.emplace_back(RealDecl); RegionMap[Ty->getDecl()].reset(RealDecl); // Convert all the elements.
SmallVector<llvm::Metadata *, 16> EltTys; ObjCInterfaceDecl *SClass = ID->getSuperClass(); if (SClass) { llvm::DIType *SClassTy = getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit); if (!SClassTy) return nullptr; llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0); EltTys.push_back(InhTag); } // Create entries for all of the properties. for (const auto *PD : ID->properties()) { SourceLocation Loc = PD->getLocation(); llvm::DIFile *PUnit = getOrCreateFile(Loc); unsigned PLine = getLineNumber(Loc); ObjCMethodDecl *Getter = PD->getGetterMethodDecl(); ObjCMethodDecl *Setter = PD->getSetterMethodDecl(); llvm::MDNode *PropertyNode = DBuilder.createObjCProperty( PD->getName(), PUnit, PLine, hasDefaultGetterName(PD, Getter) ? "" : getSelectorName(PD->getGetterName()), hasDefaultSetterName(PD, Setter) ? "" : getSelectorName(PD->getSetterName()), PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit)); EltTys.push_back(PropertyNode); } const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID); unsigned FieldNo = 0; for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field; Field = Field->getNextIvar(), ++FieldNo) { llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit); if (!FieldTy) return nullptr; StringRef FieldName = Field->getName(); // Ignore unnamed fields. if (FieldName.empty()) continue; // Get the location for the field. llvm::DIFile *FieldDefUnit = getOrCreateFile(Field->getLocation()); unsigned FieldLine = getLineNumber(Field->getLocation()); QualType FType = Field->getType(); uint64_t FieldSize = 0; unsigned FieldAlign = 0; if (!FType->isIncompleteArrayType()) { // Bit size, align and offset of the type. FieldSize = Field->isBitField() ? Field->getBitWidthValue(CGM.getContext()) : CGM.getContext().getTypeSize(FType); FieldAlign = CGM.getContext().getTypeAlign(FType); } uint64_t FieldOffset; if (CGM.getLangOpts().ObjCRuntime.isNonFragile()) { // We don't know the runtime offset of an ivar if we're using the // non-fragile ABI. For bitfields, use the bit offset into the first // byte of storage of the bitfield. For other fields, use zero. if (Field->isBitField()) { FieldOffset = CGM.getObjCRuntime().ComputeBitfieldBitOffset(CGM, ID, Field); FieldOffset %= CGM.getContext().getCharWidth(); } else { FieldOffset = 0; } } else { FieldOffset = RL.getFieldOffset(FieldNo); } unsigned Flags = 0; if (Field->getAccessControl() == ObjCIvarDecl::Protected) Flags = llvm::DINode::FlagProtected; else if (Field->getAccessControl() == ObjCIvarDecl::Private) Flags = llvm::DINode::FlagPrivate; else if (Field->getAccessControl() == ObjCIvarDecl::Public) Flags = llvm::DINode::FlagPublic; llvm::MDNode *PropertyNode = nullptr; if (ObjCImplementationDecl *ImpD = ID->getImplementation()) { if (ObjCPropertyImplDecl *PImpD = ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) { if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) { SourceLocation Loc = PD->getLocation(); llvm::DIFile *PUnit = getOrCreateFile(Loc); unsigned PLine = getLineNumber(Loc); ObjCMethodDecl *Getter = PD->getGetterMethodDecl(); ObjCMethodDecl *Setter = PD->getSetterMethodDecl(); PropertyNode = DBuilder.createObjCProperty( PD->getName(), PUnit, PLine, hasDefaultGetterName(PD, Getter) ? "" : getSelectorName( PD->getGetterName()), hasDefaultSetterName(PD, Setter) ? 
"" : getSelectorName( PD->getSetterName()), PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit)); } } } FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit, FieldLine, FieldSize, FieldAlign, FieldOffset, Flags, FieldTy, PropertyNode); EltTys.push_back(FieldTy); } llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); DBuilder.replaceArrays(RealDecl, Elements); LexicalBlockStack.pop_back(); return RealDecl; } #endif // HLSL Change - no ObjC support llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile *Unit) { llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit); int64_t Count = Ty->getNumElements(); if (Count == 0) // If number of elements are not known then this is an unbounded array. // Use Count == -1 to express such arrays. Count = -1; llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(0, Count); llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript); uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray); } llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) { uint64_t Size; uint64_t Align; // FIXME: make getTypeAlign() aware of VLAs and incomplete array types if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) { Size = 0; Align = CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT)); } else if (Ty->isIncompleteArrayType()) { Size = 0; if (Ty->getElementType()->isIncompleteType()) Align = 0; else Align = CGM.getContext().getTypeAlign(Ty->getElementType()); } else if (Ty->isIncompleteType()) { Size = 0; Align = 0; } else { // Size and align of the whole array, not the element type. Size = CGM.getContext().getTypeSize(Ty); Align = CGM.getContext().getTypeAlign(Ty); } // Add the dimensions of the array. FIXME: This loses CV qualifiers from // interior arrays, do we care? Why aren't nested arrays represented the // obvious/recursive way? SmallVector<llvm::Metadata *, 8> Subscripts; QualType EltTy(Ty, 0); while ((Ty = dyn_cast<ArrayType>(EltTy))) { // If the number of elements is known, then count is that number. Otherwise, // it's -1. This allows us to represent a subrange with an array of 0 // elements, like this: // // struct foo { // int x[0]; // }; int64_t Count = -1; // Count == -1 is an unbounded array. if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) Count = CAT->getSize().getZExtValue(); // FIXME: Verify this is right for VLAs. Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count)); EltTy = Ty->getElementType(); } llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts); return DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit), SubscriptArray); } llvm::DIType *CGDebugInfo::CreateType(const LValueReferenceType *Ty, llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, Ty, Ty->getPointeeType(), Unit); } llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty, llvm::DIFile *Unit) { return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty, Ty->getPointeeType(), Unit); } llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty, llvm::DIFile *U) { uint64_t Size = CGM.getCXXABI().isTypeInfoCalculable(QualType(Ty, 0)) ? 
CGM.getContext().getTypeSize(Ty) : 0; llvm::DIType *ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U); if (Ty->isMemberDataPointerType()) return DBuilder.createMemberPointerType( getOrCreateType(Ty->getPointeeType(), U), ClassType, Size); const FunctionProtoType *FPT = Ty->getPointeeType()->getAs<FunctionProtoType>(); return DBuilder.createMemberPointerType( getOrCreateInstanceMethodType(CGM.getContext().getPointerType(QualType( Ty->getClass(), FPT->getTypeQuals())), FPT, U), ClassType, Size); } llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) { // Ignore the atomic wrapping // FIXME: What is the correct representation? return getOrCreateType(Ty->getValueType(), U); } llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) { const EnumDecl *ED = Ty->getDecl(); uint64_t Size = 0; uint64_t Align = 0; if (!ED->getTypeForDecl()->isIncompleteType()) { Size = CGM.getContext().getTypeSize(ED->getTypeForDecl()); Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl()); } SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU); // If this is just a forward declaration, construct an appropriately // marked node and just return it. if (!ED->getDefinition()) { llvm::DIScope *EDContext = getContextDescriptor(cast<Decl>(ED->getDeclContext())); llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation()); unsigned Line = getLineNumber(ED->getLocation()); StringRef EDName = ED->getName(); llvm::DIType *RetTy = DBuilder.createReplaceableCompositeType( llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line, 0, Size, Align, llvm::DINode::FlagFwdDecl, FullName); ReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(Ty), std::make_tuple(static_cast<llvm::Metadata *>(RetTy))); return RetTy; } return CreateTypeDefinition(Ty); } llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) { const EnumDecl *ED = Ty->getDecl(); uint64_t Size = 0; uint64_t Align = 0; if (!ED->getTypeForDecl()->isIncompleteType()) { Size = CGM.getContext().getTypeSize(ED->getTypeForDecl()); Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl()); } SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU); // Create elements for each enumerator. SmallVector<llvm::Metadata *, 16> Enumerators; ED = ED->getDefinition(); for (const auto *Enum : ED->enumerators()) { Enumerators.push_back(DBuilder.createEnumerator( Enum->getName(), Enum->getInitVal().getSExtValue())); } // Return a CompositeType for the enum itself. llvm::DINodeArray EltArray = DBuilder.getOrCreateArray(Enumerators); llvm::DIFile *DefUnit = getOrCreateFile(ED->getLocation()); unsigned Line = getLineNumber(ED->getLocation()); llvm::DIScope *EnumContext = getContextDescriptor(cast<Decl>(ED->getDeclContext())); llvm::DIType *ClassTy = ED->isFixed() ? getOrCreateType(ED->getIntegerType(), DefUnit) : nullptr; return DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line, Size, Align, EltArray, ClassTy, FullName); } static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) { Qualifiers Quals; do { Qualifiers InnerQuals = T.getLocalQualifiers(); // Qualifiers::operator+() doesn't like it if you add a Qualifier // that is already there. 
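// Example: unwrapping "const typeof(volatile int)" collects 'const' on
// the first pass and 'volatile' on the second, then returns 'int'
// re-qualified with both from the default case below.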
Quals += Qualifiers::removeCommonQualifiers(Quals, InnerQuals); Quals += InnerQuals; QualType LastT = T; switch (T->getTypeClass()) { default: return C.getQualifiedType(T.getTypePtr(), Quals); case Type::TemplateSpecialization: { const auto *Spec = cast<TemplateSpecializationType>(T); if (Spec->isTypeAlias()) return C.getQualifiedType(T.getTypePtr(), Quals); T = Spec->desugar(); break; } case Type::TypeOfExpr: T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType(); break; case Type::TypeOf: T = cast<TypeOfType>(T)->getUnderlyingType(); break; case Type::Decltype: T = cast<DecltypeType>(T)->getUnderlyingType(); break; case Type::UnaryTransform: T = cast<UnaryTransformType>(T)->getUnderlyingType(); break; case Type::Attributed: T = cast<AttributedType>(T)->getEquivalentType(); break; case Type::Elaborated: T = cast<ElaboratedType>(T)->getNamedType(); break; case Type::Paren: T = cast<ParenType>(T)->getInnerType(); break; case Type::SubstTemplateTypeParm: T = cast<SubstTemplateTypeParmType>(T)->getReplacementType(); break; case Type::Auto: QualType DT = cast<AutoType>(T)->getDeducedType(); assert(!DT.isNull() && "Undeduced types shouldn't reach here."); T = DT; break; } assert(T != LastT && "Type unwrapping failed to unwrap!"); (void)LastT; } while (true); } llvm::DIType *CGDebugInfo::getTypeOrNull(QualType Ty) { // Unwrap the type as needed for debug information. Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext()); auto it = TypeCache.find(Ty.getAsOpaquePtr()); if (it != TypeCache.end()) { // Verify that the debug info still exists. if (llvm::Metadata *V = it->second) return cast<llvm::DIType>(V); } return nullptr; } void CGDebugInfo::completeTemplateDefinition( const ClassTemplateSpecializationDecl &SD) { if (DebugKind <= CodeGenOptions::DebugLineTablesOnly) return; completeClassData(&SD); // In case this type has no member function definitions being emitted, ensure // it is retained RetainedTypes.push_back(CGM.getContext().getRecordType(&SD).getAsOpaquePtr()); } llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) { if (Ty.isNull()) return nullptr; // Unwrap the type as needed for debug information. Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext()); if (auto *T = getTypeOrNull(Ty)) return T; // Otherwise create the type. llvm::DIType *Res = CreateTypeNode(Ty, Unit); void *TyPtr = Ty.getAsOpaquePtr(); // And update the type cache. TypeCache[TyPtr].reset(Res); return Res; } #if 0 // HLSL Change - no ObjC support unsigned CGDebugInfo::Checksum(const ObjCInterfaceDecl *ID) { // The assumption is that the number of ivars can only increase // monotonically, so it is safe to just use their current number as // a checksum. unsigned Sum = 0; for (const ObjCIvarDecl *Ivar = ID->all_declared_ivar_begin(); Ivar != nullptr; Ivar = Ivar->getNextIvar()) ++Sum; return Sum; } ObjCInterfaceDecl *CGDebugInfo::getObjCInterfaceDecl(QualType Ty) { switch (Ty->getTypeClass()) { case Type::ObjCObjectPointer: return getObjCInterfaceDecl( cast<ObjCObjectPointerType>(Ty)->getPointeeType()); case Type::ObjCInterface: return cast<ObjCInterfaceType>(Ty)->getDecl(); default: return nullptr; } } #endif // HLSL Change - no ObjC support llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) { // Handle qualifiers, which recursively handles what they refer to. if (Ty.hasLocalQualifiers()) return CreateQualifiedType(Ty, Unit); // Work out details of type. 
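// E.g. a plain extended vector lands in the Vector/ExtVector cases below,
// while HLSL's vector and matrix wrappers arrive as Type::Record and are
// expanded per-element by TryCollectHLSLRecordElements during
// CreateTypeDefinition.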
switch (Ty->getTypeClass()) { #define TYPE(Class, Base) #define ABSTRACT_TYPE(Class, Base) #define NON_CANONICAL_TYPE(Class, Base) #define DEPENDENT_TYPE(Class, Base) case Type::Class: #include "clang/AST/TypeNodes.def" llvm_unreachable("Dependent types cannot show up in debug information"); case Type::ExtVector: case Type::Vector: return CreateType(cast<VectorType>(Ty), Unit); #if 0 // HLSL Change - no ObjC support case Type::ObjCObjectPointer: return CreateType(cast<ObjCObjectPointerType>(Ty), Unit); case Type::ObjCObject: return CreateType(cast<ObjCObjectType>(Ty), Unit); case Type::ObjCInterface: return CreateType(cast<ObjCInterfaceType>(Ty), Unit); #else case Type::ObjCObjectPointer: case Type::ObjCObject: case Type::ObjCInterface: llvm_unreachable("No ObjC Support"); #endif // HLSL Change - no ObjC support case Type::Builtin: return CreateType(cast<BuiltinType>(Ty)); case Type::Complex: return CreateType(cast<ComplexType>(Ty)); case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit); case Type::Adjusted: case Type::Decayed: // Decayed and adjusted types use the adjusted type in LLVM and DWARF. return CreateType( cast<PointerType>(cast<AdjustedType>(Ty)->getAdjustedType()), Unit); #if 0 // HLSL Change - no block support case Type::BlockPointer: return CreateType(cast<BlockPointerType>(Ty), Unit); #else case Type::BlockPointer: llvm_unreachable("No Block Support"); #endif // HLSL Change - no block support case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit); case Type::Record: return CreateType(cast<RecordType>(Ty)); case Type::Enum: return CreateEnumType(cast<EnumType>(Ty)); case Type::FunctionProto: case Type::FunctionNoProto: return CreateType(cast<FunctionType>(Ty), Unit); case Type::ConstantArray: case Type::VariableArray: case Type::IncompleteArray: return CreateType(cast<ArrayType>(Ty), Unit); case Type::LValueReference: return CreateType(cast<LValueReferenceType>(Ty), Unit); case Type::RValueReference: return CreateType(cast<RValueReferenceType>(Ty), Unit); case Type::MemberPointer: return CreateType(cast<MemberPointerType>(Ty), Unit); case Type::Atomic: return CreateType(cast<AtomicType>(Ty), Unit); case Type::TemplateSpecialization: return CreateType(cast<TemplateSpecializationType>(Ty), Unit); case Type::Auto: case Type::Attributed: case Type::Elaborated: case Type::Paren: case Type::SubstTemplateTypeParm: case Type::TypeOfExpr: case Type::TypeOf: case Type::Decltype: case Type::UnaryTransform: case Type::PackExpansion: break; } llvm_unreachable("type should have been unwrapped!"); } llvm::DIType *CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty, llvm::DIFile *Unit) { QualType QTy(Ty, 0); auto *T = cast_or_null<llvm::DICompositeTypeBase>(getTypeOrNull(QTy)); // We may have cached a forward decl when we could have created // a non-forward decl. Go ahead and create a non-forward decl // now. if (T && !T->isForwardDecl()) return T; // Otherwise create the type. llvm::DICompositeType *Res = CreateLimitedType(Ty); // Propagate members from the declaration to the definition // CreateType(const RecordType*) will overwrite this with the members in the // correct order if the full type is needed. DBuilder.replaceArrays(Res, T ? T->getElements() : llvm::DINodeArray()); // And update the type cache. TypeCache[QTy.getAsOpaquePtr()].reset(Res); return Res; } // TODO: Currently used for context chains when limiting debug info. 
llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) { RecordDecl *RD = Ty->getDecl(); // Get overall information about the record type for the debug info. llvm::DIFile *DefUnit = getOrCreateFile(RD->getLocation()); unsigned Line = getLineNumber(RD->getLocation()); StringRef RDName = getClassName(RD); llvm::DIScope *RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext())); // If we ended up creating the type during the context chain construction, // just return that. auto *T = cast_or_null<llvm::DICompositeType>( getTypeOrNull(CGM.getContext().getRecordType(RD))); if (T && (!T->isForwardDecl() || !RD->getDefinition())) return T; // If this is just a forward or incomplete declaration, construct an // appropriately marked node and just return it. const RecordDecl *D = RD->getDefinition(); if (!D || !D->isCompleteDefinition()) return getOrCreateRecordFwdDecl(Ty, RDContext); uint64_t Size = CGM.getContext().getTypeSize(Ty); uint64_t Align = CGM.getContext().getTypeAlign(Ty); SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU); llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType( getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align, 0, FullName); RegionMap[Ty->getDecl()].reset(RealDecl); TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl); if (const ClassTemplateSpecializationDecl *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD)) DBuilder.replaceArrays(RealDecl, llvm::DINodeArray(), CollectCXXTemplateParams(TSpecial, DefUnit)); return RealDecl; } void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD, llvm::DICompositeType *RealDecl) { // A class's primary base or the class itself contains the vtable. llvm::DICompositeType *ContainingType = nullptr; const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD); if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) { // Seek non-virtual primary base root. while (1) { const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase); const CXXRecordDecl *PBT = BRL.getPrimaryBase(); if (PBT && !BRL.isPrimaryBaseVirtual()) PBase = PBT; else break; } ContainingType = cast<llvm::DICompositeType>( getOrCreateType(QualType(PBase->getTypeForDecl(), 0), getOrCreateFile(RD->getLocation()))); } else if (RD->isDynamicClass()) ContainingType = RealDecl; DBuilder.replaceVTableHolder(RealDecl, ContainingType); } llvm::DIType *CGDebugInfo::CreateMemberType(llvm::DIFile *Unit, QualType FType, StringRef Name, uint64_t *Offset) { llvm::DIType *FieldTy = CGDebugInfo::getOrCreateType(FType, Unit); uint64_t FieldSize = CGM.getContext().getTypeSize(FType); unsigned FieldAlign = CGM.getContext().getTypeAlign(FType); llvm::DIType *Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize, FieldAlign, *Offset, 0, FieldTy); *Offset += FieldSize; return Ty; } void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit, StringRef &Name, StringRef &LinkageName, llvm::DIScope *&FDContext, llvm::DINodeArray &TParamsArray, unsigned &Flags) { const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); Name = getFunctionName(FD); // Use mangled name as linkage name for C/C++ functions. if (FD->hasPrototype()) { LinkageName = CGM.getMangledName(GD); Flags |= llvm::DINode::FlagPrototyped; } // No need to replicate the linkage name if it isn't different from the // subprogram name, no need to have it at all unless coverage is enabled or // debug is set to more than just line tables. 
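// E.g. a function whose mangled name equals its source name gets no
// separate linkage name, and under line-tables-only debug info the
// linkage name is kept only when gcov instrumentation needs it.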
if (LinkageName == Name || (!CGM.getCodeGenOpts().EmitGcovArcs && !CGM.getCodeGenOpts().EmitGcovNotes && DebugKind <= CodeGenOptions::DebugLineTablesOnly)) LinkageName = StringRef(); if (DebugKind >= CodeGenOptions::LimitedDebugInfo) { if (const NamespaceDecl *NSDecl = dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext())) FDContext = getOrCreateNameSpace(NSDecl); else if (const RecordDecl *RDecl = dyn_cast_or_null<RecordDecl>(FD->getDeclContext())) FDContext = getContextDescriptor(cast<Decl>(RDecl)); // Collect template parameters. TParamsArray = CollectFunctionTemplateParams(FD, Unit); } } void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit, unsigned &LineNo, QualType &T, StringRef &Name, StringRef &LinkageName, llvm::DIScope *&VDContext) { Unit = getOrCreateFile(VD->getLocation()); LineNo = getLineNumber(VD->getLocation()); setLocation(VD->getLocation()); T = VD->getType(); if (T->isIncompleteArrayType()) { // CodeGen turns int[] into int[1] so we'll do the same here. llvm::APInt ConstVal(32, 1); QualType ET = CGM.getContext().getAsArrayType(T)->getElementType(); T = CGM.getContext().getConstantArrayType(ET, ConstVal, ArrayType::Normal, 0); } Name = VD->getName(); if (VD->getDeclContext() && !isa<FunctionDecl>(VD->getDeclContext()) && !isa<ObjCMethodDecl>(VD->getDeclContext())) LinkageName = CGM.getMangledName(VD); if (LinkageName == Name) LinkageName = StringRef(); // Since we emit declarations (DW_AT_members) for static members, place the // definition of those static members in the namespace they were declared in // in the source code (the lexical decl context). // FIXME: Generalize this for even non-member global variables where the // declaration and definition may have different lexical decl contexts, once // we have support for emitting declarations of (non-member) global variables. const DeclContext *DC = VD->isStaticDataMember() ? VD->getLexicalDeclContext() : VD->getDeclContext(); // When a record type contains an in-line initialization of a static data // member, and the record type is marked as __declspec(dllexport), an implicit // definition of the member will be created in the record context. DWARF // doesn't seem to have a nice way to describe this in a form that consumers // are likely to understand, so fake the "normal" situation of a definition // outside the class by putting it in the global scope. if (DC->isRecord()) DC = CGM.getContext().getTranslationUnitDecl(); VDContext = getContextDescriptor(dyn_cast<Decl>(DC)); } llvm::DISubprogram * CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) { llvm::DINodeArray TParamsArray; StringRef Name, LinkageName; unsigned Flags = 0; SourceLocation Loc = FD->getLocation(); llvm::DIFile *Unit = getOrCreateFile(Loc); llvm::DIScope *DContext = Unit; unsigned Line = getLineNumber(Loc); collectFunctionDeclProps(FD, Unit, Name, LinkageName, DContext, TParamsArray, Flags); // Build function type. 
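// The declaration may never have been through call lowering, so the type
// is rebuilt from the parameter declarations alone; note the
// hlsl::ParameterModifier array that getFunctionType takes in this tree
// (passed empty here).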
SmallVector<QualType, 16> ArgTypes; for (const ParmVarDecl *Parm: FD->parameters()) ArgTypes.push_back(Parm->getType()); QualType FnType = CGM.getContext().getFunctionType( FD->getReturnType(), ArgTypes, FunctionProtoType::ExtProtoInfo(), ArrayRef<hlsl::ParameterModifier>()); // HLSL Change - parameter modifiers llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl( DContext, Name, LinkageName, Unit, Line, getOrCreateFunctionType(FD, FnType, Unit), !FD->isExternallyVisible(), false /*declaration*/, 0, Flags, CGM.getLangOpts().Optimize, nullptr, TParamsArray.get(), getFunctionDeclaration(FD)); const FunctionDecl *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl()); FwdDeclReplaceMap.emplace_back(std::piecewise_construct, std::make_tuple(CanonDecl), std::make_tuple(SP)); return SP; } llvm::DIGlobalVariable * CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) { QualType T; StringRef Name, LinkageName; SourceLocation Loc = VD->getLocation(); llvm::DIFile *Unit = getOrCreateFile(Loc); llvm::DIScope *DContext = Unit; unsigned Line = getLineNumber(Loc); collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext); auto *GV = DBuilder.createTempGlobalVariableFwdDecl( DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit), !VD->isExternallyVisible(), nullptr, nullptr); FwdDeclReplaceMap.emplace_back( std::piecewise_construct, std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())), std::make_tuple(static_cast<llvm::Metadata *>(GV))); return GV; } llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) { // We only need a declaration (not a definition) of the type - so use whatever // we would otherwise do to get a type for a pointee. (forward declarations in // limited debug info, full definitions (if the type definition is available) // in unlimited debug info) if (const TypeDecl *TD = dyn_cast<TypeDecl>(D)) return getOrCreateType(CGM.getContext().getTypeDeclType(TD), getOrCreateFile(TD->getLocation())); auto I = DeclCache.find(D->getCanonicalDecl()); if (I != DeclCache.end()) return dyn_cast_or_null<llvm::DINode>(I->second); // No definition for now. Emit a forward definition that might be // merged with a potential upcoming definition. if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) return getFunctionForwardDeclaration(FD); else if (const auto *VD = dyn_cast<VarDecl>(D)) return getGlobalVariableForwardDeclaration(VD); return nullptr; } llvm::DISubprogram *CGDebugInfo::getFunctionDeclaration(const Decl *D) { if (!D || DebugKind <= CodeGenOptions::DebugLineTablesOnly) return nullptr; const FunctionDecl *FD = dyn_cast<FunctionDecl>(D); if (!FD) return nullptr; // Setup context. auto *S = getContextDescriptor(cast<Decl>(D->getDeclContext())); auto MI = SPCache.find(FD->getCanonicalDecl()); if (MI == SPCache.end()) { if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD->getCanonicalDecl())) { return CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()), cast<llvm::DICompositeType>(S)); } } if (MI != SPCache.end()) { auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second); if (SP && !SP->isDefinition()) return SP; } for (auto NextFD : FD->redecls()) { auto MI = SPCache.find(NextFD->getCanonicalDecl()); if (MI != SPCache.end()) { auto *SP = dyn_cast_or_null<llvm::DISubprogram>(MI->second); if (SP && !SP->isDefinition()) return SP; } } return nullptr; } // getOrCreateFunctionType - Construct type. If it is a c++ method, include // implicit parameter "this". 
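// Under line-tables-only debug info this intentionally returns an empty
// (but well-formed) subroutine type so the subprogram DIE still gets its
// DW_AT_decl_file and DW_AT_decl_line entries.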
llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D, QualType FnType, llvm::DIFile *F) { if (!D || DebugKind <= CodeGenOptions::DebugLineTablesOnly) // Create fake but valid subroutine type. Otherwise -verify would fail, and // subprogram DIE will miss DW_AT_decl_file and DW_AT_decl_line fields. return DBuilder.createSubroutineType(F, DBuilder.getOrCreateTypeArray(None)); if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) return getOrCreateMethodType(Method, F); #if 0 // HLSL Change - no ObjC support if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) { // Add "self" and "_cmd" SmallVector<llvm::Metadata *, 16> Elts; // First element is always return type. For 'void' functions it is NULL. QualType ResultTy = OMethod->getReturnType(); // Replace the instancetype keyword with the actual type. if (ResultTy == CGM.getContext().getObjCInstanceType()) ResultTy = CGM.getContext().getPointerType( QualType(OMethod->getClassInterface()->getTypeForDecl(), 0)); Elts.push_back(getOrCreateType(ResultTy, F)); // "self" pointer is always first argument. QualType SelfDeclTy = OMethod->getSelfDecl()->getType(); Elts.push_back(CreateSelfType(SelfDeclTy, getOrCreateType(SelfDeclTy, F))); // "_cmd" pointer is always second argument. Elts.push_back(DBuilder.createArtificialType( getOrCreateType(OMethod->getCmdDecl()->getType(), F))); // Get rest of the arguments. for (const auto *PI : OMethod->params()) Elts.push_back(getOrCreateType(PI->getType(), F)); // Variadic methods need a special marker at the end of the type list. if (OMethod->isVariadic()) Elts.push_back(DBuilder.createUnspecifiedParameter()); llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts); return DBuilder.createSubroutineType(F, EltTypeArray); } #endif // HLSL Change - no ObjC support // Handle variadic function types; they need an additional // unspecified parameter. if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) if (FD->isVariadic()) { SmallVector<llvm::Metadata *, 16> EltTys; EltTys.push_back(getOrCreateType(FD->getReturnType(), F)); if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FnType)) for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i) EltTys.push_back(getOrCreateType(FPT->getParamType(i), F)); EltTys.push_back(DBuilder.createUnspecifiedParameter()); llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys); return DBuilder.createSubroutineType(F, EltTypeArray); } return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F)); } void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc, SourceLocation ScopeLoc, QualType FnType, llvm::Function *Fn, CGBuilderTy &Builder) { StringRef Name; StringRef LinkageName; FnBeginRegionCount.push_back(LexicalBlockStack.size()); const Decl *D = GD.getDecl(); bool HasDecl = (D != nullptr); unsigned Flags = 0; llvm::DIFile *Unit = getOrCreateFile(Loc); llvm::DIScope *FDContext = Unit; llvm::DINodeArray TParamsArray; if (!HasDecl) { // Use llvm function name. LinkageName = Fn->getName(); } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { // If there is a subprogram for this function available then use it. 
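// The cache is keyed on the canonical declaration, so a definition
// emitted after its declaration reuses the DISubprogram created by
// getFunctionDeclaration/CreateCXXMemberFunction instead of minting a
// duplicate.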
auto FI = SPCache.find(FD->getCanonicalDecl()); if (FI != SPCache.end()) { auto *SP = dyn_cast_or_null<llvm::DISubprogram>(FI->second); if (SP && SP->isDefinition()) { LexicalBlockStack.emplace_back(SP); RegionMap[D].reset(SP); return; } } collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext, TParamsArray, Flags); #if 0 // HLSL Change - no ObjC support } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) { Name = getObjCMethodName(OMD); Flags |= llvm::DINode::FlagPrototyped; #endif // HLSL Change - no ObjC support } else { // Use llvm function name. Name = Fn->getName(); Flags |= llvm::DINode::FlagPrototyped; } if (!Name.empty() && Name[0] == '\01') Name = Name.substr(1); if (!HasDecl || D->isImplicit()) { Flags |= llvm::DINode::FlagArtificial; // Artificial functions without a location should not silently reuse CurLoc. if (Loc.isInvalid()) CurLoc = SourceLocation(); } unsigned LineNo = getLineNumber(Loc); unsigned ScopeLine = getLineNumber(ScopeLoc); // FIXME: The function declaration we're constructing here is mostly reusing // declarations from CXXMethodDecl and not constructing new ones for arbitrary // FunctionDecls. When/if we fix this we can have FDContext be TheCU/null for // all subprograms instead of the actual context since subprogram definitions // are emitted as CU level entities by the backend. llvm::DISubprogram *SP = DBuilder.createFunction( FDContext, Name, LinkageName, Unit, LineNo, getOrCreateFunctionType(D, FnType, Unit), Fn->hasInternalLinkage(), true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize, Fn, TParamsArray.get(), getFunctionDeclaration(D)); // We might get here with a VarDecl in the case we're generating // code for the initialization of globals. Do not record these decls // as they will overwrite the actual VarDecl Decl in the cache. if (HasDecl && isa<FunctionDecl>(D)) DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(SP)); // Push the function onto the lexical block stack. LexicalBlockStack.emplace_back(SP); if (HasDecl) RegionMap[D].reset(SP); } void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) { // Update our current location setLocation(Loc); if (CurLoc.isInvalid() || CurLoc.isMacroID()) return; llvm::MDNode *Scope = LexicalBlockStack.back(); Builder.SetCurrentDebugLocation(llvm::DebugLoc::get( getLineNumber(CurLoc), getColumnNumber(CurLoc), Scope)); } void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) { llvm::MDNode *Back = nullptr; if (!LexicalBlockStack.empty()) Back = LexicalBlockStack.back().get(); LexicalBlockStack.emplace_back(DBuilder.createLexicalBlock( cast<llvm::DIScope>(Back), getOrCreateFile(CurLoc), getLineNumber(CurLoc), getColumnNumber(CurLoc))); } void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc) { // Set our current location. setLocation(Loc); // Emit a line table change for the current location inside the new scope. Builder.SetCurrentDebugLocation(llvm::DebugLoc::get( getLineNumber(Loc), getColumnNumber(Loc), LexicalBlockStack.back())); if (DebugKind <= CodeGenOptions::DebugLineTablesOnly) return; // Create a new lexical block and push it on the stack. CreateLexicalBlock(Loc); } void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc) { assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); // Provide an entry in the line table for the end of the block. 
EmitLocation(Builder, Loc); if (DebugKind <= CodeGenOptions::DebugLineTablesOnly) return; LexicalBlockStack.pop_back(); } void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) { assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); unsigned RCount = FnBeginRegionCount.back(); assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch"); // Pop all regions for this function. while (LexicalBlockStack.size() != RCount) { // Provide an entry in the line table for the end of the block. EmitLocation(Builder, CurLoc); LexicalBlockStack.pop_back(); } FnBeginRegionCount.pop_back(); } llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD, uint64_t *XOffset) { SmallVector<llvm::Metadata *, 5> EltTys; QualType FType; uint64_t FieldSize, FieldOffset; unsigned FieldAlign; llvm::DIFile *Unit = getOrCreateFile(VD->getLocation()); QualType Type = VD->getType(); FieldOffset = 0; FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset)); FType = CGM.getContext().IntTy; EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset)); EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset)); #if 0 // HLSL Change - no ObjC or block support bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type, VD); if (HasCopyAndDispose) { FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back( CreateMemberType(Unit, FType, "__copy_helper", &FieldOffset)); EltTys.push_back( CreateMemberType(Unit, FType, "__destroy_helper", &FieldOffset)); } bool HasByrefExtendedLayout; Qualifiers::ObjCLifetime Lifetime; if (CGM.getContext().getByrefLifetime(Type, Lifetime, HasByrefExtendedLayout) && HasByrefExtendedLayout) { FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy); EltTys.push_back( CreateMemberType(Unit, FType, "__byref_variable_layout", &FieldOffset)); } #endif // HLSL Change - no ObjC or block support CharUnits Align = CGM.getContext().getDeclAlign(VD); if (Align > CGM.getContext().toCharUnitsFromBits( CGM.getTarget().getPointerAlign(0))) { CharUnits FieldOffsetInBytes = CGM.getContext().toCharUnitsFromBits(FieldOffset); CharUnits AlignedOffsetInBytes = FieldOffsetInBytes.RoundUpToAlignment(Align); CharUnits NumPaddingBytes = AlignedOffsetInBytes - FieldOffsetInBytes; if (NumPaddingBytes.isPositive()) { llvm::APInt pad(32, NumPaddingBytes.getQuantity()); FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy, pad, ArrayType::Normal, 0); EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset)); } } FType = Type; llvm::DIType *FieldTy = getOrCreateType(FType, Unit); FieldSize = CGM.getContext().getTypeSize(FType); FieldAlign = CGM.getContext().toBits(Align); *XOffset = FieldOffset; FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit, 0, FieldSize, FieldAlign, FieldOffset, 0, FieldTy); EltTys.push_back(FieldTy); FieldOffset += FieldSize; llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys); unsigned Flags = llvm::DINode::FlagBlockByrefStruct; return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags, nullptr, Elements); } void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag, llvm::Value *Storage, unsigned ArgNo, CGBuilderTy &Builder) { assert(DebugKind >= CodeGenOptions::LimitedDebugInfo); assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!"); bool 
void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::Tag Tag,
                              llvm::Value *Storage, unsigned ArgNo,
                              CGBuilderTy &Builder) {
  assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
  assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");

  bool Unwritten =
      VD->isImplicit() || (isa<Decl>(VD->getDeclContext()) &&
                           cast<Decl>(VD->getDeclContext())->isImplicit());
  llvm::DIFile *Unit = nullptr;
  if (!Unwritten)
    Unit = getOrCreateFile(VD->getLocation());
  llvm::DIType *Ty;
  uint64_t XOffset = 0;
  if (VD->hasAttr<BlocksAttr>())
    Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
  else
    Ty = getOrCreateType(VD->getType(), Unit);

  // If there is no debug info for this type then do not emit debug info
  // for this variable.
  if (!Ty)
    return;

  // Get location information.
  unsigned Line = 0;
  unsigned Column = 0;
  if (!Unwritten) {
    Line = getLineNumber(VD->getLocation());
    Column = getColumnNumber(VD->getLocation());
  }
  SmallVector<int64_t, 9> Expr;
  unsigned Flags = 0;
  if (VD->isImplicit())
    Flags |= llvm::DINode::FlagArtificial;
  // If this is the first argument and it is implicit then
  // give it an object pointer flag.
  // FIXME: There has to be a better way to do this, but for static
  // functions there won't be an implicit param at arg1 and
  // otherwise it is 'self' or 'this'.
  if (isa<ImplicitParamDecl>(VD) && ArgNo == 1)
    Flags |= llvm::DINode::FlagObjectPointer;
  if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage))
    if (Arg->getType()->isPointerTy() && !Arg->hasByValAttr() &&
        !VD->getType()->isPointerType())
      Expr.push_back(llvm::dwarf::DW_OP_deref);

  auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
  StringRef Name = VD->getName();
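  // For illustration: for a __block variable the branch below appends a
  // DWARF expression that chases the byref forwarding pointer, roughly
  //
  //   [DW_OP_deref,]                        // only for indirect arguments
  //   DW_OP_plus, <offset of __forwarding>,
  //   DW_OP_deref,
  //   DW_OP_plus, <offset of x>             // XOffset from the byref layout
  //
  // so a debugger reads the variable through __forwarding, which stays
  // correct after the block is copied to the heap.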
  if (!Name.empty()) {
    if (VD->hasAttr<BlocksAttr>()) {
      CharUnits offset = CharUnits::fromQuantity(32);
      Expr.push_back(llvm::dwarf::DW_OP_plus);
      // offset of __forwarding field
      offset = CGM.getContext().toCharUnitsFromBits(
          CGM.getTarget().getPointerWidth(0));
      Expr.push_back(offset.getQuantity());
      Expr.push_back(llvm::dwarf::DW_OP_deref);
      Expr.push_back(llvm::dwarf::DW_OP_plus);
      // offset of x field
      offset = CGM.getContext().toCharUnitsFromBits(XOffset);
      Expr.push_back(offset.getQuantity());

      // Create the descriptor for the variable.
      auto *D = DBuilder.createLocalVariable(Tag, Scope, VD->getName(), Unit,
                                             Line, Ty, ArgNo);

      // Insert an llvm.dbg.declare into the current block.
      DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
                             llvm::DebugLoc::get(Line, Column, Scope),
                             Builder.GetInsertBlock());
      return;
    } else if (isa<VariableArrayType>(VD->getType()))
      Expr.push_back(llvm::dwarf::DW_OP_deref);
  } else if (const RecordType *RT = dyn_cast<RecordType>(VD->getType())) {
    // If VD is an anonymous union then Storage represents value for
    // all union fields.
    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
    if (RD->isUnion() && RD->isAnonymousStructOrUnion()) {
      // GDB has trouble finding local variables in anonymous unions, so we
      // emit artificial local variables for each of the members.
      //
      // FIXME: Remove this code as soon as GDB supports this.
      // The debug info verifier in LLVM operates based on the assumption that
      // a variable has the same size as its storage and we had to disable the
      // check for artificial variables.
      for (const auto *Field : RD->fields()) {
        llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
        StringRef FieldName = Field->getName();

        // Ignore unnamed fields. Do not ignore unnamed records.
        if (FieldName.empty() && !isa<RecordType>(Field->getType()))
          continue;

        // Use VarDecl's Tag, Scope and Line number.
        auto *D = DBuilder.createLocalVariable(
            Tag, Scope, FieldName, Unit, Line, FieldTy,
            CGM.getLangOpts().Optimize, Flags | llvm::DINode::FlagArtificial,
            ArgNo);

        // Insert an llvm.dbg.declare into the current block.
        DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
                               llvm::DebugLoc::get(Line, Column, Scope),
                               Builder.GetInsertBlock());
      }
    }
  }

  // Create the descriptor for the variable.
  auto *D = DBuilder.createLocalVariable(Tag, Scope, Name, Unit, Line, Ty,
                                         CGM.getLangOpts().Optimize, Flags,
                                         ArgNo);

  // Insert an llvm.dbg.declare into the current block.
  DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
                         llvm::DebugLoc::get(Line, Column, Scope),
                         Builder.GetInsertBlock());
}

void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
                                            llvm::Value *Storage,
                                            CGBuilderTy &Builder) {
  assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
  EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
}

llvm::DIType *CGDebugInfo::CreateSelfType(const QualType &QualTy,
                                          llvm::DIType *Ty) {
  llvm::DIType *CachedTy = getTypeOrNull(QualTy);
  if (CachedTy)
    Ty = CachedTy;
  return DBuilder.createObjectPointerType(Ty);
}
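// For illustration: for a variable captured by a block, the 'addr'
// expression built below locates the capture inside the block literal,
// roughly
//
//   [DW_OP_deref,]  DW_OP_plus, <offset of capture in the block struct>
//
// with the additional __forwarding indirection appended for __block
// captures, exactly as in EmitDeclare above.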
void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
    const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
    const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint) {
  assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
  assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");

  if (Builder.GetInsertBlock() == nullptr)
    return;

  bool isByRef = VD->hasAttr<BlocksAttr>();

  uint64_t XOffset = 0;
  llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
  llvm::DIType *Ty;
  if (isByRef)
    Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
  else
    Ty = getOrCreateType(VD->getType(), Unit);

  // Self is passed along as an implicit non-arg variable in a
  // block. Mark it as the object pointer.
  if (isa<ImplicitParamDecl>(VD) && VD->getName() == "self")
    Ty = CreateSelfType(VD->getType(), Ty);

  // Get location information.
  unsigned Line = getLineNumber(VD->getLocation());
  unsigned Column = getColumnNumber(VD->getLocation());

  const llvm::DataLayout &target = CGM.getDataLayout();

  CharUnits offset = CharUnits::fromQuantity(
      target.getStructLayout(blockInfo.StructureType)
          ->getElementOffset(blockInfo.getCapture(VD).getIndex()));

  SmallVector<int64_t, 9> addr;
  if (isa<llvm::AllocaInst>(Storage))
    addr.push_back(llvm::dwarf::DW_OP_deref);
  addr.push_back(llvm::dwarf::DW_OP_plus);
  addr.push_back(offset.getQuantity());
  if (isByRef) {
    addr.push_back(llvm::dwarf::DW_OP_deref);
    addr.push_back(llvm::dwarf::DW_OP_plus);
    // offset of __forwarding field
    offset =
        CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits(0));
    addr.push_back(offset.getQuantity());
    addr.push_back(llvm::dwarf::DW_OP_deref);
    addr.push_back(llvm::dwarf::DW_OP_plus);
    // offset of x field
    offset = CGM.getContext().toCharUnitsFromBits(XOffset);
    addr.push_back(offset.getQuantity());
  }

  // Create the descriptor for the variable.
  auto *D = DBuilder.createLocalVariable(
      llvm::dwarf::DW_TAG_auto_variable,
      cast<llvm::DILocalScope>(LexicalBlockStack.back()), VD->getName(), Unit,
      Line, Ty);

  // Insert an llvm.dbg.declare into the current block.
  auto DL = llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back());
  if (InsertPoint)
    DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL,
                           InsertPoint);
  else
    DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL,
                           Builder.GetInsertBlock());
}

void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
                                           unsigned ArgNo,
                                           CGBuilderTy &Builder) {
  assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
  EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
}
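// For illustration (hypothetical source): parameters are described as
// DW_TAG_arg_variable with a 1-based position, so for
//
//   int f(int a, int b) { ... }
//
// 'a' is emitted with ArgNo == 1 and 'b' with ArgNo == 2; an implicit
// 'this' also lands in slot 1, which is what the FlagObjectPointer check
// in EmitDeclare relies on.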
#if 0 // HLSL Change - no block support

namespace {
struct BlockLayoutChunk {
  uint64_t OffsetInBits;
  const BlockDecl::Capture *Capture;
};
bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
  return l.OffsetInBits < r.OffsetInBits;
}
}

void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
                                                       llvm::Value *Arg,
                                                       unsigned ArgNo,
                                                       llvm::Value *LocalAddr,
                                                       CGBuilderTy &Builder) {
  assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
  ASTContext &C = CGM.getContext();
  const BlockDecl *blockDecl = block.getBlockDecl();

  // Collect some general information about the block's location.
  SourceLocation loc = blockDecl->getCaretLocation();
  llvm::DIFile *tunit = getOrCreateFile(loc);
  unsigned line = getLineNumber(loc);
  unsigned column = getColumnNumber(loc);

  // Build the debug-info type for the block literal.
  getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));

  const llvm::StructLayout *blockLayout =
      CGM.getDataLayout().getStructLayout(block.StructureType);

  SmallVector<llvm::Metadata *, 16> fields;
  fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
                                   blockLayout->getElementOffsetInBits(0),
                                   tunit, tunit));
  fields.push_back(createFieldType("__flags", C.IntTy, 0, loc, AS_public,
                                   blockLayout->getElementOffsetInBits(1),
                                   tunit, tunit));
  fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
                                   blockLayout->getElementOffsetInBits(2),
                                   tunit, tunit));
  auto *FnTy = block.getBlockExpr()->getFunctionType();
  auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar());
  fields.push_back(createFieldType("__FuncPtr", FnPtrType, 0, loc, AS_public,
                                   blockLayout->getElementOffsetInBits(3),
                                   tunit, tunit));
  fields.push_back(createFieldType(
      "__descriptor",
      C.getPointerType(block.NeedsCopyDispose
                           ? C.getBlockDescriptorExtendedType()
                           : C.getBlockDescriptorType()),
      0, loc, AS_public, blockLayout->getElementOffsetInBits(4), tunit,
      tunit));

  // We want to sort the captures by offset, not because DWARF
  // requires this, but because we're paranoid about debuggers.
  SmallVector<BlockLayoutChunk, 8> chunks;

  // 'this' capture.
  if (blockDecl->capturesCXXThis()) {
    BlockLayoutChunk chunk;
    chunk.OffsetInBits =
        blockLayout->getElementOffsetInBits(block.CXXThisIndex);
    chunk.Capture = nullptr;
    chunks.push_back(chunk);
  }

  // Variable captures.
  for (const auto &capture : blockDecl->captures()) {
    const VarDecl *variable = capture.getVariable();
    const CGBlockInfo::Capture &captureInfo = block.getCapture(variable);

    // Ignore constant captures.
    if (captureInfo.isConstant())
      continue;

    BlockLayoutChunk chunk;
    chunk.OffsetInBits =
        blockLayout->getElementOffsetInBits(captureInfo.getIndex());
    chunk.Capture = &capture;
    chunks.push_back(chunk);
  }

  // Sort by offset.
  llvm::array_pod_sort(chunks.begin(), chunks.end());

  for (SmallVectorImpl<BlockLayoutChunk>::iterator i = chunks.begin(),
                                                   e = chunks.end();
       i != e; ++i) {
    uint64_t offsetInBits = i->OffsetInBits;
    const BlockDecl::Capture *capture = i->Capture;

    // If we have a null capture, this must be the C++ 'this' capture.
    if (!capture) {
      const CXXMethodDecl *method =
          cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
      QualType type = method->getThisType(C);

      fields.push_back(createFieldType("this", type, 0, loc, AS_public,
                                       offsetInBits, tunit, tunit));
      continue;
    }

    const VarDecl *variable = capture->getVariable();
    StringRef name = variable->getName();

    llvm::DIType *fieldType;
    if (capture->isByRef()) {
      TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);

      // FIXME: this creates a second copy of this type!
      uint64_t xoffset;
      fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
      fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
      fieldType =
          DBuilder.createMemberType(tunit, name, tunit, line, PtrInfo.Width,
                                    PtrInfo.Align, offsetInBits, 0, fieldType);
    } else {
      fieldType = createFieldType(name, variable->getType(), 0, loc, AS_public,
                                  offsetInBits, tunit, tunit);
    }
    fields.push_back(fieldType);
  }

  SmallString<36> typeName;
  llvm::raw_svector_ostream(typeName) << "__block_literal_"
                                      << CGM.getUniqueBlockCount();

  llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields);

  llvm::DIType *type = DBuilder.createStructType(
      tunit, typeName.str(), tunit, line,
      CGM.getContext().toBits(block.BlockSize),
      CGM.getContext().toBits(block.BlockAlign), 0, nullptr, fieldsArray);
  type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);

  // Get overall information about the block.
  unsigned flags = llvm::DINode::FlagArtificial;
  auto *scope = cast<llvm::DILocalScope>(LexicalBlockStack.back());

  // Create the descriptor for the parameter.
  auto *debugVar = DBuilder.createLocalVariable(
      llvm::dwarf::DW_TAG_arg_variable, scope, Arg->getName(), tunit, line,
      type, CGM.getLangOpts().Optimize, flags, ArgNo);

  if (LocalAddr) {
    // Insert an llvm.dbg.value into the current block.
    DBuilder.insertDbgValueIntrinsic(
        LocalAddr, 0, debugVar, DBuilder.createExpression(),
        llvm::DebugLoc::get(line, column, scope), Builder.GetInsertBlock());
  }

  // Insert an llvm.dbg.declare into the current block.
  DBuilder.insertDeclare(Arg, debugVar, DBuilder.createExpression(),
                         llvm::DebugLoc::get(line, column, scope),
                         Builder.GetInsertBlock());
}
#endif // HLSL Change - no block support
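// For illustration (hypothetical source): for a static data member such as
//
//   struct S { static const int k; };
//
// the helper below returns the DIDerivedType describing the in-class
// declaration of S::k, so that a later out-of-class definition can refer
// back to it; results are cached by canonical VarDecl.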
llvm::DIDerivedType *
CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
  if (!D->isStaticDataMember())
    return nullptr;

  auto MI = StaticDataMemberCache.find(D->getCanonicalDecl());
  if (MI != StaticDataMemberCache.end()) {
    assert(MI->second && "Static data member declaration should still exist");
    return cast<llvm::DIDerivedType>(MI->second);
  }

  // If the member wasn't found in the cache, lazily construct and add it to
  // the type (used when a limited form of the type is emitted).
  auto DC = D->getDeclContext();
  auto *Ctxt =
      cast<llvm::DICompositeType>(getContextDescriptor(cast<Decl>(DC)));
  return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC));
}

llvm::DIGlobalVariable *CGDebugInfo::CollectAnonRecordDecls(
    const RecordDecl *RD, llvm::DIFile *Unit, unsigned LineNo,
    StringRef LinkageName, llvm::GlobalVariable *Var, llvm::DIScope *DContext) {
  llvm::DIGlobalVariable *GV = nullptr;

  for (const auto *Field : RD->fields()) {
    llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
    StringRef FieldName = Field->getName();

    // Ignore unnamed fields, but recurse into anonymous records.
    if (FieldName.empty()) {
      const RecordType *RT = dyn_cast<RecordType>(Field->getType());
      if (RT)
        GV = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
                                    Var, DContext);
      continue;
    }
    // Use VarDecl's Tag, Scope and Line number.
    GV = DBuilder.createGlobalVariable(DContext, FieldName, LinkageName, Unit,
                                       LineNo, FieldTy,
                                       Var->hasInternalLinkage(), Var,
                                       nullptr);
  }
  return GV;
}

void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
                                     const VarDecl *D) {
  assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
  // Create global variable debug descriptor.
  llvm::DIFile *Unit = nullptr;
  llvm::DIScope *DContext = nullptr;
  unsigned LineNo;
  StringRef DeclName, LinkageName;
  QualType T;
  collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName, DContext);

  // Attempt to store one global variable for the declaration - even if we
  // emit a lot of fields.
  llvm::DIGlobalVariable *GV = nullptr;

  // If this is an anonymous union then we'll want to emit a global
  // variable for each member of the anonymous union so that it's possible
  // to find the name of any field in the union.
  if (T->isUnionType() && DeclName.empty()) {
    const RecordDecl *RD = cast<RecordType>(T)->getDecl();
    assert(RD->isAnonymousStructOrUnion() &&
           "unnamed non-anonymous struct or union?");
    GV = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
  } else {
    GV = DBuilder.createGlobalVariable(
        DContext, DeclName, LinkageName, Unit, LineNo,
        getOrCreateType(T, Unit), Var->hasInternalLinkage(), Var,
        getOrCreateStaticDataMemberDeclarationOrNull(D));
  }
  DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(GV));
}
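// For illustration (hypothetical source): for an anonymous union at global
// scope such as
//
//   static union { int i; float f; };
//
// there is no declaration name to emit, so CollectAnonRecordDecls above is
// used to create one DIGlobalVariable per member ('i' and 'f'), all bound
// to the same storage, letting a debugger resolve either name.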
void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
                                     llvm::Constant *Init) {
  assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
  // Create the descriptor for the variable.
  llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
  StringRef Name = VD->getName();
  llvm::DIType *Ty = getOrCreateType(VD->getType(), Unit);
  if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
    const EnumDecl *ED = cast<EnumDecl>(ECD->getDeclContext());
    assert(isa<EnumType>(ED->getTypeForDecl()) && "Enum without EnumType?");
    Ty = getOrCreateType(QualType(ED->getTypeForDecl(), 0), Unit);
  }
  // Do not use global variables for enums.
  //
  // FIXME: why not?
  if (Ty->getTag() == llvm::dwarf::DW_TAG_enumeration_type)
    return;
  // Do not emit separate definitions for function local const/statics.
  if (isa<FunctionDecl>(VD->getDeclContext()))
    return;
  VD = cast<ValueDecl>(VD->getCanonicalDecl());
  auto *VarD = cast<VarDecl>(VD);
  if (VarD->isStaticDataMember()) {
    auto *RD = cast<RecordDecl>(VarD->getDeclContext());
    getContextDescriptor(RD);
    // Ensure that the type is retained even though it's otherwise
    // unreferenced.
    RetainedTypes.push_back(
        CGM.getContext().getRecordType(RD).getAsOpaquePtr());
    return;
  }

  llvm::DIScope *DContext =
      getContextDescriptor(dyn_cast<Decl>(VD->getDeclContext()));

  auto &GV = DeclCache[VD];
  if (GV)
    return;
  GV.reset(DBuilder.createGlobalVariable(
      DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
      true, Init, getOrCreateStaticDataMemberDeclarationOrNull(VarD)));
}

llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
  if (!LexicalBlockStack.empty())
    return LexicalBlockStack.back();
  return getContextDescriptor(D);
}

void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
  if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
    return;
  DBuilder.createImportedModule(
      getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
      getOrCreateNameSpace(UD.getNominatedNamespace()),
      getLineNumber(UD.getLocation()));
}

void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
  if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
    return;
  assert(UD.shadow_size() &&
         "We shouldn't be codegening an invalid UsingDecl containing no "
         "decls");
  // Emitting one decl is sufficient - debuggers can detect that this is an
  // overloaded name & provide lookup for all the overloads.
  const UsingShadowDecl &USD = **UD.shadow_begin();
  if (llvm::DINode *Target =
          getDeclarationOrDefinition(USD.getUnderlyingDecl()))
    DBuilder.createImportedDeclaration(
        getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target,
        getLineNumber(USD.getLocation()));
}

void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) {
  auto *Reader = CGM.getContext().getExternalSource();
  auto Info = Reader->getSourceDescriptor(*ID.getImportedModule());
  DBuilder.createImportedDeclaration(
      getCurrentContextDescriptor(cast<Decl>(ID.getDeclContext())),
      getOrCreateModuleRef(Info), getLineNumber(ID.getLocation()));
}

llvm::DIImportedEntity *
CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
  if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
    return nullptr;
  auto &VH = NamespaceAliasCache[&NA];
  if (VH)
    return cast<llvm::DIImportedEntity>(VH);
  llvm::DIImportedEntity *R;
  if (const NamespaceAliasDecl *Underlying =
          dyn_cast<NamespaceAliasDecl>(NA.getAliasedNamespace()))
    // This could cache & dedup here rather than relying on metadata deduping.
    R = DBuilder.createImportedDeclaration(
        getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
        EmitNamespaceAlias(*Underlying), getLineNumber(NA.getLocation()),
        NA.getName());
  else
    R = DBuilder.createImportedDeclaration(
        getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
        getOrCreateNameSpace(cast<NamespaceDecl>(NA.getAliasedNamespace())),
        getLineNumber(NA.getLocation()), NA.getName());
  VH.reset(R);
  return R;
}

llvm::DINamespace *
CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
  NSDecl = NSDecl->getCanonicalDecl();
  auto I = NameSpaceCache.find(NSDecl);
  if (I != NameSpaceCache.end())
    return cast<llvm::DINamespace>(I->second);

  unsigned LineNo = getLineNumber(NSDecl->getLocation());
  llvm::DIFile *FileD = getOrCreateFile(NSDecl->getLocation());
  llvm::DIScope *Context =
      getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
  llvm::DINamespace *NS =
      DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
  NameSpaceCache[NSDecl].reset(NS);
  return NS;
}
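// For illustration: finalize() runs once per module. Types that were first
// emitted as temporary forward declarations (e.g. a class only referenced
// through a pointer so far) are RAUW'd with their completed definitions via
// replaceTemporary, and everything recorded in RetainedTypes is pinned in
// its final cached form before DBuilder.finalize() is called.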
void CGDebugInfo::finalize() {
  // Creating types might create further types - invalidating the current
  // element and the size(), so don't cache/reference them.
#if 1 // HLSL Change - no ObjC support
  assert(ObjCInterfaceCache.size() == 0);
#else
  for (size_t i = 0; i != ObjCInterfaceCache.size(); ++i) {
    ObjCInterfaceCacheEntry E = ObjCInterfaceCache[i];
    llvm::DIType *Ty = E.Type->getDecl()->getDefinition()
                           ? CreateTypeDefinition(E.Type, E.Unit)
                           : E.Decl;
    DBuilder.replaceTemporary(llvm::TempDIType(E.Decl), Ty);
  }
#endif // HLSL Change - no ObjC support

  for (auto p : ReplaceMap) {
    assert(p.second);
    auto *Ty = cast<llvm::DIType>(p.second);
    assert(Ty->isForwardDecl());

    auto it = TypeCache.find(p.first);
    assert(it != TypeCache.end());
    assert(it->second);

    DBuilder.replaceTemporary(llvm::TempDIType(Ty),
                              cast<llvm::DIType>(it->second));
  }

  for (const auto &p : FwdDeclReplaceMap) {
    assert(p.second);
    llvm::TempMDNode FwdDecl(cast<llvm::MDNode>(p.second));
    llvm::Metadata *Repl;

    auto it = DeclCache.find(p.first);
    // If there has been no definition for the declaration, call RAUW
    // with ourselves, that will destroy the temporary MDNode and
    // replace it with a standard one, avoiding leaking memory.
    if (it == DeclCache.end())
      Repl = p.second;
    else
      Repl = it->second;

    DBuilder.replaceTemporary(std::move(FwdDecl), cast<llvm::MDNode>(Repl));
  }

  // We keep our own list of retained types, because we need to look
  // up the final type in the type cache.
  for (std::vector<void *>::const_iterator RI = RetainedTypes.begin(),
                                           RE = RetainedTypes.end();
       RI != RE; ++RI)
    DBuilder.retainType(cast<llvm::DIType>(TypeCache[*RI]));

  DBuilder.finalize();
}

void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
  if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
    return;

  if (auto *DieTy = getOrCreateType(Ty, getOrCreateMainFile()))
    // Don't ignore in case of explicit cast where it is referenced indirectly.
    DBuilder.retainType(DieTy);
}